diff -uNr a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSnapshotSplit.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSnapshotSplit.java
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSnapshotSplit.java 1970-01-01 07:00:00.000000000 +0700
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSnapshotSplit.java 2014-06-19 10:35:18.261229255 +0800
@@ -0,0 +1,69 @@
+package org.apache.hadoop.hive.hbase;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotRegionSplit;
+import org.apache.hadoop.mapred.FileSplit;
+import org.apache.hadoop.mapred.InputSplit;
+
+public class HBaseSnapshotSplit extends FileSplit implements InputSplit {
+  static final Log LOG = LogFactory.getLog(HBaseSnapshotSplit.class);
+
+  private final TableSnapshotRegionSplit split;
+
+  public HBaseSnapshotSplit() {
+    super((Path) null, 0, 0, (String[]) null);
+    split = new TableSnapshotRegionSplit();
+  }
+
+  public HBaseSnapshotSplit(TableSnapshotRegionSplit split, Path dummyPath) {
+    super(dummyPath, 0, 0, (String[]) null);
+    this.split = split;
+  }
+
+  public TableSnapshotRegionSplit getSplit() {
+    return this.split;
+  }
+
+  @Override
+  public String toString() {
+    return "TableSnapshotRegionSplit " + split;
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+    split.readFields(in);
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    super.write(out);
+    split.write(out);
+  }
+
+  @Override
+  public long getLength() {
+    try {
+      return split.getLength();
+    } catch (Exception e) {
+      LOG.error(e.getMessage(), e);
+      return -1;
+    }
+  }
+
+  @Override
+  public String[] getLocations() throws IOException {
+    try {
+      return split.getLocations();
+    } catch (InterruptedException e) {
+      LOG.error(e.getMessage(), e);
+      return null;
+    }
+  }
+}
diff -uNr a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSnapshotStorageHandler.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSnapshotStorageHandler.java
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSnapshotStorageHandler.java 1970-01-01 07:00:00.000000000 +0700
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSnapshotStorageHandler.java 2014-06-19 10:35:18.261229255 +0800
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.hbase;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableInfoMissingException;
+import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hive.hbase.HBaseSerDe.ColumnMapping;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.util.StringUtils;
+
+public class HBaseSnapshotStorageHandler extends HBaseStorageHandler {
+  private static final Log LOG = LogFactory.getLog(HBaseSnapshotStorageHandler.class);
+
+  public static final String SNAPSHOT_NAME_KEY = "hive.hbase.snapshot";
+  public static final String SNAPSHOT_RESTORE_DIR_KEY = "hive.hbase.snapshot.restoredir";
+  public static final String HBASE_DIR_KEY = "hive.hbase.rootdir";
+
+  @Override
+  public Class<? extends InputFormat> getInputFormatClass() {
+    return HiveHBaseTableSnapshotInputFormat.class;
+  }
+
+  @Override
+  public void preCreateTable(Table tbl) throws MetaException {
+    boolean isExternal = MetaStoreUtils.isExternalTable(tbl);
+
+    // We'd like to move this to HiveMetaStore for any non-native table, but
+    // first we need to support storing NULL for location on a table
+    if (tbl.getSd().getLocation() != null) {
+      throw new MetaException("LOCATION may not be specified for HBase.");
+    }
+
+    try {
+      String snapshotName = tbl.getParameters().get(SNAPSHOT_NAME_KEY);
+      Map<String, String> serdeParam = tbl.getSd().getSerdeInfo().getParameters();
+      String hbaseColumnsMapping = serdeParam.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
+
+      List<ColumnMapping> columnsMapping = HBaseSerDe.parseColumnsMapping(hbaseColumnsMapping);
+
+      HTableDescriptor tableDesc;
+
+      if (isExternal) {
+        Path rootDir = new Path(getConf().get(HConstants.HBASE_DIR));
+        FileSystem fs = rootDir.getFileSystem(getConf());
+
+        Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+
+        // load the table descriptor preserved with the snapshot
+        try {
+          tableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
+        } catch (TableInfoMissingException e) {
+          throw new MetaException("HBase table or snapshot " + snapshotName + " doesn't exist.");
+        }
+
+        for (int i = 0; i < columnsMapping.size(); i++) {
+          ColumnMapping colMap = columnsMapping.get(i);
+
+          if (colMap.hbaseRowKey) {
+            continue;
+          }
+
+          if (!tableDesc.hasFamily(colMap.familyNameBytes)) {
+            throw new MetaException("Column family " + colMap.familyName
+                + " is not defined in HBase snapshot " + snapshotName);
+          }
+        }
+
+      } else {
+        throw new MetaException("Hive only supports an HBase snapshot as an external table;"
+            + " please use CREATE EXTERNAL TABLE instead to"
+            + " register it in Hive.");
+
+      }
+
+    } catch (Exception se) {
+      throw new MetaException(StringUtils.stringifyException(se));
+    }
+  }
+
+  @Override
+  public void configureTableJobProperties(
+      TableDesc tableDesc,
+      Map<String, String> jobProperties) {
+
+    Properties tableProperties = tableDesc.getProperties();
+
+    jobProperties.put(
+        HBaseSerDe.HBASE_COLUMNS_MAPPING,
+        tableProperties.getProperty(HBaseSerDe.HBASE_COLUMNS_MAPPING));
+    jobProperties.put(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING,
+        tableProperties.getProperty(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING, "true"));
+    jobProperties.put(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE,
+        tableProperties.getProperty(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE, "string"));
+    String scanCache = tableProperties.getProperty(HBaseSerDe.HBASE_SCAN_CACHE);
+    if (scanCache != null) {
+      jobProperties.put(HBaseSerDe.HBASE_SCAN_CACHE, scanCache);
+    }
+    String scanCacheBlocks = tableProperties.getProperty(HBaseSerDe.HBASE_SCAN_CACHEBLOCKS);
+    if (scanCacheBlocks != null) {
+      jobProperties.put(HBaseSerDe.HBASE_SCAN_CACHEBLOCKS, scanCacheBlocks);
+    }
+    String scanBatch = tableProperties.getProperty(HBaseSerDe.HBASE_SCAN_BATCH);
+    if (scanBatch != null) {
+      jobProperties.put(HBaseSerDe.HBASE_SCAN_BATCH, scanBatch);
+    }
+
+    Configuration jobConf = getJobConf();
+    addHBaseResources(jobConf, jobProperties);
+
+    // do this for reconciling HBaseStorageHandler for use in HCatalog
+    // check to see if this is an input job or an output job
+    if (this.configureInputJobProps) {
+      String snapshotName = tableProperties.getProperty(SNAPSHOT_NAME_KEY);
+      if (snapshotName != null) {
+        try {
+          // TODO: automatically provide a reasonable restore path when none is provided.
+          String restoreDir = tableProperties.getProperty(SNAPSHOT_RESTORE_DIR_KEY);
+          String rootDir = tableProperties.getProperty(HBASE_DIR_KEY);
+          if (restoreDir == null) {
+            throw new IllegalArgumentException(
+                "Cannot process HBase snapshot without specifying hive.hbase.snapshot.restoredir");
+          }
+          if (rootDir == null) {
+            throw new IllegalArgumentException(
+                "Cannot process HBase snapshot without specifying hive.hbase.rootdir");
+          }
+          jobConf.set(HConstants.HBASE_DIR, rootDir);
+          LOG.debug("Restoring snapshot '" + snapshotName + "' into path under '" + restoreDir + "'");
+          TableSnapshotInputFormatImpl.setInput(jobConf, snapshotName, new Path(restoreDir));
+
+        } catch (IOException e) {
+          throw new IllegalArgumentException(e);
+        }
+      }
+
+      for (String k : jobProperties.keySet()) {
+        jobConf.set(k, jobProperties.get(k));
+      }
+      try {
+        addHBaseDelegationToken(jobConf);
+      } //try
+      catch (IOException e) {
+        throw new IllegalStateException("Error while configuring input job properties", e);
+      } //input job properties
+    }
+    else {
+
+    } // output job properties
+  }
+}
diff -uNr a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java 2014-06-19 10:44:20.301268164 +0800
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseStorageHandler.java 2014-06-19 10:38:17.967273242 +0800
@@ -382,7 +382,7 @@
    * @param jobConf Job configuration
    * @param newJobProperties Map to which new properties should be added
    */
-  private void addHBaseResources(Configuration jobConf,
+  protected void addHBaseResources(Configuration jobConf,
       Map<String, String> newJobProperties) {
     Configuration conf = new Configuration(false);
     HBaseConfiguration.addHbaseResources(conf);
@@ -393,7 +393,7 @@
     }
   }
 
-  private void addHBaseDelegationToken(Configuration conf) throws IOException {
+  protected void addHBaseDelegationToken(Configuration conf) throws IOException {
     if (User.isHBaseSecurityEnabled(conf)) {
       try {
         User.getCurrent().obtainAuthTokenForJob(conf,new Job(conf));
diff -uNr a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java 1970-01-01 07:00:00.000000000 +0700
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java 2014-06-19 10:36:13.222209764 +0800
@@ -0,0 +1,370 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.hbase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
+import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
+import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
+import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotRegionSplit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hive.hbase.HBaseSerDe.ColumnMapping;
+import org.apache.hadoop.hive.ql.exec.ExprNodeConstantEvaluator;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer;
+import org.apache.hadoop.hive.ql.index.IndexSearchCondition;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.TableScanDesc;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.ByteStream;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.io.ByteWritable;
+import org.apache.hadoop.hive.serde2.io.DoubleWritable;
+import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.io.BooleanWritable;
+import org.apache.hadoop.io.FloatWritable;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class HiveHBaseTableSnapshotInputFormat
+    implements InputFormat<ImmutableBytesWritable, ResultWritable> {
+
+  static final Log LOG = LogFactory.getLog(HiveHBaseTableSnapshotInputFormat.class);
+
+  TableSnapshotInputFormat delegate = new TableSnapshotInputFormat();
+
+  private void setScan(JobConf job) throws IOException {
+    // The hbase mapred API doesn't support passing a Scan at the moment, so the
+    // scan built here is serialized into the job configuration for the delegate.
+    String hbaseColumnsMapping = job.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
+    boolean doColumnRegexMatching = job.getBoolean(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING, true);
+
+    if (hbaseColumnsMapping == null) {
+      throw new IOException(HBaseSerDe.HBASE_COLUMNS_MAPPING + " required for HBase Table.");
+    }
+
+    List<ColumnMapping> columnsMapping = null;
+    try {
+      columnsMapping = HBaseSerDe.parseColumnsMapping(hbaseColumnsMapping, doColumnRegexMatching);
+    } catch (SerDeException e) {
+      throw new IOException(e);
+    }
+
+    int iKey;
+    try {
+      iKey = HBaseSerDe.getRowKeyColumnOffset(columnsMapping);
+    } catch (SerDeException e) {
+      throw new IOException(e);
+    }
+
+    // Take filter pushdown into account while calculating splits; this
+    // allows us to prune off regions immediately. Note that although
+    // the Javadoc for the superclass getSplits says that it returns one
+    // split per region, the implementation actually takes the scan
+    // definition into account and excludes regions which don't satisfy
+    // the start/stop row conditions (HBASE-1829).
+    Scan scan = createFilterScan(job, iKey,
+        HiveHBaseInputFormatUtil.getStorageFormatOfKey(columnsMapping.get(iKey).mappingSpec,
+            job.get(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE)));
+
+    // The list of families that have been added to the scan
+    List<String> addedFamilies = new ArrayList<String>();
+
+    // REVIEW: are we supposed to be applying the getReadColumnIDs
+    // same as in getRecordReader?
+    for (int i = 0; i < columnsMapping.size(); i++) {
+      ColumnMapping colMap = columnsMapping.get(i);
+      if (colMap.hbaseRowKey) {
+        continue;
+      }
+
+      if (colMap.qualifierName == null) {
+        scan.addFamily(colMap.familyNameBytes);
+        addedFamilies.add(colMap.familyName);
+      } else {
+        if (!addedFamilies.contains(colMap.familyName)) {
+          // add the column only if the family has not already been added
+          scan.addColumn(colMap.familyNameBytes, colMap.qualifierNameBytes);
+        }
+      }
+    }
+
+    // The mapreduce delegate picks the scan up from the job configuration.
+    job.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(scan));
+  }
+
+  private Scan createFilterScan(JobConf jobConf, int iKey, boolean isKeyBinary)
+      throws IOException {
+
+    Scan scan = new Scan();
+    String filterExprSerialized = jobConf.get(TableScanDesc.FILTER_EXPR_CONF_STR);
+    if (filterExprSerialized == null) {
+      return scan;
+    }
+    ExprNodeGenericFuncDesc filterExpr =
+        Utilities.deserializeExpression(filterExprSerialized, jobConf);
+
+    String keyColName = jobConf.get(serdeConstants.LIST_COLUMNS).split(",")[iKey];
+    String colType = jobConf.get(serdeConstants.LIST_COLUMN_TYPES).split(",")[iKey];
+    boolean isKeyComparable = isKeyBinary || colType.equalsIgnoreCase("string");
+
+    // Restrict pushdown to conditions on the row key; range operators only make
+    // sense when the key's storage format preserves sort order.
+    IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
+    analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual");
+    if (isKeyComparable) {
+      analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan");
+      analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan");
+      analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan");
+      analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan");
+    }
+    analyzer.clearAllowedColumnNames();
+    analyzer.allowColumnName(keyColName);
+
+    List<IndexSearchCondition> searchConditions =
+        new ArrayList<IndexSearchCondition>();
+    ExprNodeDesc residualPredicate =
+        analyzer.analyzePredicate(filterExpr, searchConditions);
+
+    // There should be no residual since we already negotiated that earlier in
+    // HBaseStorageHandler.decomposePredicate. However, with hive.optimize.index.filter
+    // OpProcFactory#pushFilterToStorageHandler pushes the original filter back down again.
+    // Since pushed-down filters are not omitted at the higher levels (and thus the
+    // contract of negotiation is ignored anyway), just ignore the residuals.
+    // Re-assess this when negotiation is honored and the duplicate evaluation is removed.
+    // THIS IGNORES RESIDUAL PARSING FROM HBaseStorageHandler#decomposePredicate
+    if (residualPredicate != null) {
+      LOG.debug("Ignoring residual predicate " + residualPredicate.getExprString());
+    }
+
+    // Convert the search condition into a restriction on the HBase scan
+    byte[] startRow = HConstants.EMPTY_START_ROW, stopRow = HConstants.EMPTY_END_ROW;
+    for (IndexSearchCondition sc : searchConditions) {
+
+      ExprNodeConstantEvaluator eval = new ExprNodeConstantEvaluator(sc.getConstantDesc());
+      PrimitiveObjectInspector objInspector;
+      Object writable;
+
+      try {
+        objInspector = (PrimitiveObjectInspector) eval.initialize(null);
+        writable = eval.evaluate(null);
+      } catch (ClassCastException cce) {
+        throw new IOException("Currently only primitive types are supported. Found: "
+            + sc.getConstantDesc().getTypeString());
+      } catch (HiveException e) {
+        throw new IOException(e);
+      }
+
+      byte[] constantVal = getConstantVal(writable, objInspector, isKeyBinary);
+      String comparisonOp = sc.getComparisonOp();
+
+      if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual".equals(comparisonOp)) {
+        startRow = constantVal;
+        stopRow = getNextBA(constantVal);
+      } else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan".equals(comparisonOp)) {
+        stopRow = constantVal;
+      } else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan"
+          .equals(comparisonOp)) {
+        startRow = constantVal;
+      } else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan"
+          .equals(comparisonOp)) {
+        startRow = getNextBA(constantVal);
+      } else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan"
+          .equals(comparisonOp)) {
+        stopRow = getNextBA(constantVal);
+      } else {
+        throw new IOException(comparisonOp + " is not a supported comparison operator");
+      }
+    }
+    scan.setStartRow(startRow);
+    scan.setStopRow(stopRow);
+    scan.setMaxVersions(1);
+    return scan;
+  }
+
+  private byte[] getConstantVal(Object writable, PrimitiveObjectInspector poi,
+      boolean isKeyBinary) throws IOException {
+
+    if (!isKeyBinary) {
+      // Key is stored in text format. Get the bytes representation of the
+      // constant in text format as well.
+      byte[] startRow;
+      ByteStream.Output serializeStream = new ByteStream.Output();
+      LazyUtils.writePrimitiveUTF8(serializeStream, writable, poi, false, (byte) 0, null);
+      startRow = new byte[serializeStream.getCount()];
+      System.arraycopy(serializeStream.getData(), 0, startRow, 0, serializeStream.getCount());
+      return startRow;
+    }
+
+    PrimitiveCategory pc = poi.getPrimitiveCategory();
+    switch (pc) {
+      case INT:
+        return Bytes.toBytes(((IntWritable) writable).get());
+      case BOOLEAN:
+        return Bytes.toBytes(((BooleanWritable) writable).get());
+      case LONG:
+        return Bytes.toBytes(((LongWritable) writable).get());
+      case FLOAT:
+        return Bytes.toBytes(((FloatWritable) writable).get());
+      case DOUBLE:
+        return Bytes.toBytes(((DoubleWritable) writable).get());
+      case SHORT:
+        return Bytes.toBytes(((ShortWritable) writable).get());
+      case STRING:
+        return Bytes.toBytes(((Text) writable).toString());
+      case BYTE:
+        return Bytes.toBytes(((ByteWritable) writable).get());
+
+      default:
+        throw new IOException("Type not supported " + pc);
+    }
+  }
+
+  private byte[] getNextBA(byte[] current) {
+    // startRow is inclusive while stopRow is exclusive; this utility method
+    // returns the next byte array in sort order after the current one,
+    // obtained by padding the current one with a trailing 0 byte.
+    byte[] next = new byte[current.length + 1];
+    System.arraycopy(current, 0, next, 0, current.length);
+    return next;
+  }
+
+  @Override
+  public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
+    setScan(jobConf);
+    Job job = new Job(jobConf);
+    JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
+    Path[] tablePaths = FileInputFormat.getInputPaths(jobContext);
+
+    try {
+      List<org.apache.hadoop.mapreduce.InputSplit> splits = delegate.getSplits(jobContext);
+      InputSplit[] results = new InputSplit[splits.size()];
+
+      for (int i = 0; i < splits.size(); i++) {
+        results[i] = new HBaseSnapshotSplit((TableSnapshotRegionSplit) splits.get(i), tablePaths[0]);
+      }
+      return results;
+    } catch (InterruptedException e) {
+      LOG.error(e.getMessage(), e);
+      return null;
+    }
+  }
+
+  @Override
+  public RecordReader<ImmutableBytesWritable, ResultWritable> getRecordReader(
+      InputSplit split, JobConf jobConf, Reporter reporter) throws IOException {
+    setScan(jobConf);
+
+    HBaseSnapshotSplit hbaseSplit = (HBaseSnapshotSplit) split;
+    TableSnapshotRegionSplit tableSplit = hbaseSplit.getSplit();
+
+    Job job = new Job(jobConf);
+    TaskAttemptContext tac = ShimLoader.getHadoopShims().newTaskAttemptContext(
+        job.getConfiguration(), reporter);
+    final org.apache.hadoop.mapreduce.RecordReader<ImmutableBytesWritable, Result> rr =
+        delegate.createRecordReader(tableSplit, tac);
+    try {
+      rr.initialize(tableSplit, tac);
+    } catch (InterruptedException e1) {
+      throw new IOException("Failed to initialize RecordReader", e1);
+    }
+
+    return new RecordReader<ImmutableBytesWritable, ResultWritable>() {
+
+      @Override
+      public void close() throws IOException {
+        rr.close();
+      }
+
+      @Override
+      public ImmutableBytesWritable createKey() {
+        return new ImmutableBytesWritable();
+      }
+
+      @Override
+      public ResultWritable createValue() {
+        return new ResultWritable(new Result());
+      }
+
+      @Override
+      public long getPos() throws IOException {
+        return 0;
+      }
+
+      @Override
+      public float getProgress() throws IOException {
+        float progress = 0.0F;
+
+        try {
+          progress = rr.getProgress();
+        } catch (InterruptedException e) {
+          throw new IOException(e);
+        }
+
+        return progress;
+      }
+
+      @Override
+      public boolean next(ImmutableBytesWritable rowKey, ResultWritable value) throws IOException {
+
+        boolean next = false;
+
+        try {
+          next = rr.nextKeyValue();
+
+          if (next) {
+            rowKey.set(rr.getCurrentValue().getRow());
+            value.setResult(rr.getCurrentValue());
+          }
+        } catch (InterruptedException e) {
+          throw new IOException(e);
+        }
+
+        return next;
+      }
+    };
+  }
+}
Binary files a/hcatalog/src/test/e2e/harness/.svn/wc.db and b/hcatalog/src/test/e2e/harness/.svn/wc.db differ
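
Usage sketch (not part of the patch): a table backed by an HBase snapshot would be registered roughly as below. The handler class name and the property keys (hbase.columns.mapping, hive.hbase.snapshot, hive.hbase.snapshot.restoredir, hive.hbase.rootdir) come from the code above; the table name, column mapping, snapshot name, and paths are illustrative placeholders. preCreateTable requires CREATE EXTERNAL TABLE and rejects a LOCATION clause.

    CREATE EXTERNAL TABLE snapshot_users(key string, name string, age int)
    STORED BY 'org.apache.hadoop.hive.hbase.HBaseSnapshotStorageHandler'
    WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,info:name,info:age")
    TBLPROPERTIES (
      "hive.hbase.snapshot" = "usertable_snapshot",
      "hive.hbase.snapshot.restoredir" = "/tmp/hbase-snapshot-restore",
      "hive.hbase.rootdir" = "/hbase");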