Index: src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java	(revision 0)
+++ src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java	(revision 0)
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.coprocessor.example;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.BaseEndpointCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.OperationStatus;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.util.Pair;
+
+public class BulkDeleteEndpoint extends BaseEndpointCoprocessor implements BulkDeleteProtocol {
+  private static final Log LOG = LogFactory.getLog(BulkDeleteEndpoint.class);
+
+  @Override
+  public BulkDeleteResponse delete(Scan scan, Delete deleteTemplate, int rowBatchSize) {
+    long totalRowsDeleted = 0L;
+    BulkDeleteResponse response = new BulkDeleteResponse();
+    HRegion region = ((RegionCoprocessorEnvironment) getEnvironment()).getRegion();
+    boolean hasMore = true;
+    RegionScanner scanner = null;
+    // When the delete is condition based, so that Filters are present in the scan, we assume
+    // that the scan selects exactly the necessary column(s).
+    try {
+      scanner = region.getScanner(scan);
+      while (hasMore) {
+        List<List<KeyValue>> deleteRows = new ArrayList<List<KeyValue>>(rowBatchSize);
+        for (int i = 0; i < rowBatchSize; i++) {
+          List<KeyValue> results = new ArrayList<KeyValue>();
+          hasMore = scanner.next(results);
+          if (results.size() > 0) {
+            deleteRows.add(results);
+          }
+          if (!hasMore) {
+            // There are no more rows.
+            break;
+          }
+        }
+        if (deleteRows.size() > 0) {
+          Pair<Mutation, Integer>[] deleteWithLockArr = new Pair[deleteRows.size()];
+          int i = 0;
+          for (List<KeyValue> deleteRow : deleteRows) {
+            Delete delete = createDeleteMutation(deleteRow, deleteTemplate);
+            deleteWithLockArr[i++] = new Pair<Mutation, Integer>(delete, null);
+          }
+          OperationStatus[] opStatus = region.batchMutate(deleteWithLockArr);
+          for (i = 0; i < opStatus.length; i++) {
+            if (opStatus[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) {
+              break;
+            }
+            totalRowsDeleted++;
+          }
+        }
+      }
+    } catch (IOException ioe) {
+      LOG.error(ioe);
+      response.setIoException(ioe);
+    } finally {
+      if (scanner != null) {
+        try {
+          scanner.close();
+        } catch (IOException ioe) {
+          LOG.debug(ioe);
+        }
+      }
+    }
+    response.setRowsDeleted(totalRowsDeleted);
+    return response;
+  }
+
+  private Delete createDeleteMutation(List<KeyValue> deleteRow, Delete deleteTemplate) {
+    // We just need the rowkey. Get it from the 1st KV.
+    byte[] row = deleteRow.get(0).getRow();
+    Delete delete = new Delete(row);
+    // Copy all the properties of the passed template delete, other than the rowkey, to the
+    // new Delete object being created.
+    // No need to check for row locks. There is no way to get these locks at the client side,
+    // as the rows themselves are not known there.
+    delete.setTimestamp(deleteTemplate.getTimeStamp());
+    delete.setWriteToWAL(deleteTemplate.getWriteToWAL());
+    setFamilyMap(row, deleteTemplate, delete);
+    return delete;
+  }
+
+  private void setFamilyMap(byte[] deleteRowKey, Delete deleteTemplate, Delete delete) {
+    Map<byte[], List<KeyValue>> deleteTemplateFamilyMap = deleteTemplate.getFamilyMap();
+    Map<byte[], List<KeyValue>> deleteFamilyMap = new HashMap<byte[], List<KeyValue>>();
+    for (Entry<byte[], List<KeyValue>> entry : deleteTemplateFamilyMap.entrySet()) {
+      List<KeyValue> deleteTemplateKVs = entry.getValue();
+      List<KeyValue> deleteKVs = new ArrayList<KeyValue>(deleteTemplateKVs.size());
+      for (KeyValue kv : deleteTemplateKVs) {
+        deleteKVs.add(new KeyValue(deleteRowKey, entry.getKey(), kv.getQualifier(),
+            kv.getTimestamp(), Type.codeToType(kv.getType())));
+      }
+      deleteFamilyMap.put(entry.getKey(), deleteKVs);
+    }
+    delete.setFamilyMap(deleteFamilyMap);
+  }
+}
\ No newline at end of file
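
Note for reviewers: below is a minimal sketch of how a client can invoke this endpoint, mirroring the invokeBulkDeleteProtocol() helper in the test further down in this patch. The class name BulkDeleteClientExample and the table name "myTable" are illustrative only and not part of the patch.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.example.BulkDeleteProtocol;
import org.apache.hadoop.hbase.coprocessor.example.BulkDeleteResponse;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class BulkDeleteClientExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "myTable"); // hypothetical table name
    final Scan scan = new Scan();
    // For full-row deletes only the row keys are needed, so fetch as little as possible.
    scan.setFilter(new FirstKeyOnlyFilter());
    // The template's rowkey is never used; the actual row keys come from the scan results.
    final Delete deleteTemplate = new Delete(new byte[0]);
    // Run the endpoint on every region covered by [startRow, stopRow).
    Map<byte[], BulkDeleteResponse> results = table.coprocessorExec(
        BulkDeleteProtocol.class, scan.getStartRow(), scan.getStopRow(),
        new Batch.Call<BulkDeleteProtocol, BulkDeleteResponse>() {
          public BulkDeleteResponse call(BulkDeleteProtocol instance) throws IOException {
            return instance.delete(scan, deleteTemplate, 500);
          }
        });
    long rowsDeleted = 0;
    for (BulkDeleteResponse response : results.values()) {
      rowsDeleted += response.getRowsDeleted();
    }
    System.out.println("Rows deleted: " + rowsDeleted);
    table.close();
  }
}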
+ */ +package org.apache.hadoop.hbase.coprocessor.example; + +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; + +public interface BulkDeleteProtocol extends CoprocessorProtocol { + + /** + * + * @param scan + * @param deleteTemplate + * @param rowBatchSize + * The number of rows which need to be accumulated by scan and delete as one batch + * @return + */ + BulkDeleteResponse delete(Scan scan, Delete deleteTemplate, int rowBatchSize); +} Index: src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteResponse.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteResponse.java (revision 0) +++ src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteResponse.java (revision 0) @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.coprocessor.example; + +import java.io.IOException; +import java.io.Serializable; + +/** + * Wrapper class which returns the result of the bulk deletion operation happened at the server for + * a region. This includes the total number of rows deleted and/or any {@link IOException} which is + * happened while doing the operation. It will also include total number of versions deleted, when + * the delete type is VERSION. + */ +public class BulkDeleteResponse implements Serializable { + private static final long serialVersionUID = -8192337710525997237L; + private long rowsDeleted; + private IOException ioException; + + public BulkDeleteResponse() { + + } + + public void setRowsDeleted(long rowsDeleted) { + this.rowsDeleted = rowsDeleted; + } + + public long getRowsDeleted() { + return rowsDeleted; + } + + public void setIoException(IOException ioException) { + this.ioException = ioException; + } + + public IOException getIoException() { + return ioException; + } +} Index: src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java (revision 0) +++ src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java (revision 0) @@ -0,0 +1,483 @@ +/* + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
Index: src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java	(revision 0)
+++ src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java	(revision 0)
@@ -0,0 +1,483 @@
+/*
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.coprocessor.example;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestBulkDeleteProtocol {
+  private static final byte[] FAMILY1 = Bytes.toBytes("cf1");
+  private static final byte[] FAMILY2 = Bytes.toBytes("cf2");
+  private static final byte[] QUALIFIER1 = Bytes.toBytes("c1");
+  private static final byte[] QUALIFIER2 = Bytes.toBytes("c2");
+  private static final byte[] QUALIFIER3 = Bytes.toBytes("c3");
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().set(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
+        BulkDeleteEndpoint.class.getName());
+    TEST_UTIL.startMiniCluster(2);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testBulkDeleteEndpoint() throws Throwable {
+    byte[] tableName = Bytes.toBytes("testBulkDeleteEndpoint");
+    HTable ht = createTable(tableName);
+    List<Put> puts = new ArrayList<Put>(100);
+    for (int j = 0; j < 100; j++) {
+      byte[] rowkey = Bytes.toBytes(j);
+      puts.add(createPut(rowkey, "v1"));
+    }
+    ht.put(puts);
+    // Deleting all the rows.
+    Scan scan = new Scan();
+    scan.setFilter(new FirstKeyOnlyFilter());
+    Delete deleteTemplate = new Delete(new byte[0]);
+    long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, deleteTemplate);
+    assertEquals(100, noOfRowsDeleted);
+
+    int rows = 0;
+    for (Result result : ht.getScanner(new Scan())) {
+      rows++;
+    }
+    assertEquals(0, rows);
+  }
+
+  @Test
+  public void testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion()
+      throws Throwable {
+    byte[] tableName = Bytes
+        .toBytes("testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion");
+    HTable ht = createTable(tableName);
+    List<Put> puts = new ArrayList<Put>(100);
+    for (int j = 0; j < 100; j++) {
+      byte[] rowkey = Bytes.toBytes(j);
+      puts.add(createPut(rowkey, "v1"));
+    }
+    ht.put(puts);
+    // Deleting all the rows.
+    Scan scan = new Scan();
+    scan.setFilter(new FirstKeyOnlyFilter());
+    Delete deleteTemplate = new Delete(new byte[0]);
+    long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 10, deleteTemplate);
+    assertEquals(100, noOfRowsDeleted);
+
+    int rows = 0;
+    for (Result result : ht.getScanner(new Scan())) {
+      rows++;
+    }
+    assertEquals(0, rows);
+  }
+
+  private long invokeBulkDeleteProtocol(byte[] tableName, final Scan scan, final int rowBatchSize,
+      final Delete deleteTemplate) throws Throwable {
+    HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    long noOfDeletedRows = 0L;
+    Batch.Call<BulkDeleteProtocol, BulkDeleteResponse> callable =
+        new Batch.Call<BulkDeleteProtocol, BulkDeleteResponse>() {
+      public BulkDeleteResponse call(BulkDeleteProtocol instance) throws IOException {
+        return instance.delete(scan, deleteTemplate, rowBatchSize);
+      }
+    };
+    Map<byte[], BulkDeleteResponse> result = ht.coprocessorExec(BulkDeleteProtocol.class,
+        scan.getStartRow(), scan.getStopRow(), callable);
+    for (BulkDeleteResponse response : result.values()) {
+      noOfDeletedRows += response.getRowsDeleted();
+    }
+    return noOfDeletedRows;
+  }
+
+  @Test
+  public void testBulkDeleteWithConditionBasedDelete() throws Throwable {
+    byte[] tableName = Bytes.toBytes("testBulkDeleteWithConditionBasedDelete");
+    HTable ht = createTable(tableName);
+    List<Put> puts = new ArrayList<Put>(100);
+    for (int j = 0; j < 100; j++) {
+      byte[] rowkey = Bytes.toBytes(j);
+      String value = (j % 10 == 0) ? "v1" : "v2";
+      puts.add(createPut(rowkey, value));
+    }
+    ht.put(puts);
+    Scan scan = new Scan();
+    FilterList fl = new FilterList(Operator.MUST_PASS_ALL);
+    SingleColumnValueFilter scvf = new SingleColumnValueFilter(FAMILY1, QUALIFIER3,
+        CompareOp.EQUAL, Bytes.toBytes("v1"));
+    //fl.addFilter(new FirstKeyOnlyFilter());
+    fl.addFilter(scvf);
+    scan.setFilter(fl);
+    // Deleting all the rows where cf1:c3 = v1
+    Delete deleteTemplate = new Delete(new byte[0]);
+    long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, deleteTemplate);
+    assertEquals(10, noOfRowsDeleted);
+
+    int rows = 0;
+    for (Result result : ht.getScanner(new Scan())) {
+      rows++;
+    }
+    assertEquals(90, rows);
+  }
+
+  @Test
+  public void testBulkDeleteColumn() throws Throwable {
+    byte[] tableName = Bytes.toBytes("testBulkDeleteColumn");
+    HTable ht = createTable(tableName);
+    List<Put> puts = new ArrayList<Put>(100);
+    for (int j = 0; j < 100; j++) {
+      byte[] rowkey = Bytes.toBytes(j);
+      String value = (j % 10 == 0) ? "v1" : "v2";
"v1" : "v2"; + puts.add(createPut(rowkey, value)); + } + ht.put(puts); + Scan scan = new Scan (); + scan.addColumn(FAMILY1, QUALIFIER2); + // Delete the column cf1:col2 + Delete deleteTemplate = new Delete(new byte[0]); + deleteTemplate.deleteColumns(FAMILY1, QUALIFIER2); + long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, deleteTemplate); + assertEquals(100, noOfRowsDeleted); + + int rows = 0; + for (Result result : ht.getScanner(new Scan())) { + assertEquals(2, result.getFamilyMap(FAMILY1).size()); + assertTrue(result.getColumn(FAMILY1, QUALIFIER2).isEmpty()); + assertEquals(1, result.getColumn(FAMILY1, QUALIFIER1).size()); + assertEquals(1, result.getColumn(FAMILY1, QUALIFIER3).size()); + rows++; + } + assertEquals(100, rows); + } + + @Test + public void testBulkDeleteFamily() throws Throwable { + byte[] tableName = Bytes.toBytes("testBulkDeleteFamily"); + HTableDescriptor htd = new HTableDescriptor(tableName); + htd.addFamily(new HColumnDescriptor(FAMILY1)); + htd.addFamily(new HColumnDescriptor(FAMILY2)); + TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5); + HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName); + List puts = new ArrayList(100); + for (int j = 0; j < 100; j++) { + Put put = new Put(Bytes.toBytes(j)); + put.add(FAMILY1, QUALIFIER1, "v1".getBytes()); + put.add(FAMILY2, QUALIFIER2, "v2".getBytes()); + puts.add(put); + } + ht.put(puts); + Scan scan = new Scan (); + scan.addFamily(FAMILY1); + // Delete the column family cf1 + Delete deleteTemplate = new Delete(new byte[0]); + deleteTemplate.deleteFamily(FAMILY1); + long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, deleteTemplate); + assertEquals(100, noOfRowsDeleted); + int rows = 0; + for (Result result : ht.getScanner(new Scan())) { + assertTrue(result.getFamilyMap(FAMILY1).isEmpty()); + assertEquals(1, result.getColumn(FAMILY2, QUALIFIER2).size()); + rows++; + } + assertEquals(100, rows); + } + + @Test + public void testBulkDeleteColumnVersion() throws Throwable { + byte[] tableName = Bytes.toBytes("testBulkDeleteColumnVersion"); + HTable ht = createTable(tableName); + List puts = new ArrayList(100); + for (int j = 0; j < 100; j++) { + Put put = new Put(Bytes.toBytes(j)); + byte[] value = "v1".getBytes(); + put.add(FAMILY1, QUALIFIER1, 1234L, value); + put.add(FAMILY1, QUALIFIER2, 1234L, value); + put.add(FAMILY1, QUALIFIER3, 1234L, value); + // Latest version values + value = "v2".getBytes(); + put.add(FAMILY1, QUALIFIER1, value); + put.add(FAMILY1, QUALIFIER2, value); + put.add(FAMILY1, QUALIFIER3, value); + put.add(FAMILY1, null, value); + puts.add(put); + } + ht.put(puts); + Scan scan = new Scan (); + scan.addFamily(FAMILY1); + // Delete the latest version values of all the columns in family cf1. 
+    Delete deleteTemplate = new Delete(new byte[0]);
+    deleteTemplate.deleteColumn(FAMILY1, QUALIFIER1);
+    deleteTemplate.deleteColumn(FAMILY1, QUALIFIER2);
+    deleteTemplate.deleteColumn(FAMILY1, QUALIFIER3);
+    deleteTemplate.deleteColumn(FAMILY1, null);
+
+    long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, deleteTemplate);
+    assertEquals(100, noOfRowsDeleted);
+    int rows = 0;
+    scan = new Scan();
+    scan.setMaxVersions();
+    for (Result result : ht.getScanner(scan)) {
+      assertEquals(3, result.getFamilyMap(FAMILY1).size());
+      List<KeyValue> column = result.getColumn(FAMILY1, QUALIFIER1);
+      assertEquals(1, column.size());
+      assertTrue(Bytes.equals("v1".getBytes(), column.get(0).getValue()));
+
+      column = result.getColumn(FAMILY1, QUALIFIER2);
+      assertEquals(1, column.size());
+      assertTrue(Bytes.equals("v1".getBytes(), column.get(0).getValue()));
+
+      column = result.getColumn(FAMILY1, QUALIFIER3);
+      assertEquals(1, column.size());
+      assertTrue(Bytes.equals("v1".getBytes(), column.get(0).getValue()));
+      rows++;
+    }
+    assertEquals(100, rows);
+  }
+
+  @Test
+  public void testBulkDeleteColumnVersionBasedOnTS() throws Throwable {
+    byte[] tableName = Bytes.toBytes("testBulkDeleteColumnVersionBasedOnTS");
+    HTable ht = createTable(tableName);
+    List<Put> puts = new ArrayList<Put>(100);
+    for (int j = 0; j < 100; j++) {
+      Put put = new Put(Bytes.toBytes(j));
+      // TS = 1000L
+      byte[] value = "v1".getBytes();
+      put.add(FAMILY1, QUALIFIER1, 1000L, value);
+      put.add(FAMILY1, QUALIFIER2, 1000L, value);
+      put.add(FAMILY1, QUALIFIER3, 1000L, value);
+      // TS = 1234L
+      value = "v2".getBytes();
+      put.add(FAMILY1, QUALIFIER1, 1234L, value);
+      put.add(FAMILY1, QUALIFIER2, 1234L, value);
+      put.add(FAMILY1, QUALIFIER3, 1234L, value);
+      // Latest version values
+      value = "v3".getBytes();
+      put.add(FAMILY1, QUALIFIER1, value);
+      put.add(FAMILY1, QUALIFIER2, value);
+      put.add(FAMILY1, QUALIFIER3, value);
+      puts.add(put);
+    }
+    ht.put(puts);
+    Scan scan = new Scan();
+    scan.addColumn(FAMILY1, QUALIFIER3);
+    // Delete one version of the column cf1:c3, at TS=1234
+    Delete deleteTemplate = new Delete(new byte[0]);
+    deleteTemplate.deleteColumn(FAMILY1, QUALIFIER3, 1234L);
+    long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, deleteTemplate);
+    assertEquals(100, noOfRowsDeleted);
+    int rows = 0;
+    scan = new Scan();
+    scan.setMaxVersions();
+    for (Result result : ht.getScanner(scan)) {
+      assertEquals(3, result.getFamilyMap(FAMILY1).size());
+      assertEquals(3, result.getColumn(FAMILY1, QUALIFIER1).size());
+      assertEquals(3, result.getColumn(FAMILY1, QUALIFIER2).size());
+      List<KeyValue> column = result.getColumn(FAMILY1, QUALIFIER3);
+      assertEquals(2, column.size());
+      assertTrue(Bytes.equals("v3".getBytes(), column.get(0).getValue()));
+      assertTrue(Bytes.equals("v1".getBytes(), column.get(1).getValue()));
+      rows++;
+    }
+    assertEquals(100, rows);
+  }
+
+  @Test
+  public void testBulkDeleteMixed() throws Throwable {
+    byte[] tableName = Bytes.toBytes("testBulkDeleteMixed");
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(new HColumnDescriptor(FAMILY1));
+    htd.addFamily(new HColumnDescriptor(FAMILY2));
+    TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
+    HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    List<Put> puts = new ArrayList<Put>(100);
+    for (int j = 0; j < 100; j++) {
+      Put put = new Put(Bytes.toBytes(j));
+      // TS = 1000L
+      byte[] value = "v1".getBytes();
+      put.add(FAMILY1, QUALIFIER1, 1000L, value);
+      put.add(FAMILY1, QUALIFIER2, 1000L, value);
+      put.add(FAMILY1, QUALIFIER3, 1000L, value);
+      // TS = 1234L
+      value = "v2".getBytes();
+      put.add(FAMILY1, QUALIFIER1, 1234L, value);
+      put.add(FAMILY1, QUALIFIER2, 1234L, value);
+      put.add(FAMILY1, QUALIFIER3, 1234L, value);
+      // Latest version values
+      value = "v3".getBytes();
+      put.add(FAMILY1, QUALIFIER1, value);
+      put.add(FAMILY1, QUALIFIER2, value);
+      put.add(FAMILY1, QUALIFIER3, value);
+
+      //put.add(FAMILY2, QUALIFIER1, value);
+      puts.add(put);
+    }
+    ht.put(puts);
+    Scan scan = new Scan();
+
+    // Delete the family cf2, all versions of the column cf1:c3, and the version of cf1:c2
+    // at TS=1234
+    Delete deleteTemplate = new Delete(new byte[0]);
+    deleteTemplate.deleteFamily(FAMILY2);
+    deleteTemplate.deleteColumns(FAMILY1, QUALIFIER3);
+    deleteTemplate.deleteColumn(FAMILY1, QUALIFIER2, 1234L);
+    long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, deleteTemplate);
+    assertEquals(100, noOfRowsDeleted);
+    int rows = 0;
+    scan = new Scan();
+    scan.setMaxVersions();
+    for (Result result : ht.getScanner(scan)) {
+      assertEquals(0, result.getFamilyMap(FAMILY2).size());
+      assertEquals(0, result.getColumn(FAMILY1, QUALIFIER3).size());
+      List<KeyValue> column = result.getColumn(FAMILY1, QUALIFIER2);
+      assertEquals(2, column.size());
+      assertTrue(Bytes.equals("v3".getBytes(), column.get(0).getValue()));
+      assertTrue(Bytes.equals("v1".getBytes(), column.get(1).getValue()));
+      assertEquals(3, result.getColumn(FAMILY1, QUALIFIER1).size());
+      rows++;
+    }
+    assertEquals(100, rows);
+  }
+
+  //@Test
+  // Cannot test this yet. As of now only one version of any column can be deleted.
+  /*public void testBulkDeleteWithNumberOfVersions() throws Throwable {
+    byte[] tableName = Bytes.toBytes("testBulkDeleteWithNumberOfVersions");
+    HTable ht = createTable(tableName);
+    List<Put> puts = new ArrayList<Put>(100);
+    for (int j = 0; j < 100; j++) {
+      Put put = new Put(Bytes.toBytes(j));
+      // TS = 1000L
+      byte[] value = "v1".getBytes();
+      put.add(FAMILY1, QUALIFIER1, 1000L, value);
+      put.add(FAMILY1, QUALIFIER2, 1000L, value);
+      put.add(FAMILY1, QUALIFIER3, 1000L, value);
+      // TS = 1234L
+      value = "v2".getBytes();
+      put.add(FAMILY1, QUALIFIER1, 1234L, value);
+      put.add(FAMILY1, QUALIFIER2, 1234L, value);
+      put.add(FAMILY1, QUALIFIER3, 1234L, value);
+      // TS = 2000L
+      value = "v3".getBytes();
+      put.add(FAMILY1, QUALIFIER1, 2000L, value);
+      put.add(FAMILY1, QUALIFIER2, 2000L, value);
+      put.add(FAMILY1, QUALIFIER3, 2000L, value);
+      // Latest version values
+      value = "v4".getBytes();
+      put.add(FAMILY1, QUALIFIER1, value);
+      put.add(FAMILY1, QUALIFIER2, value);
+      put.add(FAMILY1, QUALIFIER3, value);
+      puts.add(put);
+    }
+    ht.put(puts);
+
+    // Delete all the versions of columns cf1:c1 and cf1:c2 falling within the time range
+    // [1000,2000)
+    final Scan scan = new Scan();
+    scan.addColumn(FAMILY1, QUALIFIER1);
+    scan.addColumn(FAMILY1, QUALIFIER2);
+    scan.setTimeRange(1000L, 2000L);
+    scan.setMaxVersions();
+
+    long noOfDeletedRows = 0L;
+    long noOfVersionsDeleted = 0L;
+    Batch.Call<BulkDeleteProtocol, BulkDeleteResponse> callable =
+        new Batch.Call<BulkDeleteProtocol, BulkDeleteResponse>() {
+      public BulkDeleteResponse call(BulkDeleteProtocol instance) throws IOException {
+        return instance.delete(scan, DeleteType.VERSION, null, 500);
+      }
+    };
+    Map<byte[], BulkDeleteResponse> result = ht.coprocessorExec(BulkDeleteProtocol.class,
+        scan.getStartRow(), scan.getStopRow(), callable);
+    for (BulkDeleteResponse response : result.values()) {
+      noOfDeletedRows += response.getRowsDeleted();
+      noOfVersionsDeleted += response.getVersionsDeleted();
+    }
+    assertEquals(100, noOfDeletedRows);
+    assertEquals(400, noOfVersionsDeleted);
+
+    int rows = 0;
+    Scan scan1 = new Scan();
+    scan1.setMaxVersions();
+    for (Result res : ht.getScanner(scan1)) {
+      assertEquals(3, res.getFamilyMap(FAMILY1).size());
+      List<KeyValue> column = res.getColumn(FAMILY1, QUALIFIER1);
+      assertEquals(2, column.size());
+      assertTrue(Bytes.equals("v4".getBytes(), column.get(0).getValue()));
+      assertTrue(Bytes.equals("v3".getBytes(), column.get(1).getValue()));
+      column = res.getColumn(FAMILY1, QUALIFIER2);
+      assertEquals(2, column.size());
+      assertTrue(Bytes.equals("v4".getBytes(), column.get(0).getValue()));
+      assertTrue(Bytes.equals("v3".getBytes(), column.get(1).getValue()));
+      assertEquals(4, res.getColumn(FAMILY1, QUALIFIER3).size());
+      rows++;
+    }
+    assertEquals(100, rows);
+  }*/
+
+  private HTable createTable(byte[] tableName) throws IOException {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY1);
+    hcd.setMaxVersions(10); // Just setting 10 as we are not testing with more than 10 versions here
+    htd.addFamily(hcd);
+    TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
+    HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    return ht;
+  }
+
+  private Put createPut(byte[] rowkey, String value) throws IOException {
+    Put put = new Put(rowkey);
+    put.add(FAMILY1, QUALIFIER1, value.getBytes());
+    put.add(FAMILY1, QUALIFIER2, value.getBytes());
+    put.add(FAMILY1, QUALIFIER3, value.getBytes());
+    return put;
+  }
+}
\ No newline at end of file
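
Deployment note: as setupBeforeClass() above shows, the endpoint class has to be registered with the region servers before coprocessorExec() can reach it. A minimal sketch mirroring the test; equivalently, the property can be set in hbase-site.xml on every region server:

// Load the endpoint for all user regions. CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY
// is the "hbase.coprocessor.user.region.classes" configuration property.
Configuration conf = HBaseConfiguration.create();
conf.set(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
    BulkDeleteEndpoint.class.getName());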