From 20c01622bd5c01a6dde8ed1d3bdc4d1595b99b9e Mon Sep 17 00:00:00 2001 From: Jan Hentschel Date: Thu, 28 Dec 2017 10:17:04 +0100 Subject: [PATCH] HBASE-7003 Moved backup examples into hbase-examples --- .../resources/hbase/checkstyle-suppressions.xml | 1 + .../hbase/backup/example/HFileArchiveManager.java | 19 +- .../backup/example/HFileArchiveTableMonitor.java | 0 .../example/LongTermArchivingHFileCleaner.java | 24 +- .../backup/example/TableHFileArchiveTracker.java | 26 +- .../hbase/backup/example/ZKTableArchiveClient.java | 6 +- .../client/example/ExportEndpointExample.java | 14 +- .../hbase/client/example/RefreshHFilesClient.java | 49 +- .../coprocessor/example/BulkDeleteEndpoint.java | 15 +- .../example/ExampleMasterObserverWithMetrics.java | 3 +- .../coprocessor/example/RefreshHFilesEndpoint.java | 11 +- .../coprocessor/example/RowCountEndpoint.java | 8 +- .../hadoop/hbase/mapreduce/IndexBuilder.java | 3 +- .../hadoop/hbase/mapreduce/SampleUploader.java | 4 +- .../org/apache/hadoop/hbase/thrift/DemoClient.java | 598 +++++++++++---------- .../apache/hadoop/hbase/thrift/HttpDoAsClient.java | 10 +- .../apache/hadoop/hbase/thrift2/DemoClient.java | 11 +- .../java/org/apache/hadoop/hbase/types/PBCell.java | 5 +- .../example/TestZooKeeperTableArchiveClient.java | 30 +- .../hbase/client/example/TestHttpProxyExample.java | 4 +- .../example/TestRefreshHFilesEndpoint.java | 21 +- .../hbase/mapreduce/TestMapReduceExamples.java | 21 +- .../org/apache/hadoop/hbase/types/TestPBCell.java | 4 +- 23 files changed, 478 insertions(+), 409 deletions(-) rename {hbase-server => hbase-examples}/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java (95%) rename {hbase-server => hbase-examples}/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java (100%) rename {hbase-server => hbase-examples}/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java (93%) rename {hbase-server => hbase-examples}/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java (96%) rename {hbase-server => hbase-examples}/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java (99%) rename {hbase-server => hbase-examples}/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java (96%) diff --git a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml index b4173e0253..b424613cb5 100644 --- a/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml +++ b/hbase-checkstyle/src/main/resources/hbase/checkstyle-suppressions.xml @@ -37,4 +37,5 @@ + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java similarity index 95% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java rename to hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java index c51d4937a1..f93c3c0ff0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java @@ -21,12 +21,12 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -68,14 +68,14 @@ class HFileArchiveManager { /** * Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next - * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are retained by another - * cleaner. + * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are + * retained by another cleaner. * @param table name of the table for which to disable hfile retention. * @return this for chaining. * @throws KeeperException if if we can't reach zookeeper to update the hfile cleaner. */ public HFileArchiveManager disableHFileBackup(byte[] table) throws KeeperException { - disable(this.zooKeeper, table); + disable(this.zooKeeper, table); return this; } @@ -95,17 +95,16 @@ class HFileArchiveManager { } /** - * Perform a best effort enable of hfile retention, which relies on zookeeper communicating the // - * * change back to the hfile cleaner. + * Perform a best effort enable of hfile retention, which relies on zookeeper communicating the + * change back to the hfile cleaner. *

* No attempt is made to make sure that backups are successfully created - it is inherently an * asynchronous operation. * @param zooKeeper watcher connection to zk cluster * @param table table name on which to enable archiving - * @throws KeeperException + * @throws KeeperException if a ZooKeeper operation fails */ - private void enable(ZKWatcher zooKeeper, byte[] table) - throws KeeperException { + private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException { LOG.debug("Ensuring archiving znode exists"); ZKUtil.createAndFailSilent(zooKeeper, archiveZnode); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java similarity index 100% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java rename to hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java similarity index 93% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java rename to hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java index 484ff5ea17..34077e1025 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.backup.example; import java.io.IOException; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -27,6 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -35,7 +35,7 @@ import org.slf4j.LoggerFactory; * {@link BaseHFileCleanerDelegate} that only cleans HFiles that don't belong to a table that is * currently being archived. *

- * This only works properly if the + * This only works properly if the * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner} * is also enabled (it always should be), since it may take a little time * for the ZK notification to propagate, in which case we may accidentally @@ -53,14 +53,18 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate { public boolean isFileDeletable(FileStatus fStat) { try { // if its a directory, then it can be deleted - if (fStat.isDirectory()) return true; - + if (fStat.isDirectory()) { + return true; + } + Path file = fStat.getPath(); // check to see if FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null); // if the file doesn't exist, then it can be deleted (but should never // happen since deleted files shouldn't get passed in) - if (deleteStatus == null) return true; + if (deleteStatus == null) { + return true; + } // otherwise, we need to check the file's table and see its being archived Path family = file.getParent(); @@ -69,7 +73,8 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate { String tableName = table.getName(); boolean ret = !archiveTracker.keepHFiles(tableName); - LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName); + LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + + tableName); return ret; } catch (IOException e) { LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e); @@ -97,13 +102,14 @@ public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate { @Override public void stop(String reason) { - if (this.isStopped()) return; + if (this.isStopped()) { + return; + } + super.stop(reason); if (this.archiveTracker != null) { LOG.info("Stopping " + this.archiveTracker); this.archiveTracker.stop(); } - } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java similarity index 96% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java rename to hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java index 1b3b775b2a..49b0e82775 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java @@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.backup.example; import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; * archive. 
*/ @InterfaceAudience.Private -public class TableHFileArchiveTracker extends ZKListener { +public final class TableHFileArchiveTracker extends ZKListener { private static final Logger LOG = LoggerFactory.getLogger(TableHFileArchiveTracker.class); public static final String HFILE_ARCHIVE_ZNODE_PARENT = "hfilearchive"; private HFileArchiveTableMonitor monitor; @@ -67,7 +67,9 @@ public class TableHFileArchiveTracker extends ZKListener { @Override public void nodeCreated(String path) { // if it is the archive path - if (!path.startsWith(archiveHFileZNode)) return; + if (!path.startsWith(archiveHFileZNode)) { + return; + } LOG.debug("Archive node: " + path + " created"); // since we are already enabled, just update a single table @@ -75,7 +77,6 @@ public class TableHFileArchiveTracker extends ZKListener { // the top level node has come up, so read in all the tables if (table.length() == 0) { - checkEnabledAndUpdate(); return; } @@ -90,7 +91,9 @@ public class TableHFileArchiveTracker extends ZKListener { @Override public void nodeChildrenChanged(String path) { - if (!path.startsWith(archiveHFileZNode)) return; + if (!path.startsWith(archiveHFileZNode)) { + return; + } LOG.debug("Archive node: " + path + " children changed."); // a table was added to the archive @@ -134,7 +137,9 @@ public class TableHFileArchiveTracker extends ZKListener { @Override public void nodeDeleted(String path) { - if (!path.startsWith(archiveHFileZNode)) return; + if (!path.startsWith(archiveHFileZNode)) { + return; + } LOG.debug("Archive node: " + path + " deleted"); String table = path.substring(archiveHFileZNode.length()); @@ -260,7 +265,10 @@ public class TableHFileArchiveTracker extends ZKListener { * Stop this tracker and the passed zookeeper */ public void stop() { - if (this.stopped) return; + if (this.stopped) { + return; + } + this.stopped = true; this.watcher.close(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java similarity index 99% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java rename to hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java index 39d95439c0..7e4a58509a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java @@ -19,13 +19,13 @@ package org.apache.hadoop.hbase.backup.example; import java.io.IOException; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; /** @@ -110,7 +110,7 @@ public class ZKTableArchiveClient extends Configured { * @param table name of the table to check * @return true if it is, false otherwise * @throws IOException if a connection to ZooKeeper cannot be established - * @throws KeeperException + * @throws KeeperException if a ZooKeeper operation fails */ public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException { 
HFileArchiveManager manager = createHFileArchiveManager(); diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/ExportEndpointExample.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/ExportEndpointExample.java index cc06844595..688324cd7d 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/ExportEndpointExample.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/ExportEndpointExample.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.client.example; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -28,15 +32,11 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.coprocessor.Export; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.coprocessor.Export; import org.apache.hadoop.hbase.util.Bytes; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - /** * A simple example on how to use {@link org.apache.hadoop.hbase.coprocessor.Export}. * @@ -45,7 +45,9 @@ import java.util.Map; * hbase-endpoint/src/main/protobuf/Export.proto. *

*/ -public class ExportEndpointExample { +public final class ExportEndpointExample { + private ExportEndpointExample() { + } public static void main(String[] args) throws Throwable { int rowCount = 100; diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/RefreshHFilesClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/RefreshHFilesClient.java index a829b2ab1b..8fb1b77dbb 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/RefreshHFilesClient.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/RefreshHFilesClient.java @@ -19,6 +19,9 @@ package org.apache.hadoop.hbase.client.example; +import java.io.Closeable; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -32,9 +35,6 @@ import org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.Closeable; -import java.io.IOException; - /** * This client class is for invoking the refresh HFile function deployed on the * Region Server side via the RefreshHFilesService. @@ -46,7 +46,7 @@ public class RefreshHFilesClient implements Closeable { /** * Constructor with Conf object * - * @param cfg + * @param cfg the configuration to use */ public RefreshHFilesClient(Configuration cfg) { try { @@ -70,26 +70,27 @@ public class RefreshHFilesClient implements Closeable { } public void refreshHFiles(final Table table) throws Throwable { - final RefreshHFilesProtos.RefreshHFilesRequest request = RefreshHFilesProtos.RefreshHFilesRequest - .getDefaultInstance(); - table.coprocessorService(RefreshHFilesProtos.RefreshHFilesService.class, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW, - new Batch.Call() { - @Override - public RefreshHFilesProtos.RefreshHFilesResponse call( - RefreshHFilesProtos.RefreshHFilesService refreshHFilesService) - throws IOException { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback<>(); - refreshHFilesService.refreshHFiles(controller, request, rpcCallback); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return rpcCallback.get(); - } - }); + final RefreshHFilesProtos.RefreshHFilesRequest request = + RefreshHFilesProtos.RefreshHFilesRequest.getDefaultInstance(); + table.coprocessorService(RefreshHFilesProtos.RefreshHFilesService.class, + HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, + new Batch.Call() { + @Override + public RefreshHFilesProtos.RefreshHFilesResponse call( + RefreshHFilesProtos.RefreshHFilesService refreshHFilesService) throws IOException { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback<>(); + refreshHFilesService.refreshHFiles(controller, request, rpcCallback); + + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + + return rpcCallback.get(); + } + }); LOG.debug("Done refreshing HFiles"); } } diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java index 4735b3db72..fcfd20906d 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java +++ 
b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.coprocessor.example; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -51,10 +55,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; - /** * Defines a protocol to delete data in bulk based on a scan. The scan can be range scan or with * conditions(filters) etc.This can be used to delete rows, column family(s), column qualifier(s) @@ -88,8 +88,8 @@ import com.google.protobuf.Service; * return rpcCallback.get(); * } * }; - * Map<byte[], BulkDeleteResponse> result = ht.coprocessorService(BulkDeleteService.class, scan - * .getStartRow(), scan.getStopRow(), callable); + * Map<byte[], BulkDeleteResponse> result = ht.coprocessorService(BulkDeleteService.class, + * scan.getStartRow(), scan.getStopRow(), callable); * for (BulkDeleteResponse response : result.values()) { * noOfDeletedRows += response.getRowsDeleted(); * } @@ -225,7 +225,8 @@ public class BulkDeleteEndpoint extends BulkDeleteService implements RegionCopro int noOfVersionsToDelete = 0; if (timestamp == null) { for (Cell kv : deleteRow) { - delete.addColumn(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv.getTimestamp()); + delete.addColumn(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), + kv.getTimestamp()); noOfVersionsToDelete++; } } else { diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java index 87387a0547..ba74663836 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java @@ -93,7 +93,8 @@ public class ExampleMasterObserverWithMetrics implements MasterCoprocessor, Mast } @Override - public void preDisableTable(ObserverContext ctx, TableName tableName) throws IOException { + public void preDisableTable(ObserverContext ctx, + TableName tableName) throws IOException { // Increment the Counter for disable table operations this.disableTableCounter.increment(); } diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java index 60cb1542fd..9fdd41db11 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java @@ -22,6 +22,9 @@ import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; +import java.io.IOException; +import java.util.Collections; + import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.CoprocessorException; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; @@ -32,9 +35,6 @@ import org.apache.hadoop.hbase.regionserver.Store; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Collections; - /** * Coprocessor endpoint to refresh HFiles on replica. *
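
As a usage sketch for the endpoint and client touched here (only the class names and the `refreshHFiles(Table)` overload come from this patch; the table name, the connection handling and the note about where the coprocessor key lives are assumptions), the endpoint is loaded by the region servers and then driven from `RefreshHFilesClient`:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.example.RefreshHFilesClient;

public final class RefreshHFilesUsageSketch {
  private RefreshHFilesUsageSketch() {
  }

  public static void main(String[] args) throws Throwable {
    // Region servers must load the endpoint first, typically via hbase-site.xml:
    //   hbase.coprocessor.region.classes =
    //       org.apache.hadoop.hbase.coprocessor.example.RefreshHFilesEndpoint
    Configuration conf = HBaseConfiguration.create();

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t1"));
         RefreshHFilesClient client = new RefreshHFilesClient(conf)) {
      // "t1" is a placeholder table name; refreshHFiles(Table) is the overload
      // declared on RefreshHFilesClient in this patch.
      client.refreshHFiles(table);
    }
  }
}
```
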

@@ -57,8 +57,9 @@ public class RefreshHFilesEndpoint extends RefreshHFilesProtos.RefreshHFilesServ } @Override - public void refreshHFiles(RpcController controller, RefreshHFilesProtos.RefreshHFilesRequest request, - RpcCallback done) { + public void refreshHFiles(RpcController controller, + RefreshHFilesProtos.RefreshHFilesRequest request, + RpcCallback done) { try { for (Store store : env.getRegion().getStores()) { LOG.debug("Refreshing HFiles for region: " + store.getRegionInfo().getRegionNameAsString() + diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java index ff7b43dcef..4f6c799ec2 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java @@ -18,6 +18,10 @@ package org.apache.hadoop.hbase.coprocessor.example; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -36,10 +40,6 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.util.Bytes; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; - /** * Sample coprocessor endpoint exposing a Service interface for counting rows and key values. * diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/IndexBuilder.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/IndexBuilder.java index 01e9ef37cc..8c5909ef38 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/IndexBuilder.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/IndexBuilder.java @@ -52,7 +52,8 @@ import org.apache.hadoop.util.ToolRunner; * Modify ${HADOOP_HOME}/conf/hadoop-env.sh to include the hbase jar, the * zookeeper jar (can be found in lib/ directory under HBase root, the examples output directory, * and the hbase conf directory in HADOOP_CLASSPATH, and then run - * bin/hadoop org.apache.hadoop.hbase.mapreduce.IndexBuilder TABLE_NAME COLUMN_FAMILY ATTR [ATTR ...] + * bin/hadoop org.apache.hadoop.hbase.mapreduce.IndexBuilder TABLE_NAME COLUMN_FAMILY + * ATTR [ATTR ...] *

* *
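
As a companion to the command-line usage above, a minimal programmatic launcher might look like the sketch below. It assumes IndexBuilder exposes the standard Tool entry point (its ToolRunner import appears in the unchanged context of this hunk); the table, column family and attribute names are placeholders.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.IndexBuilder;
import org.apache.hadoop.util.ToolRunner;

public final class IndexBuilderLauncher {
  private IndexBuilderLauncher() {
  }

  public static void main(String[] args) throws Exception {
    // Placeholder arguments: TABLE_NAME, COLUMN_FAMILY, then one or more attributes,
    // matching the usage line quoted in the IndexBuilder javadoc.
    String[] jobArgs = {"access_logs", "info", "user_id"};
    int exitCode = ToolRunner.run(HBaseConfiguration.create(), new IndexBuilder(), jobArgs);
    System.exit(exitCode);
  }
}
```
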

diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/SampleUploader.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/SampleUploader.java index 18eb5a6b1c..c7f394da4e 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/SampleUploader.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/mapreduce/SampleUploader.java @@ -62,9 +62,7 @@ public class SampleUploader extends Configured implements Tool { private static final String NAME = "SampleUploader"; - static class Uploader - extends Mapper { - + static class Uploader extends Mapper { private long checkpoint = 100; private long count = 0; diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java index 706f82f364..6187f4c7e7 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java @@ -54,337 +54,359 @@ import org.apache.thrift.transport.TTransport; * See the instructions under hbase-examples/README.txt */ public class DemoClient { + static protected int port; + static protected String host; + CharsetDecoder decoder = null; - static protected int port; - static protected String host; - CharsetDecoder decoder = null; + private static boolean secure = false; + private static String serverPrincipal = "hbase"; - private static boolean secure = false; - private static String serverPrincipal = "hbase"; + public static void main(String[] args) throws Exception { + if (args.length < 2 || args.length > 4 || (args.length > 2 && !isBoolean(args[2]))) { + System.out.println("Invalid arguments!"); + System.out.println("Usage: DemoClient host port [secure=false [server-principal=hbase] ]"); - public static void main(String[] args) throws Exception { - - if (args.length < 2 || args.length > 4 || (args.length > 2 && !isBoolean(args[2]))) { - - System.out.println("Invalid arguments!"); - System.out.println("Usage: DemoClient host port [secure=false [server-principal=hbase] ]"); - - System.exit(-1); - } - - port = Integer.parseInt(args[1]); - host = args[0]; - if (args.length > 2) { - secure = Boolean.parseBoolean(args[2]); - } - - if (args.length == 4) { - serverPrincipal = args[3]; - } - - final DemoClient client = new DemoClient(); - Subject.doAs(getSubject(), - new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - client.run(); - return null; - } - }); + System.exit(-1); } - private static boolean isBoolean(String s){ - return Boolean.TRUE.toString().equalsIgnoreCase(s) || Boolean.FALSE.toString().equalsIgnoreCase(s); + port = Integer.parseInt(args[1]); + host = args[0]; + + if (args.length > 2) { + secure = Boolean.parseBoolean(args[2]); } - DemoClient() { - decoder = Charset.forName("UTF-8").newDecoder(); + if (args.length == 4) { + serverPrincipal = args[3]; } - // Helper to translate byte[]'s to UTF8 strings - private String utf8(byte[] buf) { - try { - return decoder.decode(ByteBuffer.wrap(buf)).toString(); - } catch (CharacterCodingException e) { - return "[INVALID UTF-8]"; + final DemoClient client = new DemoClient(); + Subject.doAs(getSubject(), new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + client.run(); + return null; } + }); + } + + private static boolean isBoolean(String s){ + return Boolean.TRUE.toString().equalsIgnoreCase(s) || + Boolean.FALSE.toString().equalsIgnoreCase(s); + 
} + + DemoClient() { + decoder = Charset.forName("UTF-8").newDecoder(); + } + + // Helper to translate byte[]'s to UTF8 strings + private String utf8(byte[] buf) { + try { + return decoder.decode(ByteBuffer.wrap(buf)).toString(); + } catch (CharacterCodingException e) { + return "[INVALID UTF-8]"; } + } + + // Helper to translate strings to UTF8 bytes + private byte[] bytes(String s) { + try { + return s.getBytes("UTF-8"); + } catch (UnsupportedEncodingException e) { + e.printStackTrace(); + return null; + } + } - // Helper to translate strings to UTF8 bytes - private byte[] bytes(String s) { - try { - return s.getBytes("UTF-8"); - } catch (UnsupportedEncodingException e) { - e.printStackTrace(); - return null; - } + private void run() throws Exception { + TTransport transport = new TSocket(host, port); + + if (secure) { + Map saslProperties = new HashMap<>(); + saslProperties.put(Sasl.QOP, "auth-conf,auth-int,auth"); + /* + * The Thrift server the DemoClient is trying to connect to + * must have a matching principal, and support authentication. + * + * The HBase cluster must be secure, allow proxy user. + */ + transport = new TSaslClientTransport("GSSAPI", null, + serverPrincipal, // Thrift server user name, should be an authorized proxy user. + host, // Thrift server domain + saslProperties, null, transport); } - private void run() throws Exception { - TTransport transport = new TSocket(host, port); - if (secure) { - Map saslProperties = new HashMap<>(); - saslProperties.put(Sasl.QOP, "auth-conf,auth-int,auth"); - /** - * The Thrift server the DemoClient is trying to connect to - * must have a matching principal, and support authentication. - * - * The HBase cluster must be secure, allow proxy user. - */ - transport = new TSaslClientTransport("GSSAPI", null, - serverPrincipal, // Thrift server user name, should be an authorized proxy user. - host, // Thrift server domain - saslProperties, null, transport); - } + transport.open(); - transport.open(); - - TProtocol protocol = new TBinaryProtocol(transport, true, true); - Hbase.Client client = new Hbase.Client(protocol); - - byte[] t = bytes("demo_table"); - - // - // Scan all tables, look for the demo table and delete it. 
- // - System.out.println("scanning tables..."); - for (ByteBuffer name : client.getTableNames()) { - System.out.println(" found: " + utf8(name.array())); - if (utf8(name.array()).equals(utf8(t))) { - if (client.isTableEnabled(name)) { - System.out.println(" disabling table: " + utf8(name.array())); - client.disableTable(name); - } - System.out.println(" deleting table: " + utf8(name.array())); - client.deleteTable(name); - } - } + TProtocol protocol = new TBinaryProtocol(transport, true, true); + Hbase.Client client = new Hbase.Client(protocol); - // - // Create the demo table with two column families, entry: and unused: - // - ArrayList columns = new ArrayList<>(2); - ColumnDescriptor col; - col = new ColumnDescriptor(); - col.name = ByteBuffer.wrap(bytes("entry:")); - col.timeToLive = Integer.MAX_VALUE; - col.maxVersions = 10; - columns.add(col); - col = new ColumnDescriptor(); - col.name = ByteBuffer.wrap(bytes("unused:")); - col.timeToLive = Integer.MAX_VALUE; - columns.add(col); - - System.out.println("creating table: " + utf8(t)); - try { - client.createTable(ByteBuffer.wrap(t), columns); - } catch (AlreadyExists ae) { - System.out.println("WARN: " + ae.message); - } + byte[] t = createDemoTable(client); + System.out.println("column families in " + utf8(t) + ": "); + Map columnMap = client.getColumnDescriptors(ByteBuffer.wrap(t)); - System.out.println("column families in " + utf8(t) + ": "); - Map columnMap = client.getColumnDescriptors(ByteBuffer.wrap(t)); - for (ColumnDescriptor col2 : columnMap.values()) { - System.out.println(" column: " + utf8(col2.name.array()) + ", maxVer: " + Integer.toString(col2.maxVersions)); - } + for (ColumnDescriptor col2 : columnMap.values()) { + System.out.println(" column: " + utf8(col2.name.array()) + ", maxVer: " + + Integer.toString(col2.maxVersions)); + } - Map dummyAttributes = null; - boolean writeToWal = false; - - // - // Test UTF-8 handling - // - byte[] invalid = {(byte) 'f', (byte) 'o', (byte) 'o', (byte) '-', - (byte) 0xfc, (byte) 0xa1, (byte) 0xa1, (byte) 0xa1, (byte) 0xa1}; - byte[] valid = {(byte) 'f', (byte) 'o', (byte) 'o', (byte) '-', - (byte) 0xE7, (byte) 0x94, (byte) 0x9F, (byte) 0xE3, (byte) 0x83, - (byte) 0x93, (byte) 0xE3, (byte) 0x83, (byte) 0xBC, (byte) 0xE3, - (byte) 0x83, (byte) 0xAB}; - - ArrayList mutations; - // non-utf8 is fine for data - mutations = new ArrayList<>(1); - mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), + Map dummyAttributes = null; + boolean writeToWal = false; + + // Test UTF-8 handling + byte[] invalid = { + (byte) 'f', (byte) 'o', (byte) 'o', (byte) '-', (byte) 0xfc, (byte) 0xa1, (byte) 0xa1, + (byte) 0xa1, (byte) 0xa1 + }; + byte[] valid = { + (byte) 'f', (byte) 'o', (byte) 'o', (byte) '-', (byte) 0xE7, (byte) 0x94, (byte) 0x9F, + (byte) 0xE3, (byte) 0x83, (byte) 0x93, (byte) 0xE3, (byte) 0x83, (byte) 0xBC, (byte) 0xE3, + (byte) 0x83, (byte) 0xAB + }; + + ArrayList mutations; + // non-utf8 is fine for data + mutations = new ArrayList<>(1); + mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("foo")), - mutations, dummyAttributes); - + client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("foo")), mutations, + dummyAttributes); - // this row name is valid utf8 - mutations = new ArrayList<>(1); - mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(valid), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), 
ByteBuffer.wrap(valid), mutations, dummyAttributes); + // this row name is valid utf8 + mutations = new ArrayList<>(1); + mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(valid), + writeToWal)); + client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(valid), mutations, dummyAttributes); - // non-utf8 is now allowed in row names because HBase stores values as binary + // non-utf8 is now allowed in row names because HBase stores values as binary - mutations = new ArrayList<>(1); - mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(invalid), mutations, dummyAttributes); + mutations = new ArrayList<>(1); + mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), + ByteBuffer.wrap(invalid), writeToWal)); + client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(invalid), mutations, dummyAttributes); + + // Run a scanner on the rows we just created + ArrayList columnNames = new ArrayList<>(); + columnNames.add(ByteBuffer.wrap(bytes("entry:"))); + + System.out.println("Starting scanner..."); + int scanner = client.scannerOpen(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("")), columnNames, + dummyAttributes); + + while (true) { + List entry = client.scannerGet(scanner); + if (entry.isEmpty()) { + break; + } + printRow(entry); + } + // Run some operations on a bunch of rows + for (int i = 100; i >= 0; --i) { + testOperations(client, t, dummyAttributes, writeToWal, i); + } - // Run a scanner on the rows we just created - ArrayList columnNames = new ArrayList<>(); - columnNames.add(ByteBuffer.wrap(bytes("entry:"))); + // scan all rows/columnNames + columnNames.clear(); + for (ColumnDescriptor col2 : client.getColumnDescriptors(ByteBuffer.wrap(t)).values()) { + System.out.println("column with name: " + new String(col2.name.array())); + System.out.println(col2.toString()); - System.out.println("Starting scanner..."); - int scanner = client.scannerOpen(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("")), columnNames, dummyAttributes); + columnNames.add(col2.name); + } - while (true) { - List entry = client.scannerGet(scanner); - if (entry.isEmpty()) { - break; - } - printRow(entry); - } + System.out.println("Starting scanner..."); + scanner = client.scannerOpenWithStop(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("00020")), + ByteBuffer.wrap(bytes("00040")), columnNames, dummyAttributes); + + while (true) { + List entry = client.scannerGet(scanner); + if (entry.isEmpty()) { + System.out.println("Scanner finished"); + break; + } + printRow(entry); + } - // - // Run some operations on a bunch of rows - // - for (int i = 100; i >= 0; --i) { - // format row keys as "00000" to "00100" - NumberFormat nf = NumberFormat.getInstance(); - nf.setMinimumIntegerDigits(5); - nf.setGroupingUsed(false); - byte[] row = bytes(nf.format(i)); - - mutations = new ArrayList<>(1); - mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("unused:")), ByteBuffer.wrap(bytes("DELETE_ME")), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); - printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); - client.deleteAllRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes); - - // sleep to force later timestamp - try { - Thread.sleep(50); - } catch (InterruptedException e) { - // no-op - } + transport.close(); + } + + private void testOperations(Hbase.Client client, byte[] t, Map dummyAttributes, 
boolean writeToWal, int i) + throws org.apache.thrift.TException { + ArrayList mutations;// format row keys as "00000" to "00100" + NumberFormat nf = NumberFormat.getInstance(); + nf.setMinimumIntegerDigits(5); + nf.setGroupingUsed(false); + byte[] row = bytes(nf.format(i)); + + mutations = new ArrayList<>(1); + mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("unused:")), + ByteBuffer.wrap(bytes("DELETE_ME")), writeToWal)); + client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); + printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); + client.deleteAllRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes); + + // sleep to force later timestamp + try { + Thread.sleep(50); + } catch (InterruptedException e) { + // no-op + } - mutations = new ArrayList<>(2); - mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:num")), ByteBuffer.wrap(bytes("0")), writeToWal)); - mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(bytes("FOO")), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); - printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); - - Mutation m; - mutations = new ArrayList<>(2); - m = new Mutation(); - m.column = ByteBuffer.wrap(bytes("entry:foo")); - m.isDelete = true; - mutations.add(m); - m = new Mutation(); - m.column = ByteBuffer.wrap(bytes("entry:num")); - m.value = ByteBuffer.wrap(bytes("-1")); - mutations.add(m); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); - printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); - - mutations = new ArrayList<>(); - mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:num")), ByteBuffer.wrap(bytes(Integer.toString(i))), writeToWal)); - mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:sqr")), ByteBuffer.wrap(bytes(Integer.toString(i * i))), writeToWal)); - client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); - printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); - - // sleep to force later timestamp - try { - Thread.sleep(50); - } catch (InterruptedException e) { - // no-op - } + mutations = new ArrayList<>(2); + mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:num")), + ByteBuffer.wrap(bytes("0")), writeToWal)); + mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), + ByteBuffer.wrap(bytes("FOO")), writeToWal)); + client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); + printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); + + Mutation m; + mutations = new ArrayList<>(2); + m = new Mutation(); + m.column = ByteBuffer.wrap(bytes("entry:foo")); + m.isDelete = true; + mutations.add(m); + m = new Mutation(); + m.column = ByteBuffer.wrap(bytes("entry:num")); + m.value = ByteBuffer.wrap(bytes("-1")); + mutations.add(m); + client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); + printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); + + mutations = new ArrayList<>(); + mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:num")), + ByteBuffer.wrap(bytes(Integer.toString(i))), writeToWal)); + mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:sqr")), + ByteBuffer.wrap(bytes(Integer.toString(i * i))), writeToWal)); 
+ client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes); + printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); + + // sleep to force later timestamp + try { + Thread.sleep(50); + } catch (InterruptedException e) { + // no-op + } - mutations.clear(); - m = new Mutation(); - m.column = ByteBuffer.wrap(bytes("entry:num")); - m.value= ByteBuffer.wrap(bytes("-999")); - mutations.add(m); - m = new Mutation(); - m.column = ByteBuffer.wrap(bytes("entry:sqr")); - m.isDelete = true; - client.mutateRowTs(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, 1, dummyAttributes); // shouldn't override latest - printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); - - List versions = client.getVer(ByteBuffer.wrap(t), ByteBuffer.wrap(row), ByteBuffer.wrap(bytes("entry:num")), 10, dummyAttributes); - printVersions(ByteBuffer.wrap(row), versions); - if (versions.isEmpty()) { - System.out.println("FATAL: wrong # of versions"); - System.exit(-1); - } + mutations.clear(); + m = new Mutation(); + m.column = ByteBuffer.wrap(bytes("entry:num")); + m.value= ByteBuffer.wrap(bytes("-999")); + mutations.add(m); + m = new Mutation(); + m.column = ByteBuffer.wrap(bytes("entry:sqr")); + m.isDelete = true; + // shouldn't override latest + client.mutateRowTs(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, 1, dummyAttributes); + printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes)); + + List versions = client.getVer(ByteBuffer.wrap(t), ByteBuffer.wrap(row), + ByteBuffer.wrap(bytes("entry:num")), 10, dummyAttributes); + printVersions(ByteBuffer.wrap(row), versions); + + if (versions.isEmpty()) { + System.out.println("FATAL: wrong # of versions"); + System.exit(-1); + } - List result = client.get(ByteBuffer.wrap(t), ByteBuffer.wrap(row), ByteBuffer.wrap(bytes("entry:foo")), dummyAttributes); - if (!result.isEmpty()) { - System.out.println("FATAL: shouldn't get here"); - System.exit(-1); - } + List result = client.get(ByteBuffer.wrap(t), ByteBuffer.wrap(row), + ByteBuffer.wrap(bytes("entry:foo")), dummyAttributes); - System.out.println(""); - } + if (!result.isEmpty()) { + System.out.println("FATAL: shouldn't get here"); + System.exit(-1); + } - // scan all rows/columnNames + System.out.println(""); + } - columnNames.clear(); - for (ColumnDescriptor col2 : client.getColumnDescriptors(ByteBuffer.wrap(t)).values()) { - System.out.println("column with name: " + new String(col2.name.array())); - System.out.println(col2.toString()); + private byte[] createDemoTable(Hbase.Client client) throws org.apache.thrift.TException { + byte[] t = bytes("demo_table"); - columnNames.add(col2.name); - } + // Scan all tables, look for the demo table and delete it. 
+ System.out.println("scanning tables..."); - System.out.println("Starting scanner..."); - scanner = client.scannerOpenWithStop(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("00020")), ByteBuffer.wrap(bytes("00040")), columnNames, dummyAttributes); + for (ByteBuffer name : client.getTableNames()) { + System.out.println(" found: " + utf8(name.array())); - while (true) { - List entry = client.scannerGet(scanner); - if (entry.isEmpty()) { - System.out.println("Scanner finished"); - break; - } - printRow(entry); + if (utf8(name.array()).equals(utf8(t))) { + if (client.isTableEnabled(name)) { + System.out.println(" disabling table: " + utf8(name.array())); + client.disableTable(name); } - transport.close(); + System.out.println(" deleting table: " + utf8(name.array())); + client.deleteTable(name); + } } - private void printVersions(ByteBuffer row, List versions) { - StringBuilder rowStr = new StringBuilder(); - for (TCell cell : versions) { - rowStr.append(utf8(cell.value.array())); - rowStr.append("; "); - } - System.out.println("row: " + utf8(row.array()) + ", values: " + rowStr); + // Create the demo table with two column families, entry: and unused: + ArrayList columns = new ArrayList<>(2); + ColumnDescriptor col; + col = new ColumnDescriptor(); + col.name = ByteBuffer.wrap(bytes("entry:")); + col.timeToLive = Integer.MAX_VALUE; + col.maxVersions = 10; + columns.add(col); + col = new ColumnDescriptor(); + col.name = ByteBuffer.wrap(bytes("unused:")); + col.timeToLive = Integer.MAX_VALUE; + columns.add(col); + + System.out.println("creating table: " + utf8(t)); + + try { + client.createTable(ByteBuffer.wrap(t), columns); + } catch (AlreadyExists ae) { + System.out.println("WARN: " + ae.message); + } + return t; + } + + private void printVersions(ByteBuffer row, List versions) { + StringBuilder rowStr = new StringBuilder(); + for (TCell cell : versions) { + rowStr.append(utf8(cell.value.array())); + rowStr.append("; "); + } + System.out.println("row: " + utf8(row.array()) + ", values: " + rowStr); + } + + private void printRow(TRowResult rowResult) { + // copy values into a TreeMap to get them in sorted order + TreeMap sorted = new TreeMap<>(); + for (Map.Entry column : rowResult.columns.entrySet()) { + sorted.put(utf8(column.getKey().array()), column.getValue()); } - private void printRow(TRowResult rowResult) { - // copy values into a TreeMap to get them in sorted order - - TreeMap sorted = new TreeMap<>(); - for (Map.Entry column : rowResult.columns.entrySet()) { - sorted.put(utf8(column.getKey().array()), column.getValue()); - } - - StringBuilder rowStr = new StringBuilder(); - for (SortedMap.Entry entry : sorted.entrySet()) { - rowStr.append(entry.getKey()); - rowStr.append(" => "); - rowStr.append(utf8(entry.getValue().value.array())); - rowStr.append("; "); - } - System.out.println("row: " + utf8(rowResult.row.array()) + ", cols: " + rowStr); + StringBuilder rowStr = new StringBuilder(); + for (SortedMap.Entry entry : sorted.entrySet()) { + rowStr.append(entry.getKey()); + rowStr.append(" => "); + rowStr.append(utf8(entry.getValue().value.array())); + rowStr.append("; "); } + System.out.println("row: " + utf8(rowResult.row.array()) + ", cols: " + rowStr); + } - private void printRow(List rows) { - for (TRowResult rowResult : rows) { - printRow(rowResult); - } + private void printRow(List rows) { + for (TRowResult rowResult : rows) { + printRow(rowResult); } + } - static Subject getSubject() throws Exception { - if (!secure) return new Subject(); + static Subject getSubject() throws 
Exception { + if (!secure) { + return new Subject(); + } - /* - * To authenticate the DemoClient, kinit should be invoked ahead. - * Here we try to get the Kerberos credential from the ticket cache. - */ - LoginContext context = new LoginContext("", new Subject(), null, + /* + * To authenticate the DemoClient, kinit should be invoked ahead. + * Here we try to get the Kerberos credential from the ticket cache. + */ + LoginContext context = new LoginContext("", new Subject(), null, new Configuration() { @Override public AppConfigurationEntry[] getAppConfigurationEntry(String name) { @@ -403,12 +425,12 @@ public class DemoClient { options.put("debug", "true"); return new AppConfigurationEntry[]{ - new AppConfigurationEntry("com.sun.security.auth.module.Krb5LoginModule", + new AppConfigurationEntry("com.sun.security.auth.module.Krb5LoginModule", AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options)}; } }); - context.login(); - return context.getSubject(); - } + context.login(); + return context.getSubject(); + } } diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java index 25fdc4af34..013df686eb 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java @@ -175,7 +175,8 @@ public class HttpDoAsClient { Map columnMap = refresh(client, httpClient) .getColumnDescriptors(ByteBuffer.wrap(t)); for (ColumnDescriptor col2 : columnMap.values()) { - System.out.println(" column: " + utf8(col2.name.array()) + ", maxVer: " + Integer.toString(col2.maxVersions)); + System.out.println(" column: " + utf8(col2.name.array()) + ", maxVer: " + + Integer.toString(col2.maxVersions)); } transport.close(); @@ -252,7 +253,10 @@ public class HttpDoAsClient { } static Subject getSubject() throws Exception { - if (!secure) return new Subject(); + if (!secure) { + return new Subject(); + } + /* * To authenticate the DemoClient, kinit should be invoked ahead. * Here we try to get the Kerberos credential from the ticket cache. 
@@ -276,7 +280,7 @@ public class HttpDoAsClient {
     options.put("debug", "true");
 
     return new AppConfigurationEntry[]{
-      new AppConfigurationEntry("com.sun.security.auth.module.Krb5LoginModule",
+      new AppConfigurationEntry("com.sun.security.auth.module.Krb5LoginModule",
         AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
         options)};
   }
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java
index 666997e978..7f65ae208a 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java
@@ -55,7 +55,8 @@ public class DemoClient {
   public static void main(String[] args) throws Exception {
     System.out.println("Thrift2 Demo");
     System.out.println("Usage: DemoClient [host=localhost] [port=9090] [secure=false]");
-    System.out.println("This demo assumes you have a table called \"example\" with a column family called \"family1\"");
+    System.out.println("This demo assumes you have a table called \"example\" with a column " +
+        "family called \"family1\"");
 
     // use passed in arguments instead of defaults
     if (args.length >= 1) {
@@ -149,7 +150,9 @@ public class DemoClient {
   }
 
   static Subject getSubject() throws Exception {
-    if (!secure) return new Subject();
+    if (!secure) {
+      return new Subject();
+    }
 
     /*
      * To authenticate the DemoClient, kinit should be invoked ahead.
@@ -173,8 +176,8 @@ public class DemoClient {
     }
     options.put("debug", "true");
 
-    return new AppConfigurationEntry[]{
-      new AppConfigurationEntry("com.sun.security.auth.module.Krb5LoginModule",
+    return new AppConfigurationEntry[] {
+      new AppConfigurationEntry("com.sun.security.auth.module.Krb5LoginModule",
         AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
         options)};
   }
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBCell.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBCell.java
index c063aa9bcb..f48fa50cc0 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBCell.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/types/PBCell.java
@@ -19,11 +19,12 @@ package org.apache.hadoop.hbase.types;
 
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.CodedOutputStream;
-import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
-import org.apache.hadoop.hbase.util.PositionedByteRange;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
+import org.apache.hadoop.hbase.util.PositionedByteRange;
+
 /**
  * An example for using protobuf objects with {@link DataType} API.
  */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
similarity index 96%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
rename to hbase-examples/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
index 4eb1ae4973..62e752f6bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
@@ -261,8 +261,11 @@ public class TestZooKeeperTableArchiveClient {
     for (Path file : files) {
       String tableName = file.getParent().getParent().getParent().getName();
       // check to which table this file belongs
-      if (tableName.equals(otherTable)) initialCountForOtherTable++;
-      else if (tableName.equals(STRING_TABLE_NAME)) initialCountForPrimary++;
+      if (tableName.equals(otherTable)) {
+        initialCountForOtherTable++;
+      } else if (tableName.equals(STRING_TABLE_NAME)) {
+        initialCountForPrimary++;
+      }
     }
 
     assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
@@ -285,11 +288,13 @@ public class TestZooKeeperTableArchiveClient {
       String tableName = file.getParent().getParent().getParent().getName();
       // ensure we don't have files from the non-archived table
       assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
-      if (tableName.equals(STRING_TABLE_NAME)) archivedForPrimary++;
+      if (tableName.equals(STRING_TABLE_NAME)) {
+        archivedForPrimary++;
+      }
     }
 
-    assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary,
-      archivedForPrimary);
+    assertEquals("Not all archived files for the primary table were retained.",
+      initialCountForPrimary, archivedForPrimary);
 
     // but we still have the archive directory
     assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
@@ -366,7 +371,10 @@ public class TestZooKeeperTableArchiveClient {
 
         @SuppressWarnings("unchecked")
         Iterable<FileStatus> ret = (Iterable<FileStatus>) invocation.callRealMethod();
-        if (counter[0] >= expected) finished.countDown();
+        if (counter[0] >= expected) {
+          finished.countDown();
+        }
+
         return ret;
       }
     }).when(delegateSpy).getDeletableFiles(Mockito.anyListOf(FileStatus.class));
@@ -391,7 +399,11 @@ public class TestZooKeeperTableArchiveClient {
     for (FileStatus file : files) {
       if (file.isDirectory()) {
         List<Path> subFiles = getAllFiles(fs, file.getPath());
-        if (subFiles != null) allFiles.addAll(subFiles);
+
+        if (subFiles != null) {
+          allFiles.addAll(subFiles);
+        }
+
         continue;
       }
       allFiles.add(file.getPath());
@@ -418,7 +430,7 @@ public class TestZooKeeperTableArchiveClient {
    * Create a new hfile in the passed region
    * @param region region to operate on
    * @param columnFamily family for which to add data
-   * @throws IOException
+   * @throws IOException if doing the put or flush fails
    */
   private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOException {
     // put one row in the region
@@ -430,7 +442,7 @@ public class TestZooKeeperTableArchiveClient {
   }
 
   /**
-   * @param cleaner
+   * @param cleaner the cleaner to use
    */
   private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
       throws InterruptedException {
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/client/example/TestHttpProxyExample.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/client/example/TestHttpProxyExample.java
index afa257a02f..6324f3032d 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/client/example/TestHttpProxyExample.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/client/example/TestHttpProxyExample.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.client.example;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams;
-
 import java.nio.charset.StandardCharsets;
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -41,6 +39,8 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams;
+
 @Category({ ClientTests.class, MediumTests.class })
 public class TestHttpProxyExample {
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java
index 77f98999ea..22203e09d5 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java
@@ -78,7 +78,8 @@ public class TestRefreshHFilesEndpoint {
     CONF.set(HConstants.REGION_IMPL, regionImpl);
     CONF.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
 
-    CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, RefreshHFilesEndpoint.class.getName());
+    CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+        RefreshHFilesEndpoint.class.getName());
     cluster = HTU.startMiniCluster(NUM_MASTER, NUM_RS);
 
     // Create table
@@ -106,8 +107,9 @@ public class TestRefreshHFilesEndpoint {
       Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
       Path familyDir = new Path(regionDir, Bytes.toString(FAMILY));
       HFileTestUtil
-        .createHFile(HTU.getConfiguration(), HTU.getTestFileSystem(), new Path(familyDir, HFILE_NAME), FAMILY,
-          QUALIFIER, Bytes.toBytes("50"), Bytes.toBytes("60"), NUM_ROWS);
+        .createHFile(HTU.getConfiguration(), HTU.getTestFileSystem(),
+          new Path(familyDir, HFILE_NAME), FAMILY, QUALIFIER, Bytes.toBytes("50"),
+          Bytes.toBytes("60"), NUM_ROWS);
     }
     assertEquals(2, HTU.getNumHFiles(TABLE_NAME, FAMILY));
     callRefreshRegionHFilesEndPoint();
@@ -125,8 +127,9 @@ public class TestRefreshHFilesEndpoint {
       RefreshHFilesClient refreshHFilesClient = new RefreshHFilesClient(CONF);
       refreshHFilesClient.refreshHFiles(TABLE_NAME);
     } catch (RetriesExhaustedException rex) {
-      if (rex.getCause() instanceof IOException)
+      if (rex.getCause() instanceof IOException) {
         throw new IOException();
+      }
     } catch (Throwable ex) {
       LOG.error(ex.toString(), ex);
       fail("Couldn't call the RefreshRegionHFilesEndpoint");
@@ -137,15 +140,15 @@ public class TestRefreshHFilesEndpoint {
     HStoreWithFaultyRefreshHFilesAPI store;
 
     public HRegionForRefreshHFilesEP(final Path tableDir, final WAL wal, final FileSystem fs,
-                                     final Configuration confParam, final RegionInfo regionInfo,
-                                     final TableDescriptor htd, final RegionServerServices rsServices) {
+        final Configuration confParam, final RegionInfo regionInfo, final TableDescriptor htd,
+        final RegionServerServices rsServices) {
       super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices);
     }
 
     @Override
     public List<HStore> getStores() {
       List<HStore> list = new ArrayList<>(stores.size());
-      /**
+      /*
        * This is used to trigger the custom definition (faulty)
        * of refresh HFiles API.
        */
@@ -165,8 +168,8 @@ public class TestRefreshHFilesEndpoint {
   }
 
   public static class HStoreWithFaultyRefreshHFilesAPI extends HStore {
-    public HStoreWithFaultyRefreshHFilesAPI(final HRegion region, final ColumnFamilyDescriptor family,
-                                            final Configuration confParam) throws IOException {
+    public HStoreWithFaultyRefreshHFilesAPI(final HRegion region,
+        final ColumnFamilyDescriptor family, final Configuration confParam) throws IOException {
       super(region, family, confParam);
     }
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMapReduceExamples.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMapReduceExamples.java
index 089dafdbe5..a7f23927b4 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMapReduceExamples.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMapReduceExamples.java
@@ -15,16 +15,27 @@
 package org.apache.hadoop.hbase.mapreduce;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.IndexBuilder.Map;
 import org.apache.hadoop.hbase.mapreduce.SampleUploader.Uploader;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.LauncherSecurityManager;
 import org.apache.hadoop.io.LongWritable;
@@ -38,12 +49,6 @@ import org.junit.experimental.categories.Category;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import java.io.ByteArrayOutputStream;
-import java.io.PrintStream;
-
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
-
 @Category({MapReduceTests.class, LargeTests.class})
 public class TestMapReduceExamples {
   private static HBaseTestingUtility util = new HBaseTestingUtility();
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java
index 77c9e22d53..883de103a9 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java
@@ -25,10 +25,10 @@ import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.PositionedByteRange;
 import org.apache.hadoop.hbase.util.SimplePositionedByteRange;
-- 
2.14.3 (Apple Git-98)