rpc refreshHFiles(.hbase.pb.RefreshHFilesRequest) returns (.hbase.pb.RefreshHFilesResponse);
+ */
+ public abstract void refreshHFiles(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest request,
+ com.google.protobuf.RpcCallbackrpc refreshHFiles(.hbase.pb.RefreshHFilesRequest) returns (.hbase.pb.RefreshHFilesResponse);
+ */
+ public abstract void refreshHFiles(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos.RefreshHFilesRequest request,
+ com.google.protobuf.RpcCallback+ *
+ * For the protocol buffer definition of the RefreshHFilesService, see the source file located under + * hbase-protocol/src/main/protobuf/RefreshHFiles.proto. + *
+ */ +public class RefreshHFilesEndpoint extends RefreshHFilesProtos.RefreshHFilesService + implements Coprocessor, CoprocessorService { + protected static final Log LOG = LogFactory.getLog(RefreshHFilesEndpoint.class); + private RegionCoprocessorEnvironment env; + + public RefreshHFilesEndpoint() { + } + + @Override + public Service getService() { + return this; + } + + @Override + public void refreshHFiles(RpcController controller, RefreshHFilesProtos.RefreshHFilesRequest request, + RpcCallback+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.coprocessor;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.HFileTestUtil;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+
+@Category(MediumTests.class)
+public class TestRefreshHFilesEndpoint {
+ // Shared mini-cluster harness; also the source of the test Configuration.
+ private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
+ private static final int NUM_MASTER = 1;
+ private static final int NUM_RS = 2;
+ private static final TableName TABLE_NAME = TableName.valueOf("testRefreshRegionHFilesEP");
+ private static final byte[] FAMILY = Bytes.toBytes("family");
+ private static final byte[] QUALIFIER = Bytes.toBytes("qualifier");
+ // Single split point, so the table starts out with two regions.
+ private static final byte[][] SPLIT_KEY = new byte[][] { Bytes.toBytes("10") };
+ // Row count written into each manually created HFile.
+ private static final int NUM_ROWS = 5;
+ // File name used for the HFiles planted directly in the region family dirs.
+ private static final String HFILE_NAME = "123abcdef";
+
+ private static Configuration CONF = null;
+ private static MiniHBaseCluster cluster;
+ private static HTableDescriptor desc;
+ private static Admin hbaseAdmin;
+ private static HTable table;
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ CONF = HTU.getConfiguration();
+ CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, RefreshHFilesEndpoint.class.getName());
+
+ cluster = HTU.startMiniCluster(NUM_MASTER, NUM_RS);
+
+ // Create table
+ desc = new HTableDescriptor(TABLE_NAME);
+ desc.addFamily(new HColumnDescriptor(FAMILY));
+ hbaseAdmin = cluster.getMaster().getConnection().getAdmin();
+ hbaseAdmin.createTable(desc, SPLIT_KEY);
+ table = new HTable(HTU.getConfiguration(), TABLE_NAME);
+
+ // this will create 2 regions spread across slaves
+ HTU.loadNumericRows(table, FAMILY, 1, 20);
+ HTU.flush(TABLE_NAME);
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ HTU.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testRefreshRegionHFilesEndpoint() throws Throwable {
+ Path tableDir = desc.getTableDir(HTU.getDefaultRootDirPath(), TABLE_NAME.toBytes());
+ for (Region region : cluster.getRegions(TABLE_NAME)) {
+ Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
+ Path familyDir = new Path(regionDir, Bytes.toString(FAMILY));
+ HFileTestUtil
+ .createHFile(HTU.getConfiguration(), HTU.getTestFileSystem(), new Path(familyDir, HFILE_NAME), FAMILY,
+ QUALIFIER, Bytes.toBytes("50"), Bytes.toBytes("60"), NUM_ROWS);
+ }
+ assertEquals(2, HTU.getNumHFiles(TABLE_NAME, FAMILY));
+ callRefreshRegionHFilesEndPoint();
+ assertEquals(4, HTU.getNumHFiles(TABLE_NAME, FAMILY));
+ }
+
+ private void callRefreshRegionHFilesEndPoint() throws Throwable {
+ final RefreshHFilesProtos.RefreshHFilesRequest request = RefreshHFilesProtos.RefreshHFilesRequest
+ .getDefaultInstance();
+ table.coprocessorService(RefreshHFilesProtos.RefreshHFilesService.class, null, null,
+ new Batch.Call