diff --git bin/hirb.rb bin/hirb.rb index 94b5cdb..d0295d6 100644 --- bin/hirb.rb +++ bin/hirb.rb @@ -115,7 +115,7 @@ org.apache.log4j.Logger.getLogger("org.apache.zookeeper").setLevel(log_level) org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase").setLevel(log_level) # Require HBase now after setting log levels -require 'hbase' +require 'hbase_constants' # Load hbase shell require 'shell' @@ -123,15 +123,11 @@ require 'shell' # Require formatter require 'shell/formatter' -# Presume console format. -# Formatter takes an :output_stream parameter, if you don't want STDOUT. -@formatter = Shell::Formatter::Console.new - # Setup the HBase module. Create a configuration. @hbase = Hbase::Hbase.new # Setup console -@shell = Shell::Shell.new(@hbase, @formatter, interactive) +@shell = Shell::Shell.new(@hbase, interactive) @shell.debug = @shell_debug # Add commands to this namespace diff --git bin/region_status.rb bin/region_status.rb index 55bc672..91873cb 100644 --- bin/region_status.rb +++ bin/region_status.rb @@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.TableName import org.apache.hadoop.hbase.HConstants import org.apache.hadoop.hbase.MasterNotRunningException import org.apache.hadoop.hbase.client.HBaseAdmin -import org.apache.hadoop.hbase.client.HTable +import org.apache.hadoop.hbase.client.Table import org.apache.hadoop.hbase.client.Scan import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter import org.apache.hadoop.hbase.util.Bytes diff --git dev-support/findHangingTests.py dev-support/findHangingTests.py index deccc8b..28f4895 100755 --- dev-support/findHangingTests.py +++ dev-support/findHangingTests.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python ## # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -19,64 +19,72 @@ # script to find hanging test from Jenkins build output # usage: ./findHangingTests.py # -import urllib2 +import re +import requests import sys -import string -if len(sys.argv) != 2 : - print "ERROR : Provide the jenkins job console URL as the only argument." 
- exit(1) -print "Fetching " + sys.argv[1] -response = urllib2.urlopen(sys.argv[1]) -i = 0; -tests = {} -failed_tests = {} -summary = 0 -host = False -patch = False -branch = False -while True: - n = response.readline() - if n == "" : - break - if not host and n.find("Building remotely on") >= 0: - host = True - print n.strip() - continue - if not patch and n.find("Testing patch for ") >= 0: - patch = True - print n.strip() - continue - if not branch and n.find("Testing patch on branch ") >= 0: - branch = True - print n.strip() - continue - if n.find("PATCH APPLICATION FAILED") >= 0: - print "PATCH APPLICATION FAILED" - sys.exit(1) - if summary == 0 and n.find("Running tests.") >= 0: - summary = summary + 1 - continue - if summary == 1 and n.find("[INFO] Reactor Summary:") >= 0: - summary = summary + 1 - continue - if summary == 2 and n.find("[INFO] Apache HBase ") >= 0: - sys.stdout.write(n) - continue - if n.find("org.apache.hadoop.hbase") < 0: - continue - test_name = string.strip(n[n.find("org.apache.hadoop.hbase"):len(n)]) - if n.find("Running org.apache.hadoop.hbase") > -1 : - tests[test_name] = False - if n.find("Tests run:") > -1 : - if n.find("FAILURE") > -1 or n.find("ERROR") > -1: - failed_tests[test_name] = True - tests[test_name] = True -response.close() -print "Printing hanging tests" -for key, value in tests.iteritems(): - if value == False: - print "Hanging test : " + key -print "Printing Failing tests" -for key, value in failed_tests.iteritems(): - print "Failing test : " + key +# Returns [[all tests], [failed tests], [timeout tests], [hanging tests]] +# Definitions: +# All tests: All testcases which were run. +# Hanging test: A testcase which started but never finished. +# Failed test: Testcase which encountered any kind of failure. It can be failing atomic tests, +# timed out tests, etc +# Timeout test: A Testcase which encountered timeout. Naturally, all timeout tests will be +# included in failed tests. +def get_bad_tests(console_url): + response = requests.get(console_url) + if response.status_code != 200: + print "Error getting consoleText. Response = {} {}".format( + response.status_code, response.reason) + return {} + + all_tests = set() + hanging_tests = set() + failed_tests = set() + timeout_tests = set() + for line in response.content.splitlines(): + result1 = re.match("^Running org.apache.hadoop.hbase.(\w*\.)*(\w*)", line) + if result1: + test_case = result1.group(2) + if test_case in all_tests: + print ("ERROR! Multiple tests with same name '{}'. Might get wrong results " + "for this test.".format(test_case)) + else: + hanging_tests.add(test_case) + all_tests.add(test_case) + result2 = re.match("^Tests run:.*- in org.apache.hadoop.hbase.(\w*\.)*(\w*)", line) + if result2: + test_case = result2.group(2) + if "FAILURE!" in line: + failed_tests.add(test_case) + if test_case not in hanging_tests: + print ("ERROR! No test '{}' found in hanging_tests. Might get wrong results " + "for this test.".format(test_case)) + else: + hanging_tests.remove(test_case) + result3 = re.match("^\s+(\w*).*\sTestTimedOut", line) + if result3: + test_case = result3.group(1) + timeout_tests.add(test_case) + print "Result > total tests: {:4} failed : {:4} timedout : {:4} hanging : {:4}".format( + len(all_tests), len(failed_tests), len(timeout_tests), len(hanging_tests)) + return [all_tests, failed_tests, timeout_tests, hanging_tests] + +if __name__ == "__main__": + if len(sys.argv) != 2 : + print "ERROR : Provide the jenkins job console URL as the only argument." 
+ sys.exit(1) + + print "Fetching {}".format(sys.argv[1]) + [all_tests, failed_tests, timedout_tests, hanging_tests] = get_bad_tests(sys.argv[1]) + print "Found {} hanging tests:".format(len(hanging_tests)) + for test in hanging_tests: + print test + print "\n" + print "Found {} failed tests of which {} timed out:".format( + len(failed_tests), len(timedout_tests)) + for test in failed_tests: + print "{0} {1}".format(test, ("(Timed Out)" if test in timedout_tests else "")) + + print ("\nA test may have had 0 or more atomic test failures before it timed out. So a " + "'Timed Out' test may have other errors too.") diff --git dev-support/flaky-dashboard-template.html dev-support/flaky-dashboard-template.html new file mode 100644 index 0000000..77dfc86 --- /dev/null +++ dev-support/flaky-dashboard-template.html @@ -0,0 +1,122 @@ + + + + + Apache HBase Flaky Dashboard + + + +

+ +      + + Apache HBase Flaky Tests Dashboard + +

+

+{% set counter = 0 %} +{% for url in results %} +{% set result = results[url] %} +{# Dedup ids since test names may duplicate across urls #} +{% set counter = counter + 1 %} + Job : {{ url |e }} + 🔗 +

+ + + + + + + + {% for test in result %} + {% set all = result[test]['all'] %} + {% set failed = result[test]['failed'] %} + {% set timeout = result[test]['timeout'] %} + {% set hanging = result[test]['hanging'] %} + {% set success = all.difference(failed).difference(hanging) %} + + + {% set flakyness = + (failed|length + hanging|length) * 100 / all|length %} + {% if flakyness == 100 %} + + + + + {% endfor %} +
Test NameFlakynessFailed/Timeout/HangingRun Ids
{{ test |e }} + {% else %} + + {% endif %} + {{ "{:.1f}% ({} / {})".format( + flakyness, failed|length + hanging|length, all|length) }} + + {{ failed|length }} / {{ timeout|length }} / {{ hanging|length }} + + {% set id = "details_" ~ test ~ "_" ~ counter %} + +
+ +
+


+{% endfor %} + + + diff --git dev-support/hbase-personality.sh dev-support/hbase-personality.sh index 6797090..3b6ebad 100755 --- dev-support/hbase-personality.sh +++ dev-support/hbase-personality.sh @@ -104,6 +104,7 @@ function personality_modules if [[ -n "${excludes}" ]]; then extra="${extra} -Dtest.exclude.pattern=${excludes}" fi + rm excludes else echo "Wget error $? in fetching excludes file from url" \ "${EXCLUDE_TESTS_URL}. Ignoring and proceeding." @@ -115,6 +116,7 @@ function personality_modules if [[ -n "${includes}" ]]; then extra="${extra} -Dtest=${includes}" fi + rm includes else echo "Wget error $? in fetching includes file from url" \ "${INCLUDE_TESTS_URL}. Ignoring and proceeding." diff --git dev-support/jenkins-tools/README.md dev-support/jenkins-tools/README.md deleted file mode 100644 index 9e1905f..0000000 --- dev-support/jenkins-tools/README.md +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -jenkins-tools -============= - -A tool which pulls test case results from Jenkins server. 
It displays a union of failed test cases -from the last 15(by default and actual number of jobs can be less depending on availablity) runs -recorded in Jenkins sever and track how each of them are performed for all the last 15 runs(passed, -not run or failed) - -Pre-requirement(run under folder jenkins-tools) - Please download jenkins-client from https://github.com/cosmin/jenkins-client - 1) git clone git://github.com/cosmin/jenkins-client.git - 2) make sure the dependency jenkins-client version in ./buildstats/pom.xml matches the - downloaded jenkins-client(current value is 0.1.6-SNAPSHOT) - -Build command(run under folder jenkins-tools): - - mvn clean package - -Usage are: - - java -jar ./buildstats/target/buildstats.jar [number of last most recent jobs to check] - -Sample commands are: - - java -jar ./buildstats/target/buildstats.jar https://builds.apache.org HBase-TRUNK - -Sample output(where 1 means "PASSED", 0 means "NOT RUN AT ALL", -1 means "FAILED"): - -Failed Test Cases 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3632 3633 3634 3635 - -org.apache.hadoop.hbase.catalog.testmetareadereditor.testretrying 1 1 -1 0 1 1 1 1 -1 0 1 1 1 1 -org.apache.hadoop.hbase.client.testadmin.testdeleteeditunknowncolumnfamilyandortable 0 1 1 1 -1 0 1 1 0 1 1 1 1 1 -org.apache.hadoop.hbase.client.testfromclientsidewithcoprocessor.testclientpoolthreadlocal 1 1 1 1 1 1 1 1 0 1 1 -1 0 1 -org.apache.hadoop.hbase.client.testhcm.testregioncaching 1 1 -1 0 1 1 -1 0 -1 0 -1 0 1 1 -org.apache.hadoop.hbase.client.testmultiparallel.testflushcommitswithabort 1 1 1 1 1 1 1 1 -1 0 1 1 1 1 -org.apache.hadoop.hbase.client.testscannertimeout.test3686a 1 1 1 1 1 1 1 1 -1 0 1 1 1 1 -org.apache.hadoop.hbase.coprocessor.example.testrowcountendpoint.org.apache.hadoop.hbase.coprocessor.example.testrowcountendpoint 0 -1 0 -1 0 0 0 -1 0 0 0 0 0 0 -org.apache.hadoop.hbase.coprocessor.example.testzookeeperscanpolicyobserver.org.apache.hadoop.hbase.coprocessor.example.testzookeeperscanpolicyobserver 0 -1 0 -1 0 0 0 -1 0 0 0 0 0 0 -org.apache.hadoop.hbase.master.testrollingrestart.testbasicrollingrestart 1 1 1 1 -1 0 1 1 1 1 1 1 -1 0 -org.apache.hadoop.hbase.regionserver.testcompactionstate.testmajorcompaction 1 1 -1 0 1 1 1 1 1 1 1 1 1 1 -org.apache.hadoop.hbase.regionserver.testcompactionstate.testminorcompaction 1 1 -1 0 1 1 1 1 1 1 1 1 1 1 -org.apache.hadoop.hbase.replication.testreplication.loadtesting 1 1 1 1 1 1 1 1 1 -1 0 1 1 1 -org.apache.hadoop.hbase.rest.client.testremoteadmin.org.apache.hadoop.hbase.rest.client.testremoteadmin 0 0 0 0 0 0 0 0 -1 0 0 0 0 0 -org.apache.hadoop.hbase.rest.client.testremotetable.org.apache.hadoop.hbase.rest.client.testremotetable 0 0 0 0 0 0 0 0 -1 0 0 0 0 0 -org.apache.hadoop.hbase.security.access.testtablepermissions.testbasicwrite 0 1 1 1 1 1 1 1 1 1 1 1 1 -1 -org.apache.hadoop.hbase.testdrainingserver.testdrainingserverwithabort 1 1 1 1 1 -1 0 1 1 1 1 1 -1 0 -org.apache.hadoop.hbase.util.testhbasefsck.testregionshouldnotbedeployed 1 1 1 1 1 1 -1 0 -1 0 -1 -1 0 -1 - - diff --git dev-support/jenkins-tools/buildstats/pom.xml dev-support/jenkins-tools/buildstats/pom.xml deleted file mode 100644 index 4149dc7..0000000 --- dev-support/jenkins-tools/buildstats/pom.xml +++ /dev/null @@ -1,87 +0,0 @@ - - - - 4.0.0 - - org.apache.hbase - buildstats - 1.0 - jar - buildstats - - - UTF-8 - - - - - com.offbytwo.jenkins - jenkins-client - 0.1.6-SNAPSHOT - - - - - - - org.apache.maven.plugins - maven-jar-plugin - 2.4 - true - - - - true - true - - - - - - - maven-assembly-plugin - - - - true - 
org.apache.hadoop.hbase.devtools.buildstats.TestResultHistory - - - - jar-with-dependencies - - buildstats - false - - - - make-my-jar-with-dependencies - package - - single - - - - - - - - diff --git dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/BuildResultWithTestCaseDetails.java dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/BuildResultWithTestCaseDetails.java deleted file mode 100644 index ad3f0e3..0000000 --- dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/BuildResultWithTestCaseDetails.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.devtools.buildstats; - -import com.offbytwo.jenkins.model.BaseModel; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -public class BuildResultWithTestCaseDetails extends BaseModel { - - List suites; - - /* default constructor needed for Jackson */ - public BuildResultWithTestCaseDetails() { - this(new ArrayList()); - } - - public BuildResultWithTestCaseDetails(List s) { - this.suites = s; - } - - public BuildResultWithTestCaseDetails(TestSuite... s) { - this(Arrays.asList(s)); - } - - public List getSuites() { - return suites; - } - - public void setSuites(List s) { - suites = s; - } -} diff --git dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/HistoryReport.java dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/HistoryReport.java deleted file mode 100644 index 80671b2..0000000 --- dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/HistoryReport.java +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.devtools.buildstats; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.SortedSet; -import java.util.TreeSet; -import java.util.Set; - -public class HistoryReport { - private List buildsWithTestResults; - private Map historyResults; - private Map> skippedTests; - - public HistoryReport() { - buildsWithTestResults = new ArrayList(); - this.historyResults = new HashMap(); - } - - public Map getHistoryResults() { - return this.historyResults; - } - - public Map> getSkippedTests() { - return this.skippedTests; - } - - public List getBuildsWithTestResults() { - return this.buildsWithTestResults; - } - - public void setBuildsWithTestResults(List src) { - this.buildsWithTestResults = src; - } - - public void setHistoryResults(Map src, Map> skippedTests) { - this.skippedTests = skippedTests; - this.historyResults = src; - } - - public void printReport() { - System.out.printf("%-30s", "Failed Test Cases Stats"); - for (Integer i : getBuildsWithTestResults()) { - System.out.printf("%5d", i); - } - System.out.println("\n========================================================"); - SortedSet keys = new TreeSet(getHistoryResults().keySet()); - for (String failedTestCase : keys) { - System.out.println(); - int[] resultHistory = getHistoryResults().get(failedTestCase); - System.out.print(failedTestCase); - for (int i = 0; i < resultHistory.length; i++) { - System.out.printf("%5d", resultHistory[i]); - } - } - System.out.println(); - - if (skippedTests == null) return; - - System.out.printf("\n%-30s\n", "Skipped Test Cases Stats"); - for (Integer i : getBuildsWithTestResults()) { - Set tmpSkippedTests = skippedTests.get(i); - if (tmpSkippedTests == null || tmpSkippedTests.isEmpty()) continue; - System.out.printf("======= %d skipped(Or don't have) following test suites =======\n", i); - for (String skippedTestcase : tmpSkippedTests) { - System.out.println(skippedTestcase); - } - } - } -} \ No newline at end of file diff --git dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestCaseResult.java dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestCaseResult.java deleted file mode 100644 index e476cb9..0000000 --- dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestCaseResult.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.devtools.buildstats; - -public class TestCaseResult { - private String className; - private int failedSince; - private String name; - private String status; - - public String getName() { - return name; - } - - public String getClassName() { - return className; - } - - public int failedSince() { - return failedSince; - } - - public String getStatus() { - return status; - } - - public void setName(String s) { - name = s; - } - - public void setClassName(String s) { - className = s; - } - - public void setFailedSince(int s) { - failedSince = s; - } - - public void setStatus(String s) { - status = s; - } - - public String getFullName() { - return (this.className + "." + this.name).toLowerCase(); - } -} diff --git dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestResultHistory.java dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestResultHistory.java deleted file mode 100644 index 0270f91..0000000 --- dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestResultHistory.java +++ /dev/null @@ -1,260 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.devtools.buildstats; - -import com.offbytwo.jenkins.JenkinsServer; -import com.offbytwo.jenkins.client.JenkinsHttpClient; -import com.offbytwo.jenkins.model.*; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.*; - -public class TestResultHistory { - public final static String STATUS_REGRESSION = "REGRESSION"; - public final static String STATUS_FAILED = "FAILED"; - public final static String STATUS_PASSED = "PASSED"; - public final static String STATUS_FIXED = "FIXED"; - public static int BUILD_HISTORY_NUM = 15; - - private JenkinsHttpClient client; - private String jobName; - - public TestResultHistory(String apacheHTTPURL, String jobName, String userName, String passWord) - throws URISyntaxException { - this.client = new JenkinsHttpClient(new URI(apacheHTTPURL), userName, passWord); - this.jobName = jobName; - } - - public static void main(String[] args) { - - if (args.length < 2) { - printUsage(); - return; - } - - String apacheHTTPUrl = args[0]; - String jobName = args[1]; - if (args.length > 2) { - int tmpHistoryJobNum = -1; - try { - tmpHistoryJobNum = Integer.parseInt(args[2]); - } catch (NumberFormatException ex) { - // ignore - } - if (tmpHistoryJobNum > 0) { - BUILD_HISTORY_NUM = tmpHistoryJobNum; - } - } - - try { - TestResultHistory buildHistory = new TestResultHistory(apacheHTTPUrl, jobName, "", ""); - HistoryReport report = buildHistory.getReport(); - // display result in console - report.printReport(); - } catch (Exception ex) { - System.out.println("Got unexpected exception: " + ex.getMessage()); - } - } - - protected static void printUsage() { - System.out.println(" [Number of Historical Jobs to Check]"); - System.out.println("Sample Input: \"https://builds.apache.org\" " - + "\"HBase-TRUNK-on-Hadoop-2.0.0\" "); - } - - public HistoryReport getReport() { - HistoryReport report = new HistoryReport(); - - List buildWithTestResults = new ArrayList(); - Map failureStats = new HashMap(); - - try { - JenkinsServer jenkins = new JenkinsServer(this.client); - Map jobs = jenkins.getJobs(); - JobWithDetails job = jobs.get(jobName.toLowerCase()).details(); - - // build test case failures stats for the past 10 builds - Build lastBuild = job.getLastBuild(); - int startingBuildNumber = - (lastBuild.getNumber() - BUILD_HISTORY_NUM > 0) ? 
lastBuild.getNumber() - - BUILD_HISTORY_NUM + 1 : 1; - - Map> executedTestCases = - new HashMap>(); - Map> skippedTestCases = new TreeMap>(); - Set allExecutedTestCases = new HashSet(); - Map> normalizedTestSet = new HashMap>(); - String buildUrl = lastBuild.getUrl(); - for (int i = startingBuildNumber; i <= lastBuild.getNumber(); i++) { - HashMap buildExecutedTestCases = new HashMap(2048); - String curBuildUrl = buildUrl.replaceFirst("/" + lastBuild.getNumber(), "/" + i); - List failedCases = null; - try { - failedCases = getBuildFailedTestCases(curBuildUrl, buildExecutedTestCases); - buildWithTestResults.add(i); - } catch (Exception ex) { - // can't get result so skip it - continue; - } - executedTestCases.put(i, buildExecutedTestCases); - HashSet tmpSet = new HashSet(); - for (String tmpTestCase : buildExecutedTestCases.keySet()) { - allExecutedTestCases.add(tmpTestCase.substring(0, tmpTestCase.lastIndexOf("."))); - tmpSet.add(tmpTestCase.substring(0, tmpTestCase.lastIndexOf("."))); - } - normalizedTestSet.put(i, tmpSet); - - // set test result failed cases of current build - for (String curFailedTestCase : failedCases) { - if (failureStats.containsKey(curFailedTestCase)) { - int[] testCaseResultArray = failureStats.get(curFailedTestCase); - testCaseResultArray[i - startingBuildNumber] = -1; - } else { - int[] testResult = new int[BUILD_HISTORY_NUM]; - testResult[i - startingBuildNumber] = -1; - // refill previous build test results for newly failed test case - for (int k = startingBuildNumber; k < i; k++) { - HashMap tmpBuildExecutedTestCases = executedTestCases.get(k); - if (tmpBuildExecutedTestCases != null - && tmpBuildExecutedTestCases.containsKey(curFailedTestCase)) { - String statusStr = tmpBuildExecutedTestCases.get(curFailedTestCase); - testResult[k - startingBuildNumber] = convertStatusStringToInt(statusStr); - } - } - failureStats.put(curFailedTestCase, testResult); - } - - } - - // set test result for previous failed test cases - for (String curTestCase : failureStats.keySet()) { - if (!failedCases.contains(curTestCase) && buildExecutedTestCases.containsKey(curTestCase)) { - String statusVal = buildExecutedTestCases.get(curTestCase); - int[] testCaseResultArray = failureStats.get(curTestCase); - testCaseResultArray[i - startingBuildNumber] = convertStatusStringToInt(statusVal); - } - } - } - - // check which test suits skipped - for (int i = startingBuildNumber; i <= lastBuild.getNumber(); i++) { - Set skippedTests = new HashSet(); - HashMap tmpBuildExecutedTestCases = executedTestCases.get(i); - if (tmpBuildExecutedTestCases == null || tmpBuildExecutedTestCases.isEmpty()) continue; - // normalize test case names - Set tmpNormalizedTestCaseSet = normalizedTestSet.get(i); - for (String testCase : allExecutedTestCases) { - if (!tmpNormalizedTestCaseSet.contains(testCase)) { - skippedTests.add(testCase); - } - } - skippedTestCases.put(i, skippedTests); - } - - report.setBuildsWithTestResults(buildWithTestResults); - for (String failedTestCase : failureStats.keySet()) { - int[] resultHistory = failureStats.get(failedTestCase); - int[] compactHistory = new int[buildWithTestResults.size()]; - int index = 0; - for (Integer i : buildWithTestResults) { - compactHistory[index] = resultHistory[i - startingBuildNumber]; - index++; - } - failureStats.put(failedTestCase, compactHistory); - } - - report.setHistoryResults(failureStats, skippedTestCases); - - } catch (Exception ex) { - System.out.println(ex); - ex.printStackTrace(); - } - - return report; - } - - /** - * @param statusVal - * 
@return 1 means PASSED, -1 means FAILED, 0 means SKIPPED - */ - static int convertStatusStringToInt(String statusVal) { - - if (statusVal.equalsIgnoreCase(STATUS_REGRESSION) || statusVal.equalsIgnoreCase(STATUS_FAILED)) { - return -1; - } else if (statusVal.equalsIgnoreCase(STATUS_PASSED)) { - return 1; - } - - return 0; - } - - /** - * Get failed test cases of a build - * @param buildURL Jenkins build job URL - * @param executedTestCases Set of test cases which was executed for the build - * @return list of failed test case names - */ - List getBuildFailedTestCases(String buildURL, HashMap executedTestCases) - throws IOException { - List result = new ArrayList(); - - String apiPath = - urlJoin(buildURL, - "testReport?depth=10&tree=suites[cases[className,name,status,failedSince]]"); - - List suites = client.get(apiPath, BuildResultWithTestCaseDetails.class).getSuites(); - - result = getTestSuiteFailedTestcase(suites, executedTestCases); - - return result; - } - - private List getTestSuiteFailedTestcase(List suites, - HashMap executedTestCases) { - List result = new ArrayList(); - - if (suites == null) { - return result; - } - - for (TestSuite curTestSuite : suites) { - for (TestCaseResult curTestCaseResult : curTestSuite.getCases()) { - if (curTestCaseResult.getStatus().equalsIgnoreCase(STATUS_FAILED) - || curTestCaseResult.getStatus().equalsIgnoreCase(STATUS_REGRESSION)) { - // failed test case - result.add(curTestCaseResult.getFullName()); - } - executedTestCases.put(curTestCaseResult.getFullName(), curTestCaseResult.getStatus()); - } - } - - return result; - } - - String urlJoin(String path1, String path2) { - if (!path1.endsWith("/")) { - path1 += "/"; - } - if (path2.startsWith("/")) { - path2 = path2.substring(1); - } - return path1 + path2; - } -} diff --git dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestSuite.java dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestSuite.java deleted file mode 100644 index b8a7624..0000000 --- dev-support/jenkins-tools/buildstats/src/main/java/org/apache/hadoop/hbase/devtools/buildstats/TestSuite.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.devtools.buildstats; - -import com.offbytwo.jenkins.model.BaseModel; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -public class TestSuite extends BaseModel { - List cases; - - public TestSuite() { - this(new ArrayList()); - } - - public TestSuite(List s) { - this.cases = s; - } - - public TestSuite(TestCaseResult... 
s) { - this(Arrays.asList(s)); - } - - public List getCases() { - return cases; - } - - public void setCases(List s) { - cases = s; - } -} diff --git dev-support/jenkins-tools/pom.xml dev-support/jenkins-tools/pom.xml deleted file mode 100644 index 952b29a..0000000 --- dev-support/jenkins-tools/pom.xml +++ /dev/null @@ -1,36 +0,0 @@ - - - - - 4.0.0 - - org.apache.hbase - jenkins-tools - 1.0 - pom - - - jenkins-client - buildstats - - diff --git dev-support/python-requirements.txt dev-support/python-requirements.txt index df79669..e7fcf31 100644 --- dev-support/python-requirements.txt +++ dev-support/python-requirements.txt @@ -16,3 +16,6 @@ # limitations under the License. # requests +gitpython +rbtools +jinja2 diff --git dev-support/report-flakies.py dev-support/report-flakies.py index e5e66cc..676eca3 100755 --- dev-support/report-flakies.py +++ dev-support/report-flakies.py @@ -16,137 +16,171 @@ # See the License for the specific language governing permissions and # limitations under the License. -# This script uses Jenkins REST api to collect test results of given builds and generates flakyness -# data about unittests. -# Print help: ./report-flakies.py -h +# This script uses Jenkins REST api to collect test result(s) of given build/builds and generates +# flakyness data about unittests. +# Print help: report-flakies.py -h import argparse +import findHangingTests +from jinja2 import Template +import os import logging -import re import requests parser = argparse.ArgumentParser() -parser.add_argument("--max-builds", type=int, metavar="n", - help="Number of builds to analyze for each job (if available in jenkins). Default: all " - + "available builds.") +parser.add_argument("--urls", metavar="url[ max-builds]", action="append", required=True, + help="Urls to analyze, which can refer to simple projects, multi-configuration projects or " + "individual build run. Optionally, specify maximum builds to analyze for this url " + "(if available on jenkins) using space as separator. By default, all available " + "builds are analyzed.") parser.add_argument("--mvn", action="store_true", help="Writes two strings for including/excluding these flaky tests using maven flags. These " - + "strings are written to files so they can be saved as artifacts and easily imported in " - + "other projects.") + "strings are written to files so they can be saved as artifacts and easily imported in " + "other projects. Also writes timeout and failing tests in separate files for " + "reference.") parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true") -parser.add_argument( - "urls", help="Space separated list of urls (single/multi-configuration project) to analyze") args = parser.parse_args() logging.basicConfig() -logger = logging.getLogger("org.apache.hadoop.hbase.report-flakies") +logger = logging.getLogger(__name__) if args.verbose: - logger.setLevel(logging.INFO) - -# Given url of an executed build, fetches its test report, and returns dictionary from testname to -# pass/skip/fail status. -def get_build_results(build_url): - logger.info("Getting test results for %s", build_url) - url = build_url + "testReport/api/json?tree=suites[cases[className,name,status]]" - response = requests.get(url) - if response.status_code == 404: - logger.info("No test results for %s", build_url) + logger.setLevel(logging.INFO) + + +# Given url of an executed build, analyzes its console text, and returns +# [list of all tests, list of timeout tests, list of failed tests]. 
+def get_bad_tests(build_url): + logger.info("Analyzing %s", build_url) + json_response = requests.get(build_url + "/api/json").json() + if json_response["building"]: + logger.info("Skipping this build since it is in progress.") return {} - json_response = response.json() + console_url = build_url + "/consoleText" + return findHangingTests.get_bad_tests(console_url) - tests = {} - for test_cases in json_response["suites"]: - for test in test_cases["cases"]: - # Truncate initial "org.apache.hadoop.hbase." from all tests. - test_name = (test["className"] + "#" + test["name"])[24:] - tests[test_name] = test["status"] - return tests # If any url is of type multi-configuration project (i.e. has key 'activeConfigurations'), # get urls for individual jobs. -jobs_list = [] -for url in args.urls.split(): +def expand_multi_configuration_projects(urls_list): + expanded_urls = [] + for url_max_build in urls_list: + splits = url_max_build.split() + url = splits[0] + max_builds = 10000 # Some high value + if len(splits) == 2: + max_builds = int(splits[1]) + json_response = requests.get(url + "/api/json").json() + if json_response.has_key("activeConfigurations"): + for config in json_response["activeConfigurations"]: + expanded_urls.append({'url':config["url"], 'max_builds': max_builds}) + else: + expanded_urls.append({'url':url, 'max_builds': max_builds}) + return expanded_urls + + +# Set of timeout/failed tests across all given urls. +all_timeout_tests = set() +all_failed_tests = set() +all_hanging_tests = set() +# Contains { : { : { 'all': [], 'failed': [], +# 'timeout': [], 'hanging': [] } } } +url_to_bad_test_results = {} + +# Iterates over each url, gets test results and prints flaky tests. +expanded_urls = expand_multi_configuration_projects(args.urls) +for url_max_build in expanded_urls: + url = url_max_build["url"] json_response = requests.get(url + "/api/json").json() - if json_response.has_key("activeConfigurations"): - for config in json_response["activeConfigurations"]: - jobs_list.append(config["url"]) - elif json_response.has_key("builds"): - jobs_list.append(url) + if json_response.has_key("builds"): + builds = json_response["builds"] + logger.info("Analyzing job: %s", url) else: - raise Exception("Bad url ({0}).".format(url)) - -global_bad_tests = set() -# Iterates over each job, gets its test results and prints flaky tests. -for job_url in jobs_list: - logger.info("Analyzing job: %s", job_url) + builds = [{'number' : json_response["id"], 'url': url}] + logger.info("Analyzing build : %s", url) build_id_to_results = {} - builds = requests.get(job_url + "/api/json").json()["builds"] num_builds = 0 build_ids = [] - build_ids_without_result = [] + build_ids_without_tests_run = [] for build in builds: build_id = build["number"] build_ids.append(build_id) - build_result = get_build_results(build["url"]) - if len(build_result) > 0: - build_id_to_results[build_id] = build_result + result = get_bad_tests(build["url"]) + if result == {}: + continue + if len(result[0]) > 0: + build_id_to_results[build_id] = result else: - build_ids_without_result.append(build_id) + build_ids_without_tests_run.append(build_id) num_builds += 1 - if num_builds == args.max_builds: + if num_builds == url_max_build["max_builds"]: break # Collect list of bad tests. 
bad_tests = set() for build in build_id_to_results: - for test in build_id_to_results[build]: - if (build_id_to_results[build][test] == "REGRESSION" - or build_id_to_results[build][test] == "FAILED"): - bad_tests.add(test) - global_bad_tests.add(test) - - # Get total and failed build times for each bad test. - build_counts = {key:dict([('total', 0), ('failed', 0)]) for key in bad_tests} + [_, failed_tests, timeout_tests, hanging_tests] = build_id_to_results[build] + all_timeout_tests.update(timeout_tests) + all_failed_tests.update(failed_tests) + all_hanging_tests.update(hanging_tests) + # Note that timedout tests are already included in failed tests. + bad_tests.update(failed_tests.union(hanging_tests)) + + # For each bad test, get build ids where it ran, timed out, failed or hanged. + test_to_build_ids = {key : {'all' : set(), 'timeout': set(), 'failed': set(), 'hanging' : set()} + for key in bad_tests} for build in build_id_to_results: - build_results = build_id_to_results[build] - for bad_test in bad_tests: - if build_results.has_key(bad_test): - if build_results[bad_test] != "SKIPPED": # Ignore the test if it's skipped. - build_counts[bad_test]['total'] += 1 - if build_results[bad_test] == "REGRESSION": - build_counts[bad_test]['failed'] += 1 - - if len(bad_tests) > 0: - print "Job: {}".format(job_url) - print "{:>100} {:6} {:10} {}".format("Test Name", "Failed", "Total Runs", "Flakyness") - for bad_test in bad_tests: - fail = build_counts[bad_test]['failed'] - total = build_counts[bad_test]['total'] - print "{:>100} {:6} {:10} {:2.0f}%".format(bad_test, fail, total, fail*100.0/total) + [all_tests, failed_tests, timeout_tests, hanging_tests] = build_id_to_results[build] + for bad_test in test_to_build_ids: + if all_tests.issuperset([bad_test]): + test_to_build_ids[bad_test]["all"].add(build) + if timeout_tests.issuperset([bad_test]): + test_to_build_ids[bad_test]['timeout'].add(build) + if failed_tests.issuperset([bad_test]): + test_to_build_ids[bad_test]['failed'].add(build) + if hanging_tests.issuperset([bad_test]): + test_to_build_ids[bad_test]['hanging'].add(build) + url_to_bad_test_results[url] = test_to_build_ids + + if len(test_to_build_ids) > 0: + print "URL: {}".format(url) + print "{:>60} {:10} {:25} {}".format( + "Test Name", "Total Runs", "Bad Runs(failed/timeout/hanging)", "Flakyness") + for bad_test in test_to_build_ids: + failed = len(test_to_build_ids[bad_test]['failed']) + timeout = len(test_to_build_ids[bad_test]['timeout']) + hanging = len(test_to_build_ids[bad_test]['hanging']) + total = len(test_to_build_ids[bad_test]['all']) + print "{:>60} {:10} {:7} ( {:4} / {:5} / {:5} ) {:2.0f}%".format( + bad_test, total, failed + timeout, failed, timeout, hanging, + (failed + timeout) * 100.0 / total) else: print "No flaky tests founds." - if len(build_ids) == len(build_ids_without_result): + if len(build_ids) == len(build_ids_without_tests_run): print "None of the analyzed builds have test result." - print "Builds analyzed: " + str(build_ids) - print "Builds with no results: " + str(build_ids_without_result) + print "Builds analyzed: {}".format(build_ids) + print "Builds without any test runs: {}".format(build_ids_without_tests_run) print "" + +all_bad_tests = all_hanging_tests.union(all_failed_tests) if args.mvn: - # There might be multiple tests failing within each TestCase, avoid duplication of TestCase names. - test_cases = set() - for test in global_bad_tests: - test = re.sub(".*\.", "", test) # Remove package name prefix. 
- test = re.sub("#.*", "", test) # Remove individual unittest's name - test_cases.add(test) - - includes = ",".join(test_cases) + includes = ",".join(all_bad_tests) with open("./includes", "w") as inc_file: inc_file.write(includes) - inc_file.close() - excludes = "" - for test_case in test_cases: - excludes += "**/" + test_case + ".java," + excludes = ["**/{0}.java".format(bad_test) for bad_test in all_bad_tests] with open("./excludes", "w") as exc_file: - exc_file.write(excludes) - exc_file.close() + exc_file.write(",".join(excludes)) + + with open("./timeout", "w") as file: + file.write(",".join(all_timeout_tests)) + + with open("./failed", "w") as file: + file.write(",".join(all_failed_tests)) + +dev_support_dir = os.path.dirname(os.path.abspath(__file__)) +with open(os.path.join(dev_support_dir, "flaky-dashboard-template.html"), "r") as f: + template = Template(f.read()) + +with open("dashboard.html", "w") as f: + f.write(template.render(results=url_to_bad_test_results)) diff --git dev-support/submit-patch.py dev-support/submit-patch.py new file mode 100755 index 0000000..236f31d --- /dev/null +++ dev-support/submit-patch.py @@ -0,0 +1,311 @@ +#!/usr/bin/env python +## +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Makes a patch for the current branch, creates/updates the review board request and uploads new +# patch to jira. Patch is named as (JIRA).(branch name).(patch number).patch as per Yetus' naming +# rules. If no jira is specified, patch will be named (branch name).patch and jira and review board +# are not updated. Review board id is retrieved from the remote link in the jira. +# Print help: submit-patch.py --h +import argparse +import getpass +import git +import json +import logging +import os +import re +import requests +import subprocess +import sys + +parser = argparse.ArgumentParser( + epilog = "To avoid having to enter jira/review board username/password every time, setup an " + "encrypted ~/.apache-cred files as follows:\n" + "1) Create a file with following single " + "line: \n{\"jira_username\" : \"appy\", \"jira_password\":\"123\", " + "\"rb_username\":\"appy\", \"rb_password\" : \"@#$\"}\n" + "2) Encrypt it with openssl.\n" + "openssl enc -aes-256-cbc -in -out ~/.apache-creds\n" + "3) Delete original file.\n" + "Now onwards, you'll need to enter this encryption key only once per run. If you " + "forget the key, simply regenerate ~/.apache-cred file again.", + formatter_class=argparse.RawTextHelpFormatter +) +parser.add_argument("-b", "--branch", + help = "Branch to use for generating diff. If not specified, tracking branch " + "is used. If there is no tracking branch, error will be thrown.") + +# Arguments related to Jira. +parser.add_argument("-jid", "--jira-id", + help = "Jira id of the issue. 
If set, we deduce next patch version from " + "attachments in the jira and also upload the new patch. Script will " + "ask for jira username/password for authentication. If not set, " + "patch is named .patch.") + +# Arguments related to Review Board. +parser.add_argument("-srb", "--skip-review-board", + help = "Don't create/update the review board.", + default = False, action = "store_true") +parser.add_argument("--reviewers", + help = "Comma separated list of users to add as reviewers.") + +# Misc arguments +parser.add_argument("--patch-dir", default = "~/patches", + help = "Directory to store patch files. If it doesn't exist, it will be " + "created. Default: ~/patches") +parser.add_argument("--rb-repo", default = "hbase-git", + help = "Review board repository. Default: hbase-git") +args = parser.parse_args() + +# Setup logger +logging.basicConfig() +logger = logging.getLogger("submit-patch") +logger.setLevel(logging.INFO) + + +def log_fatal_and_exit(*arg): + logger.fatal(*arg) + sys.exit(1) + + +def assert_status_code(response, expected_status_code, description): + if response.status_code != expected_status_code: + log_fatal_and_exit(" Oops, something went wrong when %s. \nResponse: %s %s\nExiting..", + description, response.status_code, response.reason) + + +# Make repo instance to interact with git repo. +try: + repo = git.Repo(os.getcwd()) + git = repo.git +except git.exc.InvalidGitRepositoryError as e: + log_fatal_and_exit(" '%s' is not valid git repo directory.\nRun from base directory of " + "HBase's git repo.", e) + +logger.info(" Active branch: %s", repo.active_branch.name) +# Do not proceed if there are uncommitted changes. +if repo.is_dirty(): + log_fatal_and_exit(" Git status is dirty. Commit locally first.") + + +# Returns base branch for creating diff. +def get_base_branch(): + # if --branch is set, use it as base branch for computing diff. Also check that it's a valid branch. + if args.branch is not None: + base_branch = args.branch + # Check that given branch exists. + for ref in repo.refs: + if ref.name == base_branch: + return base_branch + log_fatal_and_exit(" Branch '%s' does not exist in refs.", base_branch) + else: + # if --branch is not set, use tracking branch as base branch for computing diff. + # If there is no tracking branch, log error and quit. + tracking_branch = repo.active_branch.tracking_branch() + if tracking_branch is None: + log_fatal_and_exit(" Active branch doesn't have a tracking_branch. Please specify base " + " branch for computing diff using --branch flag.") + logger.info(" Using tracking branch as base branch") + return tracking_branch.name + + +# Returns patch name having format (JIRA).(branch name).(patch number).patch. If no jira is +# specified, patch is name (branch name).patch. +def get_patch_name(branch): + if args.jira_id is None: + return branch + ".patch" + + patch_name_prefix = args.jira_id.upper() + "." + branch + return get_patch_name_with_version(patch_name_prefix) + + +# Fetches list of attachments from the jira, deduces next version for the patch and returns final +# patch name. +def get_patch_name_with_version(patch_name_prefix): + # JIRA's rest api is broken wrt to attachments. https://jira.atlassian.com/browse/JRA-27637. + # Using crude way to get list of attachments. 
+ url = "https://issues.apache.org/jira/browse/" + args.jira_id + logger.info("Getting list of attachments for jira %s from %s", args.jira_id, url) + html = requests.get(url) + if html.status_code == 404: + log_fatal_and_exit(" Invalid jira id : %s", args.jira_id) + if html.status_code != 200: + log_fatal_and_exit(" Cannot fetch jira information. Status code %s", html.status_code) + # Iterate over patch names starting from version 1 and return when name is not already used. + content = unicode(html.content, 'utf-8') + for i in range(1, 1000): + name = patch_name_prefix + "." + ('{0:03d}'.format(i)) + ".patch" + if name not in content: + return name + + +# Validates that patch directory exists, if not, creates it. +def validate_patch_dir(patch_dir): + # Create patch_dir if it doesn't exist. + if not os.path.exists(patch_dir): + logger.warn(" Patch directory doesn't exist. Creating it.") + os.mkdir(patch_dir) + else: + # If patch_dir exists, make sure it's a directory. + if not os.path.isdir(patch_dir): + log_fatal_and_exit(" '%s' exists but is not a directory. Specify another directory.", + patch_dir) + + +# Make sure current branch is ahead of base_branch by exactly 1 commit. Quits if +# - base_branch has commits not in current branch +# - current branch is same as base branch +# - current branch is ahead of base_branch by more than 1 commits +def check_diff_between_branches(base_branch): + only_in_base_branch = git.log("HEAD.." + base_branch, oneline = True) + only_in_active_branch = git.log(base_branch + "..HEAD", oneline = True) + if len(only_in_base_branch) != 0: + log_fatal_and_exit(" '%s' is ahead of current branch by %s commits. Rebase " + "and try again.", base_branch, len(only_in_base_branch.split("\n"))) + if len(only_in_active_branch) == 0: + log_fatal_and_exit(" Current branch is same as '%s'. Exiting...", base_branch) + if len(only_in_active_branch.split("\n")) > 1: + log_fatal_and_exit(" Current branch is ahead of '%s' by %s commits. Squash into single " + "commit and try again.", + base_branch, len(only_in_active_branch.split("\n"))) + + +# If ~/.apache-creds is present, load credentials from it otherwise prompt user. +def get_credentials(): + creds = dict() + creds_filepath = os.path.expanduser("~/.apache-creds") + if os.path.exists(creds_filepath): + try: + logger.info(" Reading ~/.apache-creds for Jira and ReviewBoard credentials") + content = subprocess.check_output("openssl enc -aes-256-cbc -d -in " + creds_filepath, + shell=True) + except subprocess.CalledProcessError as e: + log_fatal_and_exit(" Couldn't decrypt ~/.apache-creds file. Exiting..") + creds = json.loads(content) + else: + creds['jira_username'] = raw_input("Jira username:") + creds['jira_password'] = getpass.getpass("Jira password:") + if not args.skip_review_board: + creds['rb_username'] = raw_input("Review Board username:") + creds['rb_password'] = getpass.getpass("Review Board password:") + return creds + + +def attach_patch_to_jira(issue_url, patch_filepath, creds): + # Upload patch to jira using REST API. 
+ headers = {'X-Atlassian-Token': 'no-check'} + files = {'file': open(patch_filepath, 'rb')} + jira_auth = requests.auth.HTTPBasicAuth(creds['jira_username'], creds['jira_password']) + attachment_url = issue_url + "/attachments" + r = requests.post(attachment_url, headers = headers, files = files, auth = jira_auth) + assert_status_code(r, 200, "uploading patch to jira") + + +def get_jira_summary(issue_url): + r = requests.get(issue_url + "?fields=summary") + assert_status_code(r, 200, "fetching jira summary") + return json.loads(r.content)["fields"]["summary"] + + +def get_review_board_id_if_present(issue_url, rb_link_title): + r = requests.get(issue_url + "/remotelink") + assert_status_code(r, 200, "fetching remote links") + links = json.loads(r.content) + for link in links: + if link["object"]["title"] == rb_link_title: + res = re.search("reviews.apache.org/r/([0-9]+)", link["object"]["url"]) + return res.group(1) + return None + + +base_branch = get_base_branch() +# Remove remote repo name from branch name if present. This assumes that we don't use '/' in +# actual branch names. +base_branch_without_remote = base_branch.split('/')[-1] +logger.info(" Base branch: %s", base_branch) + +check_diff_between_branches(base_branch) + +patch_dir = os.path.abspath(os.path.expanduser(args.patch_dir)) +logger.info(" Patch directory: %s", patch_dir) +validate_patch_dir(patch_dir) + +patch_filename = get_patch_name(base_branch_without_remote) +logger.info(" Patch name: %s", patch_filename) +patch_filepath = os.path.join(patch_dir, patch_filename) + +diff = git.format_patch(base_branch, stdout = True) +with open(patch_filepath, "w") as f: + f.write(diff) + +if args.jira_id is not None: + creds = get_credentials() + issue_url = "https://issues.apache.org/jira/rest/api/2/issue/" + args.jira_id + + attach_patch_to_jira(issue_url, patch_filepath, creds) + + if not args.skip_review_board: + rb_auth = requests.auth.HTTPBasicAuth(creds['rb_username'], creds['rb_password']) + + rb_link_title = "Review Board (" + base_branch_without_remote + ")" + rb_id = get_review_board_id_if_present(issue_url, rb_link_title) + + # If no review board link found, create new review request and add its link to jira. + if rb_id is None: + reviews_url = "https://reviews.apache.org/api/review-requests/" + data = {"repository" : "hbase-git"} + r = requests.post(reviews_url, data = data, auth = rb_auth) + assert_status_code(r, 201, "creating new review request") + review_request = json.loads(r.content)["review_request"] + absolute_url = review_request["absolute_url"] + logger.info(" Created new review request: %s", absolute_url) + + # Use jira summary as review's summary too. + summary = get_jira_summary(issue_url) + # Use commit message as description. 
+ description = git.log("-1", pretty="%B") + update_draft_data = {"bugs_closed" : [args.jira_id.upper()], "target_groups" : "hbase", + "target_people" : args.reviewers, "summary" : summary, + "description" : description } + draft_url = review_request["links"]["draft"]["href"] + r = requests.put(draft_url, data = update_draft_data, auth = rb_auth) + assert_status_code(r, 200, "updating review draft") + + draft_request = json.loads(r.content)["draft"] + diff_url = draft_request["links"]["draft_diffs"]["href"] + files = {'path' : (patch_filename, open(patch_filepath, 'rb'))} + r = requests.post(diff_url, files = files, auth = rb_auth) + assert_status_code(r, 201, "uploading diff to review draft") + + r = requests.put(draft_url, data = {"public" : True}, auth = rb_auth) + assert_status_code(r, 200, "publishing review request") + + # Add link to review board in the jira. + remote_link = json.dumps({'object': {'url': absolute_url, 'title': rb_link_title}}) + jira_auth = requests.auth.HTTPBasicAuth(creds['jira_username'], creds['jira_password']) + r = requests.post(issue_url + "/remotelink", data = remote_link, auth = jira_auth, + headers={'Content-Type':'application/json'}) + else: + logger.info(" Updating existing review board: https://reviews.apache.org/r/%s", rb_id) + draft_url = "https://reviews.apache.org/api/review-requests/" + rb_id + "/draft/" + diff_url = draft_url + "diffs/" + files = {'path' : (patch_filename, open(patch_filepath, 'rb'))} + r = requests.post(diff_url, files = files, auth = rb_auth) + assert_status_code(r, 201, "uploading diff to review draft") + + r = requests.put(draft_url, data = {"public" : True}, auth = rb_auth) + assert_status_code(r, 200, "publishing review request") diff --git hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java index 71af5d2..0911fd5 100644 --- hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java +++ hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java @@ -21,6 +21,7 @@ import com.sun.javadoc.DocErrorReporter; import java.util.ArrayList; import java.util.List; +import java.util.Locale; final class StabilityOptions { @@ -31,7 +32,7 @@ final class StabilityOptions { public static final String UNSTABLE_OPTION = "-unstable"; public static Integer optionLength(String option) { - String opt = option.toLowerCase(); + String opt = option.toLowerCase(Locale.ROOT); if (opt.equals(UNSTABLE_OPTION)) return 1; if (opt.equals(EVOLVING_OPTION)) return 1; if (opt.equals(STABLE_OPTION)) return 1; @@ -40,7 +41,7 @@ final class StabilityOptions { public static void validOptions(String[][] options, DocErrorReporter reporter) { for (int i = 0; i < options.length; i++) { - String opt = options[i][0].toLowerCase(); + String opt = options[i][0].toLowerCase(Locale.ROOT); if (opt.equals(UNSTABLE_OPTION)) { RootDocProcessor.stability = UNSTABLE_OPTION; } else if (opt.equals(EVOLVING_OPTION)) { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index bc97a95..ffeb51a 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -25,6 +25,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; import 
java.util.Map; +import java.util.Set; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -80,7 +81,7 @@ public class ClusterStatus extends VersionedWritable { private Collection deadServers; private ServerName master; private Collection backupMasters; - private Map intransition; + private Set intransition; private String clusterId; private String[] masterCoprocessors; private Boolean balancerOn; @@ -90,7 +91,7 @@ public class ClusterStatus extends VersionedWritable { final Collection deadServers, final ServerName master, final Collection backupMasters, - final Map rit, + final Set rit, final String[] masterCoprocessors, final Boolean balancerOn) { this.hbaseVersion = hbaseVersion; @@ -261,7 +262,7 @@ public class ClusterStatus extends VersionedWritable { } @InterfaceAudience.Private - public Map getRegionsInTransition() { + public Set getRegionsInTransition() { return this.intransition; } @@ -340,7 +341,7 @@ public class ClusterStatus extends VersionedWritable { int ritSize = (intransition != null) ? intransition.size() : 0; sb.append("\nNumber of regions in transition: " + ritSize); if (ritSize > 0) { - for (RegionState state: intransition.values()) { + for (RegionState state: intransition) { sb.append("\n " + state.toDescriptiveString()); } } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 3c16f4e..b75e8cd 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -18,10 +18,13 @@ */ package org.apache.hadoop.hbase; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Locale; import java.util.Map; import java.util.Set; @@ -38,7 +41,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.PrettyPrinter; import org.apache.hadoop.hbase.util.PrettyPrinter.Unit; -import com.google.common.base.Preconditions; /** * An HColumnDescriptor contains information about a column family such as the @@ -62,6 +64,8 @@ public class HColumnDescriptor implements Comparable { // Version 11 -- add column family level configuration. private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11; + public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION"; + // These constants are used as FileInfo keys public static final String COMPRESSION = "COMPRESSION"; public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT"; @@ -151,7 +155,7 @@ public class HColumnDescriptor implements Comparable { * Default number of versions of a record to keep. */ public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt( - "hbase.column.max.version", 1); + "hbase.column.max.version", 1); /** * Default is not to keep a minimum of versions. @@ -170,6 +174,11 @@ public class HColumnDescriptor implements Comparable { public static final boolean DEFAULT_IN_MEMORY = false; /** + * Default setting for whether to set the memstore of this column family as compacting or not. + */ + public static final boolean DEFAULT_IN_MEMORY_COMPACTION = false; + + /** * Default setting for preventing deleted from being collected immediately. 
*/ public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE; @@ -246,30 +255,31 @@ public class HColumnDescriptor implements Comparable { = new HashSet(); static { - DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER); - DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE)); - DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS)); - DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS)); - DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION); - DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL)); - DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE)); - DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY)); - DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE)); - DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED)); - DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING)); - DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE)); - DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1)); - DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE)); - DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE)); - DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE)); - DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN)); - for (String s : DEFAULT_VALUES.keySet()) { - RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s))); - } - RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION))); - RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY))); - RESERVED_KEYWORDS.add(new Bytes(IS_MOB_BYTES)); - RESERVED_KEYWORDS.add(new Bytes(MOB_THRESHOLD_BYTES)); + DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER); + DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE)); + DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS)); + DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS)); + DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION); + DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL)); + DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE)); + DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY)); + DEFAULT_VALUES.put(IN_MEMORY_COMPACTION, String.valueOf(DEFAULT_IN_MEMORY_COMPACTION)); + DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE)); + DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED)); + DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING)); + DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE)); + DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1)); + DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE)); + DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE)); + DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE)); + DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN)); + for (String s : DEFAULT_VALUES.keySet()) { + RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s))); + } + RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION))); + RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY))); + RESERVED_KEYWORDS.add(new 
Bytes(IS_MOB_BYTES)); + RESERVED_KEYWORDS.add(new Bytes(MOB_THRESHOLD_BYTES)); } private static final int UNINITIALIZED = -1; @@ -319,11 +329,12 @@ public class HColumnDescriptor implements Comparable { setMinVersions(DEFAULT_MIN_VERSIONS); setKeepDeletedCells(DEFAULT_KEEP_DELETED); setInMemory(DEFAULT_IN_MEMORY); + setInMemoryCompaction(DEFAULT_IN_MEMORY_COMPACTION); setBlockCacheEnabled(DEFAULT_BLOCKCACHE); setTimeToLive(DEFAULT_TTL); - setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase())); - setDataBlockEncoding(DataBlockEncoding.valueOf(DEFAULT_DATA_BLOCK_ENCODING.toUpperCase())); - setBloomFilterType(BloomType.valueOf(DEFAULT_BLOOMFILTER.toUpperCase())); + setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase(Locale.ROOT))); + setDataBlockEncoding(DataBlockEncoding.valueOf(DEFAULT_DATA_BLOCK_ENCODING.toUpperCase(Locale.ROOT))); + setBloomFilterType(BloomType.valueOf(DEFAULT_BLOOMFILTER.toUpperCase(Locale.ROOT))); setBlocksize(DEFAULT_BLOCKSIZE); setScope(DEFAULT_REPLICATION_SCOPE); } @@ -565,7 +576,7 @@ public class HColumnDescriptor implements Comparable { if (n == null) { return Compression.Algorithm.NONE; } - return Compression.Algorithm.valueOf(n.toUpperCase()); + return Compression.Algorithm.valueOf(n.toUpperCase(Locale.ROOT)); } /** @@ -577,7 +588,7 @@ public class HColumnDescriptor implements Comparable { * @return this (for chained invocation) */ public HColumnDescriptor setCompressionType(Compression.Algorithm type) { - return setValue(COMPRESSION, type.getName().toUpperCase()); + return setValue(COMPRESSION, type.getName().toUpperCase(Locale.ROOT)); } /** @@ -639,7 +650,7 @@ public class HColumnDescriptor implements Comparable { if (n == null) { return getCompressionType(); } - return Compression.Algorithm.valueOf(n.toUpperCase()); + return Compression.Algorithm.valueOf(n.toUpperCase(Locale.ROOT)); } /** @@ -652,7 +663,7 @@ public class HColumnDescriptor implements Comparable { */ public HColumnDescriptor setCompactionCompressionType( Compression.Algorithm type) { - return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase()); + return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase(Locale.ROOT)); } /** @@ -676,11 +687,32 @@ public class HColumnDescriptor implements Comparable { return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory)); } + /** + * @return True if we prefer to keep the in-memory data compacted + * for this column family + */ + public boolean isInMemoryCompaction() { + String value = getValue(IN_MEMORY_COMPACTION); + if (value != null) { + return Boolean.parseBoolean(value); + } + return DEFAULT_IN_MEMORY_COMPACTION; + } + + /** + * @param inMemoryCompaction True if we prefer to keep the in-memory data compacted + * for this column family + * @return this (for chained invocation) + */ + public HColumnDescriptor setInMemoryCompaction(boolean inMemoryCompaction) { + return setValue(IN_MEMORY_COMPACTION, Boolean.toString(inMemoryCompaction)); + } + public KeepDeletedCells getKeepDeletedCells() { String value = getValue(KEEP_DELETED_CELLS); if (value != null) { // toUpperCase for backwards compatibility - return KeepDeletedCells.valueOf(value.toUpperCase()); + return KeepDeletedCells.valueOf(value.toUpperCase(Locale.ROOT)); } return DEFAULT_KEEP_DELETED; } @@ -765,7 +797,7 @@ public class HColumnDescriptor implements Comparable { if (n == null) { n = DEFAULT_BLOOMFILTER; } - return BloomType.valueOf(n.toUpperCase()); + return BloomType.valueOf(n.toUpperCase(Locale.ROOT)); } /** diff 
--git hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java index 5bf2ec7..b5852d4 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java @@ -38,6 +38,7 @@ public class RegionLoad { protected ClusterStatusProtos.RegionLoad regionLoadPB; + @InterfaceAudience.Private public RegionLoad(ClusterStatusProtos.RegionLoad regionLoadPB) { this.regionLoadPB = regionLoadPB; } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java index 1ddcc20..3ea59db 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java @@ -57,6 +57,7 @@ public class ServerLoad { private long totalCompactingKVs = 0; private long currentCompactedKVs = 0; + @InterfaceAudience.Private public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) { this.serverLoad = serverLoad; for (ClusterStatusProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) { @@ -81,6 +82,7 @@ public class ServerLoad { // NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because // HBaseProtos.ServerLoad cannot be converted to an open data type(see HBASE-5967). /* @return the underlying ServerLoad protobuf object */ + @InterfaceAudience.Private public ClusterStatusProtos.ServerLoad obtainServerLoadPB() { return serverLoad; } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index e8af36c..812e4bf 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -120,6 +120,12 @@ class AsyncProcess { */ public static final String LOG_DETAILS_FOR_BATCH_ERROR = "hbase.client.log.batcherrors.details"; + private final int thresholdToLogUndoneTaskDetails; + private static final String THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS = + "hbase.client.threshold.log.details"; + private static final int DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS = 10; + private final int THRESHOLD_TO_LOG_REGION_DETAILS = 2; + /** * The context used to wait for results from one submit call. * 1) If AsyncProcess is set to track errors globally, and not per call (for HTable puts), @@ -277,7 +283,7 @@ class AsyncProcess { RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors, RpcControllerFactory rpcFactory) { if (hc == null) { - throw new IllegalArgumentException("HConnection cannot be null."); + throw new IllegalArgumentException("ClusterConnection cannot be null."); } this.connection = hc; @@ -332,6 +338,10 @@ class AsyncProcess { this.rpcCallerFactory = rpcCaller; this.rpcFactory = rpcFactory; this.logBatchErrorDetails = conf.getBoolean(LOG_DETAILS_FOR_BATCH_ERROR, false); + + this.thresholdToLogUndoneTaskDetails = + conf.getInt(THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS, + DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS); } /** @@ -389,7 +399,7 @@ class AsyncProcess { List locationErrorRows = null; do { // Wait until there is at least one slot for a new task. 
- waitForMaximumCurrentTasks(maxTotalConcurrentTasks - 1); + waitForMaximumCurrentTasks(maxTotalConcurrentTasks - 1, tableName.getNameAsString()); // Remember the previous decisions about regions or region servers we put in the // final multi. @@ -1765,18 +1775,19 @@ class AsyncProcess { @VisibleForTesting /** Waits until all outstanding tasks are done. Used in tests. */ void waitUntilDone() throws InterruptedIOException { - waitForMaximumCurrentTasks(0); + waitForMaximumCurrentTasks(0, null); } /** Wait until the async does not have more than max tasks in progress. */ - private void waitForMaximumCurrentTasks(int max) throws InterruptedIOException { - waitForMaximumCurrentTasks(max, tasksInProgress, id); + private void waitForMaximumCurrentTasks(int max, String tableName) + throws InterruptedIOException { + waitForMaximumCurrentTasks(max, tasksInProgress, id, tableName); } // Break out this method so testable @VisibleForTesting - static void waitForMaximumCurrentTasks(int max, final AtomicLong tasksInProgress, final long id) - throws InterruptedIOException { + void waitForMaximumCurrentTasks(int max, final AtomicLong tasksInProgress, final long id, + String tableName) throws InterruptedIOException { long lastLog = EnvironmentEdgeManager.currentTime(); long currentInProgress, oldInProgress = Long.MAX_VALUE; while ((currentInProgress = tasksInProgress.get()) > max) { @@ -1785,7 +1796,11 @@ class AsyncProcess { if (now > lastLog + 10000) { lastLog = now; LOG.info("#" + id + ", waiting for some tasks to finish. Expected max=" - + max + ", tasksInProgress=" + currentInProgress); + + max + ", tasksInProgress=" + currentInProgress + + " hasError=" + hasError() + (tableName == null ? "" : ", tableName=" + tableName)); + if (currentInProgress <= thresholdToLogUndoneTaskDetails) { + logDetailsOfUndoneTasks(currentInProgress); + } } } oldInProgress = currentInProgress; @@ -1802,6 +1817,25 @@ class AsyncProcess { } } + private void logDetailsOfUndoneTasks(long taskInProgress) { + ArrayList servers = new ArrayList(); + for (Map.Entry entry : taskCounterPerServer.entrySet()) { + if (entry.getValue().get() > 0) { + servers.add(entry.getKey()); + } + } + LOG.info("Left over " + taskInProgress + " task(s) are processed on server(s): " + servers); + if (taskInProgress <= THRESHOLD_TO_LOG_REGION_DETAILS) { + ArrayList regions = new ArrayList(); + for (Map.Entry entry : taskCounterPerRegion.entrySet()) { + if (entry.getValue().get() > 0) { + regions.add(Bytes.toString(entry.getKey())); + } + } + LOG.info("Regions against which left over task(s) are processed: " + regions); + } + } + /** * Only used w/useGlobalErrors ctor argument, for HTable backward compat. * @return Whether there were any errors in any request since the last time * @@ -1817,12 +1851,13 @@ * failed operations themselves. * @param failedRows an optional list into which the rows that failed since the last time * {@link #waitForAllPreviousOpsAndReset(List)} was called, or AP was created, are saved. + * @param tableName name of the table * @return all the errors since the last time {@link #waitForAllPreviousOpsAndReset(List)} * was called, or AP was created.
*/ public RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset( - List failedRows) throws InterruptedIOException { - waitForMaximumCurrentTasks(0); + List failedRows, String tableName) throws InterruptedIOException { + waitForMaximumCurrentTasks(0, tableName); if (!globalErrors.hasErrors()) { return null; } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java index 3287335..5dc7fc3 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java @@ -52,7 +52,7 @@ import java.util.List; * extreme circumstances, such as JVM or machine failure, may cause some data loss.

* *

NOTE: This class replaces the functionality that used to be available via - *HTableInterface#setAutoFlush(boolean) set to {@code false}. + * HTable#setAutoFlush(boolean) set to {@code false}. *

* *

See also the {@code BufferedMutatorExample} in the hbase-examples module.
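For context, the buffered-write pattern that this updated javadoc points readers at (in place of the old HTable#setAutoFlush(false)) looks roughly like the sketch below. The sketch is illustrative only and not part of the patch; the table name "t1", family "f" and qualifier "q" are placeholders, and it assumes a reachable cluster configured via HBaseConfiguration.create().

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf("t1"))) {
      for (int i = 0; i < 1000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(i));
        mutator.mutate(put); // buffered client-side; sent to the cluster in batches
      }
      mutator.flush(); // push whatever is still sitting in the write buffer
    }
  }
}

BufferedMutatorImpl#flush, changed just below, is what eventually drains this buffer through AsyncProcess, which is why it now passes the table name into waitForAllPreviousOpsAndReset.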

diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java index 01aaec5..e98ad4e 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java @@ -238,7 +238,8 @@ public class BufferedMutatorImpl implements BufferedMutator { while (!buffer.isEmpty()) { ap.submit(tableName, buffer, true, null, false); } - RetriesExhaustedWithDetailsException error = ap.waitForAllPreviousOpsAndReset(null); + RetriesExhaustedWithDetailsException error = + ap.waitForAllPreviousOpsAndReset(null, tableName.getNameAsString()); if (error != null) { if (listener == null) { throw error; @@ -260,7 +261,7 @@ public class BufferedMutatorImpl implements BufferedMutator { /** * This is used for legacy purposes in {@link HTable#setWriteBufferSize(long)} only. This ought * not be called for production uses. - * @deprecated Going away when we drop public support for {@link HTableInterface}. + * @deprecated Going away when we drop public support for {@link HTable}. */ @Deprecated public void setWriteBufferSize(long writeBufferSize) throws RetriesExhaustedWithDetailsException, diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java index 3027761..5c70b77 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java @@ -40,13 +40,18 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; @InterfaceAudience.Private // NOTE: Although this class is public, this class is meant to be used directly from internal // classes and unit tests only. -public interface ClusterConnection extends HConnection { +public interface ClusterConnection extends Connection { + + /** + * Key for configuration in Configuration whose value is the class we implement making a + * new Connection instance. + */ + String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl"; /** * @return - true if the master server is running * @deprecated this has been deprecated without a replacement */ - @Override @Deprecated boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException; @@ -63,46 +68,64 @@ public interface ClusterConnection extends HConnection { * @throws IOException * if a remote or network exception occurs */ - @Override boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException; /** + * A table that isTableEnabled == false and isTableDisabled == false + * is possible. This happens when a table has a lot of regions + * that must be processed. + * @param tableName table name + * @return true if the table is enabled, false otherwise + * @throws IOException if a remote or network exception occurs + */ + boolean isTableEnabled(TableName tableName) throws IOException; + + /** + * @param tableName table name + * @return true if the table is disabled, false otherwise + * @throws IOException if a remote or network exception occurs + */ + boolean isTableDisabled(TableName tableName) throws IOException; + + /** + * Retrieve TableState, represent current table state. 
+ * @param tableName table state for + * @return state of the table + */ + TableState getTableState(TableName tableName) throws IOException; + + /** * Find the location of the region of tableName that row * lives in. * @param tableName name of the table row is in * @param row row key you're trying to find the region of * @return HRegionLocation that describes where to find the region in - * question + * question * @throws IOException if a remote or network exception occurs */ - @Override - public HRegionLocation locateRegion(final TableName tableName, + HRegionLocation locateRegion(final TableName tableName, final byte [] row) throws IOException; /** * Allows flushing the region cache. */ - @Override void clearRegionCache(); - void cacheLocation(final TableName tableName, final RegionLocations location); /** * Allows flushing the region cache of all locations that pertain to * tableName * @param tableName Name of the table whose regions we are to remove from - * cache. + * cache. */ - @Override void clearRegionCache(final TableName tableName); /** * Deletes cached locations for the specific region. * @param location The location object for the region, to be purged from cache. */ - @Override void deleteCachedRegionLocation(final HRegionLocation location); /** @@ -111,10 +134,9 @@ public interface ClusterConnection extends HConnection { * @param tableName name of the table row is in * @param row row key you're trying to find the region of * @return HRegionLocation that describes where to find the region in - * question + * question * @throws IOException if a remote or network exception occurs */ - @Override HRegionLocation relocateRegion(final TableName tableName, final byte [] row) throws IOException; @@ -125,7 +147,7 @@ public interface ClusterConnection extends HConnection { * @param row row key you're trying to find the region of * @param replicaId the replicaId of the region * @return RegionLocations that describe where to find the region in - * question + * question * @throws IOException if a remote or network exception occurs */ RegionLocations relocateRegion(final TableName tableName, @@ -140,19 +162,16 @@ public interface ClusterConnection extends HConnection { * @param exception the exception if any. Can be null. * @param source the previous location */ - @Override void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey, Object exception, ServerName source); - /** * Gets the location of the region of regionName. * @param regionName name of the region to locate * @return HRegionLocation that describes where to find the region in - * question + * question * @throws IOException if a remote or network exception occurs */ - @Override HRegionLocation locateRegion(final byte[] regionName) throws IOException; @@ -160,9 +179,8 @@ public interface ClusterConnection extends HConnection { * Gets the locations of all regions in the specified table, tableName. * @param tableName table to get regions of * @return list of region locations for all regions of table - * @throws IOException + * @throws IOException if IO failure occurs */ - @Override List locateRegions(final TableName tableName) throws IOException; /** @@ -172,9 +190,8 @@ public interface ClusterConnection extends HConnection { * @param offlined True if we are to include offlined regions, false and we'll leave out offlined * regions from returned list. 
* @return list of region locations for all regions of table - * @throws IOException + * @throws IOException if IO failure occurs */ - @Override List locateRegions(final TableName tableName, final boolean useCache, final boolean offlined) throws IOException; @@ -186,12 +203,12 @@ public interface ClusterConnection extends HConnection { * @param useCache Should we use the cache to retrieve the region information. * @param retry do we retry * @return region locations for this row. - * @throws IOException + * @throws IOException if IO failure occurs */ RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, boolean retry) throws IOException; - /** + /** * * @param tableName table to get regions of * @param row the row @@ -199,15 +216,14 @@ public interface ClusterConnection extends HConnection { * @param retry do we retry * @param replicaId the replicaId for the region * @return region locations for this row. - * @throws IOException + * @throws IOException if IO failure occurs */ - RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, boolean retry, + RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException; /** * Returns a {@link MasterKeepAliveConnection} to the active master */ - @Override MasterService.BlockingInterface getMaster() throws IOException; @@ -217,7 +233,6 @@ public interface ClusterConnection extends HConnection { * @return proxy for HRegionServer * @throws IOException if a remote or network exception occurs */ - @Override AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException; /** @@ -229,7 +244,6 @@ public interface ClusterConnection extends HConnection { * @throws IOException if a remote or network exception occurs * */ - @Override ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException; /** @@ -240,7 +254,6 @@ public interface ClusterConnection extends HConnection { * @return Location of row. * @throws IOException if a remote or network exception occurs */ - @Override HRegionLocation getRegionLocation(TableName tableName, byte [] row, boolean reload) throws IOException; @@ -249,34 +262,30 @@ public interface ClusterConnection extends HConnection { * Clear any caches that pertain to server name sn. * @param sn A server name */ - @Override void clearCaches(final ServerName sn); /** * This function allows HBaseAdmin and potentially others to get a shared MasterService * connection. * @return The shared instance. Never returns null. - * @throws MasterNotRunningException + * @throws MasterNotRunningException if master is not running * @deprecated Since 0.96.0 */ - @Override @Deprecated MasterKeepAliveConnection getKeepAliveMasterService() throws MasterNotRunningException; /** - * @param serverName + * @param serverName of server to check * @return true if the server is known as dead, false otherwise. - * @deprecated internal method, do not use thru HConnection */ - @Override + * @deprecated internal method, do not use thru ClusterConnection */ @Deprecated boolean isDeadServer(ServerName serverName); /** - * @return Nonce generator for this HConnection; may be null if disabled in configuration. + * @return Nonce generator for this ClusterConnection; may be null if disabled in configuration. */ - @Override - public NonceGenerator getNonceGenerator(); + NonceGenerator getNonceGenerator(); /** * @return Default AsyncProcess associated with this connection. 
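The isTableEnabled/isTableDisabled/getTableState methods added to ClusterConnection above are internal API (the interface is marked @InterfaceAudience.Private), so callers reach them by casting, the same way HBaseAdmin does later in this patch. A rough sketch of that usage, assuming TableState lives in org.apache.hadoop.hbase.client, with a hypothetical helper method that is not part of the patch:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableState;

final class TableStateSketch {
  // Hypothetical helper: true only when the table is fully enabled.
  static boolean isFullyEnabled(Connection connection, TableName tableName) throws IOException {
    ClusterConnection clusterConnection = (ClusterConnection) connection;
    // Per the new javadoc, a table can report false for both isTableEnabled and
    // isTableDisabled while it still has many regions being processed.
    if (!clusterConnection.isTableEnabled(tableName)) {
      return false;
    }
    TableState state = clusterConnection.getTableState(tableName);
    return state.inStates(TableState.State.ENABLED);
  }
}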
@@ -287,7 +296,7 @@ public interface ClusterConnection extends HConnection { * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. * This RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be * intercepted with the configured {@link RetryingCallerInterceptor} - * @param conf + * @param conf configuration * @return RpcRetryingCallerFactory */ RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf); @@ -320,11 +329,17 @@ public interface ClusterConnection extends HConnection { /** * @return the MetricsConnection instance associated with this connection. */ - public MetricsConnection getConnectionMetrics(); + MetricsConnection getConnectionMetrics(); /** * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so * supports cell blocks. */ boolean hasCellBlockSupport(); + + /** + * @return the number of region servers that are currently running + * @throws IOException if a remote or network exception occurs + */ + int getCurrentNrHRS() throws IOException; } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java index a3f6fe6..b979c6a 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; * thread will obtain its own Table instance. Caching or pooling of {@link Table} and {@link Admin} * is not recommended. * - *

This class replaces {@link HConnection}, which is now deprecated. * @see ConnectionFactory * @since 0.99.0 */ @@ -59,7 +58,7 @@ public interface Connection extends Abortable, Closeable { * - Only allow new style of interfaces: * -- All table names are passed as TableName. No more byte[] and string arguments * -- Most of the classes with names H is deprecated in favor of non-H versions - * (Table, Connection vs HConnection, etc) + * (Table, Connection, etc) * -- Only real client-facing public methods are allowed * - Connection should contain only getTable(), getAdmin() kind of general methods. */ @@ -123,7 +122,7 @@ public interface Connection extends Abortable, Closeable { * * @return a {@link BufferedMutator} for the supplied tableName. */ - public BufferedMutator getBufferedMutator(TableName tableName) throws IOException; + BufferedMutator getBufferedMutator(TableName tableName) throws IOException; /** * Retrieve a {@link BufferedMutator} for performing client-side buffering of writes. The @@ -134,7 +133,7 @@ public interface Connection extends Abortable, Closeable { * @param params details on how to instantiate the {@code BufferedMutator}. * @return a {@link BufferedMutator} for the supplied tableName. */ - public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws IOException; + BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws IOException; /** * Retrieve a RegionLocator implementation to inspect region information on a table. The returned @@ -151,7 +150,7 @@ public interface Connection extends Abortable, Closeable { * @param tableName Name of the table who's region is to be examined * @return A RegionLocator instance */ - public RegionLocator getRegionLocator(TableName tableName) throws IOException; + RegionLocator getRegionLocator(TableName tableName) throws IOException; /** * Retrieve an Admin implementation to administer an HBase cluster. @@ -167,7 +166,7 @@ public interface Connection extends Abortable, Closeable { Admin getAdmin() throws IOException; @Override - public void close() throws IOException; + void close() throws IOException; /** * Returns whether the connection is closed or not. 
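The ConnectionFactory change just below reads the implementation class from ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL ("hbase.client.connection.impl") now that the constant no longer lives on HConnection. A hedged sketch of what that hook is used for follows; it is not part of the patch, org.example.MyConnectionImplementation is hypothetical, and the class named there must expose whatever constructor ConnectionFactory instantiates reflectively.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

final class CustomConnectionSketch {
  static Connection open() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Same effect as conf.set("hbase.client.connection.impl", ...); the constant simply
    // moved from the removed HConnection interface onto ClusterConnection.
    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL,
        "org.example.MyConnectionImplementation");
    return ConnectionFactory.createConnection(conf);
  }
}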
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index 3e8ca31..a5dbddd 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -214,7 +214,7 @@ public class ConnectionFactory { user = provider.getCurrent(); } - String className = conf.get(HConnection.HBASE_CLIENT_CONNECTION_IMPL, + String className = conf.get(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, ConnectionImplementation.class.getName()); Class clazz; try { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index 429e47d..d93a8b4 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -52,7 +52,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.RegionLocations; @@ -64,13 +63,11 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory; -import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; import org.apache.hadoop.hbase.exceptions.RegionMovedException; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcClientFactory; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; @@ -98,7 +95,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.ipc.RemoteException; import org.apache.zookeeper.KeeperException; - /** * Main implementation of {@link Connection} and {@link ClusterConnection} interfaces. * Encapsulates connection to zookeeper and regionservers. @@ -124,8 +120,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable { * Once it's set under nonceGeneratorCreateLock, it is never unset or changed. */ private static volatile NonceGenerator nonceGenerator = null; - /** The nonce generator lock. Only taken when creating HConnection, which gets a private copy. */ - private static Object nonceGeneratorCreateLock = new Object(); + /** The nonce generator lock. Only taken when creating Connection, which gets a private copy. 
*/ + private static final Object nonceGeneratorCreateLock = new Object(); private final AsyncProcess asyncProcess; // single tracker per connection @@ -137,7 +133,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { // package protected for the tests ClusterStatusListener clusterStatusListener; - private final Object metaRegionLock = new Object(); // We have a single lock for master & zk to prevent deadlocks. Having @@ -162,23 +157,23 @@ class ConnectionImplementation implements ClusterConnection, Closeable { private final ConnectionConfiguration connectionConfig; // Client rpc instance. - private RpcClient rpcClient; + private final RpcClient rpcClient; private final MetaCache metaCache; private final MetricsConnection metrics; protected User user; - private RpcRetryingCallerFactory rpcCallerFactory; + private final RpcRetryingCallerFactory rpcCallerFactory; - private RpcControllerFactory rpcControllerFactory; + private final RpcControllerFactory rpcControllerFactory; private final RetryingCallerInterceptor interceptor; /** * Cluster registry of basic info such as clusterid and meta region location. */ - Registry registry; + Registry registry; private final ClientBackoffPolicy backoffPolicy; @@ -279,34 +274,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return ng; } - - @Override - public HTableInterface getTable(String tableName) throws IOException { - return getTable(TableName.valueOf(tableName)); - } - @Override - public HTableInterface getTable(byte[] tableName) throws IOException { - return getTable(TableName.valueOf(tableName)); - } - - @Override - public HTableInterface getTable(TableName tableName) throws IOException { + public Table getTable(TableName tableName) throws IOException { return getTable(tableName, getBatchPool()); } @Override - public HTableInterface getTable(String tableName, ExecutorService pool) throws IOException { - return getTable(TableName.valueOf(tableName), pool); - } - - @Override - public HTableInterface getTable(byte[] tableName, ExecutorService pool) throws IOException { - return getTable(TableName.valueOf(tableName), pool); - } - - @Override - public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException { + public Table getTable(TableName tableName, ExecutorService pool) throws IOException { return new HTable(tableName, this, connectionConfig, rpcCallerFactory, rpcControllerFactory, pool); } @@ -463,7 +437,9 @@ class ConnectionImplementation implements ClusterConnection, Closeable { protected String clusterId = null; protected void retrieveClusterId() { - if (clusterId != null) return; + if (clusterId != null) { + return; + } this.clusterId = this.registry.getClusterId(); if (clusterId == null) { clusterId = HConstants.CLUSTER_ID_DEFAULT; @@ -519,12 +495,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return reload? 
relocateRegion(tableName, row): locateRegion(tableName, row); } - @Override - public HRegionLocation getRegionLocation(final byte[] tableName, - final byte [] row, boolean reload) - throws IOException { - return getRegionLocation(TableName.valueOf(tableName), row, reload); - } @Override public boolean isTableEnabled(TableName tableName) throws IOException { @@ -532,34 +502,16 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override - public boolean isTableEnabled(byte[] tableName) throws IOException { - return isTableEnabled(TableName.valueOf(tableName)); - } - - @Override public boolean isTableDisabled(TableName tableName) throws IOException { return getTableState(tableName).inStates(TableState.State.DISABLED); } @Override - public boolean isTableDisabled(byte[] tableName) throws IOException { - return isTableDisabled(TableName.valueOf(tableName)); - } - - @Override - public boolean isTableAvailable(final TableName tableName) throws IOException { - return isTableAvailable(tableName, null); - } - - @Override - public boolean isTableAvailable(final byte[] tableName) throws IOException { - return isTableAvailable(TableName.valueOf(tableName)); - } - - @Override public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys) throws IOException { - if (this.closed) throw new IOException(toString() + " closed"); + if (this.closed) { + throw new IOException(toString() + " closed"); + } try { if (!isTableEnabled(tableName)) { LOG.debug("Table " + tableName + " not enabled"); @@ -616,12 +568,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override - public boolean isTableAvailable(final byte[] tableName, final byte[][] splitKeys) - throws IOException { - return isTableAvailable(TableName.valueOf(tableName), splitKeys); - } - - @Override public HRegionLocation locateRegion(final byte[] regionName) throws IOException { RegionLocations locations = locateRegion(HRegionInfo.getTable(regionName), HRegionInfo.getStartKey(regionName), false, true); @@ -644,12 +590,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override - public List locateRegions(final byte[] tableName) - throws IOException { - return locateRegions(TableName.valueOf(tableName)); - } - - @Override public List locateRegions(final TableName tableName, final boolean useCache, final boolean offlined) throws IOException { List regions = MetaTableAccessor @@ -669,12 +609,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override - public List locateRegions(final byte[] tableName, - final boolean useCache, final boolean offlined) throws IOException { - return locateRegions(TableName.valueOf(tableName), useCache, offlined); - } - - @Override public HRegionLocation locateRegion( final TableName tableName, final byte[] row) throws IOException{ RegionLocations locations = locateRegion(tableName, row, true, true); @@ -682,13 +616,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override - public HRegionLocation locateRegion(final byte[] tableName, - final byte [] row) - throws IOException{ - return locateRegion(TableName.valueOf(tableName), row); - } - - @Override public HRegionLocation relocateRegion(final TableName tableName, final byte [] row) throws IOException{ RegionLocations locations = relocateRegion(tableName, row, @@ -711,12 +638,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override - public HRegionLocation 
relocateRegion(final byte[] tableName, - final byte [] row) throws IOException { - return relocateRegion(TableName.valueOf(tableName), row); - } - - @Override public RegionLocations locateRegion(final TableName tableName, final byte [] row, boolean useCache, boolean retry) throws IOException { @@ -727,7 +648,9 @@ class ConnectionImplementation implements ClusterConnection, Closeable { public RegionLocations locateRegion(final TableName tableName, final byte [] row, boolean useCache, boolean retry, int replicaId) throws IOException { - if (this.closed) throw new IOException(toString() + " closed"); + if (this.closed) { + throw new IOException(toString() + " closed"); + } if (tableName== null || tableName.getName().length == 0) { throw new IllegalArgumentException( "table name cannot be null or zero length"); @@ -966,11 +889,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { metaCache.clearCache(tableName); } - @Override - public void clearRegionCache(final byte[] tableName) { - clearRegionCache(TableName.valueOf(tableName)); - } - /** * Put a newly discovered HRegionLocation into the cache. * @param tableName The table name. @@ -993,11 +911,11 @@ class ConnectionImplementation implements ClusterConnection, Closeable { * State of the MasterService connection/setup. */ static class MasterServiceState { - HConnection connection; + Connection connection; MasterProtos.MasterService.BlockingInterface stub; int userCount; - MasterServiceState(final HConnection connection) { + MasterServiceState(final Connection connection) { super(); this.connection = connection; } @@ -1189,7 +1107,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { /** * Create a stub against the master. Retry if necessary. * @return A stub to do intf against the master - * @throws org.apache.hadoop.hbase.MasterNotRunningException + * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running */ Object makeStub() throws IOException { // The lock must be at the beginning to prevent multiple master creations @@ -1245,26 +1163,18 @@ class ConnectionImplementation implements ClusterConnection, Closeable { @Override public AdminProtos.AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException { - return getAdmin(serverName, false); - } - - @Override - // Nothing is done w/ the 'master' parameter. It is ignored. 
- public AdminProtos.AdminService.BlockingInterface getAdmin(final ServerName serverName, - final boolean master) - throws IOException { if (isDeadServer(serverName)) { throw new RegionServerStoppedException(serverName + " is dead."); } String key = getStubKey(AdminProtos.AdminService.BlockingInterface.class.getName(), - serverName.getHostname(), serverName.getPort(), this.hostnamesCanChange); + serverName.getHostname(), serverName.getPort(), this.hostnamesCanChange); this.connectionLock.putIfAbsent(key, key); - AdminProtos.AdminService.BlockingInterface stub = null; + AdminProtos.AdminService.BlockingInterface stub; synchronized (this.connectionLock.get(key)) { stub = (AdminProtos.AdminService.BlockingInterface)this.stubs.get(key); if (stub == null) { BlockingRpcChannel channel = - this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); + this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); stub = AdminProtos.AdminService.newBlockingStub(channel); this.stubs.put(key, stub); } @@ -1798,7 +1708,9 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } void releaseMaster(MasterServiceState mss) { - if (mss.getStub() == null) return; + if (mss.getStub() == null) { + return; + } synchronized (masterAndZKLock) { --mss.userCount; } @@ -1833,20 +1745,12 @@ class ConnectionImplementation implements ClusterConnection, Closeable { metaCache.clearCache(location); } - @Override - public void updateCachedLocations(final TableName tableName, byte[] rowkey, - final Object exception, final HRegionLocation source) { - assert source != null; - updateCachedLocations(tableName, source.getRegionInfo().getRegionName() - , rowkey, exception, source.getServerName()); - } - /** * Update the location with the new value (if the exception is a RegionMovedException) * or delete it from the cache. Does nothing if we can be sure from the exception that * the location is still accurate, or if the cache has already been updated. * @param exception an object (to simplify user code) on which we will try to find a nested - * or wrapped or both RegionMovedException + * or wrapped or both RegionMovedException * @param source server that is the source of the location update. */ @Override @@ -1916,84 +1820,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { metaCache.clearCache(regionInfo); } - @Override - public void updateCachedLocations(final byte[] tableName, byte[] rowkey, - final Object exception, final HRegionLocation source) { - updateCachedLocations(TableName.valueOf(tableName), rowkey, exception, source); - } - - /** - * @deprecated since 0.96 Use {@link org.apache.hadoop.hbase.client.HTableInterface#batch} instead - */ - @Override - @Deprecated - public void processBatch(List list, - final TableName tableName, - ExecutorService pool, - Object[] results) throws IOException, InterruptedException { - // This belongs in HTable!!! Not in here. 
St.Ack - - // results must be the same size as list - if (results.length != list.size()) { - throw new IllegalArgumentException( - "argument results must be the same size as argument list"); - } - processBatchCallback(list, tableName, pool, results, null); - } - - /** - * @deprecated Unsupported API - */ - @Override - @Deprecated - public void processBatch(List list, - final byte[] tableName, - ExecutorService pool, - Object[] results) throws IOException, InterruptedException { - processBatch(list, TableName.valueOf(tableName), pool, results); - } - - /** - * Send the queries in parallel on the different region servers. Retries on failures. - * If the method returns it means that there is no error, and the 'results' array will - * contain no exception. On error, an exception is thrown, and the 'results' array will - * contain results and exceptions. - * @deprecated since 0.96 - * Use {@link org.apache.hadoop.hbase.client.HTable#processBatchCallback} instead - */ - @Override - @Deprecated - public void processBatchCallback( - List list, - TableName tableName, - ExecutorService pool, - Object[] results, - Batch.Callback callback) - throws IOException, InterruptedException { - - AsyncProcess.AsyncRequestFuture ars = this.asyncProcess.submitAll( - pool, tableName, list, callback, results); - ars.waitUntilDone(); - if (ars.hasError()) { - throw ars.getErrors(); - } - } - - /** - * @deprecated Unsupported API - */ - @Override - @Deprecated - public void processBatchCallback( - List list, - byte[] tableName, - ExecutorService pool, - Object[] results, - Batch.Callback callback) - throws IOException, InterruptedException { - processBatchCallback(list, TableName.valueOf(tableName), pool, results, callback); - } - // For tests to override. protected AsyncProcess createAsyncProcess(Configuration conf) { // No default pool available. 
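The processBatch/processBatchCallback overloads removed above already pointed callers at the batch API on the table handle. A rough sketch of that replacement call path, illustrative only and not part of the patch (table name, family and rows are placeholders):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class BatchSketch {
  static void writeBatch(Connection connection) throws IOException, InterruptedException {
    List<Row> actions = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      Put put = new Put(Bytes.toBytes("row-" + i));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(i));
      actions.add(put);
    }
    Object[] results = new Object[actions.size()];
    try (Table table = connection.getTable(TableName.valueOf("t1"))) {
      // Each slot of results ends up holding the per-action Result or exception.
      table.batch(actions, results);
    }
  }
}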
@@ -2024,41 +1850,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return metaCache.getNumberOfCachedRegionLocations(tableName); } - /** - * @deprecated always return false since 0.99 - */ - @Override - @Deprecated - public void setRegionCachePrefetch(final TableName tableName, final boolean enable) { - } - - /** - * @deprecated always return false since 0.99 - */ - @Override - @Deprecated - public void setRegionCachePrefetch(final byte[] tableName, - final boolean enable) { - } - - /** - * @deprecated always return false since 0.99 - */ - @Override - @Deprecated - public boolean getRegionCachePrefetch(TableName tableName) { - return false; - } - - /** - * @deprecated always return false since 0.99 - */ - @Override - @Deprecated - public boolean getRegionCachePrefetch(byte[] tableName) { - return false; - } - @Override public void abort(final String msg, Throwable t) { if (t instanceof KeeperException.SessionExpiredException @@ -2133,146 +1924,20 @@ class ConnectionImplementation implements ClusterConnection, Closeable { close(); } - /** - * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#listTables()} instead - */ - @Deprecated - @Override - public HTableDescriptor[] listTables() throws IOException { - MasterKeepAliveConnection master = getKeepAliveMasterService(); - try { - MasterProtos.GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest((List)null); - return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req)); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - } - - /** - * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#listTableNames()} instead - */ - @Deprecated - @Override - public String[] getTableNames() throws IOException { - TableName[] tableNames = listTableNames(); - String[] result = new String[tableNames.length]; - for (int i = 0; i < tableNames.length; i++) { - result[i] = tableNames[i].getNameAsString(); - } - return result; - } - - /** - * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#listTableNames()} instead - */ - @Deprecated - @Override - public TableName[] listTableNames() throws IOException { - MasterKeepAliveConnection master = getKeepAliveMasterService(); - try { - return ProtobufUtil.getTableNameArray(master.getTableNames(null, - MasterProtos.GetTableNamesRequest.newBuilder().build()) - .getTableNamesList()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - } - - /** - * @deprecated Use {@link - * org.apache.hadoop.hbase.client.Admin#getTableDescriptorsByTableName(java.util.List)} instead - */ - @Deprecated - @Override - public HTableDescriptor[] getHTableDescriptorsByTableName( - List tableNames) throws IOException { - if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0]; - MasterKeepAliveConnection master = getKeepAliveMasterService(); - try { - MasterProtos.GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableNames); - return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req)); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - } - - /** - * @deprecated Use - * {@link org.apache.hadoop.hbase.client.Admin#getTableDescriptorsByTableName(java.util.List)} - * instead - */ - @Deprecated - @Override - public HTableDescriptor[] getHTableDescriptors(List names) 
throws IOException { - List tableNames = new ArrayList(names.size()); - for(String name : names) { - tableNames.add(TableName.valueOf(name)); - } - - return getHTableDescriptorsByTableName(tableNames); - } - @Override public NonceGenerator getNonceGenerator() { return nonceGenerator; } - /** - * Connects to the master to get the table descriptor. - * @param tableName table name - * @throws java.io.IOException if the connection to master fails or if the table - * is not found. - * @deprecated Use {@link - * org.apache.hadoop.hbase.client.Admin#getTableDescriptor(org.apache.hadoop.hbase.TableName)} - * instead - */ - @Deprecated - @Override - public HTableDescriptor getHTableDescriptor(final TableName tableName) - throws IOException { - if (tableName == null) return null; - MasterKeepAliveConnection master = getKeepAliveMasterService(); - MasterProtos.GetTableDescriptorsResponse htds; - try { - MasterProtos.GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableName); - htds = master.getTableDescriptors(null, req); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - if (!htds.getTableSchemaList().isEmpty()) { - return ProtobufUtil.convertToHTableDesc(htds.getTableSchemaList().get(0)); - } - throw new TableNotFoundException(tableName.getNameAsString()); - } - - /** - * @deprecated Use {@link - * org.apache.hadoop.hbase.client.Admin#getTableDescriptor(org.apache.hadoop.hbase.TableName)} - * instead - */ - @Deprecated - @Override - public HTableDescriptor getHTableDescriptor(final byte[] tableName) - throws IOException { - return getHTableDescriptor(TableName.valueOf(tableName)); - } - @Override public TableState getTableState(TableName tableName) throws IOException { - if (this.closed) throw new IOException(toString() + " closed"); + if (this.closed) { + throw new IOException(toString() + " closed"); + } TableState tableState = MetaTableAccessor.getTableState(this, tableName); - if (tableState == null) throw new TableNotFoundException(tableName); + if (tableState == null) { + throw new TableNotFoundException(tableName); + } return tableState; } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 363a0e0..df89622 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.hbase.client; +import com.google.common.annotations.VisibleForTesting; + import java.io.IOException; -import java.util.Random; import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadLocalRandom; import org.apache.commons.logging.Log; import org.apache.hadoop.conf.Configuration; @@ -32,8 +34,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; -import com.google.common.annotations.VisibleForTesting; - /** * Utility used by client connections. */ @@ -42,12 +42,11 @@ public final class ConnectionUtils { private ConnectionUtils() {} - private static final Random RANDOM = new Random(); /** * Calculate pause time. * Built on {@link HConstants#RETRY_BACKOFF}. 
- * @param pause - * @param tries + * @param pause time to pause + * @param tries amount of tries * @return How long to wait after tries retries */ public static long getPauseTime(final long pause, final int tries) { @@ -60,18 +59,19 @@ public final class ConnectionUtils { } long normalPause = pause * HConstants.RETRY_BACKOFF[ntries]; - long jitter = (long)(normalPause * RANDOM.nextFloat() * 0.01f); // 1% possible jitter + // 1% possible jitter + long jitter = (long) (normalPause * ThreadLocalRandom.current().nextFloat() * 0.01f); return normalPause + jitter; } /** - * Adds / subs a 10% jitter to a pause time. Minimum is 1. + * Adds / subs an up to 50% jitter to a pause time. Minimum is 1. * @param pause the expected pause. * @param jitter the jitter ratio, between 0 and 1, exclusive. */ public static long addJitter(final long pause, final float jitter) { - float lag = pause * (RANDOM.nextFloat() - 0.5f) * jitter; + float lag = pause * (ThreadLocalRandom.current().nextFloat() - 0.5f) * jitter; long newPause = pause + (long) lag; if (newPause <= 0) { return 1; @@ -90,7 +90,7 @@ public final class ConnectionUtils { } /** - * Changes the configuration to set the number of retries needed when using HConnection + * Changes the configuration to set the number of retries needed when using Connection * internally, e.g. for updating catalog tables, etc. * Call this method before we create any Connections. * @param c The Configuration instance to set the retries into. @@ -106,7 +106,7 @@ public final class ConnectionUtils { int serversideMultiplier = c.getInt("hbase.client.serverside.retries.multiplier", 10); int retries = hcRetries * serversideMultiplier; c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries); - log.info(sn + " server-side HConnection retries=" + retries); + log.info(sn + " server-side Connection retries=" + retries); } /** @@ -119,7 +119,7 @@ public final class ConnectionUtils { * @param admin the admin interface of the local server * @param client the client interface of the local server * @return an short-circuit connection. - * @throws IOException + * @throws IOException if IO failure occurred */ public static ClusterConnection createShortCircuitConnection(final Configuration conf, ExecutorService pool, User user, final ServerName serverName, @@ -130,9 +130,8 @@ public final class ConnectionUtils { } return new ConnectionImplementation(conf, pool, user) { @Override - public AdminService.BlockingInterface getAdmin(ServerName sn, boolean getMaster) - throws IOException { - return serverName.equals(sn) ? admin : super.getAdmin(sn, getMaster); + public AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException { + return serverName.equals(sn) ? 
admin : super.getAdmin(sn); } @Override @@ -148,7 +147,7 @@ public final class ConnectionUtils { */ @VisibleForTesting public static void setupMasterlessConnection(Configuration conf) { - conf.set(HConnection.HBASE_CLIENT_CONNECTION_IMPL, + conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName()); } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index a2ef34b..e21a5d2 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -18,6 +18,10 @@ */ package org.apache.hadoop.hbase.client; +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.ByteString; +import com.google.protobuf.ServiceException; + import java.io.Closeable; import java.io.IOException; import java.io.InterruptedIOException; @@ -176,10 +180,6 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.KeeperException; -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.ByteString; -import com.google.protobuf.ServiceException; - /** * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that * this is an HBase-internal class as defined in @@ -313,9 +313,9 @@ public class HBaseAdmin implements Admin { } } - /** @return HConnection used by this object. */ + /** @return Connection used by this object. */ @Override - public HConnection getConnection() { + public Connection getConnection() { return connection; } @@ -404,11 +404,11 @@ public class HBaseAdmin implements Admin { @Override public HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException { - return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory, + return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory, operationTimeout, rpcTimeout); } - static HTableDescriptor getTableDescriptor(final TableName tableName, HConnection connection, + static HTableDescriptor getTableDescriptor(final TableName tableName, Connection connection, RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, int operationTimeout, int rpcTimeout) throws IOException { if (tableName == null) return null; @@ -588,7 +588,7 @@ public class HBaseAdmin implements Admin { protected Void postOperationResult(final Void result, final long deadlineTs) throws IOException, TimeoutException { // Delete cached information to prevent clients from using old locations - getAdmin().getConnection().clearRegionCache(getTableName()); + ((ClusterConnection) getAdmin().getConnection()).clearRegionCache(getTableName()); return super.postOperationResult(result, deadlineTs); } } @@ -843,7 +843,7 @@ public class HBaseAdmin implements Admin { @Override public boolean isTableAvailable(TableName tableName) throws IOException { - return connection.isTableAvailable(tableName); + return connection.isTableAvailable(tableName, null); } @Override @@ -1701,7 +1701,7 @@ public class HBaseAdmin implements Admin { * @param regionName Name of a region. * @return a pair of HRegionInfo and ServerName if regionName is * a verified region name (we call {@link - * MetaTableAccessor#getRegionLocation(HConnection, byte[])} + * MetaTableAccessor#getRegionLocation(Connection, byte[])} * else null. 
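For reference, a minimal standalone sketch of the backoff-plus-jitter computation that the ConnectionUtils hunk above moves from a shared Random onto ThreadLocalRandom; the backoff table below is an illustrative stand-in for HConstants.RETRY_BACKOFF, not a copy of it.

import java.util.concurrent.ThreadLocalRandom;

public final class BackoffSketch {
  // Illustrative stand-in for HConstants.RETRY_BACKOFF.
  private static final int[] RETRY_BACKOFF = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};

  static long getPauseTime(long pause, int tries) {
    int ntries = Math.min(tries, RETRY_BACKOFF.length - 1);
    long normalPause = pause * RETRY_BACKOFF[ntries];
    // Up to 1% jitter, drawn from a per-thread generator so concurrent callers
    // do not contend on a single shared java.util.Random instance.
    long jitter = (long) (normalPause * ThreadLocalRandom.current().nextFloat() * 0.01f);
    return normalPause + jitter;
  }

  public static void main(String[] args) {
    for (int t = 0; t < 5; t++) {
      System.out.println("try " + t + " -> pause " + getPauseTime(100, t) + " ms");
    }
  }
}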
* Throw IllegalArgumentException if regionName is null. * @throws IOException diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java deleted file mode 100644 index cc5e9fa..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ /dev/null @@ -1,626 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.ExecutorService; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MasterNotRunningException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; - -/** - * A cluster connection. Knows how to find the master, locate regions out on the cluster, - * keeps a cache of locations and then knows how to re-calibrate after they move. You need one - * of these to talk to your HBase cluster. {@link ConnectionFactory} manages instances of this - * class. See it for how to get one of these. - * - *
This is NOT a connection to a particular server but to ALL servers in the cluster. Individual - * connections are managed at a lower level. - * - *
HConnections are used by {@link HTable} mostly but also by - * {@link HBaseAdmin}, and {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}. - * - * @see ConnectionFactory - * @deprecated in favor of {@link Connection} and {@link ConnectionFactory} - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -@Deprecated -public interface HConnection extends Connection { - /** - * Key for configuration in Configuration whose value is the class we implement making a - * new HConnection instance. - */ - public static final String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl"; - - /** - * @return Configuration instance being used by this HConnection instance. - */ - @Override - Configuration getConfiguration(); - - /** - * Retrieve an HTableInterface implementation for access to a table. - * The returned HTableInterface is not thread safe, a new instance should - * be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned HTableInterface - * is neither required nor desired. - * (created with {@link ConnectionFactory#createConnection(Configuration)}). - * @param tableName - * @return an HTable to use for interactions with this table - */ - public HTableInterface getTable(String tableName) throws IOException; - - /** - * Retrieve an HTableInterface implementation for access to a table. - * The returned HTableInterface is not thread safe, a new instance should - * be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned HTableInterface - * is neither required nor desired. - * (created with {@link ConnectionFactory#createConnection(Configuration)}). - * @param tableName - * @return an HTable to use for interactions with this table - */ - public HTableInterface getTable(byte[] tableName) throws IOException; - - /** - * Retrieve an HTableInterface implementation for access to a table. - * The returned HTableInterface is not thread safe, a new instance should - * be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned HTableInterface - * is neither required nor desired. - * (created with {@link ConnectionFactory#createConnection(Configuration)}). - * @param tableName - * @return an HTable to use for interactions with this table - */ - @Override - public HTableInterface getTable(TableName tableName) throws IOException; - - /** - * Retrieve an HTableInterface implementation for access to a table. - * The returned HTableInterface is not thread safe, a new instance should - * be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned HTableInterface - * is neither required nor desired. - * (created with {@link ConnectionFactory#createConnection(Configuration)}). - * @param tableName - * @param pool The thread pool to use for batch operations, null to use a default pool. - * @return an HTable to use for interactions with this table - */ - public HTableInterface getTable(String tableName, ExecutorService pool) throws IOException; - - /** - * Retrieve an HTableInterface implementation for access to a table. - * The returned HTableInterface is not thread safe, a new instance should - * be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned HTableInterface - * is neither required nor desired. - * (created with {@link ConnectionFactory#createConnection(Configuration)}). 
- * @param tableName - * @param pool The thread pool to use for batch operations, null to use a default pool. - * @return an HTable to use for interactions with this table - */ - public HTableInterface getTable(byte[] tableName, ExecutorService pool) throws IOException; - - /** - * Retrieve an HTableInterface implementation for access to a table. - * The returned HTableInterface is not thread safe, a new instance should - * be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned HTableInterface - * is neither required nor desired. - * (created with {@link ConnectionFactory#createConnection(Configuration)}). - * @param tableName table to get interface for - * @param pool The thread pool to use for batch operations, null to use a default pool. - * @return an HTable to use for interactions with this table - */ - @Override - public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException; - - /** - * Retrieve a RegionLocator implementation to inspect region information on a table. The returned - * RegionLocator is not thread-safe, so a new instance should be created for each using thread. - * - * This is a lightweight operation. Pooling or caching of the returned RegionLocator is neither - * required nor desired. - * @param tableName Name of the table who's region is to be examined - * @return A RegionLocator instance - */ - @Override - public RegionLocator getRegionLocator(TableName tableName) throws IOException; - - /** - * Retrieve an Admin implementation to administer an HBase cluster. - * The returned Admin is not guaranteed to be thread-safe. A new instance should be created for - * each using thread. This is a lightweight operation. Pooling or caching of the returned - * Admin is not recommended. - * - * @return an Admin instance for cluster administration - */ - @Override - Admin getAdmin() throws IOException; - - /** @return - true if the master server is running - * @deprecated internal method, do not use thru HConnection */ - @Deprecated - boolean isMasterRunning() - throws MasterNotRunningException, ZooKeeperConnectionException; - - /** - * A table that isTableEnabled == false and isTableDisabled == false - * is possible. This happens when a table has a lot of regions - * that must be processed. - * @param tableName table name - * @return true if the table is enabled, false otherwise - * @throws IOException if a remote or network exception occurs - */ - boolean isTableEnabled(TableName tableName) throws IOException; - - /** - * @deprecated instead use {@link #isTableEnabled(TableName)} - */ - @Deprecated - boolean isTableEnabled(byte[] tableName) throws IOException; - - /** - * @param tableName table name - * @return true if the table is disabled, false otherwise - * @throws IOException if a remote or network exception occurs - */ - boolean isTableDisabled(TableName tableName) throws IOException; - - /** - * @deprecated instead use {@link #isTableDisabled(TableName)} - */ - @Deprecated - boolean isTableDisabled(byte[] tableName) throws IOException; - - /** - * Retrieve TableState, represent current table state. 
- * @param tableName table state for - * @return state of the table - */ - public TableState getTableState(TableName tableName) throws IOException; - - /** - * @param tableName table name - * @return true if all regions of the table are available, false otherwise - * @throws IOException if a remote or network exception occurs - */ - boolean isTableAvailable(TableName tableName) throws IOException; - - /** - * @deprecated instead use {@link #isTableAvailable(TableName)} - */ - @Deprecated - boolean isTableAvailable(byte[] tableName) throws IOException; - - /** - * Use this api to check if the table has been created with the specified number of - * splitkeys which was used while creating the given table. - * Note : If this api is used after a table's region gets splitted, the api may return - * false. - * @param tableName tableName - * @param splitKeys splitKeys used while creating table - * @throws IOException if a remote or network exception occurs - * @deprecated internal method, do not use through HConnection */ - @Deprecated - boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException; - - /** - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException; - - /** - * List all the userspace tables. In other words, scan the hbase:meta table. - * - * @return - returns an array of HTableDescriptors - * @throws IOException if a remote or network exception occurs - * @deprecated Use {@link Admin#listTables()} instead. - */ - @Deprecated - HTableDescriptor[] listTables() throws IOException; - - // This is a bit ugly - We call this getTableNames in 0.94 and the - // successor function, returning TableName, listTableNames in later versions - // because Java polymorphism doesn't consider return value types - - /** - * @deprecated Use {@link Admin#listTableNames()} instead. - */ - @Deprecated - String[] getTableNames() throws IOException; - - /** - * @deprecated Use {@link Admin#listTables()} instead. - */ - @Deprecated - TableName[] listTableNames() throws IOException; - - /** - * @param tableName table name - * @return table metadata - * @throws IOException if a remote or network exception occurs - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - HTableDescriptor getHTableDescriptor(TableName tableName) - throws IOException; - - /** - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - HTableDescriptor getHTableDescriptor(byte[] tableName) - throws IOException; - - /** - * Find the location of the region of tableName that row - * lives in. - * @param tableName name of the table row is in - * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the region in - * question - * @throws IOException if a remote or network exception occurs - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - public HRegionLocation locateRegion(final TableName tableName, - final byte [] row) throws IOException; - - /** - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - public HRegionLocation locateRegion(final byte[] tableName, - final byte [] row) throws IOException; - - /** - * Allows flushing the region cache. 
- * @deprecated internal method, do not use through HConnection */ - @Deprecated - void clearRegionCache(); - - /** - * Allows flushing the region cache of all locations that pertain to - * tableName - * @param tableName Name of the table whose regions we are to remove from - * cache. - * @deprecated internal method, do not use through HConnection */ - @Deprecated - void clearRegionCache(final TableName tableName); - - /** - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - void clearRegionCache(final byte[] tableName); - - /** - * Deletes cached locations for the specific region. - * @param location The location object for the region, to be purged from cache. - * @deprecated internal method, do not use thru HConnection */ - @Deprecated - void deleteCachedRegionLocation(final HRegionLocation location); - - /** - * Find the location of the region of tableName that row - * lives in, ignoring any value that might be in the cache. - * @param tableName name of the table row is in - * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the region in - * question - * @throws IOException if a remote or network exception occurs - * @deprecated internal method, do not use through HConnection */ - @Deprecated - HRegionLocation relocateRegion(final TableName tableName, - final byte [] row) throws IOException; - - /** - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - HRegionLocation relocateRegion(final byte[] tableName, - final byte [] row) throws IOException; - - /** - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - void updateCachedLocations(TableName tableName, byte[] rowkey, - Object exception, HRegionLocation source); - - /** - * Update the location cache. This is used internally by HBase, in most cases it should not be - * used by the client application. - * @param tableName the table name - * @param regionName the regionName - * @param rowkey the row - * @param exception the exception if any. Can be null. - * @param source the previous location - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey, - Object exception, ServerName source); - /** - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - void updateCachedLocations(byte[] tableName, byte[] rowkey, - Object exception, HRegionLocation source); - - /** - * Gets the location of the region of regionName. - * @param regionName name of the region to locate - * @return HRegionLocation that describes where to find the region in - * question - * @throws IOException if a remote or network exception occurs - * @deprecated internal method, do not use thru HConnection */ - @Deprecated - HRegionLocation locateRegion(final byte[] regionName) - throws IOException; - - /** - * Gets the locations of all regions in the specified table, tableName. - * @param tableName table to get regions of - * @return list of region locations for all regions of table - * @throws IOException - * @deprecated internal method, do not use thru HConnection */ - @Deprecated - List locateRegions(final TableName tableName) throws IOException; - - /** - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - List locateRegions(final byte[] tableName) throws IOException; - - /** - * Gets the locations of all regions in the specified table, tableName. 
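The region-location lookups being stripped from HConnection here have public equivalents on RegionLocator; a rough fragment under the assumption of an already-open Connection named connection and a made-up table name:

// Assumes an open Connection named "connection"; table and row key are made up.
try (RegionLocator locator = connection.getRegionLocator(TableName.valueOf("example_table"))) {
  // Stands in for the removed locateRegion(tableName, row).
  HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row-0001"));
  // Stands in for the removed locateRegions(tableName).
  List<HRegionLocation> all = locator.getAllRegionLocations();
  System.out.println("row-0001 is on " + loc.getServerName() + "; regions: " + all.size());
}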
- * @param tableName table to get regions of - * @param useCache Should we use the cache to retrieve the region information. - * @param offlined True if we are to include offlined regions, false and we'll leave out offlined - * regions from returned list. - * @return list of region locations for all regions of table - * @throws IOException - * @deprecated internal method, do not use thru HConnection - */ - @Deprecated - public List locateRegions(final TableName tableName, - final boolean useCache, - final boolean offlined) throws IOException; - - /** - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - public List locateRegions(final byte[] tableName, - final boolean useCache, - final boolean offlined) throws IOException; - - /** - * Returns a {@link MasterKeepAliveConnection} to the active master - * @deprecated internal method, do not use thru HConnection */ - @Deprecated - MasterService.BlockingInterface getMaster() throws IOException; - - - /** - * Establishes a connection to the region server at the specified address. - * @param serverName - * @return proxy for HRegionServer - * @throws IOException if a remote or network exception occurs - * @deprecated internal method, do not use thru HConnection */ - @Deprecated - AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException; - - /** - * Establishes a connection to the region server at the specified address, and returns - * a region client protocol. - * - * @param serverName - * @return ClientProtocol proxy for RegionServer - * @throws IOException if a remote or network exception occurs - * @deprecated internal method, do not use thru HConnection */ - @Deprecated - ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException; - - /** - * Establishes a connection to the region server at the specified address. - * @param serverName - * @param getMaster do we check if master is alive - * @return proxy for HRegionServer - * @throws IOException if a remote or network exception occurs - * @deprecated You can pass master flag but nothing special is done. - */ - @Deprecated - AdminService.BlockingInterface getAdmin(final ServerName serverName, boolean getMaster) - throws IOException; - - /** - * Find region location hosting passed row - * @param tableName table name - * @param row Row to find. - * @param reload If true do not use cache, otherwise bypass. - * @return Location of row. - * @throws IOException if a remote or network exception occurs - * @deprecated internal method, do not use thru HConnection */ - @Deprecated - HRegionLocation getRegionLocation(TableName tableName, byte [] row, - boolean reload) - throws IOException; - - /** - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - HRegionLocation getRegionLocation(byte[] tableName, byte [] row, - boolean reload) - throws IOException; - - /** - * Process a mixed batch of Get, Put and Delete actions. All actions for a - * RegionServer are forwarded in one RPC call. - * - * - * @param actions The collection of actions. - * @param tableName Name of the hbase table - * @param pool thread pool for parallel execution - * @param results An empty array, same size as list. If an exception is thrown, - * you can test here for partial results, and to determine which actions - * processed successfully. - * @throws IOException if there are problems talking to META. Per-item - * exceptions are stored in the results array. 
- * @deprecated since 0.96 - Use {@link HTableInterface#batch} instead - */ - @Deprecated - void processBatch(List actions, final TableName tableName, - ExecutorService pool, Object[] results) throws IOException, InterruptedException; - - /** - * @deprecated internal method, do not use through HConnection - */ - @Deprecated - void processBatch(List actions, final byte[] tableName, - ExecutorService pool, Object[] results) throws IOException, InterruptedException; - - /** - * Parameterized batch processing, allowing varying return types for different - * {@link Row} implementations. - * @deprecated since 0.96 - Use {@link HTableInterface#batchCallback} instead - */ - @Deprecated - public void processBatchCallback(List list, - final TableName tableName, - ExecutorService pool, - Object[] results, - Batch.Callback callback) throws IOException, InterruptedException; - - /** - * @deprecated Unsupported API - */ - @Deprecated - public void processBatchCallback(List list, - final byte[] tableName, - ExecutorService pool, - Object[] results, - Batch.Callback callback) throws IOException, InterruptedException; - - /** - * @deprecated does nothing since since 0.99 - **/ - @Deprecated - public void setRegionCachePrefetch(final TableName tableName, - final boolean enable); - - /** - * @deprecated does nothing since 0.99 - **/ - @Deprecated - public void setRegionCachePrefetch(final byte[] tableName, - final boolean enable); - - /** - * @deprecated always return false since 0.99 - **/ - @Deprecated - boolean getRegionCachePrefetch(final TableName tableName); - - /** - * @deprecated always return false since 0.99 - **/ - @Deprecated - boolean getRegionCachePrefetch(final byte[] tableName); - - /** - * @return the number of region servers that are currently running - * @throws IOException if a remote or network exception occurs - * @deprecated This method will be changed from public to package protected. - */ - @Deprecated - int getCurrentNrHRS() throws IOException; - - /** - * @param tableNames List of table names - * @return HTD[] table metadata - * @throws IOException if a remote or network exception occurs - * @deprecated Use {@link Admin#getTableDescriptor(TableName)} instead. - */ - @Deprecated - HTableDescriptor[] getHTableDescriptorsByTableName(List tableNames) throws IOException; - - /** - * @deprecated since 0.96.0 - */ - @Deprecated - HTableDescriptor[] getHTableDescriptors(List tableNames) throws - IOException; - - /** - * @return true if this connection is closed - */ - @Override - boolean isClosed(); - - - /** - * Clear any caches that pertain to server name sn. - * @param sn A server name - * @deprecated internal method, do not use thru HConnection */ - @Deprecated - void clearCaches(final ServerName sn); - - /** - * This function allows HBaseAdmin and potentially others to get a shared MasterService - * connection. - * @return The shared instance. Never returns null. - * @throws MasterNotRunningException - * @deprecated Since 0.96.0 - */ - // TODO: Why is this in the public interface when the returned type is shutdown package access? - @Deprecated - MasterKeepAliveConnection getKeepAliveMasterService() - throws MasterNotRunningException; - - /** - * @param serverName - * @return true if the server is known as dead, false otherwise. - * @deprecated internal method, do not use thru HConnection */ - @Deprecated - boolean isDeadServer(ServerName serverName); - - /** - * @return Nonce generator for this HConnection; may be null if disabled in configuration. 
- * @deprecated internal method, do not use thru HConnection */ - @Deprecated - public NonceGenerator getNonceGenerator(); -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index bf9ec22..54fbfe9 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -18,6 +18,12 @@ */ package org.apache.hadoop.hbase.client; +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Message; +import com.google.protobuf.Service; +import com.google.protobuf.ServiceException; + import java.io.IOException; import java.io.InterruptedIOException; import java.util.ArrayList; @@ -68,12 +74,6 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.Threads; -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.Descriptors; -import com.google.protobuf.Message; -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; - /** * An implementation of {@link Table}. Used to communicate with a single HBase table. * Lightweight. Get as needed and just close when done. @@ -100,14 +100,13 @@ import com.google.protobuf.ServiceException; */ @InterfaceAudience.Private @InterfaceStability.Stable -public class HTable implements HTableInterface { +public class HTable implements Table { private static final Log LOG = LogFactory.getLog(HTable.class); protected ClusterConnection connection; private final TableName tableName; private volatile Configuration configuration; private ConnectionConfiguration connConfiguration; protected BufferedMutatorImpl mutator; - private boolean autoFlush = true; private boolean closed = false; protected int scannerCaching; protected long scannerMaxResultSize; @@ -149,7 +148,7 @@ public class HTable implements HTableInterface { * Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to * get a {@link Table} instance (use {@link Table} instead of {@link HTable}). * @param tableName Name of the table. - * @param connection HConnection to be used. + * @param connection Connection to be used. * @param pool ExecutorService to be used. * @throws IOException if a remote or network exception occurs */ @@ -237,14 +236,6 @@ public class HTable implements HTableInterface { return configuration; } - /** - * {@inheritDoc} - */ - @Override - public byte [] getTableName() { - return this.tableName.getName(); - } - @Override public TableName getName() { return tableName; @@ -253,13 +244,10 @@ public class HTable implements HTableInterface { /** * INTERNAL Used by unit tests and tools to do low-level * manipulations. - * @return An HConnection instance. - * @deprecated This method will be changed from public to package protected. + * @return A Connection instance. */ - // TODO(tsuna): Remove this. Unit tests shouldn't require public helpers. - @Deprecated @VisibleForTesting - public HConnection getConnection() { + protected Connection getConnection() { return this.connection; } @@ -331,7 +319,7 @@ public class HTable implements HTableInterface { /** * The underlying {@link HTable} must not be closed. - * {@link HTableInterface#getScanner(Scan)} has other usage details. + * {@link Table#getScanner(Scan)} has other usage details. 
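With HConnection removed above, callers are expected to go through Connection and ConnectionFactory; a hedged end-to-end sketch (table, family and qualifier names are invented):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class ConnectionFactorySketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("example_table");
    // One heavyweight Connection per process; Table and Admin are cheap, per-use handles.
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(name);
         Admin admin = connection.getAdmin()) {
      table.put(new Put(Bytes.toBytes("row-0001"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      System.out.println("table exists: " + admin.tableExists(name));
    }
  }
}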
*/ @Override public ResultScanner getScanner(final Scan scan) throws IOException { @@ -382,7 +370,7 @@ public class HTable implements HTableInterface { /** * The underlying {@link HTable} must not be closed. - * {@link HTableInterface#getScanner(byte[])} has other usage details. + * {@link Table#getScanner(byte[])} has other usage details. */ @Override public ResultScanner getScanner(byte [] family) throws IOException { @@ -393,7 +381,7 @@ public class HTable implements HTableInterface { /** * The underlying {@link HTable} must not be closed. - * {@link HTableInterface#getScanner(byte[], byte[])} has other usage details. + * {@link Table#getScanner(byte[], byte[])} has other usage details. */ @Override public ResultScanner getScanner(byte [] family, byte [] qualifier) @@ -500,9 +488,20 @@ public class HTable implements HTableInterface { */ @Override public void batchCallback( - final List actions, final Object[] results, final Batch.Callback callback) - throws IOException, InterruptedException { - connection.processBatchCallback(actions, tableName, pool, results, callback); + final List actions, final Object[] results, final Batch.Callback callback) + throws IOException, InterruptedException { + doBatchWithCallback(actions, results, callback, connection, pool, tableName); + } + + public static void doBatchWithCallback(List actions, Object[] results, + Callback callback, ClusterConnection connection, ExecutorService pool, TableName tableName) + throws InterruptedIOException, RetriesExhaustedWithDetailsException { + AsyncRequestFuture ars = connection.getAsyncProcess().submitAll( + pool, tableName, actions, callback, results); + ars.waitUntilDone(); + if (ars.hasError()) { + throw ars.getErrors(); + } } /** @@ -564,9 +563,7 @@ public class HTable implements HTableInterface { @Override public void put(final Put put) throws IOException { getBufferedMutator().mutate(put); - if (autoFlush) { - flushCommits(); - } + flushCommits(); } /** @@ -576,9 +573,7 @@ public class HTable implements HTableInterface { @Override public void put(final List puts) throws IOException { getBufferedMutator().mutate(puts); - if (autoFlush) { - flushCommits(); - } + flushCommits(); } /** @@ -976,8 +971,7 @@ public class HTable implements HTableInterface { * {@inheritDoc} * @throws IOException */ - @Override - public void flushCommits() throws IOException { + void flushCommits() throws IOException { if (mutator == null) { // nothing to flush if there's no mutator; don't bother creating one. return; @@ -991,10 +985,10 @@ public class HTable implements HTableInterface { * * @param list The collection of actions. * @param results An empty array, same size as list. If an exception is thrown, - * you can test here for partial results, and to determine which actions - * processed successfully. + * you can test here for partial results, and to determine which actions + * processed successfully. * @throws IOException if there are problems talking to META. Per-item - * exceptions are stored in the results array. + * exceptions are stored in the results array. 
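doBatchWithCallback above is the static plumbing behind Table.batchCallback; a rough caller-side fragment, assuming an open Table named table and the usual client imports:

// Assumes an open Table named "table"; family/qualifier names are made up.
List<Row> actions = new ArrayList<Row>();
actions.add(new Get(Bytes.toBytes("row-0001")));
actions.add(new Put(Bytes.toBytes("row-0002"))
    .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
Object[] results = new Object[actions.size()]; // results[i] pairs with actions.get(i)
table.batchCallback(actions, results, new Batch.Callback<Object>() {
  @Override
  public void update(byte[] region, byte[] row, Object result) {
    System.out.println("finished action on row " + Bytes.toStringBinary(row));
  }
});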
*/ public void processBatchCallback( final List list, final Object[] results, final Batch.Callback callback) @@ -1062,30 +1056,6 @@ public class HTable implements HTableInterface { } /** - * {@inheritDoc} - */ - @Override - public boolean isAutoFlush() { - return autoFlush; - } - - /** - * {@inheritDoc} - */ - @Override - public void setAutoFlushTo(boolean autoFlush) { - this.autoFlush = autoFlush; - } - - /** - * {@inheritDoc} - */ - @Override - public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { - this.autoFlush = autoFlush; - } - - /** * Returns the maximum size in bytes of the write buffer for this HTable. *
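With autoFlush and flushCommits dropped from the public surface in the HTable hunks above, buffered writes are expected to go through BufferedMutator instead; a rough fragment assuming an open Connection named connection:

// Assumes an open Connection named "connection"; names are made up.
try (BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf("example_table"))) {
  for (int i = 0; i < 1000; i++) {
    // Mutations are buffered client-side and sent in batches, the role the
    // removed autoFlush=false mode used to play inside HTable.
    mutator.mutate(new Put(Bytes.toBytes("row-" + i))
        .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v" + i)));
  }
  mutator.flush(); // optional; close() also flushes
}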
* The default value comes from the configuration parameter diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java index 4cd81e7..9d41218 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; /** * Used to communicate with a single HBase table. - * Obtain an instance from an {@link HConnection}. + * Obtain an instance from a {@link Connection}. * * @since 0.21.0 * @deprecated use {@link org.apache.hadoop.hbase.client.Table} instead diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java index a636533..66d3c21 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java @@ -26,11 +26,11 @@ import java.io.IOException; * @param return type */ abstract class MasterCallable implements RetryingCallable, Closeable { - protected HConnection connection; + protected ClusterConnection connection; protected MasterKeepAliveConnection master; - public MasterCallable(final HConnection connection) { - this.connection = connection; + public MasterCallable(final Connection connection) { + this.connection = (ClusterConnection) connection; } @Override @@ -41,7 +41,9 @@ abstract class MasterCallable implements RetryingCallable, Closeable { @Override public void close() throws IOException { // The above prepare could fail but this would still be called though masterAdmin is null - if (this.master != null) this.master.close(); + if (this.master != null) { + this.master.close(); + } } @Override diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java index a2ca975..6b06875 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -43,9 +43,10 @@ import org.apache.hadoop.hbase.util.Bytes; /** * Used to perform Put operations for a single row. *
- * To perform a Put, instantiate a Put object with the row to insert to and - * for eachumn to be inserted, execute {@link #addColumn(byte[], byte[], byte[]) add} or - * {@link #addColumn(byte[], byte[], long, byte[]) add} if setting the timestamp. + * To perform a Put, instantiate a Put object with the row to insert to, and + * for each column to be inserted, execute {@link #addColumn(byte[], byte[], + * byte[]) add} or {@link #addColumn(byte[], byte[], long, byte[]) add} if + * setting the timestamp. */ @InterfaceAudience.Public @InterfaceStability.Stable diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java index 725bec0..54c93a0 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java @@ -121,9 +121,9 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< } /** - * @return {@link HConnection} instance used by this Callable. + * @return {@link Connection} instance used by this Callable. */ - HConnection getConnection() { + Connection getConnection() { return this.connection; } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java index 443026f..bfdb216 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java @@ -20,11 +20,11 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -@InterfaceAudience.Public -@InterfaceStability.Evolving /** * POJO representing region server load */ +@InterfaceAudience.Public +@InterfaceStability.Evolving public class RegionLoadStats { int memstoreLoad; int heapOccupancy; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java index 1b0f387..f2cec97 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java @@ -577,12 +577,12 @@ public interface Table extends Closeable { * early and throw SocketTimeoutException. * @param operationTimeout the total timeout of each operation in millisecond. */ - public void setOperationTimeout(int operationTimeout); + void setOperationTimeout(int operationTimeout); /** * Get timeout (millisecond) of each operation for in Table instance. */ - public int getOperationTimeout(); + int getOperationTimeout(); /** * Set timeout (millisecond) of each rpc request in operations of this Table instance, will @@ -591,11 +591,11 @@ public interface Table extends Closeable { * retries exhausted or operation timeout reached. * @param rpcTimeout the timeout of each rpc request in millisecond. */ - public void setRpcTimeout(int rpcTimeout); + void setRpcTimeout(int rpcTimeout); /** * Get timeout (millisecond) of each rpc request in this Table instance. 
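The operation and RPC timeouts tidied up in the Table interface above are per-instance settings; a rough fragment assuming an open Connection named connection:

// Assumes an open Connection named "connection"; the table name is made up.
try (Table table = connection.getTable(TableName.valueOf("example_table"))) {
  table.setOperationTimeout(60000); // budget for the whole call, retries included
  table.setRpcTimeout(10000);       // budget for each individual RPC attempt
  Result r = table.get(new Get(Bytes.toBytes("row-0001")));
  System.out.println("cells returned: " + r.size());
}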
*/ - public int getRpcTimeout(); + int getRpcTimeout(); } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java index 04d4b41..34f7b23 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java @@ -28,9 +28,9 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; /** * We inherit the current ZooKeeperWatcher implementation to change the semantic * of the close: the new close won't immediately close the connection but - * will have a keep alive. See {@link HConnection}. + * will have a keep alive. See {@link ConnectionImplementation}. * This allows to make it available with a consistent interface. The whole - * ZooKeeperWatcher use in HConnection will be then changed to remove the + * ZooKeeperWatcher use in ConnectionImplementation will be then changed to remove the * watcher part. * * This class is intended to be used internally by HBase classes; but not by diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java index a2ad2e7..d062448 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -516,9 +516,9 @@ public class ReplicationAdmin implements Closeable { if (repPeers == null || repPeers.size() <= 0) { throw new IllegalArgumentException("Found no peer cluster for replication."); } - + final TableName onlyTableNameQualifier = TableName.valueOf(tableName.getQualifierAsString()); - + for (ReplicationPeer repPeer : repPeers) { Map> tableCFMap = repPeer.getTableCFs(); // TODO Currently peer TableCFs will not include namespace so we need to check only for table @@ -595,20 +595,11 @@ public class ReplicationAdmin implements Closeable { admin = this.connection.getAdmin(); HTableDescriptor htd = admin.getTableDescriptor(tableName); if (isTableRepEnabled(htd) ^ isRepEnabled) { - boolean isOnlineSchemaUpdateEnabled = - this.connection.getConfiguration() - .getBoolean("hbase.online.schema.update.enable", true); - if (!isOnlineSchemaUpdateEnabled) { - admin.disableTable(tableName); - } for (HColumnDescriptor hcd : htd.getFamilies()) { hcd.setScope(isRepEnabled ? 
HConstants.REPLICATION_SCOPE_GLOBAL : HConstants.REPLICATION_SCOPE_LOCAL); } admin.modifyTable(tableName, htd); - if (!isOnlineSchemaUpdateEnabled) { - admin.enableTable(tableName); - } } } finally { if (admin != null) { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java index 63fd0a3..d36b158 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.filter; +import java.util.Locale; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -53,8 +54,8 @@ public class SubstringComparator extends ByteArrayComparable { * @param substr the substring */ public SubstringComparator(String substr) { - super(Bytes.toBytes(substr.toLowerCase())); - this.substr = substr.toLowerCase(); + super(Bytes.toBytes(substr.toLowerCase(Locale.ROOT))); + this.substr = substr.toLowerCase(Locale.ROOT); } @Override @@ -64,7 +65,7 @@ public class SubstringComparator extends ByteArrayComparable { @Override public int compareTo(byte[] value, int offset, int length) { - return Bytes.toString(value, offset, length).toLowerCase().contains(substr) ? 0 + return Bytes.toString(value, offset, length).toLowerCase(Locale.ROOT).contains(substr) ? 0 : 1; } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 71c8875..3d3339a 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -30,6 +30,7 @@ import java.net.ConnectException; import java.net.InetSocketAddress; import java.net.SocketAddress; import java.net.SocketTimeoutException; +import java.net.UnknownHostException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -266,7 +267,7 @@ public abstract class AbstractRpcClient implements RpcClient { @Override public BlockingRpcChannel createBlockingRpcChannel(final ServerName sn, final User ticket, - int defaultOperationTimeout) { + int defaultOperationTimeout) throws UnknownHostException { return new BlockingRpcChannelImplementation(this, sn, ticket, defaultOperationTimeout); } @@ -332,8 +333,12 @@ public abstract class AbstractRpcClient implements RpcClient { * @param channelOperationTimeout - the default timeout when no timeout is given */ protected BlockingRpcChannelImplementation(final AbstractRpcClient rpcClient, - final ServerName sn, final User ticket, int channelOperationTimeout) { + final ServerName sn, final User ticket, int channelOperationTimeout) + throws UnknownHostException { this.isa = new InetSocketAddress(sn.getHostname(), sn.getPort()); + if (this.isa.isUnresolved()) { + throw new UnknownHostException(sn.getHostname()); + } this.rpcClient = rpcClient; this.ticket = ticket; this.channelOperationTimeout = channelOperationTimeout; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannelImpl.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannelImpl.java index 2b9000a..6b7dc5b 100644 --- 
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannelImpl.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannelImpl.java @@ -38,6 +38,7 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; +import java.util.Locale; import java.util.List; import java.util.Map; import java.util.Random; @@ -231,6 +232,26 @@ public class AsyncRpcChannelImpl implements AsyncRpcChannel { } } + private void startConnectionWithEncryption(Channel ch) { + // for rpc encryption, the order of ChannelInboundHandler should be: + // LengthFieldBasedFrameDecoder->SaslClientHandler->LengthFieldBasedFrameDecoder + // Don't skip the first 4 bytes for length in beforeUnwrapDecoder, + // SaslClientHandler will handler this + ch.pipeline().addFirst("beforeUnwrapDecoder", + new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 0)); + ch.pipeline().addLast("afterUnwrapDecoder", + new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4)); + ch.pipeline().addLast(new AsyncServerResponseHandler(this)); + List callsToWrite; + synchronized (pendingCalls) { + connected = true; + callsToWrite = new ArrayList(pendingCalls.values()); + } + for (AsyncCall call : callsToWrite) { + writeRequest(call); + } + } + /** * Get SASL handler * @param bootstrap to reconnect to @@ -242,7 +263,8 @@ public class AsyncRpcChannelImpl implements AsyncRpcChannel { return new SaslClientHandler(realTicket, authMethod, token, serverPrincipal, client.fallbackAllowed, client.conf.get("hbase.rpc.protection", - SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase()), + SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)), + getChannelHeaderBytes(authMethod), new SaslClientHandler.SaslExceptionHandler() { @Override public void handle(int retryCount, Random random, Throwable cause) { @@ -261,6 +283,11 @@ public class AsyncRpcChannelImpl implements AsyncRpcChannel { public void onSuccess(Channel channel) { startHBaseConnection(channel); } + + @Override + public void onSaslProtectionSucess(Channel channel) { + startConnectionWithEncryption(channel); + } }); } @@ -341,6 +368,25 @@ public class AsyncRpcChannelImpl implements AsyncRpcChannel { * @throws java.io.IOException on failure to write */ private ChannelFuture writeChannelHeader(Channel channel) throws IOException { + RPCProtos.ConnectionHeader header = getChannelHeader(authMethod); + int totalSize = IPCUtil.getTotalSizeWhenWrittenDelimited(header); + ByteBuf b = channel.alloc().directBuffer(totalSize); + + b.writeInt(header.getSerializedSize()); + b.writeBytes(header.toByteArray()); + + return channel.writeAndFlush(b); + } + + private byte[] getChannelHeaderBytes(AuthMethod authMethod) { + RPCProtos.ConnectionHeader header = getChannelHeader(authMethod); + ByteBuffer b = ByteBuffer.allocate(header.getSerializedSize() + 4); + b.putInt(header.getSerializedSize()); + b.put(header.toByteArray()); + return b.array(); + } + + private RPCProtos.ConnectionHeader getChannelHeader(AuthMethod authMethod) { RPCProtos.ConnectionHeader.Builder headerBuilder = RPCProtos.ConnectionHeader.newBuilder() .setServiceName(serviceName); @@ -357,16 +403,7 @@ public class AsyncRpcChannelImpl implements AsyncRpcChannel { } headerBuilder.setVersionInfo(ProtobufUtil.getVersionInfo()); - RPCProtos.ConnectionHeader header = headerBuilder.build(); - - int totalSize = IPCUtil.getTotalSizeWhenWrittenDelimited(header); - - ByteBuf b = channel.alloc().directBuffer(totalSize); - - 
b.writeInt(header.getSerializedSize()); - b.writeBytes(header.toByteArray()); - - return channel.writeAndFlush(b); + return headerBuilder.build(); } /** @@ -443,7 +480,7 @@ public class AsyncRpcChannelImpl implements AsyncRpcChannel { throw new IOException("Can't obtain server Kerberos config key from SecurityInfo"); } this.serverPrincipal = SecurityUtil.getServerPrincipal(client.conf.get(serverKey), - address.getAddress().getCanonicalHostName().toLowerCase()); + address.getAddress().getCanonicalHostName().toLowerCase(Locale.ROOT)); if (LOG.isDebugEnabled()) { LOG.debug("RPC Server Kerberos principal name for service=" + serviceName + " is " + serverPrincipal); @@ -661,7 +698,7 @@ public class AsyncRpcChannelImpl implements AsyncRpcChannel { } else { String msg = "Couldn't setup connection for " + UserGroupInformation.getLoginUser().getUserName() + " to " + serverPrincipal; - LOG.warn(msg); + LOG.warn(msg, ex); throw (IOException) new IOException(msg).initCause(ex); } } else { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java index 00eea7a..dc05af1 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java @@ -45,6 +45,7 @@ import java.security.PrivilegedExceptionAction; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import java.util.Random; @@ -331,7 +332,7 @@ public class RpcClientImpl extends AbstractRpcClient { "Can't obtain server Kerberos config key from SecurityInfo"); } serverPrincipal = SecurityUtil.getServerPrincipal( - conf.get(serverKey), server.getAddress().getCanonicalHostName().toLowerCase()); + conf.get(serverKey), server.getAddress().getCanonicalHostName().toLowerCase(Locale.ROOT)); if (LOG.isDebugEnabled()) { LOG.debug("RPC Server Kerberos principal name for service=" + remoteId.getServiceName() + " is " + serverPrincipal); @@ -618,7 +619,7 @@ public class RpcClientImpl extends AbstractRpcClient { final OutputStream out2) throws IOException { saslRpcClient = new HBaseSaslRpcClient(authMethod, token, serverPrincipal, fallbackAllowed, conf.get("hbase.rpc.protection", - QualityOfProtection.AUTHENTICATION.name().toLowerCase())); + QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT))); return saslRpcClient.saslConnect(in2, out2); } @@ -672,7 +673,7 @@ public class RpcClientImpl extends AbstractRpcClient { String msg = "Couldn't setup connection for " + UserGroupInformation.getLoginUser().getUserName() + " to " + serverPrincipal; - LOG.warn(msg); + LOG.warn(msg, ex); throw (IOException) new IOException(msg).initCause(ex); } } else { @@ -875,7 +876,7 @@ public class RpcClientImpl extends AbstractRpcClient { } protected void tracedWriteRequest(Call call, int priority, Span span) throws IOException { - try (TraceScope ignored = Trace.continueSpan(span)) { + try (TraceScope ignored = Trace.startSpan("RpcClientImpl.tracedWriteRequest", span)) { writeRequest(call, priority, span); } } @@ -1201,9 +1202,8 @@ public class RpcClientImpl extends AbstractRpcClient { } if (connsToClose != null) { for (Connection conn : connsToClose) { - if (conn.markClosed(new InterruptedIOException("RpcClient is closing"))) { - conn.close(); - } + conn.markClosed(new InterruptedIOException("RpcClient is closing")); + conn.close(); } } // wait until all connections are closed 
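Several hunks above (SubstringComparator, the SASL setup in AsyncRpcChannelImpl and RpcClientImpl) pin case conversion to Locale.ROOT; a small standalone illustration of the default-locale pitfall this guards against:

import java.util.Locale;

public final class LocaleCaseDemo {
  public static void main(String[] args) {
    String qop = "AUTHENTICATION";
    // Under a Turkish default locale, the capital I lowercases to a dotless ı,
    // so the result no longer matches the expected configuration string.
    System.out.println(qop.toLowerCase(Locale.forLanguageTag("tr"))); // authentıcatıon
    System.out.println(qop.toLowerCase(Locale.ROOT));                 // authentication
  }
}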
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/SyncCoprocessorRpcChannel.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/SyncCoprocessorRpcChannel.java index af8ddd4..347d8a1 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/SyncCoprocessorRpcChannel.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/SyncCoprocessorRpcChannel.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.protobuf.ResponseConverter; * Base class which provides clients with an RPC connection to * call coprocessor endpoint {@link com.google.protobuf.Service}s. * Note that clients should not use this class directly, except through - * {@link org.apache.hadoop.hbase.client.HTableInterface#coprocessorService(byte[])}. + * {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}. */ @InterfaceAudience.Public @InterfaceStability.Evolving diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index bbc13ab..fecc3c2 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -31,10 +31,13 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import java.util.NavigableSet; +import java.util.Set; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; @@ -1108,6 +1111,16 @@ public final class ProtobufUtil { return builder.build(); } + static void setTimeRange(final MutationProto.Builder builder, final TimeRange timeRange) { + if (!timeRange.isAllTime()) { + HBaseProtos.TimeRange.Builder timeRangeBuilder = + HBaseProtos.TimeRange.newBuilder(); + timeRangeBuilder.setFrom(timeRange.getMin()); + timeRangeBuilder.setTo(timeRange.getMax()); + builder.setTimeRange(timeRangeBuilder.build()); + } + } + /** * Convert a client Increment to a protobuf Mutate. 
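The setTimeRange helper factored out in the ProtobufUtil hunk above copies an Increment's TimeRange into the protobuf mutation it builds; a rough client-side fragment assuming an open Table named table:

// Assumes an open Table named "table"; names are made up.
Increment inc = new Increment(Bytes.toBytes("row-0001"));
inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("counter"), 1L);
// Only cells whose timestamps fall in this range are read when applying the increment.
inc.setTimeRange(0L, System.currentTimeMillis());
table.increment(inc);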
* @@ -1123,13 +1136,7 @@ public final class ProtobufUtil { builder.setNonce(nonce); } TimeRange timeRange = increment.getTimeRange(); - if (!timeRange.isAllTime()) { - HBaseProtos.TimeRange.Builder timeRangeBuilder = - HBaseProtos.TimeRange.newBuilder(); - timeRangeBuilder.setFrom(timeRange.getMin()); - timeRangeBuilder.setTo(timeRange.getMax()); - builder.setTimeRange(timeRangeBuilder.build()); - } + setTimeRange(builder, timeRange); ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); for (Map.Entry> family: increment.getFamilyCellMap().entrySet()) { @@ -1250,6 +1257,9 @@ public final class ProtobufUtil { final MutationProto.Builder builder, long nonce) throws IOException { getMutationBuilderAndSetCommonFields(type, mutation, builder); builder.setAssociatedCellCount(mutation.size()); + if (mutation instanceof Increment) { + setTimeRange(builder, ((Increment)mutation).getTimeRange()); + } if (nonce != HConstants.NO_NONCE) { builder.setNonce(nonce); } @@ -3445,7 +3455,7 @@ public final class ProtobufUtil { */ public static HBaseProtos.SnapshotDescription.Type createProtosSnapShotDescType(String snapshotDesc) { - return HBaseProtos.SnapshotDescription.Type.valueOf(snapshotDesc.toUpperCase()); + return HBaseProtos.SnapshotDescription.Type.valueOf(snapshotDesc.toUpperCase(Locale.ROOT)); } /** @@ -3528,12 +3538,11 @@ public final class ProtobufUtil { backupMasters.add(ProtobufUtil.toServerName(sn)); } - Map rit = null; - rit = new HashMap(proto.getRegionsInTransitionList().size()); + Set rit = null; + rit = new HashSet(proto.getRegionsInTransitionList().size()); for (RegionInTransition region : proto.getRegionsInTransitionList()) { - String key = new String(region.getSpec().getValue().toByteArray()); RegionState value = RegionState.convert(region.getRegionState()); - rit.put(key, value); + rit.add(value); } String[] masterCoprocessors = null; @@ -3577,11 +3586,11 @@ public final class ProtobufUtil { } if (status.getRegionsInTransition() != null) { - for (Map.Entry rit : status.getRegionsInTransition().entrySet()) { - ClusterStatusProtos.RegionState rs = rit.getValue().convert(); + for (RegionState rit : status.getRegionsInTransition()) { + ClusterStatusProtos.RegionState rs = rit.convert(); RegionSpecifier.Builder spec = RegionSpecifier.newBuilder().setType(RegionSpecifierType.REGION_NAME); - spec.setValue(ByteStringer.wrap(Bytes.toBytes(rit.getKey()))); + spec.setValue(ByteStringer.wrap(rit.getRegion().getRegionName())); RegionInTransition pbRIT = RegionInTransition.newBuilder().setSpec(spec.build()).setRegionState(rs).build(); diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 91e77ca..e264a4d 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.replication; +import org.apache.commons.lang.reflect.ConstructorUtils; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; @@ -30,9 +31,11 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @InterfaceAudience.Private public class ReplicationFactory { - public static ReplicationQueues getReplicationQueues(final ZooKeeperWatcher zk, - 
Configuration conf, Abortable abortable) { - return new ReplicationQueuesZKImpl(zk, conf, abortable); + public static ReplicationQueues getReplicationQueues(ReplicationQueuesArguments args) + throws Exception { + Class classToBuild = args.getConf().getClass("hbase.region.replica." + + "replication.ReplicationQueuesType", ReplicationQueuesZKImpl.class); + return (ReplicationQueues) ConstructorUtils.invokeConstructor(classToBuild, args); } public static ReplicationQueuesClient getReplicationQueuesClient(final ZooKeeperWatcher zk, @@ -44,7 +47,7 @@ public class ReplicationFactory { Abortable abortable) { return getReplicationPeers(zk, conf, null, abortable); } - + public static ReplicationPeers getReplicationPeers(final ZooKeeperWatcher zk, Configuration conf, final ReplicationQueuesClient queuesClient, Abortable abortable) { return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable); diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index 7799de6..1d2066c 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -96,8 +96,10 @@ public class ReplicationPeerConfig { @Override public String toString() { StringBuilder builder = new StringBuilder("clusterKey=").append(clusterKey).append(","); - builder.append("replicationEndpointImpl=").append(replicationEndpointImpl).append(",") - .append("tableCFs=").append(tableCFsMap.toString()); + builder.append("replicationEndpointImpl=").append(replicationEndpointImpl).append(","); + if (tableCFsMap != null) { + builder.append("tableCFs=").append(tableCFsMap.toString()); + } return builder.toString(); } } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java index 09d2100..5af97c2 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java @@ -129,17 +129,6 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re ZKUtil.createWithParents(this.zookeeper, this.peersZNode); - // Irrespective of bulk load hfile replication is enabled or not we add peerId node to - // hfile-refs node -- HBASE-15397 - try { - String peerId = ZKUtil.joinZNode(this.hfileRefsZNode, id); - LOG.info("Adding peer " + peerId + " to hfile reference queue."); - ZKUtil.createWithParents(this.zookeeper, peerId); - } catch (KeeperException e) { - throw new ReplicationException("Failed to add peer with id=" + id - + ", node under hfile references node.", e); - } - List listOfOps = new ArrayList(); ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(getPeerNode(id), ReplicationSerDeHelper.toByteArray(peerConfig)); @@ -166,16 +155,6 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re + " because that id does not exist."); } ZKUtil.deleteNodeRecursively(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id)); - // Delete peerId node from hfile-refs node irrespective of whether bulk loaded hfile - // replication is enabled or not - - String peerId = ZKUtil.joinZNode(this.hfileRefsZNode, id); - try { - LOG.info("Removing peer " + peerId + " from hfile reference queue."); - 
ZKUtil.deleteNodeRecursively(this.zookeeper, peerId); - } catch (NoNodeException e) { - LOG.info("Did not find node " + peerId + " to delete.", e); - } } catch (KeeperException e) { throw new ReplicationException("Could not remove peer with id=" + id, e); } @@ -550,6 +529,12 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re } } } + // Check for hfile-refs queue + if (-1 != ZKUtil.checkExists(zookeeper, hfileRefsZNode) + && queuesClient.getAllPeersFromHFileRefsQueue().contains(peerId)) { + throw new ReplicationException("Undeleted queue for peerId: " + peerId + + ", found in hfile-refs node path " + hfileRefsZNode); + } } catch (KeeperException e) { throw new ReplicationException("Could not check queues deleted with id=" + peerId, e); } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java index 0d47a88..809b122 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java @@ -83,13 +83,13 @@ public interface ReplicationQueues { /** * Get a list of all WALs in the given queue. * @param queueId a String that identifies the queue - * @return a list of WALs, null if this region server is dead and has no outstanding queues + * @return a list of WALs, null if no such queue exists for this server */ List getLogsInQueue(String queueId); /** * Get a list of all queues for this region server. - * @return a list of queueIds, null if this region server is dead and has no outstanding queues + * @return a list of queueIds, an empty list if this region server is dead and has no outstanding queues */ List getAllQueues(); @@ -110,10 +110,10 @@ public interface ReplicationQueues { /** * Checks if the provided znode is the same as this region server's - * @param znode to check + * @param regionserver the id of the region server * @return if this is this rs's znode */ - boolean isThisOurZnode(String znode); + boolean isThisOurRegionServer(String regionserver); /** * Add a peer to hfile reference queue if peer does not exist. @@ -123,6 +123,12 @@ public interface ReplicationQueues { void addPeerToHFileRefs(String peerId) throws ReplicationException; /** + * Remove a peer from hfile reference queue. + * @param peerId peer cluster id to be removed + */ + void removePeerFromHFileRefs(String peerId); + + /** * Add new hfile references to the queue. * @param peerId peer cluster id to which the hfiles need to be replicated * @param files list of hfile references to be added diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java new file mode 100644 index 0000000..4907b73 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java @@ -0,0 +1,66 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; + +@InterfaceAudience.Private +public class ReplicationQueuesArguments { + + private ZooKeeperWatcher zk; + private Configuration conf; + private Abortable abort; + + public ReplicationQueuesArguments(Configuration conf, Abortable abort) { + this.conf = conf; + this.abort = abort; + } + + public ReplicationQueuesArguments(Configuration conf, Abortable abort, ZooKeeperWatcher zk) { + this(conf, abort); + setZk(zk); + } + + public ZooKeeperWatcher getZk() { + return zk; + } + + public void setZk(ZooKeeperWatcher zk) { + this.zk = zk; + } + + public Configuration getConf() { + return conf; + } + + public void setConf(Configuration conf) { + this.conf = conf; + } + + public Abortable getAbort() { + return abort; + } + + public void setAbort(Abortable abort) { + this.abort = abort; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java new file mode 100644 index 0000000..29f0632 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java @@ -0,0 +1,497 @@ +/* +* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.conf.Configuration; + +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.CompareFilter; +import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; +import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.hadoop.hbase.util.RetryCounterFactory; +import sun.reflect.generics.reflectiveObjects.NotImplementedException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.SortedSet; + +@InterfaceAudience.Private +public class ReplicationQueuesHBaseImpl implements ReplicationQueues { + + /** Name of the HBase Table used for tracking replication*/ + public static final TableName REPLICATION_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "replication"); + + // Column family and column names for the Replication Table + private static final byte[] CF = Bytes.toBytes("r"); + private static final byte[] COL_OWNER = Bytes.toBytes("o"); + private static final byte[] COL_QUEUE_ID = Bytes.toBytes("q"); + + // Column Descriptor for the Replication Table + private static final HColumnDescriptor REPLICATION_COL_DESCRIPTOR = + new HColumnDescriptor(CF).setMaxVersions(1) + .setInMemory(true) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + // TODO: Figure out which bloom filter to use + .setBloomFilterType(BloomType.NONE) + .setCacheDataInL1(true); + + // Common byte values used in replication offset tracking + private static final byte[] INITIAL_OFFSET = Bytes.toBytes(0L); + + /* + * Make sure that HBase table operations for replication have a high number of retries. This is + * because the server is aborted if any HBase table operation fails. Each RPC will be attempted + * 3600 times before exiting. This provides each operation with 2 hours of retries + * before the server is aborted. 
+ */ + private static final int CLIENT_RETRIES = 3600; + private static final int RPC_TIMEOUT = 2000; + private static final int OPERATION_TIMEOUT = CLIENT_RETRIES * RPC_TIMEOUT; + + private final Configuration conf; + private final Admin admin; + private final Connection connection; + private final Table replicationTable; + private final Abortable abortable; + private String serverName = null; + private byte[] serverNameBytes = null; + + public ReplicationQueuesHBaseImpl(ReplicationQueuesArguments args) throws IOException { + this(args.getConf(), args.getAbort()); + } + + public ReplicationQueuesHBaseImpl(Configuration conf, Abortable abort) throws IOException { + this.conf = new Configuration(conf); + // Modify the connection's config so that the Replication Table it returns has a much higher + // number of client retries + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, CLIENT_RETRIES); + this.connection = ConnectionFactory.createConnection(conf); + this.admin = connection.getAdmin(); + this.abortable = abort; + replicationTable = createAndGetReplicationTable(); + replicationTable.setRpcTimeout(RPC_TIMEOUT); + replicationTable.setOperationTimeout(OPERATION_TIMEOUT); + } + + @Override + public void init(String serverName) throws ReplicationException { + this.serverName = serverName; + this.serverNameBytes = Bytes.toBytes(serverName); + } + + @Override + public void removeQueue(String queueId) { + try { + byte[] rowKey = this.queueIdToRowKey(queueId); + // The rowkey will be null if the queue cannot be found in the Replication Table + if (rowKey == null) { + String errMsg = "Could not remove non-existent queue with queueId=" + queueId; + abortable.abort(errMsg, new ReplicationException(errMsg)); + return; + } + Delete deleteQueue = new Delete(rowKey); + safeQueueUpdate(deleteQueue); + } catch (IOException e) { + abortable.abort("Could not remove queue with queueId=" + queueId, e); + } + } + + @Override + public void addLog(String queueId, String filename) throws ReplicationException { + try { + // Check if the queue info (Owner, QueueId) is currently stored in the Replication Table + if (this.queueIdToRowKey(queueId) == null) { + // Each queue will have an Owner, QueueId, and a collection of [WAL:offset] key values. 
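// Editor's note (illustrative, not part of the patch): the row this branch creates, under the
// schema declared above (family "r", owner column "o", queue-id column "q"):
//   row key           : <serverName> + "-" + <queueId>   (see buildServerQueueName)
//   r:o               : serverName                       (verified by safeQueueUpdate's checkAndMutate)
//   r:q               : queueId                          (matched by the scans in queueIdToRowKey)
//   r:<WAL file name> : offset stored as a long, starting at INITIAL_OFFSET (0L)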
+ Put putNewQueue = new Put(Bytes.toBytes(buildServerQueueName(queueId))); + putNewQueue.addColumn(CF, COL_OWNER, Bytes.toBytes(serverName)); + putNewQueue.addColumn(CF, COL_QUEUE_ID, Bytes.toBytes(queueId)); + putNewQueue.addColumn(CF, Bytes.toBytes(filename), INITIAL_OFFSET); + replicationTable.put(putNewQueue); + } else { + // Otherwise simply add the new log and offset as a new column + Put putNewLog = new Put(this.queueIdToRowKey(queueId)); + putNewLog.addColumn(CF, Bytes.toBytes(filename), INITIAL_OFFSET); + safeQueueUpdate(putNewLog); + } + } catch (IOException e) { + abortable.abort("Could not add queue queueId=" + queueId + " filename=" + filename, e); + } + } + + @Override + public void removeLog(String queueId, String filename) { + try { + byte[] rowKey = this.queueIdToRowKey(queueId); + if (rowKey == null) { + String errMsg = "Could not remove log from non-existent queueId=" + queueId + ", filename=" + + filename; + abortable.abort(errMsg, new ReplicationException(errMsg)); + return; + } + Delete delete = new Delete(rowKey); + delete.addColumns(CF, Bytes.toBytes(filename)); + safeQueueUpdate(delete); + } catch (IOException e) { + abortable.abort("Could not remove log from queueId=" + queueId + ", filename=" + filename, e); + } + } + + @Override + public void setLogPosition(String queueId, String filename, long position) { + try { + byte[] rowKey = this.queueIdToRowKey(queueId); + if (rowKey == null) { + String errMsg = "Could not set position of log from non-existent queueId=" + queueId + + ", filename=" + filename; + abortable.abort(errMsg, new ReplicationException(errMsg)); + return; + } + // Check that the log exists. addLog() must have been called before setLogPosition(). + Get checkLogExists = new Get(rowKey); + checkLogExists.addColumn(CF, Bytes.toBytes(filename)); + if (!replicationTable.exists(checkLogExists)) { + String errMsg = "Could not set position of non-existent log from queueId=" + queueId + + ", filename=" + filename; + abortable.abort(errMsg, new ReplicationException(errMsg)); + return; + } + // Update the log offset if it exists + Put walAndOffset = new Put(rowKey); + walAndOffset.addColumn(CF, Bytes.toBytes(filename), Bytes.toBytes(position)); + safeQueueUpdate(walAndOffset); + } catch (IOException e) { + abortable.abort("Failed to write replication wal position (filename=" + filename + + ", position=" + position + ")", e); + } + } + + @Override + public long getLogPosition(String queueId, String filename) throws ReplicationException { + try { + byte[] rowKey = this.queueIdToRowKey(queueId); + if (rowKey == null) { + throw new ReplicationException("Could not get position in log for non-existent queue " + + "queueId=" + queueId + ", filename=" + filename); + } + Get getOffset = new Get(rowKey); + getOffset.addColumn(CF, Bytes.toBytes(filename)); + Result result = replicationTable.get(getOffset); + if (result.isEmpty()) { + throw new ReplicationException("Could not read empty result while getting log position " + + "queueId=" + queueId + ", filename=" + filename); + } + return Bytes.toLong(result.getValue(CF, Bytes.toBytes(filename))); + } catch (IOException e) { + throw new ReplicationException("Could not get position in log for queueId=" + queueId + + ", filename=" + filename); + } + } + + @Override + public void removeAllQueues() { + List myQueueIds = getAllQueues(); + for (String queueId : myQueueIds) { + removeQueue(queueId); + } + } + + @Override + public List getLogsInQueue(String queueId) { + List logs = new ArrayList(); + try { + byte[] rowKey = 
this.queueIdToRowKey(queueId); + if (rowKey == null) { + String errMsg = "Could not get logs from non-existent queueId=" + queueId; + abortable.abort(errMsg, new ReplicationException(errMsg)); + return null; + } + Get getQueue = new Get(rowKey); + Result queue = replicationTable.get(getQueue); + if (queue.isEmpty()) { + return null; + } + Map familyMap = queue.getFamilyMap(CF); + for (byte[] cQualifier : familyMap.keySet()) { + if (Arrays.equals(cQualifier, COL_OWNER) || Arrays.equals(cQualifier, COL_QUEUE_ID)) { + continue; + } + logs.add(Bytes.toString(cQualifier)); + } + } catch (IOException e) { + abortable.abort("Could not get logs from queue queueId=" + queueId, e); + return null; + } + return logs; + } + + @Override + public List getAllQueues() { + try { + return this.getQueuesBelongingToServer(serverName); + } catch (IOException e) { + abortable.abort("Could not get all replication queues", e); + return null; + } + } + + @Override + public SortedMap> claimQueues(String regionserver) { + // TODO + throw new NotImplementedException(); + } + + @Override + public List getListOfReplicators() { + // TODO + throw new NotImplementedException(); + } + + @Override + public boolean isThisOurRegionServer(String regionserver) { + return this.serverName.equals(regionserver); + } + + @Override + public void addPeerToHFileRefs(String peerId) throws ReplicationException { + // TODO + throw new NotImplementedException(); + } + + @Override + public void removePeerFromHFileRefs(String peerId) { + // TODO + throw new NotImplementedException(); + } + + @Override + public void addHFileRefs(String peerId, List files) throws ReplicationException { + // TODO + throw new NotImplementedException(); + } + + @Override + public void removeHFileRefs(String peerId, List files) { + // TODO + throw new NotImplementedException(); + } + + /** + * Gets the Replication Table. Builds and blocks until the table is available if the Replication + * Table does not exist. 
+ * + * @return the Replication Table + * @throws IOException if the Replication Table takes too long to build + */ + private Table createAndGetReplicationTable() throws IOException { + if (!replicationTableExists()) { + createReplicationTable(); + } + int maxRetries = conf.getInt("replication.queues.createtable.retries.number", 100); + RetryCounterFactory counterFactory = new RetryCounterFactory(maxRetries, 100); + RetryCounter retryCounter = counterFactory.create(); + while (!replicationTableExists()) { + try { + retryCounter.sleepUntilNextRetry(); + if (!retryCounter.shouldRetry()) { + throw new IOException("Unable to acquire the Replication Table"); + } + } catch (InterruptedException e) { + return null; + } + } + return connection.getTable(REPLICATION_TABLE_NAME); + } + + /** + * Checks whether the Replication Table exists yet + * + * @return whether the Replication Table exists + * @throws IOException + */ + private boolean replicationTableExists() { + try { + return admin.tableExists(REPLICATION_TABLE_NAME); + } catch (IOException e) { + return false; + } + } + + /** + * Create the replication table with the provided HColumnDescriptor REPLICATION_COL_DESCRIPTOR + * in ReplicationQueuesHBaseImpl + * @throws IOException + */ + private void createReplicationTable() throws IOException { + HTableDescriptor replicationTableDescriptor = new HTableDescriptor(REPLICATION_TABLE_NAME); + replicationTableDescriptor.addFamily(REPLICATION_COL_DESCRIPTOR); + admin.createTable(replicationTableDescriptor); + } + + /** + * Builds the unique identifier for a queue in the Replication table by appending the queueId to + * the servername + * + * @param queueId a String that identifies the queue + * @return unique identifier for a queue in the Replication table + */ + private String buildServerQueueName(String queueId) { + return serverName + "-" + queueId; + } + + /** + * See safeQueueUpdate(RowMutations mutate) + * @param put Row mutation to perform on the queue + */ + private void safeQueueUpdate(Put put) { + RowMutations mutations = new RowMutations(put.getRow()); + try { + mutations.add(put); + } catch (IOException e){ + abortable.abort("Failed to update Replication Table because of IOException", e); + } + safeQueueUpdate(mutations); + } + + /** + * See safeQueueUpdate(RowMutations mutate) + * @param delete Row mutation to perform on the queue + */ + private void safeQueueUpdate(Delete delete) { + RowMutations mutations = new RowMutations(delete.getRow()); + try { + mutations.add(delete); + } catch (IOException e) { + abortable.abort("Failed to update Replication Table because of IOException", e); + } + safeQueueUpdate(mutations); + } + + /** + * Attempt to mutate a given queue in the Replication Table with a checkAndPut on the OWNER column + * of the queue. Abort the server if this checkAndPut fails: which means we have somehow lost + * ownership of the column or an IO Exception has occurred during the transaction. 
+ * + * @param mutate Mutation to perform on a given queue + */ + private void safeQueueUpdate(RowMutations mutate) { + try { + boolean updateSuccess = replicationTable.checkAndMutate(mutate.getRow(), CF, COL_OWNER, + CompareFilter.CompareOp.EQUAL, serverNameBytes, mutate); + if (!updateSuccess) { + String errMsg = "Failed to update Replication Table because we lost queue ownership"; + abortable.abort(errMsg, new ReplicationException(errMsg)); + } + } catch (IOException e) { + abortable.abort("Failed to update Replication Table because of IOException", e); + } + } + + /** + * Get the QueueIds belonging to the named server from the ReplicationTable + * + * @param server name of the server + * @return a list of the QueueIds belonging to the server + * @throws IOException + */ + private List getQueuesBelongingToServer(String server) throws IOException { + List queues = new ArrayList(); + Scan scan = new Scan(); + SingleColumnValueFilter filterMyQueues = new SingleColumnValueFilter(CF, COL_OWNER, + CompareFilter.CompareOp.EQUAL, Bytes.toBytes(server)); + scan.setFilter(filterMyQueues); + scan.addColumn(CF, COL_QUEUE_ID); + scan.addColumn(CF, COL_OWNER); + ResultScanner results = replicationTable.getScanner(scan); + for (Result result : results) { + queues.add(Bytes.toString(result.getValue(CF, COL_QUEUE_ID))); + } + results.close(); + return queues; + } + + /** + * Finds the row key of the HBase row corresponding to the provided queue. This has to be done, + * because the row key is [original server name + "-" + queueId0]. And the original server will + * make calls to getLog(), getQueue(), etc. with the argument queueId = queueId0. + * On the original server we can build the row key by concatenating servername + queueId0. + * Yet if the queue is claimed by another server, future calls to getLog(), getQueue(), etc. + * will be made with the argument queueId = queueId0 + "-" + pastOwner0 + "-" + pastOwner1 ... + * so we need a way to look up rows by their modified queueId's. + * + * TODO: Consider updating the queueId passed to getLog, getQueue()... inside of ReplicationSource + * TODO: and ReplicationSourceManager or the parsing of the passed in queueId's so that we don't + * TODO have to scan the table for row keys for each update. See HBASE-15956. + * + * TODO: We can also cache queueId's if ReplicationQueuesHBaseImpl becomes a bottleneck. We + * TODO: currently perform scan's over all the rows looking for one with a matching QueueId. + * + * @param queueId string representation of the queue id + * @return the rowkey of the corresponding queue. This returns null if the corresponding queue + * cannot be found. + * @throws IOException + */ + private byte[] queueIdToRowKey(String queueId) throws IOException { + Scan scan = new Scan(); + scan.addColumn(CF, COL_QUEUE_ID); + scan.addColumn(CF, COL_OWNER); + scan.setMaxResultSize(1); + // Search for the queue that matches this queueId + SingleColumnValueFilter filterByQueueId = new SingleColumnValueFilter(CF, COL_QUEUE_ID, + CompareFilter.CompareOp.EQUAL, Bytes.toBytes(queueId)); + // Make sure that we are the owners of the queue. QueueId's may overlap. 
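// Editor's note: the queue-id filter above and the owner and first-key-only filters built just
// below are combined with FilterList's default MUST_PASS_ALL operator, so a row is returned only
// when its "q" column matches queueId and its "o" column matches this server; FirstKeyOnlyFilter
// then strips each matching row down to a single cell, since only the row key is needed here.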
+ SingleColumnValueFilter filterByOwner = new SingleColumnValueFilter(CF, COL_OWNER, + CompareFilter.CompareOp.EQUAL, Bytes.toBytes(serverName)); + // We only want the row key + FirstKeyOnlyFilter filterOutColumns = new FirstKeyOnlyFilter(); + FilterList filterList = new FilterList(filterByQueueId, filterByOwner, filterOutColumns); + scan.setFilter(filterList); + ResultScanner results = replicationTable.getScanner(scan); + Result result = results.next(); + results.close(); + return (result == null) ? null : result.getRow(); + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java index 2bb8ea8..f03efff 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java @@ -41,7 +41,8 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** - * This class provides an implementation of the ReplicationQueues interface using ZooKeeper. The + * This class provides an implementation of the + * interface using ZooKeeper. The * base znode that this class works at is the myQueuesZnode. The myQueuesZnode contains a list of * all outstanding WAL files on this region server that need to be replicated. The myQueuesZnode is * the regionserver name (a concatenation of the region server’s hostname, client port and start @@ -71,6 +72,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R private static final Log LOG = LogFactory.getLog(ReplicationQueuesZKImpl.class); + public ReplicationQueuesZKImpl(ReplicationQueuesArguments args) { + this(args.getZk(), args.getConf(), args.getAbort()); + } + public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf, Abortable abortable) { super(zk, conf, abortable); @@ -84,12 +89,14 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R } catch (KeeperException e) { throw new ReplicationException("Could not initialize replication queues.", e); } - // Irrespective of bulk load hfile replication is enabled or not we add peerId node to - // hfile-refs node -- HBASE-15397 - try { - ZKUtil.createWithParents(this.zookeeper, this.hfileRefsZNode); - } catch (KeeperException e) { - throw new ReplicationException("Could not initialize hfile references replication queue.", e); + if (conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) { + try { + ZKUtil.createWithParents(this.zookeeper, this.hfileRefsZNode); + } catch (KeeperException e) { + throw new ReplicationException("Could not initialize hfile references replication queue.", + e); + } } } @@ -166,8 +173,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R } @Override - public boolean isThisOurZnode(String znode) { - return ZKUtil.joinZNode(this.queuesZNode, znode).equals(this.myQueuesZnode); + public boolean isThisOurRegionServer(String regionserver) { + return ZKUtil.joinZNode(this.queuesZNode, regionserver).equals(this.myQueuesZnode); } @Override @@ -223,7 +230,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R this.abortable.abort("Failed to get a list of queues for region server: " + this.myQueuesZnode, e); } - return listOfQueues; + return listOfQueues == null ? 
new ArrayList() : listOfQueues; } /** @@ -499,4 +506,23 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R e); } } + + @Override + public void removePeerFromHFileRefs(String peerId) { + final String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId); + try { + if (ZKUtil.checkExists(this.zookeeper, peerZnode) == -1) { + if (LOG.isDebugEnabled()) { + LOG.debug("Peer " + peerZnode + " not found in hfile reference queue."); + } + return; + } else { + LOG.info("Removing peer " + peerZnode + " from hfile reference queue."); + ZKUtil.deleteNodeRecursively(this.zookeeper, peerZnode); + } + } catch (KeeperException e) { + LOG.error("Ignoring the exception to remove peer " + peerId + " from hfile reference queue.", + e); + } + } } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java index bfb625b..06d01d7 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java @@ -39,6 +39,7 @@ import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; import java.io.IOException; +import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.security.PrivilegedExceptionAction; import java.util.Map; @@ -63,6 +64,7 @@ public class SaslClientHandler extends ChannelDuplexHandler { private final SaslExceptionHandler exceptionHandler; private final SaslSuccessfulConnectHandler successfulConnectHandler; private byte[] saslToken; + private byte[] connectionHeader; private boolean firstRead = true; private int retryCount = 0; @@ -80,10 +82,11 @@ public class SaslClientHandler extends ChannelDuplexHandler { */ public SaslClientHandler(UserGroupInformation ticket, AuthMethod method, Token token, String serverPrincipal, boolean fallbackAllowed, - String rpcProtection, SaslExceptionHandler exceptionHandler, + String rpcProtection, byte[] connectionHeader, SaslExceptionHandler exceptionHandler, SaslSuccessfulConnectHandler successfulConnectHandler) throws IOException { this.ticket = ticket; this.fallbackAllowed = fallbackAllowed; + this.connectionHeader = connectionHeader; this.exceptionHandler = exceptionHandler; this.successfulConnectHandler = successfulConnectHandler; @@ -225,8 +228,13 @@ public class SaslClientHandler extends ChannelDuplexHandler { if (!useWrap) { ctx.pipeline().remove(this); + successfulConnectHandler.onSuccess(ctx.channel()); + } else { + byte[] wrappedCH = saslClient.wrap(connectionHeader, 0, connectionHeader.length); + // write connection header + writeSaslToken(ctx, wrappedCH); + successfulConnectHandler.onSaslProtectionSucess(ctx.channel()); } - successfulConnectHandler.onSuccess(ctx.channel()); } } // Normal wrapped reading @@ -303,9 +311,11 @@ public class SaslClientHandler extends ChannelDuplexHandler { super.write(ctx, msg, promise); } else { ByteBuf in = (ByteBuf) msg; + byte[] unwrapped = new byte[in.readableBytes()]; + in.readBytes(unwrapped); try { - saslToken = saslClient.wrap(in.array(), in.readerIndex(), in.readableBytes()); + saslToken = saslClient.wrap(unwrapped, 0, unwrapped.length); } catch (SaslException se) { try { saslClient.dispose(); @@ -355,5 +365,12 @@ public class SaslClientHandler extends ChannelDuplexHandler { * @param channel which is successfully authenticated */ public void onSuccess(Channel channel); + + /** + * Runs on success if data protection used in 
Sasl + * + * @param channel which is successfully authenticated + */ + public void onSaslProtectionSucess(Channel channel); } } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index 25ac01f..79dbd05 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -217,11 +217,16 @@ public class AccessControlClient { HTableDescriptor[] htds = null; if (tableRegex == null || tableRegex.isEmpty()) { permList = ProtobufUtil.getUserPermissions(controller, protocol); - } else if (tableRegex.charAt(0) == '@') { - String namespace = tableRegex.substring(1); - permList = ProtobufUtil.getUserPermissions(controller, protocol, - Bytes.toBytes(namespace)); - } else { + } else if (tableRegex.charAt(0) == '@') { // Namespaces + String namespaceRegex = tableRegex.substring(1); + for (NamespaceDescriptor nsds : admin.listNamespaceDescriptors()) { // Read out all namespaces + String namespace = nsds.getName(); + if (namespace.matches(namespaceRegex)) { // Match the given namespace regex? + permList.addAll(ProtobufUtil.getUserPermissions(controller, protocol, + Bytes.toBytes(namespace))); + } + } + } else { // Tables htds = admin.listTables(Pattern.compile(tableRegex), true); for (HTableDescriptor hd : htds) { permList.addAll(ProtobufUtil.getUserPermissions(controller, protocol, diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java index 9d0319b..b683fcc 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java @@ -23,6 +23,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -254,7 +255,7 @@ public class PoolMap implements Map { } public static String fuzzyNormalize(String name) { - return name != null ? name.replaceAll("-", "").trim().toLowerCase() : ""; + return name != null ? 
name.replaceAll("-", "").trim().toLowerCase(Locale.ROOT) : ""; } public static PoolType fuzzyMatch(String name) { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java index 0b53f95..497e8c4 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java @@ -17,15 +17,18 @@ */ package org.apache.hadoop.hbase.zookeeper; +import com.google.protobuf.InvalidProtocolBufferException; + import java.io.EOFException; import java.io.IOException; import java.net.ConnectException; import java.net.NoRouteToHostException; import java.net.SocketException; import java.net.SocketTimeoutException; -import java.rmi.UnknownHostException; +import java.net.UnknownHostException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import org.apache.commons.logging.Log; @@ -37,8 +40,6 @@ import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -58,14 +59,12 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.ipc.RemoteException; import org.apache.zookeeper.KeeperException; -import com.google.protobuf.InvalidProtocolBufferException; - /** * Utility class to perform operation (get/wait for/verify/set/delete) on znode in ZooKeeper * which keeps hbase:meta region server location. * * Stateless class with a bunch of static methods. Doesn't manage resources passed in - * (e.g. HConnection, ZooKeeperWatcher etc). + * (e.g. Connection, ZooKeeperWatcher etc). * * Meta region location is set by RegionServerServices. * This class doesn't use ZK watchers, rather accesses ZK directly. @@ -259,7 +258,7 @@ public class MetaTableLocator { * @throws java.io.IOException * @throws InterruptedException */ - public boolean verifyMetaRegionLocation(HConnection hConnection, + public boolean verifyMetaRegionLocation(ClusterConnection hConnection, ZooKeeperWatcher zkw, final long timeout) throws InterruptedException, IOException { return verifyMetaRegionLocation(hConnection, zkw, timeout, HRegionInfo.DEFAULT_REPLICA_ID); @@ -267,7 +266,7 @@ public class MetaTableLocator { /** * Verify hbase:meta is deployed and accessible. 
- * @param hConnection + * @param connection * @param zkw * @param timeout How long to wait on zk for meta address (passed through to * @param replicaId @@ -275,12 +274,12 @@ public class MetaTableLocator { * @throws InterruptedException * @throws IOException */ - public boolean verifyMetaRegionLocation(HConnection hConnection, + public boolean verifyMetaRegionLocation(ClusterConnection connection, ZooKeeperWatcher zkw, final long timeout, int replicaId) throws InterruptedException, IOException { AdminProtos.AdminService.BlockingInterface service = null; try { - service = getMetaServerConnection(hConnection, zkw, timeout, replicaId); + service = getMetaServerConnection(connection, zkw, timeout, replicaId); } catch (NotAllMetaRegionsOnlineException e) { // Pass } catch (ServerNotRunningYetException e) { @@ -290,7 +289,7 @@ public class MetaTableLocator { } catch (RegionServerStoppedException e) { // Pass -- server name sends us to a server that is dying or already dead. } - return (service != null) && verifyRegionLocation(hConnection, service, + return (service != null) && verifyRegionLocation(connection, service, getMetaRegionLocation(zkw, replicaId), RegionReplicaUtil.getRegionInfoForReplica( HRegionInfo.FIRST_META_REGIONINFO, replicaId).getRegionName()); } @@ -310,7 +309,7 @@ public class MetaTableLocator { // rather than have to pass it in. Its made awkward by the fact that the // HRI is likely a proxy against remote server so the getServerName needs // to be fixed to go to a local method or to a cache before we can do this. - private boolean verifyRegionLocation(final Connection connection, + private boolean verifyRegionLocation(final ClusterConnection connection, AdminService.BlockingInterface hostingServer, final ServerName address, final byte [] regionName) throws IOException { @@ -319,10 +318,7 @@ public class MetaTableLocator { return false; } Throwable t; - PayloadCarryingRpcController controller = null; - if (connection instanceof ClusterConnection) { - controller = ((ClusterConnection) connection).getRpcControllerFactory().newController(); - } + PayloadCarryingRpcController controller = connection.getRpcControllerFactory().newController(); try { // Try and get regioninfo from the hosting server. return ProtobufUtil.getRegionInfo(controller, hostingServer, regionName) != null; @@ -353,7 +349,7 @@ public class MetaTableLocator { * Gets a connection to the server hosting meta, as reported by ZooKeeper, * waiting up to the specified timeout for availability. *
WARNING: Does not retry. Use an {@link org.apache.hadoop.hbase.client.HTable} instead. - * @param hConnection + * @param connection * @param zkw * @param timeout How long to wait on meta location * @param replicaId @@ -362,10 +358,10 @@ public class MetaTableLocator { * @throws NotAllMetaRegionsOnlineException if timed out waiting * @throws IOException */ - private AdminService.BlockingInterface getMetaServerConnection(HConnection hConnection, + private AdminService.BlockingInterface getMetaServerConnection(ClusterConnection connection, ZooKeeperWatcher zkw, long timeout, int replicaId) throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { - return getCachedConnection(hConnection, waitMetaRegionLocation(zkw, replicaId, timeout)); + return getCachedConnection(connection, waitMetaRegionLocation(zkw, replicaId, timeout)); } /** @@ -376,7 +372,7 @@ public class MetaTableLocator { * @throws IOException */ @SuppressWarnings("deprecation") - private static AdminService.BlockingInterface getCachedConnection(HConnection hConnection, + private static AdminService.BlockingInterface getCachedConnection(ClusterConnection connection, ServerName sn) throws IOException { if (sn == null) { @@ -384,7 +380,7 @@ public class MetaTableLocator { } AdminService.BlockingInterface service = null; try { - service = hConnection.getAdmin(sn); + service = connection.getAdmin(sn); } catch (RetriesExhaustedException e) { if (e.getCause() != null && e.getCause() instanceof ConnectException) { // Catch this; presume it means the cached connection has gone bad. @@ -410,7 +406,7 @@ public class MetaTableLocator { } else if (cause != null && cause instanceof EOFException) { // Catch. Other end disconnected us. } else if (cause != null && cause.getMessage() != null && - cause.getMessage().toLowerCase().contains("connection reset")) { + cause.getMessage().toLowerCase(Locale.ROOT).contains("connection reset")) { // Catch. Connection reset. } else { throw ioe; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index ae26400..f5b720e 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -1926,27 +1926,27 @@ public class ZKUtil { int port = sp.length > 1 ? 
Integer.parseInt(sp[1]) : HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT; - Socket socket = new Socket(); InetSocketAddress sockAddr = new InetSocketAddress(host, port); - socket.connect(sockAddr, timeout); - - socket.setSoTimeout(timeout); - PrintWriter out = new PrintWriter(socket.getOutputStream(), true); - BufferedReader in = new BufferedReader(new InputStreamReader( - socket.getInputStream())); - out.println("stat"); - out.flush(); - ArrayList res = new ArrayList(); - while (true) { - String line = in.readLine(); - if (line != null) { - res.add(line); - } else { - break; + try (Socket socket = new Socket()) { + socket.connect(sockAddr, timeout); + + socket.setSoTimeout(timeout); + PrintWriter out = new PrintWriter(socket.getOutputStream(), true); + BufferedReader in = new BufferedReader(new InputStreamReader( + socket.getInputStream())); + out.println("stat"); + out.flush(); + ArrayList res = new ArrayList(); + while (true) { + String line = in.readLine(); + if (line != null) { + res.add(line); + } else { + break; + } } + return res.toArray(new String[res.size()]); } - socket.close(); - return res.toArray(new String[res.size()]); } private static void logRetrievedMsg(final ZooKeeperWatcher zkw, diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index 93828eb..ff3d1c7 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -171,7 +171,17 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { this.recoverableZooKeeper = ZKUtil.connect(conf, quorum, pendingWatcher, identifier); pendingWatcher.prepare(this); if (canCreateBaseZNode) { - createBaseZNodes(); + try { + createBaseZNodes(); + } catch (ZooKeeperConnectionException zce) { + try { + this.recoverableZooKeeper.close(); + } catch (InterruptedException ie) { + LOG.debug("Encountered InterruptedException when closing " + this.recoverableZooKeeper); + Thread.currentThread().interrupt(); + } + throw zce; + } } } diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java index 680f2c1..70380e6 100644 --- hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java +++ hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java @@ -55,35 +55,36 @@ public class TestHTableDescriptor { public void testAddCoprocessorWithSpecStr() throws IOException { HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME); String cpName = "a.b.c.d"; - boolean expected = false; try { htd.addCoprocessorWithSpec(cpName); + fail(); } catch (IllegalArgumentException iae) { - expected = true; + // Expected as cpName is invalid } - if (!expected) fail(); + // Try minimal spec. try { htd.addCoprocessorWithSpec("file:///some/path" + "|" + cpName); + fail(); } catch (IllegalArgumentException iae) { - expected = false; + // Expected to be invalid } - if (expected) fail(); + // Try more spec. 
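// Editor's note: the rewritten assertions above follow the pattern
//   try { htd.addCoprocessorWithSpec(...); fail(); } catch (IllegalArgumentException expected) { }
// so fail() is only reached when the call does not throw, replacing the old boolean bookkeeping;
// for the valid full spec tried next, fail() moves into the catch block instead.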
String spec = "hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2"; try { htd.addCoprocessorWithSpec(spec); } catch (IllegalArgumentException iae) { - expected = false; + fail(); } - if (expected) fail(); + // Try double add of same coprocessor try { htd.addCoprocessorWithSpec(spec); + fail(); } catch (IOException ioe) { - expected = true; + // Expect that the coprocessor already exists } - if (!expected) fail(); } @Test diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java index 0e0fbb0..426b6a7 100644 --- hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java +++ hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java @@ -20,7 +20,11 @@ package org.apache.hadoop.hbase; import java.io.IOException; import java.lang.annotation.Annotation; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; import java.lang.reflect.Modifier; +import java.util.ArrayList; +import java.util.List; import java.util.Set; import org.apache.commons.logging.Log; @@ -28,6 +32,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.hbase.ClassFinder.And; import org.apache.hadoop.hbase.ClassFinder.FileNameFilter; import org.apache.hadoop.hbase.ClassFinder.Not; @@ -59,6 +65,7 @@ import org.junit.experimental.categories.Category; @Category(SmallTests.class) public class TestInterfaceAudienceAnnotations { + private static final String HBASE_PROTOBUF = "org.apache.hadoop.hbase.protobuf.generated"; private static final Log LOG = LogFactory.getLog(TestInterfaceAudienceAnnotations.class); /** Selects classes with generated in their package name */ @@ -180,6 +187,28 @@ public class TestInterfaceAudienceAnnotations { c.equals(InterfaceStability.Evolving.class); } + private boolean isInterfacePrivateMethod(Method m) { + if(m.getDeclaredAnnotations().length > 0) { + for(Annotation ann : m.getDeclaredAnnotations()) { + if(ann.annotationType().equals(InterfaceAudience.Private.class)) { + return true; + } + } + } + return false; + } + + private boolean isInterfacePrivateContructor(Constructor c) { + if(c.getDeclaredAnnotations().length > 0) { + for(Annotation ann : c.getDeclaredAnnotations()) { + if(ann.annotationType().equals(InterfaceAudience.Private.class)) { + return true; + } + } + } + return false; + } + /** Selects classes that are declared public */ class PublicClassFilter implements ClassFinder.ClassFilter { @Override @@ -299,4 +328,127 @@ public class TestInterfaceAudienceAnnotations { + "have @InterfaceStability annotation as well", 0, classes.size()); } + + @Test + public void testProtosInReturnTypes() throws ClassNotFoundException, IOException, LinkageError { + Set> classes = findPublicClasses(); + List, Method>> protosReturnType = new ArrayList, Method>>(); + for (Class clazz : classes) { + findProtoInReturnType(clazz, protosReturnType); + } + if (protosReturnType.size() != 0) { + LOG.info("These are the methods that have Protos as the return type"); + for (Pair, Method> pair : protosReturnType) { + LOG.info(pair.getFirst().getName() + " " + pair.getSecond().getName() + " " + + 
pair.getSecond().getReturnType().getName()); + } + } + + Assert.assertEquals("Public exposed methods should not have protos in return type", 0, + protosReturnType.size()); + } + + private Set> findPublicClasses() + throws ClassNotFoundException, IOException, LinkageError { + ClassFinder classFinder = + new ClassFinder(new And(new MainCodeResourcePathFilter(), new TestFileNameFilter()), + new Not((FileNameFilter) new TestFileNameFilter()), + new And(new PublicClassFilter(), new Not(new TestClassFilter()), + new Not(new GeneratedClassFilter()), + new InterfaceAudiencePublicAnnotatedClassFilter())); + Set> classes = classFinder.findClasses(false); + return classes; + } + + @Test + public void testProtosInParamTypes() throws ClassNotFoundException, IOException, LinkageError { + Set> classes = findPublicClasses(); + List, Method, Class>> protosParamType = + new ArrayList, Method, Class>>(); + for (Class clazz : classes) { + findProtoInParamType(clazz, protosParamType); + } + + if (protosParamType.size() != 0) { + LOG.info("These are the methods that have Protos as the param type"); + for (Triple, Method, Class> pair : protosParamType) { + LOG.info(pair.getFirst().getName() + " " + pair.getSecond().getName() + " " + + pair.getThird().getName()); + } + } + + Assert.assertEquals("Public exposed methods should not have protos in param type", 0, + protosParamType.size()); + } + + @Test + public void testProtosInConstructors() throws ClassNotFoundException, IOException, LinkageError { + Set> classes = findPublicClasses(); + List> classList = new ArrayList>(); + for (Class clazz : classes) { + Constructor[] constructors = clazz.getConstructors(); + for (Constructor cons : constructors) { + if (!isInterfacePrivateContructor(cons)) { + Class[] parameterTypes = cons.getParameterTypes(); + for (Class param : parameterTypes) { + if (param.getName().contains(HBASE_PROTOBUF)) { + classList.add(clazz); + break; + } + } + } + } + } + + if (classList.size() != 0) { + LOG.info("These are the classes that have Protos in the constructor"); + for (Class clazz : classList) { + LOG.info(clazz.getName()); + } + } + + Assert.assertEquals("Public exposed classes should not have protos in constructors", 0, + classList.size()); + } + + private void findProtoInReturnType(Class clazz, + List, Method>> protosReturnType) { + Pair, Method> returnTypePair = new Pair, Method>(); + Method[] methods = clazz.getMethods(); + returnTypePair.setFirst(clazz); + for (Method method : methods) { + if (clazz.isInterface() || method.getModifiers() == Modifier.PUBLIC) { + if (!isInterfacePrivateMethod(method)) { + Class returnType = method.getReturnType(); + if (returnType.getName().contains(HBASE_PROTOBUF)) { + returnTypePair.setSecond(method); + protosReturnType.add(returnTypePair); + continue; + } + } + } + } + } + + private void findProtoInParamType(Class clazz, + List, Method, Class>> protosParamType) { + Triple, Method, Class> paramType = new Triple, Method, Class>(); + Method[] methods = clazz.getMethods(); + paramType.setFirst(clazz); + for (Method method : methods) { + if (clazz.isInterface() || method.getModifiers() == Modifier.PUBLIC) { + if (!isInterfacePrivateMethod(method)) { + Class[] parameters = method.getParameterTypes(); + for (Class param : parameters) { + if (param.getName().contains(HBASE_PROTOBUF)) { + paramType.setSecond(method); + paramType.setThird(param); + protosParamType.add(paramType); + break; + } + } + } + } + } + } } diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java 
hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index 376c02a..d943316 100644 --- hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -712,15 +712,17 @@ public class TestAsyncProcess { private void doHTableFailedPut(boolean bufferOn) throws Exception { ClusterConnection conn = createHConnection(); - HTable ht = new HTable(conn, new BufferedMutatorParams(DUMMY_TABLE)); - MyAsyncProcess ap = new MyAsyncProcess(conn, conf, true); - ht.mutator.ap = ap; + BufferedMutatorParams bufferParam = new BufferedMutatorParams(DUMMY_TABLE); if (bufferOn) { - ht.setWriteBufferSize(1024L * 1024L); + bufferParam.writeBufferSize(1024L * 1024L); } else { - ht.setWriteBufferSize(0L); + bufferParam.writeBufferSize(0L); } + HTable ht = new HTable(conn, bufferParam); + MyAsyncProcess ap = new MyAsyncProcess(conn, conf, true); + ht.mutator.ap = ap; + Put put = createPut(1, false); Assert.assertEquals(0L, ht.mutator.currentWriteBufferSize.get()); @@ -1133,16 +1135,17 @@ public class TestAsyncProcess { } @Test - public void testWaitForMaximumCurrentTasks() throws InterruptedException, BrokenBarrierException { + public void testWaitForMaximumCurrentTasks() throws Exception { final AtomicLong tasks = new AtomicLong(0); final AtomicInteger max = new AtomicInteger(0); final CyclicBarrier barrier = new CyclicBarrier(2); + final AsyncProcess ap = new MyAsyncProcess(createHConnection(), conf); Runnable runnable = new Runnable() { @Override public void run() { try { barrier.await(); - AsyncProcess.waitForMaximumCurrentTasks(max.get(), tasks, 1); + ap.waitForMaximumCurrentTasks(max.get(), tasks, 1, null); } catch (InterruptedIOException e) { Assert.fail(e.getMessage()); } catch (InterruptedException e) { diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index 0a5a37f..e8135a8 100644 --- hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -104,7 +104,7 @@ public class TestClientNoCluster extends Configured implements Tool { @Before public void setUp() throws Exception { this.conf = HBaseConfiguration.create(); - // Run my HConnection overrides. Use my little ConnectionImplementation below which + // Run my Connection overrides. Use my little ConnectionImplementation below which // allows me insert mocks and also use my Registry below rather than the default zk based // one so tests run faster and don't have zk dependency. this.conf.set("hbase.client.registry.impl", SimpleRegistry.class.getName()); diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 45d7c34..b05a520 100644 --- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1024,13 +1024,13 @@ public final class HConstants { * by different set of handlers. For example, HIGH_QOS tagged methods are * handled by high priority handlers. 
*/ - // normal_QOS < QOS_threshold < replication_QOS < replay_QOS < admin_QOS < high_QOS + // normal_QOS < replication_QOS < replay_QOS < QOS_threshold < admin_QOS < high_QOS public static final int NORMAL_QOS = 0; - public static final int QOS_THRESHOLD = 10; - public static final int HIGH_QOS = 200; public static final int REPLICATION_QOS = 5; public static final int REPLAY_QOS = 6; + public static final int QOS_THRESHOLD = 10; public static final int ADMIN_QOS = 100; + public static final int HIGH_QOS = 200; public static final int SYSTEMTABLE_QOS = HIGH_QOS; /** Directory under /hbase where archived hfiles are stored */ diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java index c0c43ed..52db37b 100644 --- hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java +++ hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java @@ -25,6 +25,7 @@ import com.google.protobuf.InvalidProtocolBufferException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.regex.Pattern; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -232,7 +233,7 @@ import org.apache.hadoop.hbase.util.Bytes; */ static String getServerName(String hostName, int port, long startcode) { final StringBuilder name = new StringBuilder(hostName.length() + 1 + 5 + 1 + 13); - name.append(hostName.toLowerCase()); + name.append(hostName.toLowerCase(Locale.ROOT)); name.append(SERVERNAME_SEPARATOR); name.append(port); name.append(SERVERNAME_SEPARATOR); diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java index 821b21f..6dc4190 100644 --- hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java +++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java @@ -235,7 +235,34 @@ public final class Compression { throw new RuntimeException(e); } } - }; + }, + BZIP2("bzip2") { + // Use base type to avoid compile-time dependencies. 
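+ // The volatile field plus the synchronized re-check in getCodec() below form
+ // double-checked locking, so the reflective BZip2Codec lookup and instantiation
+ // run at most once per enum constant.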
+ private volatile transient CompressionCodec bzipCodec; + private transient Object lock = new Object(); + + @Override + CompressionCodec getCodec(Configuration conf) { + if (bzipCodec == null) { + synchronized (lock) { + if (bzipCodec == null) { + bzipCodec = buildCodec(conf); + } + } + } + return bzipCodec; + } + + private CompressionCodec buildCodec(Configuration conf) { + try { + Class externalCodec = + getClassLoaderForCodec().loadClass("org.apache.hadoop.io.compress.BZip2Codec"); + return (CompressionCodec) ReflectionUtils.newInstance(externalCodec, conf); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + } + }; private final Configuration conf; private final String compressName; diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java index 2d58a18..7e3c013 100644 --- hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java +++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java @@ -30,6 +30,7 @@ import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.security.UnrecoverableKeyException; import java.security.cert.CertificateException; +import java.util.Locale; import java.util.Properties; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -142,7 +143,7 @@ public class KeyStoreKeyProvider implements KeyProvider { throw new RuntimeException("KeyProvider scheme should specify KeyStore type"); } // KeyStore expects instance type specifications in uppercase - store = KeyStore.getInstance(storeType.toUpperCase()); + store = KeyStore.getInstance(storeType.toUpperCase(Locale.ROOT)); processParameters(uri); load(uri); } catch (URISyntaxException e) { diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java index 78ecc7e..11016c3 100644 --- hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java @@ -177,8 +177,10 @@ public class CoprocessorClassLoader extends ClassLoaderBase { if (m.matches()) { File file = new File(parentDirStr, "." + pathPrefix + "." + path.getName() + "." + System.currentTimeMillis() + "." 
+ m.group(1)); - IOUtils.copyBytes(jarFile.getInputStream(entry), - new FileOutputStream(file), conf, true); + try (FileOutputStream outStream = new FileOutputStream(file)) { + IOUtils.copyBytes(jarFile.getInputStream(entry), + outStream, conf, true); + } file.deleteOnExit(); addURL(file.toURI().toURL()); } diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java index 1438ab7..1de6bee 100644 --- hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java +++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java @@ -28,6 +28,10 @@ public class Triple { private A first; private B second; private C third; + // default constructor + public Triple() { + + } public Triple(A first, B second, C third) { this.first = first; diff --git hbase-common/src/main/resources/hbase-default.xml hbase-common/src/main/resources/hbase-default.xml index 62a6b62..55ac497 100644 --- hbase-common/src/main/resources/hbase-default.xml +++ hbase-common/src/main/resources/hbase-default.xml @@ -562,7 +562,7 @@ possible configurations would overwhelm and obscure the important. hbase.regions.slop 0.001 - Rebalance if any regionserver has average + (average * slop) regions. + Rebalance if any regionserver has average + (average * slop) regions. The default value of this parameter is 0.001 in StochasticLoadBalancer (the default load balancer), while the default is 0.2 in other load balancers (i.e., SimpleLoadBalancer). @@ -865,7 +865,7 @@ possible configurations would overwhelm and obscure the important. Must be a multiple of 1024 else you will run into 'java.io.IOException: Invalid HFile block magic' when you go to read from cache. If you specify no values here, then you pick up the default bucketsizes set - in code (See BucketAllocator#DEFAULT_BUCKET_SIZES). + in code (See BucketAllocator#DEFAULT_BUCKET_SIZES). @@ -1132,11 +1132,6 @@ possible configurations would overwhelm and obscure the important. of servers, so this is most useful for debugging only. - hbase.online.schema.update.enable - true - Set true to enable online schema changes. - - hbase.table.lock.enable true Set to true to enable locking the table in zookeeper for schema change operations. 
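The Triple change above adds a no-argument constructor so a caller can build the tuple one field at a time, which is how the protobuf-audit test earlier in this patch records each (declaring class, method, protobuf parameter type) hit. A minimal sketch of that usage, assuming only the no-arg constructor added here and the getters and setters the test already calls on Triple; the wrapper class and the reflective lookup are made up for illustration:

import java.lang.reflect.Method;

import org.apache.hadoop.hbase.util.Triple;

public class TripleUsageSketch {
  public static void main(String[] args) throws NoSuchMethodException {
    // Populate the tuple incrementally instead of passing all three values
    // to the existing three-argument constructor.
    Triple<Class<?>, Method, Class<?>> hit = new Triple<Class<?>, Method, Class<?>>();
    Method valueOf = String.class.getMethod("valueOf", char[].class);
    hit.setFirst(String.class);   // class under inspection
    hit.setSecond(valueOf);       // method being reported
    hit.setThird(char[].class);   // parameter type that triggered the report
    System.out.println(hit.getFirst().getName() + " "
        + hit.getSecond().getName() + " " + hit.getThird().getName());
  }
}

Pair is used the same way in the return-type check, with only setFirst and setSecond.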
diff --git hbase-common/src/test/java/org/apache/hadoop/hbase/CategoryBasedTimeout.java hbase-common/src/test/java/org/apache/hadoop/hbase/CategoryBasedTimeout.java index 7f7aafd..8a31ff3 100644 --- hbase-common/src/test/java/org/apache/hadoop/hbase/CategoryBasedTimeout.java +++ hbase-common/src/test/java/org/apache/hadoop/hbase/CategoryBasedTimeout.java @@ -32,6 +32,15 @@ import org.junit.rules.Timeout; */ public class CategoryBasedTimeout extends Timeout { + public static Timeout forClass(Class clazz) { + return CategoryBasedTimeout.builder().withTimeout(clazz).withLookingForStuckThread(true) + .build(); + } + + public static Builder builder() { + return new CategoryBasedTimeout.Builder(); + } + @Deprecated public CategoryBasedTimeout(int millis) { super(millis); @@ -45,10 +54,6 @@ public class CategoryBasedTimeout extends Timeout { super(builder); } - public static Builder builder() { - return new CategoryBasedTimeout.Builder(); - } - public static class Builder extends Timeout.Builder { public Timeout.Builder withTimeout(Class clazz) { Annotation annotation = clazz.getAnnotation(Category.class); diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java index bb89789..ce57e0f 100644 --- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java +++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java @@ -52,14 +52,16 @@ public interface MetricsHBaseServerSource extends BaseSource { String TOTAL_CALL_TIME_NAME = "totalCallTime"; String TOTAL_CALL_TIME_DESC = "Total call time, including both queued and processing time."; String QUEUE_SIZE_NAME = "queueSize"; - String QUEUE_SIZE_DESC = "Number of bytes in the call queues."; + String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " + + "parsed and is waiting to run or is currently being executed."; String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue"; - String GENERAL_QUEUE_DESC = "Number of calls in the general call queue."; + String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " + + "parsed requests waiting in scheduler to be executed"; String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue"; String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue"; String REPLICATION_QUEUE_DESC = - "Number of calls in the replication call queue."; - String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue."; + "Number of calls in the replication call queue waiting to be run"; + String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run"; String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections"; String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections."; String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler"; diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java index 4098e26..76bbb09 100644 --- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java +++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.ipc; import java.util.HashMap; +import java.util.Locale; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -48,7 
+49,7 @@ public class MetricsHBaseServerSourceFactoryImpl extends MetricsHBaseServerSourc source = new MetricsHBaseServerSourceImpl( context, METRICS_DESCRIPTION, - context.toLowerCase(), + context.toLowerCase(Locale.ROOT), context + METRICS_JMX_CONTEXT_SUFFIX, wrap); //Store back in storage diff --git hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java index 5503675..4291eb7 100644 --- hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java +++ hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java @@ -28,6 +28,7 @@ import org.apache.hadoop.metrics2.MetricsTag; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import static org.junit.Assert.*; @@ -245,6 +246,6 @@ public class MetricsAssertHelperImpl implements MetricsAssertHelper { } private String canonicalizeMetricName(String in) { - return in.toLowerCase().replaceAll("[^A-Za-z0-9 ]", ""); + return in.toLowerCase(Locale.ROOT).replaceAll("[^A-Za-z0-9 ]", ""); } } diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java index f13102d..b6f1aeb 100644 --- hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java +++ hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase; import java.io.File; import java.io.IOException; +import java.util.Locale; import java.util.Map; import org.apache.commons.lang.StringUtils; @@ -205,7 +206,7 @@ public class HBaseClusterManager extends Configured implements ClusterManager { @Override public String getCommand(ServiceType service, Operation op) { return String.format("%s/bin/hbase-daemon.sh %s %s %s", hbaseHome, confDir, - op.toString().toLowerCase(), service); + op.toString().toLowerCase(Locale.ROOT), service); } } @@ -235,7 +236,7 @@ public class HBaseClusterManager extends Configured implements ClusterManager { @Override public String getCommand(ServiceType service, Operation op) { return String.format("%s/sbin/hadoop-daemon.sh %s %s %s", hadoopHome, confDir, - op.toString().toLowerCase(), service); + op.toString().toLowerCase(Locale.ROOT), service); } } @@ -264,7 +265,7 @@ public class HBaseClusterManager extends Configured implements ClusterManager { @Override public String getCommand(ServiceType service, Operation op) { - return String.format("%s/bin/zkServer.sh %s", zookeeperHome, op.toString().toLowerCase()); + return String.format("%s/bin/zkServer.sh %s", zookeeperHome, op.toString().toLowerCase(Locale.ROOT)); } @Override diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java index db52c78..8f9b92c 100644 --- hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java +++ hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java @@ -101,11 +101,11 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase { * Wraps the invocation of {@link PerformanceEvaluation} in a {@code Callable}. 
*/ static class PerfEvalCallable implements Callable { - private final Queue argv = new LinkedList(); + private final Queue argv = new LinkedList<>(); private final Admin admin; public PerfEvalCallable(Admin admin, String argv) { - // TODO: this API is awkward, should take HConnection, not HBaseAdmin + // TODO: this API is awkward, should take Connection, not Admin this.admin = admin; this.argv.addAll(Arrays.asList(argv.split(" "))); LOG.debug("Created PerformanceEvaluationCallable with args: " + argv); diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java index 717de17..04a3b05 100644 --- hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java +++ hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java @@ -37,6 +37,7 @@ import javax.xml.ws.http.HTTPException; import java.io.IOException; import java.net.URI; import java.util.HashMap; +import java.util.Locale; import java.util.Map; /** @@ -274,8 +275,8 @@ public class RESTApiClusterManager extends Configured implements ClusterManager if (role.get("hostRef").get("hostId").getTextValue().equals(hostId) && role.get("type") .getTextValue() - .toLowerCase() - .equals(roleType.toLowerCase())) { + .toLowerCase(Locale.ROOT) + .equals(roleType.toLowerCase(Locale.ROOT))) { roleValue = role.get(property).getTextValue(); break; } @@ -328,7 +329,7 @@ public class RESTApiClusterManager extends Configured implements ClusterManager // APIs tend to take commands in lowercase, so convert them to save the trouble later. @Override public String toString() { - return name().toLowerCase(); + return name().toLowerCase(Locale.ROOT); } } @@ -348,4 +349,4 @@ public class RESTApiClusterManager extends Configured implements ClusterManager private enum Service { HBASE, HDFS, MAPREDUCE } -} \ No newline at end of file +} diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java index 848017f..a6b502f 100644 --- hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java +++ hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase; import java.io.IOException; +import java.util.Locale; import java.util.Set; import org.apache.commons.cli.CommandLine; @@ -111,7 +112,7 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool { } else { minValueSize = maxValueSize = Integer.parseInt(valueSize); } - String datagen = cmd.getOptionValue(DATAGEN_KEY, "default").toLowerCase(); + String datagen = cmd.getOptionValue(DATAGEN_KEY, "default").toLowerCase(Locale.ROOT); if ("default".equals(datagen)) { dataGen = new MultiThreadedAction.DefaultDataGenerator( minValueSize, maxValueSize, 1, 1, new byte[][] { COLUMN_FAMILY }); diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java index c28f3e6..6c0fbcc 100644 --- hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java +++ hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.ipc; +import static org.apache.hadoop.hbase.ipc.RpcClient.SPECIFIC_WRITE_THREAD; import static org.junit.Assert.assertEquals; import static 
org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -41,12 +42,6 @@ import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.codec.Codec; -import org.apache.hadoop.hbase.ipc.AbstractRpcClient; -import org.apache.hadoop.hbase.ipc.AsyncRpcClient; -import org.apache.hadoop.hbase.ipc.FifoRpcScheduler; -import org.apache.hadoop.hbase.ipc.RpcClientImpl; -import org.apache.hadoop.hbase.ipc.RpcScheduler; -import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoRequestProto; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoResponseProto; @@ -290,6 +285,7 @@ public class IntegrationTestRpcClient { static class SimpleClient extends Thread { AbstractRpcClient rpcClient; AtomicBoolean running = new AtomicBoolean(true); + AtomicBoolean sending = new AtomicBoolean(false); AtomicReference exception = new AtomicReference<>(null); Cluster cluster; String id; @@ -319,6 +315,7 @@ public class IntegrationTestRpcClient { if (address == null) { throw new IOException("Listener channel is closed"); } + sending.set(true); ret = (EchoResponseProto) rpcClient.callBlockingMethod(md, null, param, ret, user, address); } catch (Exception e) { @@ -340,6 +337,9 @@ public class IntegrationTestRpcClient { void stopRunning() { running.set(false); } + boolean isSending() { + return sending.get(); + } void rethrowException() throws Throwable { if (exception.get() != null) { @@ -348,6 +348,29 @@ public class IntegrationTestRpcClient { } } + /* + Test that not started connections are successfully removed from connection pool when + rpc client is closing. 
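+ The SimpleClient below flips its 'sending' flag just before issuing its first
+ blocking call; the test waits for that flag, stops the client, and closes the
+ RpcClient, repeating the cycle 1000 times.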
+ */ + @Test (timeout = 30000) + public void testRpcWithWriteThread() throws IOException, InterruptedException { + LOG.info("Starting test"); + Cluster cluster = new Cluster(1, 1); + cluster.startServer(); + conf.setBoolean(SPECIFIC_WRITE_THREAD, true); + for(int i = 0; i <1000; i++) { + AbstractRpcClient rpcClient = createRpcClient(conf, true); + SimpleClient client = new SimpleClient(cluster, rpcClient, "Client1"); + client.start(); + while(!client.isSending()) { + Thread.sleep(1); + } + client.stopRunning(); + rpcClient.close(); + } + } + + @Test (timeout = 900000) public void testRpcWithChaosMonkeyWithSyncClient() throws Throwable { for (int i = 0; i < numIterations; i++) { diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java index c864580..430c8a6 100644 --- hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java +++ hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java @@ -80,7 +80,7 @@ import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.mapreduce.TableMapper; import org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl; import org.apache.hadoop.hbase.mapreduce.WALPlayer; -import org.apache.hadoop.hbase.regionserver.FlushLargeStoresPolicy; +import org.apache.hadoop.hbase.regionserver.FlushAllLargeStoresPolicy; import org.apache.hadoop.hbase.regionserver.FlushPolicyFactory; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.testclassification.IntegrationTests; @@ -1586,7 +1586,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { Configuration conf = getTestingUtil(getConf()).getConfiguration(); if (isMultiUnevenColumnFamilies(getConf())) { // make sure per CF flush is on - conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName()); + conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllLargeStoresPolicy.class.getName()); } int ret = ToolRunner.run(conf, new Loop(), new String[] { "1", "1", "2000000", diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java index 2a146b3..f8a8ffa 100644 --- hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java +++ hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java @@ -41,10 +41,10 @@ import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -452,7 +452,7 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB @Override protected void handleFailure(Counters counters) throws IOException { Configuration conf = job.getConfiguration(); - HConnection conn = (HConnection) 
ConnectionFactory.createConnection(conf); + ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf); TableName tableName = TableName.valueOf(COMMON_TABLE_NAME); CounterGroup g = counters.getGroup("undef"); Iterator it = g.iterator(); diff --git hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index 7e58420..ee61841 100644 --- hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -79,6 +79,9 @@ public abstract class Procedure implements Comparable { private int childrenLatch = 0; private long lastUpdate; + // TODO: it will be nice having pointers to allow the scheduler doing suspend/resume tricks + private boolean suspended = false; + private RemoteProcedureException exception = null; private byte[] result = null; @@ -94,7 +97,7 @@ public abstract class Procedure implements Comparable { * @throws InterruptedException the procedure will be added back to the queue and retried later */ protected abstract Procedure[] execute(TEnvironment env) - throws ProcedureYieldException, InterruptedException; + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException; /** * The code to undo what done by the execute() code. @@ -276,6 +279,9 @@ public abstract class Procedure implements Comparable { */ protected void toStringState(StringBuilder builder) { builder.append(getState()); + if (isSuspended()) { + builder.append("|SUSPENDED"); + } } /** @@ -319,7 +325,7 @@ public abstract class Procedure implements Comparable { } public long getParentProcId() { - return parentProcId; + return parentProcId.longValue(); } public NonceKey getNonceKey() { @@ -371,6 +377,23 @@ public abstract class Procedure implements Comparable { return false; } + /** + * @return true if the procedure is in a suspended state, + * waiting for the resources required to execute the procedure will become available. + */ + public synchronized boolean isSuspended() { + return suspended; + } + + public synchronized void suspend() { + suspended = true; + } + + public synchronized void resume() { + assert isSuspended() : this + " expected suspended state, got " + state; + suspended = false; + } + public synchronized RemoteProcedureException getException() { return exception; } @@ -398,7 +421,7 @@ public abstract class Procedure implements Comparable { * @return the timeout in msec */ public int getTimeout() { - return timeout; + return timeout.intValue(); } /** @@ -494,7 +517,7 @@ public abstract class Procedure implements Comparable { */ @InterfaceAudience.Private protected Procedure[] doExecute(final TEnvironment env) - throws ProcedureYieldException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { try { updateTimestamp(); return execute(env); diff --git hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index f43b65f..9d71f65 100644 --- hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -505,15 +505,25 @@ public class ProcedureExecutor { } }; + long st, et; + // Acquire the store lease. 
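+ // The recoverLease() and load() calls are bracketed with EnvironmentEdgeManager
+ // timestamps only so their durations can be reported in the log lines below.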
+ st = EnvironmentEdgeManager.currentTime(); store.recoverLease(); + et = EnvironmentEdgeManager.currentTime(); + LOG.info(String.format("recover procedure store (%s) lease: %s", + store.getClass().getSimpleName(), StringUtils.humanTimeDiff(et - st))); // TODO: Split in two steps. // TODO: Handle corrupted procedures (currently just a warn) // The first one will make sure that we have the latest id, // so we can start the threads and accept new procedures. // The second step will do the actual load of old procedures. + st = EnvironmentEdgeManager.currentTime(); load(abortOnCorruption); + et = EnvironmentEdgeManager.currentTime(); + LOG.info(String.format("load procedure store (%s): %s", + store.getClass().getSimpleName(), StringUtils.humanTimeDiff(et - st))); // Start the executors. Here we must have the lastProcId set. for (int i = 0; i < threads.length; ++i) { @@ -840,7 +850,7 @@ public class ProcedureExecutor { } // Execute the procedure - assert proc.getState() == ProcedureState.RUNNABLE; + assert proc.getState() == ProcedureState.RUNNABLE : proc; if (proc.acquireLock(getEnvironment())) { execProcedure(procStack, proc); proc.releaseLock(getEnvironment()); @@ -1042,6 +1052,7 @@ public class ProcedureExecutor { Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE); // Execute the procedure + boolean isSuspended = false; boolean reExecute = false; Procedure[] subprocs = null; do { @@ -1051,6 +1062,8 @@ public class ProcedureExecutor { if (subprocs != null && subprocs.length == 0) { subprocs = null; } + } catch (ProcedureSuspendedException e) { + isSuspended = true; } catch (ProcedureYieldException e) { if (LOG.isTraceEnabled()) { LOG.trace("Yield procedure: " + procedure + ": " + e.getMessage()); @@ -1086,7 +1099,7 @@ public class ProcedureExecutor { break; } - assert subproc.getState() == ProcedureState.INITIALIZING; + assert subproc.getState() == ProcedureState.INITIALIZING : subproc; subproc.setParentProcId(procedure.getProcId()); subproc.setProcId(nextProcId()); } @@ -1107,7 +1120,7 @@ public class ProcedureExecutor { } } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) { waitingTimeout.add(procedure); - } else { + } else if (!isSuspended) { // No subtask, so we are done procedure.setState(ProcedureState.FINISHED); } diff --git hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java new file mode 100644 index 0000000..f28d57a --- /dev/null +++ hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Private +@InterfaceStability.Stable +public class ProcedureSuspendedException extends ProcedureException { + /** default constructor */ + public ProcedureSuspendedException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public ProcedureSuspendedException(String s) { + super(s); + } +} diff --git hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java index 636a037..f0bcdea 100644 --- hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java +++ hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java @@ -42,7 +42,7 @@ public abstract class SequentialProcedure extends Procedure corrupted = new ArrayList(); + private final ArrayList loaded = new ArrayList(); + + private Set procIds; + private long maxProcId = 0; + + public LoadCounter() { + this(null); + } + + public LoadCounter(final Set procIds) { + this.procIds = procIds; + } + + public void reset() { + reset(null); + } + + public void reset(final Set procIds) { + corrupted.clear(); + loaded.clear(); + this.procIds = procIds; + this.maxProcId = 0; + } + + public long getMaxProcId() { + return maxProcId; + } + + public ArrayList getLoaded() { + return loaded; + } + + public int getLoadedCount() { + return loaded.size(); + } + + public ArrayList getCorrupted() { + return corrupted; + } + + public int getCorruptedCount() { + return corrupted.size(); + } + + @Override + public void setMaxProcId(long maxProcId) { + maxProcId = maxProcId; + } + + @Override + public void load(ProcedureIterator procIter) throws IOException { + while (procIter.hasNext()) { + Procedure proc = procIter.nextAsProcedure(); + LOG.debug("loading procId=" + proc.getProcId() + ": " + proc); + if (procIds != null) { + assertTrue("procId=" + proc.getProcId() + " unexpected", + procIds.contains(proc.getProcId())); + } + loaded.add(proc); + } + } + + @Override + public void handleCorrupted(ProcedureIterator procIter) throws IOException { + while (procIter.hasNext()) { + Procedure proc = procIter.nextAsProcedure(); + LOG.debug("corrupted procId=" + proc.getProcId() + ": " + proc); + corrupted.add(proc); + } + } + } } diff --git hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java new file mode 100644 index 0000000..1c1af79 --- /dev/null +++ hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import java.io.IOException; +import java.util.Random; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.LoadCounter; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.IOUtils; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, LargeTests.class}) +public class TestStressWALProcedureStore { + private static final Log LOG = LogFactory.getLog(TestWALProcedureStore.class); + + private static final int PROCEDURE_STORE_SLOTS = 8; + + private WALProcedureStore procStore; + + private HBaseCommonTestingUtility htu; + private FileSystem fs; + private Path testDir; + private Path logDir; + + private void setupConfiguration(Configuration conf) { + conf.setBoolean("hbase.procedure.store.wal.use.hsync", false); + conf.setInt("hbase.procedure.store.wal.periodic.roll.msec", 5000); + conf.setInt("hbase.procedure.store.wal.roll.threshold", 128 * 1024); + } + + @Before + public void setUp() throws IOException { + htu = new HBaseCommonTestingUtility(); + setupConfiguration(htu.getConfiguration()); + + testDir = htu.getDataTestDir(); + fs = testDir.getFileSystem(htu.getConfiguration()); + assertTrue(testDir.depth() > 1); + + logDir = new Path(testDir, "proc-logs"); + procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir); + procStore.start(PROCEDURE_STORE_SLOTS); + procStore.recoverLease(); + + LoadCounter loader = new LoadCounter(); + procStore.load(loader); + assertEquals(0, loader.getMaxProcId()); + assertEquals(0, loader.getLoadedCount()); + assertEquals(0, loader.getCorruptedCount()); + } + + @After + public void tearDown() throws IOException { + procStore.stop(false); + fs.delete(logDir, true); + } + + @Test + public void testInsertUpdateDelete() throws Exception { + final long LAST_PROC_ID = 19999; + final Thread[] thread = new Thread[PROCEDURE_STORE_SLOTS]; + final AtomicLong procCounter = new AtomicLong((long)Math.round(Math.random() * 100)); + for (int i = 0; i < thread.length; ++i) { + thread[i] = new Thread() { + @Override + public void run() { + Random rand = new Random(); + TestProcedure proc; 
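+ // Each worker loops: insert a fresh procedure, update it a random number of
+ // times with sub-millisecond sleeps in between, then delete it, until the ids
+ // handed out by the shared counter reach LAST_PROC_ID.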
+ do { + proc = new TestProcedure(procCounter.addAndGet(1)); + // Insert + procStore.insert(proc, null); + // Update + for (int i = 0, nupdates = rand.nextInt(10); i <= nupdates; ++i) { + try { Thread.sleep(0, rand.nextInt(15)); } catch (InterruptedException e) {} + procStore.update(proc); + } + // Delete + procStore.delete(proc.getProcId()); + } while (proc.getProcId() < LAST_PROC_ID); + } + }; + thread[i].start(); + } + + for (int i = 0; i < thread.length; ++i) { + thread[i].join(); + } + + procStore.getStoreTracker().dump(); + assertTrue(procCounter.get() >= LAST_PROC_ID); + assertTrue(procStore.getStoreTracker().isEmpty()); + assertEquals(1, procStore.getActiveLogs().size()); + } +} diff --git hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java index 88c85ba..f964d86 100644 --- hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java +++ hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java @@ -22,13 +22,10 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.HashSet; import java.util.Set; -import java.util.Random; -import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -38,6 +35,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseCommonTestingUtility; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.LoadCounter; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure; import org.apache.hadoop.hbase.procedure2.SequentialProcedure; import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; @@ -441,66 +439,6 @@ public class TestWALProcedureStore { } @Test - public void testInsertUpdateDelete() throws Exception { - final int NTHREAD = 2; - - procStore.stop(false); - fs.delete(logDir, true); - - org.apache.hadoop.conf.Configuration conf = - new org.apache.hadoop.conf.Configuration(htu.getConfiguration()); - conf.setBoolean("hbase.procedure.store.wal.use.hsync", false); - conf.setInt("hbase.procedure.store.wal.periodic.roll.msec", 10000); - conf.setInt("hbase.procedure.store.wal.roll.threshold", 128 * 1024); - - fs.mkdirs(logDir); - procStore = ProcedureTestingUtility.createWalStore(conf, fs, logDir); - procStore.start(NTHREAD); - procStore.recoverLease(); - - LoadCounter loader = new LoadCounter(); - procStore.load(loader); - assertEquals(0, loader.getMaxProcId()); - assertEquals(0, loader.getLoadedCount()); - assertEquals(0, loader.getCorruptedCount()); - - final long LAST_PROC_ID = 9999; - final Thread[] thread = new Thread[NTHREAD]; - final AtomicLong procCounter = new AtomicLong((long)Math.round(Math.random() * 100)); - for (int i = 0; i < thread.length; ++i) { - thread[i] = new Thread() { - @Override - public void run() { - Random rand = new Random(); - TestProcedure proc; - do { - proc = new TestProcedure(procCounter.addAndGet(1)); - // Insert - procStore.insert(proc, null); - // Update - for (int i = 0, nupdates = rand.nextInt(10); i <= nupdates; ++i) { - try { Thread.sleep(0, rand.nextInt(15)); } catch 
(InterruptedException e) {} - procStore.update(proc); - } - // Delete - procStore.delete(proc.getProcId()); - } while (proc.getProcId() < LAST_PROC_ID); - } - }; - thread[i].start(); - } - - for (int i = 0; i < thread.length; ++i) { - thread[i].join(); - } - - procStore.getStoreTracker().dump(); - assertTrue(procCounter.get() >= LAST_PROC_ID); - assertTrue(procStore.getStoreTracker().isEmpty()); - assertEquals(1, procStore.getActiveLogs().size()); - } - - @Test public void testRollAndRemove() throws IOException { // Insert something in the log Procedure proc1 = new TestSequentialProcedure(); @@ -599,78 +537,4 @@ public class TestWALProcedureStore { } } } - - private class LoadCounter implements ProcedureStore.ProcedureLoader { - private final ArrayList corrupted = new ArrayList(); - private final ArrayList loaded = new ArrayList(); - - private Set procIds; - private long maxProcId = 0; - - public LoadCounter() { - this(null); - } - - public LoadCounter(final Set procIds) { - this.procIds = procIds; - } - - public void reset() { - reset(null); - } - - public void reset(final Set procIds) { - corrupted.clear(); - loaded.clear(); - this.procIds = procIds; - this.maxProcId = 0; - } - - public long getMaxProcId() { - return maxProcId; - } - - public ArrayList getLoaded() { - return loaded; - } - - public int getLoadedCount() { - return loaded.size(); - } - - public ArrayList getCorrupted() { - return corrupted; - } - - public int getCorruptedCount() { - return corrupted.size(); - } - - @Override - public void setMaxProcId(long maxProcId) { - maxProcId = maxProcId; - } - - @Override - public void load(ProcedureIterator procIter) throws IOException { - while (procIter.hasNext()) { - Procedure proc = procIter.nextAsProcedure(); - LOG.debug("loading procId=" + proc.getProcId() + ": " + proc); - if (procIds != null) { - assertTrue("procId=" + proc.getProcId() + " unexpected", - procIds.contains(proc.getProcId())); - } - loaded.add(proc); - } - } - - @Override - public void handleCorrupted(ProcedureIterator procIter) throws IOException { - while (procIter.hasNext()) { - Procedure proc = procIter.nextAsProcedure(); - LOG.debug("corrupted procId=" + proc.getProcId() + ": " + proc); - corrupted.add(proc); - } - } - } } diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java index c88ac91..8ff3ef6 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -86,7 +86,9 @@ public class MultiRowResource extends ResourceBase implements Constants { } model.addRow(rowModel); } else { - LOG.trace("The row : " + rk + " not found in the table."); + if (LOG.isTraceEnabled()) { + LOG.trace("The row : " + rk + " not found in the table."); + } } } diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java index 8f64738..c832905 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java @@ -91,8 +91,8 @@ public class NamespacesInstanceResource extends ResourceBase { MIMETYPE_PROTOBUF_IETF}) public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + 
uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); @@ -135,8 +135,8 @@ public class NamespacesInstanceResource extends ResourceBase { @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) public Response put(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("PUT " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); return processUpdate(model, true, uriInfo); @@ -151,8 +151,8 @@ public class NamespacesInstanceResource extends ResourceBase { @PUT public Response putNoBody(final byte[] message, final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { - if (LOG.isDebugEnabled()) { - LOG.debug("PUT " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try{ @@ -176,8 +176,8 @@ public class NamespacesInstanceResource extends ResourceBase { public Response post(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("POST " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("POST " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); return processUpdate(model, false, uriInfo); @@ -192,8 +192,8 @@ public class NamespacesInstanceResource extends ResourceBase { @POST public Response postNoBody(final byte[] message, final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { - if (LOG.isDebugEnabled()) { - LOG.debug("POST " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("POST " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try{ @@ -287,8 +287,8 @@ public class NamespacesInstanceResource extends ResourceBase { @DELETE public Response deleteNoBody(final byte[] message, final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { - if (LOG.isDebugEnabled()) { - LOG.debug("DELETE " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("DELETE " + uriInfo.getAbsolutePath()); } if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedDeleteRequests(1); diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java index 0548fe8..1304fe0 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java @@ -64,8 +64,8 @@ public class NamespacesResource extends ResourceBase { @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java index 93bb940..cb0f4c8 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java +++ 
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java @@ -49,8 +49,10 @@ public class ProtobufStreamingUtil implements StreamingOutput { this.contentType = type; this.limit = limit; this.fetchSize = fetchSize; - LOG.debug("Created ScanStreamingUtil with content type = " + this.contentType + " user limit : " - + this.limit + " scan fetch size : " + this.fetchSize); + if (LOG.isTraceEnabled()) { + LOG.trace("Created ScanStreamingUtil with content type = " + this.contentType + + " user limit : " + this.limit + " scan fetch size : " + this.fetchSize); + } } @Override @@ -82,7 +84,9 @@ public class ProtobufStreamingUtil implements StreamingOutput { outStream.write(Bytes.toBytes((short)objectBytes.length)); outStream.write(objectBytes); outStream.flush(); - LOG.trace("Wrote " + model.getRows().size() + " rows to stream successfully."); + if (LOG.isTraceEnabled()) { + LOG.trace("Wrote " + model.getRows().size() + " rows to stream successfully."); + } } private CellSetModel createModelFromResults(Result[] results) { diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index b26de54..cb37fb5 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -168,20 +168,26 @@ public class RESTServer implements Constants { if (commandLine != null && commandLine.hasOption("port")) { String val = commandLine.getOptionValue("port"); servlet.getConfiguration().setInt("hbase.rest.port", Integer.parseInt(val)); - LOG.debug("port set to " + val); + if (LOG.isDebugEnabled()) { + LOG.debug("port set to " + val); + } } // check if server should only process GET requests, if so override the conf if (commandLine != null && commandLine.hasOption("readonly")) { servlet.getConfiguration().setBoolean("hbase.rest.readonly", true); - LOG.debug("readonly set to true"); + if (LOG.isDebugEnabled()) { + LOG.debug("readonly set to true"); + } } // check for user-defined info server port setting, if so override the conf if (commandLine != null && commandLine.hasOption("infoport")) { String val = commandLine.getOptionValue("infoport"); servlet.getConfiguration().setInt("hbase.rest.info.port", Integer.parseInt(val)); - LOG.debug("Web UI port set to " + val); + if (LOG.isDebugEnabled()) { + LOG.debug("Web UI port set to " + val); + } } @SuppressWarnings("unchecked") diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java index 4da5c67..411ced8 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java @@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.rest; import java.io.IOException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; @@ -30,14 +32,13 @@ import org.apache.hadoop.hbase.util.ConnectionCache; import org.apache.hadoop.hbase.util.JvmPauseMonitor; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ProxyUsers; -import org.apache.log4j.Logger; /** * Singleton class encapsulating global REST servlet state and functions. 
*/ @InterfaceAudience.Private public class RESTServlet implements Constants { - private static final Logger LOG = Logger.getLogger(RESTServlet.class); + private static final Log LOG = LogFactory.getLog(RESTServlet.class); private static RESTServlet INSTANCE; private final Configuration conf; private final MetricsREST metrics; diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index 48721bb..f803b26 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -71,8 +71,8 @@ public class RegionsResource extends ResourceBase { @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) public Response get(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java index c08bb8b..fc4c548 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java @@ -72,8 +72,8 @@ public class RootResource extends ResourceBase { @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) public Response get(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java index 15828ce..de84625 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java @@ -85,8 +85,8 @@ public class RowResource extends ResourceBase { @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) public Response get(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); MultivaluedMap params = uriInfo.getQueryParameters(); @@ -130,8 +130,8 @@ public class RowResource extends ResourceBase { @GET @Produces(MIMETYPE_BINARY) public Response getBinary(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); // doesn't make sense to use a non specific coordinate as this can only @@ -221,8 +221,8 @@ public class RowResource extends ResourceBase { put.addImmutable(parts[0], parts[1], cell.getTimestamp(), cell.getValue()); } puts.add(put); - if (LOG.isDebugEnabled()) { - LOG.debug("PUT " + put.toString()); + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + put.toString()); } } table = servlet.getTable(tableResource.getName()); @@ -289,8 +289,8 @@ public class RowResource 
extends ResourceBase { put.addImmutable(parts[0], parts[1], timestamp, message); table = servlet.getTable(tableResource.getName()); table.put(put); - if (LOG.isDebugEnabled()) { - LOG.debug("PUT " + put.toString()); + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + put.toString()); } servlet.getMetrics().incrementSucessfulPutRequests(1); return Response.ok().build(); @@ -301,7 +301,7 @@ public class RowResource extends ResourceBase { if (table != null) try { table.close(); } catch (IOException ioe) { - LOG.debug(ioe); + LOG.debug("Exception received while closing the table", ioe); } } } @@ -311,8 +311,8 @@ public class RowResource extends ResourceBase { MIMETYPE_PROTOBUF_IETF}) public Response put(final CellSetModel model, final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("PUT " + uriInfo.getAbsolutePath() + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath() + " " + uriInfo.getQueryParameters()); } return update(model, true); @@ -322,8 +322,8 @@ public class RowResource extends ResourceBase { @Consumes(MIMETYPE_BINARY) public Response putBinary(final byte[] message, final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { - if (LOG.isDebugEnabled()) { - LOG.debug("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); } return updateBinary(message, headers, true); } @@ -333,8 +333,8 @@ public class RowResource extends ResourceBase { MIMETYPE_PROTOBUF_IETF}) public Response post(final CellSetModel model, final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("POST " + uriInfo.getAbsolutePath() + if (LOG.isTraceEnabled()) { + LOG.trace("POST " + uriInfo.getAbsolutePath() + " " + uriInfo.getQueryParameters()); } return update(model, false); @@ -344,16 +344,16 @@ public class RowResource extends ResourceBase { @Consumes(MIMETYPE_BINARY) public Response postBinary(final byte[] message, final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { - if (LOG.isDebugEnabled()) { - LOG.debug("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY); + if (LOG.isTraceEnabled()) { + LOG.trace("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY); } return updateBinary(message, headers, false); } @DELETE public Response delete(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("DELETE " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("DELETE " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { @@ -397,8 +397,8 @@ public class RowResource extends ResourceBase { table = servlet.getTable(tableResource.getName()); table.delete(delete); servlet.getMetrics().incrementSucessfulDeleteRequests(1); - if (LOG.isDebugEnabled()) { - LOG.debug("DELETE " + delete.toString()); + if (LOG.isTraceEnabled()) { + LOG.trace("DELETE " + delete.toString()); } } catch (Exception e) { servlet.getMetrics().incrementFailedDeleteRequests(1); @@ -407,7 +407,7 @@ public class RowResource extends ResourceBase { if (table != null) try { table.close(); } catch (IOException ioe) { - LOG.debug(ioe); + LOG.debug("Exception received while closing the table", ioe); } } return Response.ok().build(); @@ -499,8 +499,8 @@ public class RowResource extends ResourceBase { .build(); } - if (LOG.isDebugEnabled()) { - LOG.debug("CHECK-AND-PUT " + put.toString() + ", returns " + retValue); + if (LOG.isTraceEnabled()) { + 
LOG.trace("CHECK-AND-PUT " + put.toString() + ", returns " + retValue); } if (!retValue) { servlet.getMetrics().incrementFailedPutRequests(1); @@ -517,7 +517,7 @@ public class RowResource extends ResourceBase { } finally { if (table != null) try { table.close(); - } catch (IOException ioe) { + } catch (IOException ioe) { LOG.debug("Exception received while closing the table", ioe); } } @@ -627,8 +627,8 @@ public class RowResource extends ResourceBase { .build(); } - if (LOG.isDebugEnabled()) { - LOG.debug("CHECK-AND-DELETE " + delete.toString() + ", returns " + if (LOG.isTraceEnabled()) { + LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns " + retValue); } diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java index ffb2fae..2469faa 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java @@ -62,7 +62,7 @@ public class ScannerInstanceResource extends ResourceBase { public ScannerInstanceResource() throws IOException { } - public ScannerInstanceResource(String table, String id, + public ScannerInstanceResource(String table, String id, ResultGenerator generator, int batch) throws IOException { this.id = id; this.generator = generator; @@ -72,10 +72,10 @@ public class ScannerInstanceResource extends ResourceBase { @GET @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context UriInfo uriInfo, + public Response get(final @Context UriInfo uriInfo, @QueryParam("n") int maxRows, final @QueryParam("c") int maxValues) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); if (generator == null) { @@ -108,7 +108,9 @@ public class ScannerInstanceResource extends ResourceBase { .build(); } if (value == null) { - LOG.info("generator exhausted"); + if (LOG.isTraceEnabled()) { + LOG.trace("generator exhausted"); + } // respond with 204 (No Content) if an empty cell set would be // returned if (count == limit) { @@ -123,7 +125,7 @@ public class ScannerInstanceResource extends ResourceBase { if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) { // if maxRows was given as a query param, stop if we would exceed the // specified number of rows - if (maxRows > 0) { + if (maxRows > 0) { if (--maxRows == 0) { generator.putBack(value); break; @@ -134,7 +136,7 @@ public class ScannerInstanceResource extends ResourceBase { rowModel = new RowModel(rowKey); } rowModel.addCell( - new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), + new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); } while (--count > 0); model.addRow(rowModel); @@ -147,21 +149,23 @@ public class ScannerInstanceResource extends ResourceBase { @GET @Produces(MIMETYPE_BINARY) public Response getBinary(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath() + " as " + + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); try { Cell value = generator.next(); if (value == null) { - LOG.info("generator exhausted"); + if (LOG.isTraceEnabled()) { + 
LOG.trace("generator exhausted"); + } return Response.noContent().build(); } ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); response.cacheControl(cacheControl); - response.header("X-Row", Base64.encodeBytes(CellUtil.cloneRow(value))); - response.header("X-Column", + response.header("X-Row", Base64.encodeBytes(CellUtil.cloneRow(value))); + response.header("X-Column", Base64.encodeBytes( KeyValue.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value)))); response.header("X-Timestamp", value.getTimestamp()); @@ -182,8 +186,8 @@ public class ScannerInstanceResource extends ResourceBase { @DELETE public Response delete(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("DELETE " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("DELETE " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java index 6c424ce..71723d8 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java @@ -31,7 +31,6 @@ import javax.ws.rs.PUT; import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.core.Context; -import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.Response; import javax.ws.rs.core.UriBuilder; import javax.ws.rs.core.UriInfo; @@ -91,8 +90,7 @@ public class ScannerResource extends ResourceBase { spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(), model.getEndTime(), model.getMaxVersions()); } - MultivaluedMap params = uriInfo.getQueryParameters(); - + try { Filter filter = ScannerResultGenerator.buildFilterFromModel(model); String tableName = tableResource.getName(); @@ -103,8 +101,8 @@ public class ScannerResource extends ResourceBase { ScannerInstanceResource instance = new ScannerInstanceResource(tableName, id, gen, model.getBatch()); scanners.put(id, instance); - if (LOG.isDebugEnabled()) { - LOG.debug("new scanner: " + id); + if (LOG.isTraceEnabled()) { + LOG.trace("new scanner: " + id); } UriBuilder builder = uriInfo.getAbsolutePathBuilder(); URI uri = builder.path(id).build(); @@ -130,10 +128,10 @@ public class ScannerResource extends ResourceBase { @PUT @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) - public Response put(final ScannerModel model, + public Response put(final ScannerModel model, final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("PUT " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath()); } return update(model, true, uriInfo); } @@ -143,8 +141,8 @@ public class ScannerResource extends ResourceBase { MIMETYPE_PROTOBUF_IETF}) public Response post(final ScannerModel model, final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("POST " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("POST " + uriInfo.getAbsolutePath()); } return update(model, false, uriInfo); } diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java index c0e7153..dc34f09 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java +++ 
hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java @@ -86,8 +86,8 @@ public class SchemaResource extends ResourceBase { @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) public Response get(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { @@ -99,7 +99,7 @@ public class SchemaResource extends ResourceBase { } catch (Exception e) { servlet.getMetrics().incrementFailedGetRequests(1); return processException(e); - } + } } private Response replace(final TableName name, final TableSchemaModel model, @@ -198,10 +198,10 @@ public class SchemaResource extends ResourceBase { @PUT @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) - public Response put(final TableSchemaModel model, + public Response put(final TableSchemaModel model, final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("PUT " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); return update(model, true, uriInfo); @@ -210,10 +210,10 @@ public class SchemaResource extends ResourceBase { @POST @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) - public Response post(final TableSchemaModel model, + public Response post(final TableSchemaModel model, final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("PUT " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); return update(model, false, uriInfo); @@ -223,8 +223,8 @@ public class SchemaResource extends ResourceBase { justification="Expected") @DELETE public Response delete(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("DELETE " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("DELETE " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java index a7e52bd..27977c3 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java @@ -63,8 +63,8 @@ public class StorageClusterStatusResource extends ResourceBase { @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) public Response get(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java index 85e81f8..b9fb5d4 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java @@ -58,8 +58,8 @@ public class 
StorageClusterVersionResource extends ResourceBase { @GET @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) public Response get(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java index f87ef7e..70a4538 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java @@ -133,7 +133,7 @@ public class TableResource extends ResourceBase { @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize, @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime, @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime, - @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks, + @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks, @DefaultValue("") @QueryParam(Constants.SCAN_FILTER) String filters) { try { Filter filter = null; @@ -146,10 +146,12 @@ public class TableResource extends ResourceBase { tableScan.setStartRow(prefixBytes); } } - LOG.debug("Query parameters : Table Name = > " + this.table + " Start Row => " + startRow - + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime - + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => " - + maxVersions + " Batch Size => " + batchSize); + if (LOG.isTraceEnabled()) { + LOG.trace("Query parameters : Table Name = > " + this.table + " Start Row => " + startRow + + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime + + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => " + + maxVersions + " Batch Size => " + batchSize); + } Table hTable = RESTServlet.getInstance().getTable(this.table); tableScan.setBatch(batchSize); tableScan.setMaxVersions(maxVersions); @@ -162,15 +164,21 @@ public class TableResource extends ResourceBase { String[] familysplit = csplit.trim().split(":"); if (familysplit.length == 2) { if (familysplit[1].length() > 0) { - LOG.debug("Scan family and column : " + familysplit[0] + " " + familysplit[1]); + if (LOG.isTraceEnabled()) { + LOG.trace("Scan family and column : " + familysplit[0] + " " + familysplit[1]); + } tableScan.addColumn(Bytes.toBytes(familysplit[0]), Bytes.toBytes(familysplit[1])); } else { tableScan.addFamily(Bytes.toBytes(familysplit[0])); - LOG.debug("Scan family : " + familysplit[0] + " and empty qualifier."); + if (LOG.isTraceEnabled()) { + LOG.trace("Scan family : " + familysplit[0] + " and empty qualifier."); + } tableScan.addColumn(Bytes.toBytes(familysplit[0]), null); } - } else if (StringUtils.isNotEmpty(familysplit[0])){ - LOG.debug("Scan family : " + familysplit[0]); + } else if (StringUtils.isNotEmpty(familysplit[0])) { + if (LOG.isTraceEnabled()) { + LOG.trace("Scan family : " + familysplit[0]); + } tableScan.addFamily(Bytes.toBytes(familysplit[0])); } } @@ -192,8 +200,8 @@ public class TableResource extends ResourceBase { } int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10); tableScan.setCaching(fetchSize); - return new TableScanResource(hTable.getScanner(tableScan), userRequestedLimit); - } catch 
(Exception exp) { + return new TableScanResource(hTable.getScanner(tableScan), userRequestedLimit); + } catch (IOException exp) { servlet.getMetrics().incrementFailedScanRequests(1); processException(exp); LOG.warn(exp); diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java index ae93825..172246c 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java @@ -68,15 +68,15 @@ public class VersionResource extends ResourceBase { * Build a response for a version request. * @param context servlet context * @param uriInfo (JAX-RS context variable) request URL - * @return a response for a version request + * @return a response for a version request */ @GET @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context ServletContext context, + public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath()); + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); ResponseBuilder response = Response.ok(new VersionModel(context)); @@ -89,7 +89,7 @@ public class VersionResource extends ResourceBase { * Dispatch to StorageClusterVersionResource */ @Path("cluster") - public StorageClusterVersionResource getClusterVersionResource() + public StorageClusterVersionResource getClusterVersionResource() throws IOException { return new StorageClusterVersionResource(); } diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 142c276..e26de63 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -101,10 +101,10 @@ public class Client { } /** - * Shut down the client. Close any open persistent connections. + * Shut down the client. Close any open persistent connections. */ public void shutdown() { - MultiThreadedHttpConnectionManager manager = + MultiThreadedHttpConnectionManager manager = (MultiThreadedHttpConnectionManager) httpClient.getHttpConnectionManager(); manager.shutdown(); } @@ -151,7 +151,7 @@ public class Client { * one of the members of the supplied cluster definition and iterate through * the list until a transaction can be successfully completed. The * definition of success here is a complete HTTP transaction, irrespective - * of result code. + * of result code. 
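The logging change repeated through the REST resources above follows one pattern: per-request logging drops from DEBUG to TRACE and stays wrapped in a level check, so the URI string is never concatenated unless tracing is on. A minimal sketch of that guard with commons-logging (the class name is illustrative, not part of the patch):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class TraceGuardSketch {
  private static final Log LOG = LogFactory.getLog(TraceGuardSketch.class);

  void logGet(String absolutePath) {
    // The guard skips building "GET " + absolutePath on the hot path
    // whenever TRACE is disabled for this logger.
    if (LOG.isTraceEnabled()) {
      LOG.trace("GET " + absolutePath);
    }
  }
}

The same commons-logging constraint shows up further down in RestCsrfPreventionFilter: the Log interface has no SLF4J-style {} placeholders, so the startup message is pre-formatted with String.format instead.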
* @param cluster the cluster definition * @param method the transaction method * @param headers HTTP header values to send @@ -209,8 +209,8 @@ public class Client { long startTime = System.currentTimeMillis(); int code = httpClient.executeMethod(method); long endTime = System.currentTimeMillis(); - if (LOG.isDebugEnabled()) { - LOG.debug(method.getName() + " " + uri + " " + code + " " + + if (LOG.isTraceEnabled()) { + LOG.trace(method.getName() + " " + uri + " " + code + " " + method.getStatusText() + " in " + (endTime - startTime) + " ms"); } return code; @@ -250,7 +250,7 @@ public class Client { } /** - * Send a HEAD request + * Send a HEAD request * @param path the path or URI * @return a Response object with response detail * @throws IOException @@ -260,14 +260,14 @@ public class Client { } /** - * Send a HEAD request + * Send a HEAD request * @param cluster the cluster definition * @param path the path or URI * @param headers the HTTP headers to include in the request * @return a Response object with response detail * @throws IOException */ - public Response head(Cluster cluster, String path, Header[] headers) + public Response head(Cluster cluster, String path, Header[] headers) throws IOException { HeadMethod method = new HeadMethod(); try { @@ -280,7 +280,7 @@ public class Client { } /** - * Send a GET request + * Send a GET request * @param path the path or URI * @return a Response object with response detail * @throws IOException @@ -290,7 +290,7 @@ public class Client { } /** - * Send a GET request + * Send a GET request * @param cluster the cluster definition * @param path the path or URI * @return a Response object with response detail @@ -301,7 +301,7 @@ public class Client { } /** - * Send a GET request + * Send a GET request * @param path the path or URI * @param accept Accept header value * @return a Response object with response detail @@ -312,7 +312,7 @@ public class Client { } /** - * Send a GET request + * Send a GET request * @param cluster the cluster definition * @param path the path or URI * @param accept Accept header value @@ -329,7 +329,7 @@ public class Client { /** * Send a GET request * @param path the path or URI - * @param headers the HTTP headers to include in the request, + * @param headers the HTTP headers to include in the request, * Accept must be supplied * @return a Response object with response detail * @throws IOException @@ -346,7 +346,7 @@ public class Client { * @return a Response object with response detail * @throws IOException */ - public Response get(Cluster c, String path, Header[] headers) + public Response get(Cluster c, String path, Header[] headers) throws IOException { GetMethod method = new GetMethod(); try { @@ -396,7 +396,7 @@ public class Client { * @return a Response object with response detail * @throws IOException for error */ - public Response put(Cluster cluster, String path, String contentType, + public Response put(Cluster cluster, String path, String contentType, byte[] content) throws IOException { Header[] headers = new Header[1]; headers[0] = new Header("Content-Type", contentType); @@ -413,7 +413,7 @@ public class Client { * @return a Response object with response detail * @throws IOException for error */ - public Response put(Cluster cluster, String path, String contentType, + public Response put(Cluster cluster, String path, String contentType, byte[] content, Header extraHdr) throws IOException { int cnt = extraHdr == null ? 
1 : 2; Header[] headers = new Header[cnt]; @@ -433,7 +433,7 @@ public class Client { * @return a Response object with response detail * @throws IOException */ - public Response put(String path, Header[] headers, byte[] content) + public Response put(String path, Header[] headers, byte[] content) throws IOException { return put(cluster, path, headers, content); } @@ -448,7 +448,7 @@ public class Client { * @return a Response object with response detail * @throws IOException */ - public Response put(Cluster cluster, String path, Header[] headers, + public Response put(Cluster cluster, String path, Header[] headers, byte[] content) throws IOException { PutMethod method = new PutMethod(); try { @@ -498,7 +498,7 @@ public class Client { * @return a Response object with response detail * @throws IOException for error */ - public Response post(Cluster cluster, String path, String contentType, + public Response post(Cluster cluster, String path, String contentType, byte[] content) throws IOException { Header[] headers = new Header[1]; headers[0] = new Header("Content-Type", contentType); @@ -515,7 +515,7 @@ public class Client { * @return a Response object with response detail * @throws IOException for error */ - public Response post(Cluster cluster, String path, String contentType, + public Response post(Cluster cluster, String path, String contentType, byte[] content, Header extraHdr) throws IOException { int cnt = extraHdr == null ? 1 : 2; Header[] headers = new Header[cnt]; @@ -535,7 +535,7 @@ public class Client { * @return a Response object with response detail * @throws IOException */ - public Response post(String path, Header[] headers, byte[] content) + public Response post(String path, Header[] headers, byte[] content) throws IOException { return post(cluster, path, headers, content); } @@ -550,7 +550,7 @@ public class Client { * @return a Response object with response detail * @throws IOException */ - public Response post(Cluster cluster, String path, Header[] headers, + public Response post(Cluster cluster, String path, Header[] headers, byte[] content) throws IOException { PostMethod method = new PostMethod(); try { diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java index e5208af..f051bc8 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java @@ -72,7 +72,9 @@ public class AuthFilter extends AuthenticationFilter { throw new ServletException("Failed to retrieve server principal", ie); } } - LOG.debug("Setting property " + name + "=" + value); + if (LOG.isTraceEnabled()) { + LOG.trace("Setting property " + name + "=" + value); + } name = name.substring(REST_PREFIX_LEN); props.setProperty(name, value); } diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java index 4995b86..094ae0b 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java @@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.rest.filter; import java.io.IOException; import java.io.OutputStream; import java.util.HashSet; +import java.util.Locale; import java.util.Set; import java.util.StringTokenizer; @@ -65,11 +66,11 @@ public class GzipFilter implements Filter { String acceptEncoding = 
request.getHeader("accept-encoding"); String contentType = request.getHeader("content-type"); if ((contentEncoding != null) && - (contentEncoding.toLowerCase().indexOf("gzip") > -1)) { + (contentEncoding.toLowerCase(Locale.ROOT).indexOf("gzip") > -1)) { request = new GZIPRequestWrapper(request); } if (((acceptEncoding != null) && - (acceptEncoding.toLowerCase().indexOf("gzip") > -1)) || + (acceptEncoding.toLowerCase(Locale.ROOT).indexOf("gzip") > -1)) || ((contentType != null) && mimeTypes.contains(contentType))) { response = new GZIPResponseWrapper(response); } diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java index 30eea95..dbb1447 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java @@ -34,13 +34,12 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * This filter provides protection against cross site request forgery (CSRF) * attacks for REST APIs. Enabling this filter on an endpoint results in the @@ -52,8 +51,8 @@ import org.slf4j.LoggerFactory; @InterfaceStability.Evolving public class RestCsrfPreventionFilter implements Filter { - private static final Logger LOG = - LoggerFactory.getLogger(RestCsrfPreventionFilter.class); + private static final Log LOG = + LogFactory.getLog(RestCsrfPreventionFilter.class); public static final String HEADER_USER_AGENT = "User-Agent"; public static final String BROWSER_USER_AGENT_PARAM = @@ -87,9 +86,9 @@ public class RestCsrfPreventionFilter implements Filter { agents = BROWSER_USER_AGENTS_DEFAULT; } parseBrowserUserAgents(agents); - LOG.info("Adding cross-site request forgery (CSRF) protection, " - + "headerName = {}, methodsToIgnore = {}, browserUserAgents = {}", - headerName, methodsToIgnore, browserUserAgents); + LOG.info(String.format("Adding cross-site request forgery (CSRF) protection, " + + "headerName = %s, methodsToIgnore = %s, browserUserAgents = %s", + headerName, methodsToIgnore, browserUserAgents)); } void parseBrowserUserAgents(String userAgents) { diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java index ec39db0..073c038 100644 --- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java +++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; @Provider @Consumes({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF}) @InterfaceAudience.Private -public class ProtobufMessageBodyConsumer +public class ProtobufMessageBodyConsumer implements MessageBodyReader { private static final Log LOG = LogFactory.getLog(ProtobufMessageBodyConsumer.class); @@ -73,8 +73,8 @@ public class ProtobufMessageBodyConsumer 
baos.write(buffer, 0, read); } } while (read > 0); - if (LOG.isDebugEnabled()) { - LOG.debug(getClass() + ": read " + baos.size() + " bytes from " + + if (LOG.isTraceEnabled()) { + LOG.trace(getClass() + ": read " + baos.size() + " bytes from " + inputStream); } obj = obj.getObjectFromMessage(baos.toByteArray()); diff --git hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java index 8725781..7aea464 100644 --- hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java +++ hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java @@ -191,7 +191,7 @@ public class RSGroupAdminServer extends RSGroupAdmin { } } for (RegionState state : - master.getAssignmentManager().getRegionStates().getRegionsInTransition().values()) { + master.getAssignmentManager().getRegionStates().getRegionsInTransition()) { if (state.getServerName().getHostPort().equals(rs)) { regions.add(state.getRegion()); } diff --git hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java index 34add63..25c54b2 100644 --- hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java +++ hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java @@ -19,7 +19,6 @@ */ package org.apache.hadoop.hbase.rsgroup; -import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.google.common.net.HostAndPort; @@ -35,9 +34,9 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.Waiter.Predicate; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; @@ -50,20 +49,11 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; -import javax.management.MBeanServer; -import javax.management.ObjectName; import java.io.IOException; -import java.lang.management.ManagementFactory; import java.util.Iterator; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -190,7 +180,8 @@ public class TestRSGroups extends TestRSGroupsBase { }); ServerName targetServer = ServerName.parseServerName(appInfo.getServers().iterator().next().toString()); - AdminProtos.AdminService.BlockingInterface rs = admin.getConnection().getAdmin(targetServer); + AdminProtos.AdminService.BlockingInterface rs = + ((ClusterConnection) admin.getConnection()).getAdmin(targetServer); //verify it was assigned to the right group Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size()); } diff --git hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java 
hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java index 9225e09..b1c7b3b 100644 --- hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java +++ hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -482,7 +483,7 @@ public abstract class TestRSGroupsBase { } final AdminProtos.AdminService.BlockingInterface targetRS = - admin.getConnection().getAdmin(targetServer); + ((ClusterConnection) admin.getConnection()).getAdmin(targetServer); //move target server to group rsGroupAdmin.moveServers(Sets.newHashSet(targetServer.getHostPort()), @@ -571,7 +572,7 @@ public abstract class TestRSGroupsBase { ServerName targetServer = ServerName.parseServerName( appInfo.getServers().iterator().next().toString()); AdminProtos.AdminService.BlockingInterface targetRS = - admin.getConnection().getAdmin(targetServer); + ((ClusterConnection) admin.getConnection()).getAdmin(targetServer); HRegionInfo targetRegion = ProtobufUtil.getOnlineRegions(targetRS).get(0); Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size()); @@ -612,7 +613,7 @@ public abstract class TestRSGroupsBase { targetServer = ServerName.parseServerName( newServers.iterator().next().toString()); targetRS = - admin.getConnection().getAdmin(targetServer); + ((ClusterConnection) admin.getConnection()).getAdmin(targetServer); Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size()); Assert.assertEquals(tableName, ProtobufUtil.getOnlineRegions(targetRS).get(0).getTable()); diff --git hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon index 42334ff..e2ae09d 100644 --- hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon +++ hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon @@ -23,38 +23,34 @@ org.apache.hadoop.hbase.master.RegionState; org.apache.hadoop.conf.Configuration; org.apache.hadoop.hbase.HBaseConfiguration; org.apache.hadoop.hbase.HConstants; -java.util.Iterator; -java.util.Map; -java.util.List; -java.util.ArrayList; -java.util.Map.Entry; -java.util.Arrays; +java.util.HashSet; +java.util.SortedSet; <%args> AssignmentManager assignmentManager; int limit = 100; -<%java Map rit = assignmentManager +<%java SortedSet rit = assignmentManager .getRegionStates().getRegionsInTransitionOrderedByTimestamp(); %> <%if !rit.isEmpty() %> <%java> -List ritsOverThreshold = new ArrayList<>(); -List ritsTwiceThreshold = new ArrayList<>(); +HashSet ritsOverThreshold = new HashSet(); +HashSet ritsTwiceThreshold = new HashSet(); // process the map to find region in transition details Configuration conf = HBaseConfiguration.create(); int ritThreshold = conf.getInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 60000); int numOfRITOverThreshold = 0; long currentTime = System.currentTimeMillis(); -for (Map.Entry e : rit.entrySet()) { - long ritTime = currentTime - e.getValue().getStamp(); +for (RegionState rs : rit) { + 
long ritTime = currentTime - rs.getStamp(); if(ritTime > (ritThreshold * 2)) { numOfRITOverThreshold++; - ritsTwiceThreshold.add(e.getKey()); + ritsTwiceThreshold.add(rs.getRegion().getEncodedName()); } else if (ritTime > ritThreshold) { numOfRITOverThreshold++; - ritsOverThreshold.add(e.getKey()); + ritsOverThreshold.add(rs.getRegion().getEncodedName()); } } @@ -64,7 +60,7 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage);
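The scriptlet above now iterates RegionState objects directly, since getRegionsInTransitionOrderedByTimestamp() hands back a SortedSet of RegionState rather than a map keyed by encoded region name, and it buckets each region by how long it has been in transition against HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD (default 60000 ms here). A standalone sketch of that bucketing, with RegionState reduced to the two values the template reads (the stand-in class is illustrative):

import java.util.HashSet;
import java.util.Set;

public class RitBucketingSketch {
  /** Illustrative stand-in for RegionState: only what the template reads. */
  static class Rit {
    final String encodedName;
    final long stamp;   // time the region entered its current transition state
    Rit(String encodedName, long stamp) { this.encodedName = encodedName; this.stamp = stamp; }
  }

  public static void main(String[] args) {
    long ritThreshold = 60000;   // METRICS_RIT_STUCK_WARNING_THRESHOLD default used above
    long now = System.currentTimeMillis();

    Set<Rit> rit = new HashSet<Rit>();
    rit.add(new Rit("1588230740", now - 70000));     // over the threshold
    rit.add(new Rit("f7b1dbb2aa23", now - 130000));  // over twice the threshold

    Set<String> ritsOverThreshold = new HashSet<String>();
    Set<String> ritsTwiceThreshold = new HashSet<String>();
    int numOfRITOverThreshold = 0;
    for (Rit rs : rit) {
      long ritTime = now - rs.stamp;
      if (ritTime > ritThreshold * 2) {
        numOfRITOverThreshold++;
        ritsTwiceThreshold.add(rs.encodedName);   // flagged most prominently by the template
      } else if (ritTime > ritThreshold) {
        numOfRITOverThreshold++;
        ritsOverThreshold.add(rs.encodedName);    // flagged as over the threshold
      }
    }
    System.out.println(numOfRITOverThreshold + " region(s) in transition for more than "
        + ritThreshold + " milliseconds");
  }
}

Switching the threshold collections from ArrayList to HashSet also makes the later contains() check per rendered row a constant-time lookup instead of a linear scan.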

Regions in Transition

-

<% numOfRITs %> region(s) in transition. +

<% numOfRITs %> region(s) in transition. <%if !ritsTwiceThreshold.isEmpty() %> <%elseif !ritsOverThreshold.isEmpty() %> @@ -72,14 +68,14 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage); <%else> - <% numOfRITOverThreshold %> region(s) in transition for + <% numOfRITOverThreshold %> region(s) in transition for more than <% ritThreshold %> milliseconds.

<%java int recordItr = 0; %> - <%for Map.Entry entry : rit.entrySet() %> + <%for RegionState rs : rit %> <%if (recordItr % ritsPerPage) == 0 %> <%if recordItr == 0 %>
@@ -89,18 +85,17 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage); - - <%if ritsOverThreshold.contains(entry.getKey()) %> + + <%if ritsOverThreshold.contains(rs.getRegion().getEncodedName()) %> - <%elseif ritsTwiceThreshold.contains(entry.getKey()) %> + <%elseif ritsTwiceThreshold.contains(rs.getRegion().getEncodedName()) %> <%else> - - + + <%java recordItr++; %> <%if (recordItr % ritsPerPage) == 0 %> @@ -108,7 +103,7 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage); - + <%if (recordItr % ritsPerPage) != 0 %> <%for ; (recordItr % ritsPerPage) != 0 ; recordItr++ %> diff --git hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon index f5aa478..82cb4e7 100644 --- hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon +++ hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon @@ -193,14 +193,20 @@ MetricsHBaseServerWrapper mServerWrap;
Region State
RIT time (ms)
<% entry.getKey() %> - <% HRegionInfo.getDescriptiveNameFromRegionStateForDisplay( - entry.getValue(), conf) %><% (currentTime - entry.getValue().getStamp()) %> <% rs.getRegion().getEncodedName() %> + <% HRegionInfo.getDescriptiveNameFromRegionStateForDisplay(rs, conf) %><% (currentTime - rs.getStamp()) %>
-Compaction Queue Size
-Flush Queue Size
-Call Queue Size (bytes)
+Compaction Queue Length
+Flush Queue Length
+Priority Call Queue Length
+General Call Queue Length
+Replication Call Queue Length
+Total Call Queue Size (bytes)
 <% mWrap.getCompactionQueueSize() %>
 <% mWrap.getFlushQueueSize() %>
+<% mServerWrap.getPriorityQueueLength() %>
+<% mServerWrap.getGeneralQueueLength() %>
+<% mServerWrap.getReplicationQueueLength() %>
 <% TraditionalBinaryPrefix.long2String(mServerWrap.getTotalQueueSize(), "B", 1) %>
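The regionserver Queues table above grows from three columns to six, splitting the single call-queue column into per-queue lengths plus the total size in bytes. Roughly, the row renders values read from the server wrapper like this (the interface is a stand-in; only the getter names and the byte-formatted total come from the template):

public class QueueColumnsSketch {
  /** Stand-in with the MetricsHBaseServerWrapper getters the template row uses. */
  interface ServerWrap {
    int getPriorityQueueLength();
    int getGeneralQueueLength();
    int getReplicationQueueLength();
    long getTotalQueueSize();   // bytes; the template renders it via long2String(..., "B", 1)
  }

  static String renderRow(ServerWrap w) {
    // One row of the new table: three per-queue lengths plus the total size in bytes.
    return String.format("priority=%d general=%d replication=%d totalQueueBytes=%d",
        w.getPriorityQueueLength(), w.getGeneralQueueLength(),
        w.getReplicationQueueLength(), w.getTotalQueueSize());
  }
}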
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index 8b16a5b..33fff97 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -144,9 +144,6 @@ public class LocalHBaseCluster { // Always have masters and regionservers come up on port '0' so we don't // clash over default ports. conf.set(HConstants.MASTER_PORT, "0"); - if (conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1) { - conf.set(HConstants.MASTER_INFO_PORT, "0"); - } conf.set(HConstants.REGIONSERVER_PORT, "0"); if (conf.getInt(HConstants.REGIONSERVER_INFO_PORT, 0) != -1) { conf.set(HConstants.REGIONSERVER_INFO_PORT, "0"); @@ -178,7 +175,7 @@ public class LocalHBaseCluster { Configuration config, final int index) throws IOException { // Create each regionserver with its own Configuration instance so each has - // its HConnection instance rather than share (see HBASE_INSTANCES down in + // its Connection instance rather than share (see HBASE_INSTANCES down in // the guts of ConnectionManager). // Also, create separate CoordinatedStateManager instance per Server. @@ -213,7 +210,7 @@ public class LocalHBaseCluster { public JVMClusterUtil.MasterThread addMaster(Configuration c, final int index) throws IOException { // Create each master with its own Configuration instance so each has - // its HConnection instance rather than share (see HBASE_INSTANCES down in + // its Connection instance rather than share (see HBASE_INSTANCES down in // the guts of ConnectionManager. // Also, create separate CoordinatedStateManager instance per Server. @@ -450,4 +447,4 @@ public class LocalHBaseCluster { connection.close(); cluster.shutdown(); } -} \ No newline at end of file +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java index 85b1135..63d88ef 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java @@ -21,10 +21,10 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -44,7 +44,7 @@ class HFileArchiveManager { private final ZooKeeperWatcher zooKeeper; private volatile boolean stopped = false; - public HFileArchiveManager(HConnection connection, Configuration conf) + public HFileArchiveManager(Connection connection, Configuration conf) throws ZooKeeperConnectionException, IOException { this.zooKeeper = new ZooKeeperWatcher(conf, "hfileArchiveManager-on-" + connection.toString(), connection); @@ -74,7 +74,7 @@ class HFileArchiveManager { */ public HFileArchiveManager disableHFileBackup(byte[] table) throws KeeperException { disable(this.zooKeeper, table); - return this; + return this; } /** diff --git 
hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java index 285737d..44e0597 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java @@ -43,11 +43,11 @@ public class CoprocessorHConnection extends ConnectionImplementation { private static final NonceGenerator NO_NONCE_GEN = new NoNonceGenerator(); /** - * Create an {@link HConnection} based on the environment in which we are running the - * coprocessor. The {@link HConnection} must be externally cleaned up (we bypass the usual HTable - * cleanup mechanisms since we own everything). - * @param env environment hosting the {@link HConnection} - * @return instance of {@link HConnection}. + * Create a {@link ClusterConnection} based on the environment in which we are running the + * coprocessor. The {@link ClusterConnection} must be externally cleaned up + * (we bypass the usual HTable cleanup mechanisms since we own everything). + * @param env environment hosting the {@link ClusterConnection} + * @return instance of {@link ClusterConnection}. * @throws IOException if we cannot create the connection */ public static ClusterConnection getConnectionForEnvironment(CoprocessorEnvironment env) diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java index 292a935..5da0df7 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java @@ -18,6 +18,11 @@ */ package org.apache.hadoop.hbase.client; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.Service; +import com.google.protobuf.ServiceException; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -37,11 +42,6 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.io.MultipleIOException; -import com.google.protobuf.Descriptors.MethodDescriptor; -import com.google.protobuf.Message; -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; - /** * A wrapper for HTable. Can be used to restrict privilege. * @@ -61,7 +61,7 @@ import com.google.protobuf.ServiceException; public final class HTableWrapper implements Table { private final Table table; - private ClusterConnection connection; + private final ClusterConnection connection; private final List openTables; /** @@ -134,7 +134,9 @@ public final class HTableWrapper implements Table { public Boolean[] exists(List gets) throws IOException { // Do convertion. boolean [] exists = table.existsAll(gets); - if (exists == null) return null; + if (exists == null) { + return null; + } Boolean [] results = new Boolean [exists.length]; for (int i = 0; i < exists.length; i++) { results[i] = exists[i]? 
Boolean.TRUE: Boolean.FALSE; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java index eaf9f43..bff727a 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java @@ -452,6 +452,16 @@ public class BaseRegionObserver implements RegionObserver { final InternalScanner s) throws IOException { } + @Override + public void preReplayWALs(ObserverContext env, + HRegionInfo info, Path edits) throws IOException { + } + + @Override + public void postReplayWALs(ObserverContext env, + HRegionInfo info, Path edits) throws IOException { + } + /** * Implementers should override this version of the method and leave the deprecated one as-is. */ diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index da0e8b1..e937569 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -158,9 +158,10 @@ public abstract class CoprocessorHost { implClass = cl.loadClass(className); // Add coprocessors as we go to guard against case where a coprocessor is specified twice // in the configuration - this.coprocessors.add(loadInstance(implClass, Coprocessor.PRIORITY_SYSTEM, conf)); + this.coprocessors.add(loadInstance(implClass, priority, conf)); LOG.info("System coprocessor " + className + " was loaded " + - "successfully with priority (" + priority++ + ")."); + "successfully with priority (" + priority + ")."); + ++priority; } catch (Throwable t) { // We always abort if system coprocessors cannot be loaded abortServer(className, t); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 1d985df..4729954 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -1219,6 +1219,28 @@ public interface RegionObserver extends Coprocessor { throws IOException; /** + * Called before replaying WALs for this region. + * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no + * effect in this hook. + * @param ctx the environment provided by the region server + * @param info the RegionInfo for this region + * @param edits the file of recovered edits + * @throws IOException if an error occurred on the coprocessor + */ + void preReplayWALs(final ObserverContext ctx, + HRegionInfo info, Path edits) throws IOException; + + /** + * Called after replaying WALs for this region. + * @param ctx the environment provided by the region server + * @param info the RegionInfo for this region + * @param edits the file of recovered edits + * @throws IOException if an error occurred on the coprocessor + */ + void postReplayWALs(final ObserverContext ctx, + HRegionInfo info, Path edits) throws IOException; + + /** * Called before a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit} * replayed for this region. 
*/ diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java hbase-server/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java index 7701a25..1a95c70 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java @@ -72,14 +72,14 @@ public class LogLevel { System.out.println("Connecting to " + url); URLConnection connection = url.openConnection(); connection.connect(); - - BufferedReader in = new BufferedReader(new InputStreamReader( - connection.getInputStream())); - for(String line; (line = in.readLine()) != null; ) - if (line.startsWith(MARKER)) { - System.out.println(TAG.matcher(line).replaceAll("")); + try (BufferedReader in = new BufferedReader(new InputStreamReader( + connection.getInputStream()))) { + for(String line; (line = in.readLine()) != null; ) { + if (line.startsWith(MARKER)) { + System.out.println(TAG.matcher(line).replaceAll("")); + } } - in.close(); + } } catch (IOException ioe) { System.err.println("" + ioe); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index efc9a30..14a5cd1 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -1712,7 +1712,7 @@ public class HFileBlock implements Cacheable { ByteBuffer onDiskBlockByteBuffer = ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader); // Verify checksum of the data before using it for building HFileBlock. if (verifyChecksum && - !validateChecksum(offset, onDiskBlockByteBuffer.asReadOnlyBuffer(), hdrSize)) { + !validateChecksum(offset, onDiskBlockByteBuffer, hdrSize)) { return null; } // The onDiskBlock will become the headerAndDataBuffer for this block. 
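Stepping back to the coprocessor change a little above: RegionObserver gains preReplayWALs/postReplayWALs, invoked around the replay of a recovered-edits file for a region, and BaseRegionObserver supplies no-op defaults so existing observers keep compiling. A hedged sketch of an observer using the pair; the wildcard generic on ObserverContext is my reading of the type parameters lost in the flattened diff text:

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

public class WALReplayTimingObserver extends BaseRegionObserver {
  private static final Log LOG = LogFactory.getLog(WALReplayTimingObserver.class);
  // Assumes replay calls for a region are not concurrent, which keeps the sketch simple.
  private volatile long replayStartMillis;

  @Override
  public void preReplayWALs(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
      HRegionInfo info, Path edits) throws IOException {
    // Per the new javadoc, ctx.bypass() has no effect in this hook; just note the start time.
    replayStartMillis = System.currentTimeMillis();
  }

  @Override
  public void postReplayWALs(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
      HRegionInfo info, Path edits) throws IOException {
    LOG.info("Replayed recovered edits " + edits + " for region " + info.getEncodedName()
        + " in " + (System.currentTimeMillis() - replayStartMillis) + " ms");
  }
}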
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java index e4205eb..3505221 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java @@ -22,6 +22,8 @@ import java.util.List; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -36,6 +38,7 @@ import org.apache.hadoop.hbase.util.ReflectionUtils; @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX }) @InterfaceStability.Evolving public class BalancedQueueRpcExecutor extends RpcExecutor { + private static final Log LOG = LogFactory.getLog(BalancedQueueRpcExecutor.class); protected final List> queues; private final QueueBalancer balancer; @@ -62,6 +65,7 @@ public class BalancedQueueRpcExecutor extends RpcExecutor { queues = new ArrayList>(numQueues); this.balancer = getBalancer(numQueues); initializeQueues(numQueues, queueClass, initargs); + LOG.debug(name + " queues=" + numQueues + " handlerCount=" + handlerCount); } protected void initializeQueues(final int numQueues, diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java index ee36f3f..70d903a 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.ipc; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DaemonThreadFactory; @@ -32,6 +34,7 @@ import java.util.concurrent.atomic.AtomicInteger; * This can be used for HMaster, where no prioritization is needed. 
*/ public class FifoRpcScheduler extends RpcScheduler { + private static final Log LOG = LogFactory.getLog(FifoRpcScheduler.class); private final int handlerCount; private final int maxQueueLength; private final AtomicInteger queueSize = new AtomicInteger(0); @@ -41,6 +44,8 @@ public class FifoRpcScheduler extends RpcScheduler { this.handlerCount = handlerCount; this.maxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + LOG.info("Using " + this.getClass().getSimpleName() + " as user call queue; handlerCount=" + + handlerCount + "; maxQueueLength=" + maxQueueLength); } @Override diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java index 9979c75..4f53709 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java @@ -36,7 +36,7 @@ public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper if (!isServerStarted()) { return 0; } - return server.callQueueSize.get(); + return server.callQueueSizeInBytes.get(); } @Override @@ -65,10 +65,10 @@ public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper @Override public int getNumOpenConnections() { - if (!isServerStarted() || this.server.connectionList == null) { + if (!isServerStarted()) { return 0; } - return server.connectionList.size(); + return server.getNumOpenConnections(); } @Override diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java index 40c11aa..880df36 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.ipc; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicInteger; @@ -221,7 +222,7 @@ public abstract class RpcExecutor { */ public void resizeQueues(Configuration conf) { String configKey = RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH; - if (name != null && name.toLowerCase().contains("priority")) { + if (name != null && name.toLowerCase(Locale.ROOT).contains("priority")) { configKey = RpcScheduler.IPC_SERVER_PRIORITY_MAX_CALLQUEUE_LENGTH; } currentQueueLimit = conf.getInt(configKey, currentQueueLimit); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 483ce86..1087c42 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -48,15 +48,17 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.Set; +import java.util.Timer; +import java.util.TimerTask; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedDeque; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import 
java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -113,6 +115,7 @@ import org.apache.hadoop.hbase.util.Counter; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; @@ -183,11 +186,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { */ static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10; - /** - * The maximum size that we can hold in the RPC queue - */ - private static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024; - private final IPCUtil ipcUtil; private static final String AUTH_FAILED_FOR = "Auth failed for "; @@ -210,22 +208,30 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { protected int port; // port we listen on protected InetSocketAddress address; // inet address we listen on private int readThreads; // number of read threads - protected int maxIdleTime; // the maximum idle time after - // which a client may be - // disconnected - protected int thresholdIdleConnections; // the number of idle - // connections after which we - // will start cleaning up idle - // connections - int maxConnectionsToNuke; // the max number of - // connections to nuke - // during a cleanup - protected MetricsHBaseServer metrics; protected final Configuration conf; - private int maxQueueSize; + /** + * Maximum size in bytes of the currently queued and running Calls. If a new Call puts us over + * this size, then we will reject the call (after parsing it though). It will go back to the + * client and client will retry. Set this size with "hbase.ipc.server.max.callqueue.size". The + * call queue size gets incremented after we parse a call and before we add it to the queue of + * calls for the scheduler to use. It get decremented after we have 'run' the Call. The current + * size is kept in {@link #callQueueSizeInBytes}. + * @see {@link #callQueueSizeInBytes} + * @see {@link #DEFAULT_MAX_CALLQUEUE_SIZE} + * @see {@link #callQueueSizeInBytes} + */ + private final long maxQueueSizeInBytes; + private static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024; + + /** + * This is a running count of the size in bytes of all outstanding calls whether currently + * executing or queued waiting to be run. + */ + protected final Counter callQueueSizeInBytes = new Counter(); + protected int socketSendBufferSize; protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm protected final boolean tcpKeepAlive; // if T then use keepalives @@ -244,19 +250,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { */ volatile boolean started = false; - /** - * This is a running count of the size of all outstanding calls by size. 
- */ - protected final Counter callQueueSize = new Counter(); - - protected final List connectionList = - Collections.synchronizedList(new LinkedList()); - //maintain a list - //of client connections + // maintains the set of client connections and handles idle timeouts + private ConnectionManager connectionManager; private Listener listener = null; protected Responder responder = null; protected AuthenticationTokenSecretManager authTokenSecretMgr = null; - protected int numConnections = 0; protected HBaseRPCErrorHandler errorHandler = null; @@ -623,18 +621,16 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { private Selector selector = null; //the selector that we use for the server private Reader[] readers = null; private int currentReader = 0; - private Random rand = new Random(); - private long lastCleanupRunTime = 0; //the last time when a cleanup connec- - //-tion (for idle connections) ran - private long cleanupInterval = 10000; //the minimum interval between - //two cleanup runs - private int backlogLength; + private final int readerPendingConnectionQueueLength; private ExecutorService readPool; public Listener(final String name) throws IOException { super(name); - backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128); + // The backlog of requests that we will have the serversocket carry. + int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128); + readerPendingConnectionQueueLength = + conf.getInt("hbase.ipc.server.read.connection-queue.size", 100); // Create a new server socket and set to non blocking mode acceptChannel = ServerSocketChannel.open(); acceptChannel.configureBlocking(false); @@ -644,9 +640,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port address = (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); // create a selector; - selector= Selector.open(); + selector = Selector.open(); readers = new Reader[readThreads]; + // Why this executor thing? Why not like hadoop just start up all the threads? I suppose it + // has an advantage in that it is easy to shutdown the pool. 
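As the new comment above notes, the readers run on a fixed ExecutorService largely because shutdown then comes for free. A small, hypothetical sketch of that lifecycle (thread-name format and counts are illustrative only):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import com.google.common.util.concurrent.ThreadFactoryBuilder;

    final class ReaderPool {
      private final ExecutorService pool;

      ReaderPool(int readThreads, String bindAddress, int port) {
        // Named daemon threads make the readers easy to spot in a thread dump.
        this.pool = Executors.newFixedThreadPool(readThreads,
            new ThreadFactoryBuilder()
                .setNameFormat("Reader=%d,bindAddress=" + bindAddress + ",port=" + port)
                .setDaemon(true)
                .build());
      }

      void start(Runnable[] readers) {
        for (Runnable reader : readers) {
          pool.execute(reader);
        }
      }

      void stop() {
        // One call interrupts every reader thread; no per-thread bookkeeping needed.
        pool.shutdownNow();
      }
    }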
readPool = Executors.newFixedThreadPool(readThreads, new ThreadFactoryBuilder().setNameFormat( "RpcServer.reader=%d,bindAddress=" + bindAddress.getHostName() + @@ -667,12 +665,15 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { private class Reader implements Runnable { - private volatile boolean adding = false; + final private LinkedBlockingQueue pendingConnections; private final Selector readSelector; Reader() throws IOException { + this.pendingConnections = + new LinkedBlockingQueue(readerPendingConnectionQueueLength); this.readSelector = Selector.open(); } + @Override public void run() { try { @@ -689,11 +690,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { private synchronized void doRunLoop() { while (running) { try { - readSelector.select(); - while (adding) { - this.wait(1000); + // Consume as many connections as currently queued to avoid + // unbridled acceptance of connections that starves the select + int size = pendingConnections.size(); + for (int i=size; i>0; i--) { + Connection conn = pendingConnections.take(); + conn.channel.register(readSelector, SelectionKey.OP_READ, conn); } - + readSelector.select(); Iterator iter = readSelector.selectedKeys().iterator(); while (iter.hasNext()) { SelectionKey key = iter.next(); @@ -703,9 +707,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { doRead(key); } } + key = null; } } catch (InterruptedException e) { - LOG.debug("Interrupted while sleeping"); + if (running) { // unexpected -- log it + LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e); + } return; } catch (IOException ex) { LOG.info(getName() + ": IOException in Reader", ex); @@ -714,76 +721,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { } /** - * This gets reader into the state that waits for the new channel - * to be registered with readSelector. If it was waiting in select() - * the thread will be woken up, otherwise whenever select() is called - * it will return even if there is nothing to read and wait - * in while(adding) for finishAdd call + * Updating the readSelector while it's being used is not thread-safe, + * so the connection must be queued. The reader will drain the queue + * and update its readSelector before performing the next select */ - public void startAdd() { - adding = true; + public void addConnection(Connection conn) throws IOException { + pendingConnections.add(conn); readSelector.wakeup(); } - - public synchronized SelectionKey registerChannel(SocketChannel channel) - throws IOException { - return channel.register(readSelector, SelectionKey.OP_READ); - } - - public synchronized void finishAdd() { - adding = false; - this.notify(); - } - } - - /** cleanup connections from connectionList. Choose a random range - * to scan and also have a limit on the number of the connections - * that will be cleanedup per run. The criteria for cleanup is the time - * for which the connection was idle. If 'force' is true then all - * connections will be looked at for the cleanup. 
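The Reader rewrite above replaces the adding/wait() handshake with a bounded hand-off queue: the accept thread enqueues the connection and wakes the selector, and the reader itself drains the queue and registers the channels before its next select(), so a burst of accepts cannot starve reads. A stand-alone sketch of the pattern (simplified types; not the patch's code):

    import java.io.IOException;
    import java.nio.channels.SelectionKey;
    import java.nio.channels.Selector;
    import java.nio.channels.SocketChannel;
    import java.util.concurrent.LinkedBlockingQueue;

    final class QueueingReader implements Runnable {
      private final LinkedBlockingQueue<SocketChannel> pending;
      private final Selector selector;
      private volatile boolean running = true;

      QueueingReader(int queueLength) throws IOException {
        this.pending = new LinkedBlockingQueue<>(queueLength);
        this.selector = Selector.open();
      }

      /** Called from the accept thread; never touches the selector's key set directly. */
      void addChannel(SocketChannel channel) {
        pending.add(channel);   // throws IllegalStateException if the bounded queue is full
        selector.wakeup();      // unblock select() so registration happens promptly
      }

      void stop() {
        running = false;
        selector.wakeup();
      }

      @Override
      public void run() {
        try {
          while (running) {
            // Register only what was queued at this instant; later arrivals wait for the
            // next pass, which keeps the select below from being starved.
            for (int i = pending.size(); i > 0; i--) {
              SocketChannel channel = pending.take();
              channel.configureBlocking(false);     // must be non-blocking before registering
              channel.register(selector, SelectionKey.OP_READ);
            }
            selector.select();
            selector.selectedKeys().clear();        // a real reader services each key here
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();       // shut down quietly on interruption
        } catch (IOException e) {
          // selector or registration failure; a real server would log and close
        }
      }
    }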
- * @param force all connections will be looked at for cleanup - */ - private void cleanupConnections(boolean force) { - if (force || numConnections > thresholdIdleConnections) { - long currentTime = System.currentTimeMillis(); - if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) { - return; - } - int start = 0; - int end = numConnections - 1; - if (!force) { - start = rand.nextInt() % numConnections; - end = rand.nextInt() % numConnections; - int temp; - if (end < start) { - temp = start; - start = end; - end = temp; - } - } - int i = start; - int numNuked = 0; - while (i <= end) { - Connection c; - synchronized (connectionList) { - try { - c = connectionList.get(i); - } catch (Exception e) {return;} - } - if (c.timedOut(currentTime)) { - if (LOG.isDebugEnabled()) - LOG.debug(getName() + ": disconnecting client " + c.getHostAddress()); - closeConnection(c); - numNuked++; - end--; - //noinspection UnusedAssignment - c = null; - if (!force && numNuked == maxConnectionsToNuke) break; - } - else i++; - } - lastCleanupRunTime = System.currentTimeMillis(); - } } @Override @@ -792,6 +737,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { "it will have per impact") public void run() { LOG.info(getName() + ": starting"); + connectionManager.startIdleScan(); while (running) { SelectionKey key = null; try { @@ -815,7 +761,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { if (errorHandler.checkOOME(e)) { LOG.info(getName() + ": exiting on OutOfMemoryError"); closeCurrentConnection(key, e); - cleanupConnections(true); + connectionManager.closeIdle(true); return; } } else { @@ -824,22 +770,18 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { // some thread(s) a chance to finish LOG.warn(getName() + ": OutOfMemoryError in server select", e); closeCurrentConnection(key, e); - cleanupConnections(true); + connectionManager.closeIdle(true); try { Thread.sleep(60000); } catch (InterruptedException ex) { LOG.debug("Interrupted while sleeping"); - return; } } } catch (Exception e) { closeCurrentConnection(key, e); } - cleanupConnections(false); } - LOG.info(getName() + ": stopping"); - synchronized (this) { try { acceptChannel.close(); @@ -851,10 +793,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { selector= null; acceptChannel= null; - // clean up all connections - while (!connectionList.isEmpty()) { - closeConnection(connectionList.remove(0)); - } + // close all connections + connectionManager.stopIdleScan(); + connectionManager.closeAll(); } } @@ -862,10 +803,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { if (key != null) { Connection c = (Connection)key.attachment(); if (c != null) { - if (LOG.isDebugEnabled()) { - LOG.debug(getName() + ": disconnecting client " + c.getHostAddress() + - (e != null ? 
" on error " + e.getMessage() : "")); - } closeConnection(c); key.attach(null); } @@ -876,37 +813,24 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { return address; } - void doAccept(SelectionKey key) throws IOException, OutOfMemoryError { - Connection c; + void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfMemoryError { ServerSocketChannel server = (ServerSocketChannel) key.channel(); - SocketChannel channel; while ((channel = server.accept()) != null) { - try { - channel.configureBlocking(false); - channel.socket().setTcpNoDelay(tcpNoDelay); - channel.socket().setKeepAlive(tcpKeepAlive); - } catch (IOException ioe) { - channel.close(); - throw ioe; - } - + channel.configureBlocking(false); + channel.socket().setTcpNoDelay(tcpNoDelay); + channel.socket().setKeepAlive(tcpKeepAlive); Reader reader = getReader(); - try { - reader.startAdd(); - SelectionKey readKey = reader.registerChannel(channel); - c = getConnection(channel, System.currentTimeMillis()); - readKey.attach(c); - synchronized (connectionList) { - connectionList.add(numConnections, c); - numConnections++; + Connection c = connectionManager.register(channel); + // If the connectionManager can't take it, close the connection. + if (c == null) { + if (channel.isOpen()) { + IOUtils.cleanup(null, channel); } - if (LOG.isDebugEnabled()) - LOG.debug(getName() + ": connection from " + c.toString() + - "; # active connections: " + numConnections); - } finally { - reader.finishAdd(); + continue; } + key.attach(c); // so closeCurrentConnection can get the object + reader.addConnection(c); } } @@ -919,12 +843,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { c.setLastContact(System.currentTimeMillis()); try { count = c.readAndProcess(); - - if (count > 0) { - c.setLastContact(System.currentTimeMillis()); - } - } catch (InterruptedException ieo) { + LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo); throw ieo; } catch (Exception e) { if (LOG.isDebugEnabled()) { @@ -933,12 +853,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { count = -1; //so that the (count < 0) block is executed } if (count < 0) { - if (LOG.isDebugEnabled()) { - LOG.debug(getName() + ": DISCONNECTING client " + c.toString() + - " because read count=" + count + - ". 
Number of active connections: " + numConnections); - } closeConnection(c); + c = null; + } else { + c.setLastContact(System.currentTimeMillis()); } } @@ -957,6 +875,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { readPool.shutdownNow(); } + synchronized Selector getSelector() { return selector; } + // The method that will return the next reader to work with // Simplistic implementation of round robin for now Reader getReader() { @@ -1355,6 +1275,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { return null; } + public long getLastContact() { + return lastContact; + } + /* Return true if the connection has no outstanding rpc */ private boolean isIdle() { return rpcCount.get() == 0; @@ -1370,10 +1294,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { rpcCount.increment(); } - protected boolean timedOut(long currentTime) { - return isIdle() && currentTime - lastContact > maxIdleTime; - } - private UserGroupInformation getAuthorizedUgi(String authorizedId) throws IOException { UserGroupInformation authorizedUgi; @@ -1883,7 +1803,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { } // Enforcing the call queue size, this triggers a retry in the client // This is a bit late to be doing this check - we have already read in the total request. - if ((totalRequestSize + callQueueSize.get()) > maxQueueSize) { + if ((totalRequestSize + callQueueSizeInBytes.get()) > maxQueueSizeInBytes) { final Call callTooBig = new Call(id, this.service, null, null, null, null, this, responder, totalRequestSize, null, null, 0); @@ -1954,7 +1874,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { totalRequestSize, traceInfo, this.addr, timeout); if (!scheduler.dispatch(new CallRunner(RpcServer.this, call))) { - callQueueSize.add(-1 * call.getSize()); + callQueueSizeInBytes.add(-1 * call.getSize()); ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream(); metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION); @@ -2093,12 +2013,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { this.bindAddress = bindAddress; this.conf = conf; this.socketSendBufferSize = 0; - this.maxQueueSize = - this.conf.getInt("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE); + // See declaration above for documentation on what this size is. 
+ this.maxQueueSizeInBytes = + this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE); this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size", 10); - this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 1000); - this.maxConnectionsToNuke = conf.getInt("hbase.ipc.client.kill.max", 10); - this.thresholdIdleConnections = conf.getInt("hbase.ipc.client.idlethreshold", 4000); this.purgeTimeout = conf.getLong("hbase.ipc.client.call.purge.timeout", 2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT); this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME); @@ -2120,6 +2038,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { // Create the responder here responder = new Responder(); + connectionManager = new ConnectionManager(); this.authorize = conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false); this.userProvider = UserProvider.instantiate(conf); this.isSecurityEnabled = userProvider.isHBaseSecurityEnabled(); @@ -2177,12 +2096,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { } protected void closeConnection(Connection connection) { - synchronized (connectionList) { - if (connectionList.remove(connection)) { - numConnections--; - } - } - connection.close(); + connectionManager.close(connection); } Configuration getConf() { @@ -2440,7 +2354,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { @Override public void addCallSize(final long diff) { - this.callQueueSize.add(diff); + this.callQueueSizeInBytes.add(diff); } /** @@ -2578,6 +2492,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { } /** + * The number of open RPC connections + * @return the number of open rpc connections + */ + public int getNumOpenConnections() { + return connectionManager.size(); + } + + /** * Returns the username for any user associated with the current RPC * request or null if no user is set.
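Several of the IPC tuning keys touched by this patch show up in these hunks: the call-queue ceiling is now read as a long, while the reader count and per-reader connection backlog stay plain ints. A hedged example of setting them programmatically (the values shown are simply the defaults used in the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class IpcTuningExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Ceiling, in bytes, on queued-plus-running calls; beyond it new calls are pushed back.
        conf.setLong("hbase.ipc.server.max.callqueue.size", 1024L * 1024 * 1024);
        // Number of reader threads, and how many accepted connections may wait per reader.
        conf.setInt("hbase.ipc.server.read.threadpool.size", 10);
        conf.setInt("hbase.ipc.server.read.connection-queue.size", 100);
      }
    }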
*/ @@ -2695,4 +2617,149 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { public RpcScheduler getScheduler() { return scheduler; } + + private class ConnectionManager { + final private AtomicInteger count = new AtomicInteger(); + final private Set connections; + + final private Timer idleScanTimer; + final private int idleScanThreshold; + final private int idleScanInterval; + final private int maxIdleTime; + final private int maxIdleToClose; + + ConnectionManager() { + this.idleScanTimer = new Timer("RpcServer idle connection scanner for port " + port, true); + this.idleScanThreshold = conf.getInt("hbase.ipc.client.idlethreshold", 4000); + this.idleScanInterval = + conf.getInt("hbase.ipc.client.connection.idle-scan-interval.ms", 10000); + this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); + this.maxIdleToClose = conf.getInt("hbase.ipc.client.kill.max", 10); + int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, + HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); + int maxConnectionQueueSize = + handlerCount * conf.getInt("hbase.ipc.server.handler.queue.size", 100); + // create a set with concurrency -and- a thread-safe iterator, add 2 + // for listener and idle closer threads + this.connections = Collections.newSetFromMap( + new ConcurrentHashMap( + maxConnectionQueueSize, 0.75f, readThreads+2)); + } + + private boolean add(Connection connection) { + boolean added = connections.add(connection); + if (added) { + count.getAndIncrement(); + } + return added; + } + + private boolean remove(Connection connection) { + boolean removed = connections.remove(connection); + if (removed) { + count.getAndDecrement(); + } + return removed; + } + + int size() { + return count.get(); + } + + Connection[] toArray() { + return connections.toArray(new Connection[0]); + } + + Connection register(SocketChannel channel) { + Connection connection = new Connection(channel, System.currentTimeMillis()); + add(connection); + if (LOG.isDebugEnabled()) { + LOG.debug("Server connection from " + connection + + "; connections=" + size() + + ", queued calls size (bytes)=" + callQueueSizeInBytes.get() + + ", general queued calls=" + scheduler.getGeneralQueueLength() + + ", priority queued calls=" + scheduler.getPriorityQueueLength()); + } + return connection; + } + + boolean close(Connection connection) { + boolean exists = remove(connection); + if (exists) { + if (LOG.isDebugEnabled()) { + LOG.debug(Thread.currentThread().getName() + + ": disconnecting client " + connection + + ". 
Number of active connections: "+ size()); + } + // only close if actually removed to avoid double-closing due + // to possible races + connection.close(); + } + return exists; + } + + // synch'ed to avoid explicit invocation upon OOM from colliding with + // timer task firing + synchronized void closeIdle(boolean scanAll) { + long minLastContact = System.currentTimeMillis() - maxIdleTime; + // concurrent iterator might miss new connections added + // during the iteration, but that's ok because they won't + // be idle yet anyway and will be caught on next scan + int closed = 0; + for (Connection connection : connections) { + // stop if connections dropped below threshold unless scanning all + if (!scanAll && size() < idleScanThreshold) { + break; + } + // stop if not scanning all and max connections are closed + if (connection.isIdle() && + connection.getLastContact() < minLastContact && + close(connection) && + !scanAll && (++closed == maxIdleToClose)) { + break; + } + } + } + + void closeAll() { + // use a copy of the connections to be absolutely sure the concurrent + // iterator doesn't miss a connection + for (Connection connection : toArray()) { + close(connection); + } + } + + void startIdleScan() { + scheduleIdleScanTask(); + } + + void stopIdleScan() { + idleScanTimer.cancel(); + } + + private void scheduleIdleScanTask() { + if (!running) { + return; + } + TimerTask idleScanTask = new TimerTask(){ + @Override + public void run() { + if (!running) { + return; + } + if (LOG.isDebugEnabled()) { + LOG.debug(Thread.currentThread().getName()+": task running"); + } + try { + closeIdle(false); + } finally { + // explicitly reschedule so next execution occurs relative + // to the end of this scan, not the beginning + scheduleIdleScanTask(); + } + } + }; + idleScanTimer.schedule(idleScanTask, idleScanInterval); + } + } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java index 431aeeb..d9d61c1 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java @@ -34,8 +34,11 @@ import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.util.BoundedPriorityBlockingQueue; /** - * A scheduler that maintains isolated handler pools for general, - * high-priority, and replication requests. + * The default scheduler. Configurable. Maintains isolated handler pools for general ('default'), + * high-priority ('priority'), and replication ('replication') requests. Default behavior is to + * balance the requests across handlers. Add configs to enable balancing by read vs writes, etc. + * See below article for explanation of options. 
+ * @see Overview on Request Queuing */ @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) @InterfaceStability.Evolving @@ -49,7 +52,8 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs public static final String CALL_QUEUE_HANDLER_FACTOR_CONF_KEY = "hbase.ipc.server.callqueue.handler.factor"; - /** If set to 'deadline', uses a priority queue and deprioritize long-running scans */ + /** If set to 'deadline', the default, uses a priority queue and deprioritizes long-running scans + */ public static final String CALL_QUEUE_TYPE_CONF_KEY = "hbase.ipc.server.callqueue.type"; public static final String CALL_QUEUE_TYPE_CODEL_CONF_VALUE = "codel"; public static final String CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE = "deadline"; @@ -190,54 +194,58 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs float callQueuesHandlersFactor = conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0); int numCallQueues = Math.max(1, (int)Math.round(handlerCount * callQueuesHandlersFactor)); - - LOG.info("Using " + callQueueType + " as user call queue, count=" + numCallQueues); - + LOG.info("Using " + callQueueType + " as user call queue; numCallQueues=" + numCallQueues + + "; callQReadShare=" + callqReadShare + ", callQScanShare=" + callqScanShare); if (numCallQueues > 1 && callqReadShare > 0) { // multiple read/write queues - if (callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE)) { + if (isDeadlineQueueType(callQueueType)) { CallPriorityComparator callPriority = new CallPriorityComparator(conf, this.priority); - callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount, numCallQueues, + callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount, numCallQueues, callqReadShare, callqScanShare, maxQueueLength, conf, abortable, BoundedPriorityBlockingQueue.class, callPriority); } else if (callQueueType.equals(CALL_QUEUE_TYPE_CODEL_CONF_VALUE)) { Object[] callQueueInitArgs = {maxQueueLength, codelTargetDelay, codelInterval, codelLifoThreshold, numGeneralCallsDropped, numLifoModeSwitches}; - callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount, + callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount, numCallQueues, callqReadShare, callqScanShare, AdaptiveLifoCoDelCallQueue.class, callQueueInitArgs, AdaptiveLifoCoDelCallQueue.class, callQueueInitArgs); } else { - callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount, numCallQueues, + callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount, numCallQueues, callqReadShare, callqScanShare, maxQueueLength, conf, abortable); } } else { // multiple queues - if (callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE)) { + if (isDeadlineQueueType(callQueueType)) { CallPriorityComparator callPriority = new CallPriorityComparator(conf, this.priority); - callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount, numCallQueues, - conf, abortable, BoundedPriorityBlockingQueue.class, maxQueueLength, callPriority); + callExecutor = + new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount, numCallQueues, + conf, abortable, BoundedPriorityBlockingQueue.class, maxQueueLength, callPriority); } else if (callQueueType.equals(CALL_QUEUE_TYPE_CODEL_CONF_VALUE)) { - callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount, numCallQueues, - conf, abortable, AdaptiveLifoCoDelCallQueue.class, maxQueueLength, - codelTargetDelay, codelInterval, codelLifoThreshold, - numGeneralCallsDropped, 
numLifoModeSwitches); + callExecutor = + new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount, numCallQueues, + conf, abortable, AdaptiveLifoCoDelCallQueue.class, maxQueueLength, + codelTargetDelay, codelInterval, codelLifoThreshold, + numGeneralCallsDropped, numLifoModeSwitches); } else { - callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount, + callExecutor = new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount, numCallQueues, maxQueueLength, conf, abortable); } } - // Create 2 queues to help priorityExecutor be more scalable. this.priorityExecutor = priorityHandlerCount > 0 ? - new BalancedQueueRpcExecutor("Priority", priorityHandlerCount, 2, maxPriorityQueueLength) : - null; - + new BalancedQueueRpcExecutor("BalancedQ.priority", priorityHandlerCount, 2, + maxPriorityQueueLength): + null; this.replicationExecutor = - replicationHandlerCount > 0 ? new BalancedQueueRpcExecutor("Replication", + replicationHandlerCount > 0 ? new BalancedQueueRpcExecutor("BalancedQ.replication", replicationHandlerCount, 1, maxQueueLength, conf, abortable) : null; } + private static boolean isDeadlineQueueType(final String callQueueType) { + return callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE); + } + public SimpleRpcScheduler( Configuration conf, int handlerCount, diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index b7f67e6..7668ac9 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -28,6 +28,7 @@ import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.TreeMap; import java.util.UUID; @@ -401,7 +402,7 @@ public class Import extends Configured implements Tool { filter = instantiateFilter(conf); String durabilityStr = conf.get(WAL_DURABILITY); if(durabilityStr != null){ - durability = Durability.valueOf(durabilityStr.toUpperCase()); + durability = Durability.valueOf(durabilityStr.toUpperCase(Locale.ROOT)); LOG.info("setting WAL durability to " + durability); } else { LOG.info("setting WAL durability to default."); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java index dfbe648..cf334db 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java @@ -124,8 +124,9 @@ public class JarFinder { jarDir)); } } - JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile)); - jarDir(dir, "", zos); + try (JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile))) { + jarDir(dir, "", zos); + } } /** diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index 0084878..a23d739 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -25,6 +25,32 @@ import com.google.common.collect.Multimap; import com.google.common.collect.Multimaps; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import 
java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Deque; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + import org.apache.commons.lang.mutable.MutableInt; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -45,7 +71,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RegionServerCallable; import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; @@ -76,32 +101,6 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Deque; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.TreeMap; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - /** * Tool to load the output of HFileOutputFormat into an existing table. * @see #usage() @@ -165,7 +164,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { + "\n"); } - private static interface BulkHFileVisitor { + private interface BulkHFileVisitor { TFamily bulkFamily(final byte[] familyName) throws IOException; void bulkHFile(final TFamily family, final FileStatus hfileStatus) @@ -308,25 +307,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * pre-existing table. This method is not threadsafe. * * @param hfofDir the directory that was provided as the output path - * of a job using HFileOutputFormat - * @param table the table to load into - * @throws TableNotFoundException if table does not yet exist - */ - @SuppressWarnings("deprecation") - public void doBulkLoad(Path hfofDir, final HTable table) - throws TableNotFoundException, IOException { - try (Admin admin = table.getConnection().getAdmin(); - RegionLocator rl = table.getRegionLocator()) { - doBulkLoad(hfofDir, admin, table, rl); - } - } - - /** - * Perform a bulk load of the given directory into the given - * pre-existing table. 
This method is not threadsafe. - * - * @param hfofDir the directory that was provided as the output path - * of a job using HFileOutputFormat + * of a job using HFileOutputFormat * @param table the table to load into * @throws TableNotFoundException if table does not yet exist */ @@ -341,7 +322,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { // LQI queue does not need to be threadsafe -- all operations on this queue // happen in this thread - Deque queue = new LinkedList(); + Deque queue = new LinkedList<>(); try { /* * Checking hfile format is a time-consuming operation, we should have an option to skip @@ -426,8 +407,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } if (queue != null && !queue.isEmpty()) { - throw new RuntimeException("Bulk load aborted with some files not yet loaded." - + "Please check log for more details."); + throw new RuntimeException("Bulk load aborted with some files not yet loaded." + + "Please check log for more details."); } } @@ -463,7 +444,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { private void validateFamiliesInHFiles(Table table, Deque queue) throws IOException { Collection families = table.getTableDescriptor().getFamilies(); - List familyNames = new ArrayList(families.size()); + List familyNames = new ArrayList<>(families.size()); for (HColumnDescriptor family : families) { familyNames.add(family.getNameAsString()); } @@ -520,7 +501,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { ExecutorService pool, Deque queue, final Multimap regionGroups) throws IOException { // atomically bulk load the groups. - Set>> loadingFutures = new HashSet>>(); + Set>> loadingFutures = new HashSet<>(); for (Entry> e: regionGroups.asMap().entrySet()){ final byte[] first = e.getKey().array(); final Collection lqis = e.getValue(); @@ -563,9 +544,9 @@ public class LoadIncrementalHFiles extends Configured implements Tool { private boolean checkHFilesCountPerRegionPerFamily( final Multimap regionGroups) { for (Entry> e: regionGroups.asMap().entrySet()) { + ? extends Collection> e: regionGroups.asMap().entrySet()) { final Collection lqis = e.getValue(); - HashMap filesMap = new HashMap(); + HashMap filesMap = new HashMap<>(); for (LoadQueueItem lqi: lqis) { MutableInt count = filesMap.get(lqi.family); if (count == null) { @@ -597,7 +578,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { final Multimap regionGroups = Multimaps.synchronizedMultimap(rgs); // drain LQIs and figure out bulk load groups - Set>> splittingFutures = new HashSet>>(); + Set>> splittingFutures = new HashSet<>(); while (!queue.isEmpty()) { final LoadQueueItem item = queue.remove(); @@ -650,7 +631,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } LOG.info("HFile at " + hfilePath + " no longer fits inside a single " + - "region. Splitting..."); + "region. Splitting..."); String uniqueName = getUniqueName(); HColumnDescriptor familyDesc = table.getTableDescriptor().getFamily(item.family); @@ -692,7 +673,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * LQI's corresponding to the resultant hfiles. * * protected for testing - * @throws IOException + * @throws IOException if an IO failure is encountered */ protected List groupOrSplit(Multimap regionGroups, final LoadQueueItem item, final Table table, @@ -786,13 +767,13 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * Protected for testing. 
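With the HTable-based doBulkLoad overload removed above, callers obtain Admin, Table and RegionLocator from a Connection, exactly as this tool's own run() method does further down. A hedged usage sketch (table name and directory are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

    public class BulkLoadExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("my_table");   // placeholder
        Path hfofDir = new Path("/tmp/hfof-output");            // placeholder HFileOutputFormat dir
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin();
             Table table = connection.getTable(tableName);
             RegionLocator locator = connection.getRegionLocator(tableName)) {
          new LoadIncrementalHFiles(conf).doBulkLoad(hfofDir, admin, table, locator);
        }
      }
    }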
* * @return empty list if success, list of items to retry on recoverable - * failure + * failure */ protected List tryAtomicRegionLoad(final Connection conn, final TableName tableName, final byte[] first, final Collection lqis) throws IOException { final List> famPaths = - new ArrayList>(lqis.size()); + new ArrayList<>(lqis.size()); for (LoadQueueItem lqi : lqis) { famPaths.add(Pair.newPair(lqi.family, lqi.hfilePath.toString())); } @@ -857,7 +838,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { }; try { - List toRetry = new ArrayList(); + List toRetry = new ArrayList<>(); Configuration conf = getConf(); boolean success = RpcRetryingCallerFactory.instantiate(conf, null). newCaller() @@ -890,8 +871,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { static void splitStoreFile( Configuration conf, Path inFile, HColumnDescriptor familyDesc, byte[] splitKey, - Path bottomOut, Path topOut) throws IOException - { + Path bottomOut, Path topOut) throws IOException { // Open reader with no block cache, and not in-memory Reference topReference = Reference.createTopReference(splitKey); Reference bottomReference = Reference.createBottomReference(splitKey); @@ -944,8 +924,12 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } } } finally { - if (halfWriter != null) halfWriter.close(); - if (halfReader != null) halfReader.close(cacheConf.shouldEvictOnClose()); + if (halfWriter != null) { + halfWriter.close(); + } + if (halfReader != null) { + halfReader.close(cacheConf.shouldEvictOnClose()); + } } } @@ -972,16 +956,20 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * 2) Return the boundary list. */ public static byte[][] inferBoundaries(TreeMap bdryMap) { - ArrayList keysArray = new ArrayList(); + ArrayList keysArray = new ArrayList<>(); int runningValue = 0; byte[] currStartKey = null; boolean firstBoundary = true; for (Map.Entry item: bdryMap.entrySet()) { - if (runningValue == 0) currStartKey = item.getKey(); + if (runningValue == 0) { + currStartKey = item.getKey(); + } runningValue += item.getValue(); if (runningValue == 0) { - if (!firstBoundary) keysArray.add(currStartKey); + if (!firstBoundary) { + keysArray.add(currStartKey); + } firstBoundary = false; } } @@ -1000,7 +988,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { // Add column families // Build a set of keys final HTableDescriptor htd = new HTableDescriptor(tableName); - final TreeMap map = new TreeMap(Bytes.BYTES_COMPARATOR); + final TreeMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor() { @Override public HColumnDescriptor bulkFamily(final byte[] familyName) { @@ -1073,8 +1061,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { Path hfofDir = new Path(dirPath); try (Table table = connection.getTable(tableName); - RegionLocator locator = connection.getRegionLocator(tableName)) { - doBulkLoad(hfofDir, admin, table, locator); + RegionLocator locator = connection.getRegionLocator(tableName)) { + doBulkLoad(hfofDir, admin, table, locator); } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java index be20d90..7ad68ea 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java @@ -21,6 +21,7 @@ 
package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Locale; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -253,7 +254,7 @@ implements Configurable { @Override public List getSplits(JobContext context) throws IOException { List splits = super.getSplits(context); - if ((conf.get(SHUFFLE_MAPS) != null) && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase())) { + if ((conf.get(SHUFFLE_MAPS) != null) && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT))) { Collections.shuffle(splits); } return splits; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index a452036..655c71a 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.mapreduce.replication; import java.io.IOException; +import java.util.Arrays; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -35,6 +36,9 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.TableInputFormat; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; @@ -77,6 +81,7 @@ public class VerifyReplication extends Configured implements Tool { static String tableName = null; static String families = null; static String peerId = null; + static String rowPrefixes = null; private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; @@ -123,6 +128,8 @@ public class VerifyReplication extends Configured implements Tool { scan.addFamily(Bytes.toBytes(fam)); } } + String rowPrefixes = conf.get(NAME + ".rowPrefixes", null); + setRowPrefixFilter(scan, rowPrefixes); scan.setTimeRange(startTime, endTime); int versions = conf.getInt(NAME+".versions", -1); LOG.info("Setting number of version inside map as: " + versions); @@ -271,6 +278,9 @@ public class VerifyReplication extends Configured implements Tool { if (families != null) { conf.set(NAME+".families", families); } + if (rowPrefixes != null){ + conf.set(NAME+".rowPrefixes", rowPrefixes); + } Pair peerConfigPair = getPeerQuorumConfig(conf); ReplicationPeerConfig peerConfig = peerConfigPair.getFirst(); @@ -299,6 +309,9 @@ public class VerifyReplication extends Configured implements Tool { scan.addFamily(Bytes.toBytes(fam)); } } + + setRowPrefixFilter(scan, rowPrefixes); + TableMapReduceUtil.initTableMapperJob(tableName, scan, Verifier.class, null, null, job); @@ -311,11 +324,38 @@ public class VerifyReplication extends Configured implements Tool { return job; } + private static void setRowPrefixFilter(Scan scan, String rowPrefixes) { + if (rowPrefixes != null && !rowPrefixes.isEmpty()) { + String[] rowPrefixArray = rowPrefixes.split(","); + Arrays.sort(rowPrefixArray); + FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ONE); + for (String prefix : rowPrefixArray) { + Filter filter = new 
PrefixFilter(Bytes.toBytes(prefix)); + filterList.addFilter(filter); + } + scan.setFilter(filterList); + byte[] startPrefixRow = Bytes.toBytes(rowPrefixArray[0]); + byte[] lastPrefixRow = Bytes.toBytes(rowPrefixArray[rowPrefixArray.length -1]); + setStartAndStopRows(scan, startPrefixRow, lastPrefixRow); + } + } + + private static void setStartAndStopRows(Scan scan, byte[] startPrefixRow, byte[] lastPrefixRow) { + scan.setStartRow(startPrefixRow); + byte[] stopRow = Bytes.add(Bytes.head(lastPrefixRow, lastPrefixRow.length - 1), + new byte[]{(byte) (lastPrefixRow[lastPrefixRow.length - 1] + 1)}); + scan.setStopRow(stopRow); + } + private static boolean doCommandLine(final String[] args) { if (args.length < 2) { printUsage(null); return false; } + //in case we've been run before, restore all parameters to their initial states + //Otherwise, if our previous run included a parameter not in args this time, + //we might hold on to the old value. + restoreDefaults(); try { for (int i = 0; i < args.length; i++) { String cmd = args[i]; @@ -354,6 +394,12 @@ public class VerifyReplication extends Configured implements Tool { continue; } + final String rowPrefixesKey = "--row-prefixes="; + if (cmd.startsWith(rowPrefixesKey)){ + rowPrefixes = cmd.substring(rowPrefixesKey.length()); + continue; + } + if (i == args.length-2) { peerId = cmd; } @@ -370,6 +416,17 @@ public class VerifyReplication extends Configured implements Tool { return true; } + private static void restoreDefaults() { + startTime = 0; + endTime = Long.MAX_VALUE; + batch = Integer.MAX_VALUE; + versions = -1; + tableName = null; + families = null; + peerId = null; + rowPrefixes = null; + } + /* * @param errorMsg Error message. Can be null. */ @@ -378,7 +435,7 @@ public class VerifyReplication extends Configured implements Tool { System.err.println("ERROR: " + errorMsg); } System.err.println("Usage: verifyrep [--starttime=X]" + - " [--stoptime=Y] [--families=A] "); + " [--stoptime=Y] [--families=A] [--row-prefixes=B] "); System.err.println(); System.err.println("Options:"); System.err.println(" starttime beginning of the time range"); @@ -386,6 +443,7 @@ public class VerifyReplication extends Configured implements Tool { System.err.println(" endtime end of the time range"); System.err.println(" versions number of cell versions to verify"); System.err.println(" families comma-separated list of families to copy"); + System.err.println(" row-prefixes comma-separated list of row key prefixes to filter on "); System.err.println(); System.err.println("Args:"); System.err.println(" peerid Id of the peer used for verification, must match the one given for replication"); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index bcf7b7a..f16463f 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -471,9 +471,9 @@ public class AssignmentManager { } if (!failover) { // If any region except meta is in transition on a live server, it's a failover. 
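Circling back to the VerifyReplication helpers added just above (setRowPrefixFilter and setStartAndStopRows): the prefixes are sorted, each one becomes a PrefixFilter inside a MUST_PASS_ONE FilterList, and the scan is bounded to [first prefix, last prefix with its final byte incremented). A small worked illustration of that bound (prefix values are made up):

    import org.apache.hadoop.hbase.util.Bytes;

    public class PrefixRangeExample {
      public static void main(String[] args) {
        // e.g. --row-prefixes=abc,aaa  ->  sorted: ["aaa", "abc"]
        byte[] startRow = Bytes.toBytes("aaa");
        byte[] last = Bytes.toBytes("abc");
        // Same arithmetic as setStartAndStopRows: keep all but the last byte, then add one to it.
        byte[] stopRow = Bytes.add(Bytes.head(last, last.length - 1),
            new byte[] { (byte) (last[last.length - 1] + 1) });
        System.out.println(Bytes.toString(startRow)); // aaa (inclusive lower bound)
        System.out.println(Bytes.toString(stopRow));  // abd (exclusive upper bound)
      }
    }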
- Map regionsInTransition = regionStates.getRegionsInTransition(); + Set regionsInTransition = regionStates.getRegionsInTransition(); if (!regionsInTransition.isEmpty()) { - for (RegionState regionState: regionsInTransition.values()) { + for (RegionState regionState: regionsInTransition) { ServerName serverName = regionState.getServerName(); if (!regionState.getRegion().isMetaRegion() && serverName != null && onlineServers.contains(serverName)) { @@ -542,7 +542,7 @@ public class AssignmentManager { } } } - processRegionsInTransition(regionStates.getRegionsInTransition().values()); + processRegionsInTransition(regionStates.getRegionsInTransition()); } // Now we can safely claim failover cleanup completed and enable @@ -2010,7 +2010,7 @@ public class AssignmentManager { long oldestRITTime = 0; int ritThreshold = this.server.getConfiguration(). getInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 60000); - for (RegionState state: regionStates.getRegionsInTransition().values()) { + for (RegionState state: regionStates.getRegionsInTransition()) { totalRITs++; long ritTime = currentTime - state.getStamp(); if (ritTime > ritThreshold) { // more than the threshold diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index b9abc65..c93b307 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; +import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Triple; /** @@ -86,7 +87,17 @@ public class CatalogJanitor extends ScheduledChore { * @param enabled */ public boolean setEnabled(final boolean enabled) { - return this.enabled.getAndSet(enabled); + boolean alreadyEnabled = this.enabled.getAndSet(enabled); + // If disabling is requested on an already enabled chore, we could have an active + // scan still going on, callers might not be aware of that and do further action thinking + // that no action would result from this chore. In this case, the right action is to wait for + // the active scan to complete before exiting this function. + if (!enabled && alreadyEnabled) { + while (alreadyRunning.get()) { + Threads.sleepWithoutInterrupt(100); + } + } + return alreadyEnabled; } boolean getEnabled() { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 109097b..d368ffb 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1254,7 +1254,7 @@ public class HMaster extends HRegionServer implements MasterServices { if (!this.loadBalancerTracker.isBalancerOn()) return false; // Only allow one balance run at at time. if (this.assignmentManager.getRegionStates().isRegionsInTransition()) { - Map regionsInTransition = + Set regionsInTransition = this.assignmentManager.getRegionStates().getRegionsInTransition(); // if hbase:meta region is in transition, result of assignment cannot be recorded // ignore the force flag in that case @@ -2152,7 +2152,7 @@ public class HMaster extends HRegionServer implements MasterServices { String clusterId = fileSystemManager != null ?
fileSystemManager.getClusterId().toString() : null; - Map regionsInTransition = assignmentManager != null ? + Set regionsInTransition = assignmentManager != null ? assignmentManager.getRegionStates().getRegionsInTransition() : null; String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null; boolean balancerOn = loadBalancerTracker != null ? @@ -2378,10 +2378,6 @@ public class HMaster extends HRegionServer implements MasterServices { return this.initializationBeforeMetaAssignment; } - public void assignRegion(HRegionInfo hri) { - assignmentManager.assign(hri); - } - /** * Compute the average load across all region servers. * Currently, this uses a very naive computation - just uses the number of diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java index fc5b474..a921ab5 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java @@ -24,6 +24,7 @@ import java.io.PrintStream; import java.io.PrintWriter; import java.util.Date; import java.util.Map; +import java.util.Set; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; @@ -116,11 +117,9 @@ public class MasterDumpServlet extends StateDumpServlet { return; } - Map regionsInTransition = - am.getRegionStates().getRegionsInTransition(); - for (Map.Entry e : regionsInTransition.entrySet()) { - String rid = e.getKey(); - RegionState rs = e.getValue(); + Set regionsInTransition = am.getRegionStates().getRegionsInTransition(); + for (RegionState rs : regionsInTransition) { + String rid = rs.getRegion().getRegionNameAsString(); out.println("Region " + rid + ": " + rs.toDescriptiveString()); } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java index bc5173a..82e28df 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java @@ -144,7 +144,7 @@ public class RegionStateStore { if (metaRegion == null) { Configuration conf = server.getConfiguration(); // Config to determine the no of HConnections to META. - // A single HConnection should be sufficient in most cases. Only if + // A single Connection should be sufficient in most cases. Only if // you are doing lot of writes (>1M) to META, // increasing this value might improve the write throughput. 
multiHConnection = diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index be9758a..9da8033 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -28,9 +30,9 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.Collections; -import java.util.Comparator; +import java.util.SortedSet; import java.util.TreeMap; +import java.util.TreeSet; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -63,6 +65,15 @@ import org.apache.hadoop.hbase.util.Pair; public class RegionStates { private static final Log LOG = LogFactory.getLog(RegionStates.class); + public final static RegionStateStampComparator REGION_STATE_COMPARATOR = + new RegionStateStampComparator(); + private static class RegionStateStampComparator implements Comparator { + @Override + public int compare(RegionState l, RegionState r) { + return Long.compare(l.getStamp(), r.getStamp()); + } + } + /** * Regions currently in transition. */ @@ -205,31 +216,16 @@ public class RegionStates { /** * Get regions in transition and their states */ - @SuppressWarnings("unchecked") - public synchronized Map getRegionsInTransition() { - return (Map)regionsInTransition.clone(); + public synchronized Set getRegionsInTransition() { + return new HashSet(regionsInTransition.values()); } - @SuppressWarnings("unchecked") - public synchronized Map getRegionsInTransitionOrderedByTimestamp() { - Map rit = (Map)regionsInTransition.clone(); - List> list = new LinkedList<>(rit.entrySet()); - - // Compare the RITs' timestamps for ordering. 
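Since getRegionsInTransition() now hands back a Set and the ordered variant a SortedSet sorted by transition timestamp, callers iterate the states directly instead of walking map entries. A hypothetical helper in that style (not from the patch):

    import java.util.SortedSet;

    import org.apache.hadoop.hbase.master.RegionState;
    import org.apache.hadoop.hbase.master.RegionStates;

    public class RitReport {
      /** Print regions in transition, oldest first, using only the new Set-returning API. */
      public static void dump(RegionStates regionStates) {
        System.out.println("Regions in transition: "
            + regionStates.getRegionsInTransition().size());
        SortedSet<RegionState> byAge = regionStates.getRegionsInTransitionOrderedByTimestamp();
        for (RegionState rs : byAge) {
          System.out.println(rs.getRegion().getRegionNameAsString()
              + " (stamp=" + rs.getStamp() + "): " + rs.toDescriptiveString());
        }
      }
    }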
- Comparator> c = - new Comparator>() { - @Override - public int compare(Map.Entry o1, Map.Entry o2) { - return ((Long)o1.getValue().getStamp()).compareTo((Long)o2.getValue().getStamp()); - } - }; - - Collections.sort(list, c); - Map result = new LinkedHashMap<>(); - for (Map.Entry entry : list) { - result.put(entry.getKey(), entry.getValue()); + public synchronized SortedSet getRegionsInTransitionOrderedByTimestamp() { + final TreeSet rit = new TreeSet(REGION_STATE_COMPARATOR); + for (RegionState rs: regionsInTransition.values()) { + rit.add(rs); } - return result; + return rit; } /** diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index 6a28006..f52dbdf 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -68,7 +68,7 @@ import com.google.common.collect.Sets; * */ public abstract class BaseLoadBalancer implements LoadBalancer { - private static final int MIN_SERVER_BALANCE = 2; + protected static final int MIN_SERVER_BALANCE = 2; private volatile boolean stopped = false; private static final List EMPTY_REGION_LIST = new ArrayList(0); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index ab075db..181990b 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction; @@ -107,6 +108,8 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { protected static final String KEEP_REGION_LOADS = "hbase.master.balancer.stochastic.numRegionLoadsToRemember"; private static final String TABLE_FUNCTION_SEP = "_"; + protected static final String MIN_COST_NEED_BALANCE_KEY = + "hbase.master.balancer.stochastic.minCostNeedBalance"; private static final Random RANDOM = new Random(System.currentTimeMillis()); private static final Log LOG = LogFactory.getLog(StochasticLoadBalancer.class); @@ -118,6 +121,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private int stepsPerRegion = 800; private long maxRunningTime = 30 * 1000 * 1; // 30 seconds. 
private int numRegionLoadsToRemember = 15; + private float minCostNeedBalance = 0.05f; private CandidateGenerator[] candidateGenerators; private CostFromRegionLoadFunction[] regionLoadFunctions; @@ -163,6 +167,8 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { numRegionLoadsToRemember = conf.getInt(KEEP_REGION_LOADS, numRegionLoadsToRemember); isByTable = conf.getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, isByTable); + minCostNeedBalance = conf.getFloat(MIN_COST_NEED_BALANCE_KEY, minCostNeedBalance); + if (localityCandidateGenerator == null) { localityCandidateGenerator = new LocalityBasedCandidateGenerator(services); } @@ -258,6 +264,41 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { } @Override + protected boolean needsBalance(Cluster cluster) { + ClusterLoadState cs = new ClusterLoadState(cluster.clusterState); + if (cs.getNumServers() < MIN_SERVER_BALANCE) { + if (LOG.isDebugEnabled()) { + LOG.debug("Not running balancer because only " + cs.getNumServers() + + " active regionserver(s)"); + } + return false; + } + if (areSomeRegionReplicasColocated(cluster)) { + return true; + } + + double total = 0.0; + float sumMultiplier = 0.0f; + for (CostFunction c : costFunctions) { + float multiplier = c.getMultiplier(); + if (multiplier <= 0) { + continue; + } + sumMultiplier += multiplier; + total += c.cost() * multiplier; + } + + if (total <= 0 || sumMultiplier <= 0 + || (sumMultiplier > 0 && (total / sumMultiplier) < minCostNeedBalance)) { + LOG.info("Skipping load balancing because balanced cluster; " + "total cost is " + total + + ", sum multiplier is " + sumMultiplier + " min cost which need balance is " + + minCostNeedBalance); + return false; + } + return true; + } + + @Override public synchronized List balanceCluster(TableName tableName, Map> clusterState) { this.tableName = tableName; @@ -298,19 +339,21 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { // Keep track of servers to iterate through them. 
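The needsBalance() override above introduces a cheap early exit: it computes the weighted cost total as the sum of cost(i) * multiplier(i) over the enabled cost functions and skips balancing when total / sumMultiplier falls below minCostNeedBalance (0.05 by default, configurable through hbase.master.balancer.stochastic.minCostNeedBalance). A tiny arithmetic illustration with made-up numbers:

    public class MinCostGateExample {
      public static void main(String[] args) {
        // Hypothetical (cost, multiplier) pairs for three cost functions.
        double[][] costs = { { 0.02, 500 }, { 0.10, 5 }, { 0.00, 100 } };
        double total = 0;
        double sumMultiplier = 0;
        for (double[] c : costs) {
          total += c[0] * c[1];     // cost * multiplier
          sumMultiplier += c[1];
        }
        double minCostNeedBalance = 0.05;          // default from this patch
        double ratio = total / sumMultiplier;      // (10 + 0.5 + 0) / 605 ~= 0.017
        // 0.017 < 0.05, so the balancer would report the cluster as already balanced.
        System.out.println("ratio=" + ratio + ", runBalancer=" + (ratio >= minCostNeedBalance));
      }
    }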
Cluster cluster = new Cluster(clusterState, loads, finder, rackManager); - if (!needsBalance(cluster)) { - return null; - } - long startTime = EnvironmentEdgeManager.currentTime(); initCosts(cluster); + if (!needsBalance(cluster)) { + return null; + } + double currentCost = computeCost(cluster, Double.MAX_VALUE); curOverallCost = currentCost; for (int i = 0; i < this.curFunctionCosts.length; i++) { curFunctionCosts[i] = tempFunctionCosts[i]; } + LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost=" + + functionCost()); double initCost = currentCost; double newCost = currentCost; @@ -407,6 +450,18 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { } } + private String functionCost() { + StringBuilder builder = new StringBuilder(); + for (CostFunction c:costFunctions) { + builder.append(c.getClass().getSimpleName()); + builder.append(" : ("); + builder.append(c.getMultiplier()); + builder.append(", "); + builder.append(c.cost()); + builder.append("); "); + } + return builder.toString(); + } /** * Create all of the RegionPlan's needed to move from the initial cluster state to the desired diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 583f873..7a54d87 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -140,7 +140,9 @@ public class SimpleRegionNormalizer implements RegionNormalizer { for (int i = 0; i < tableRegions.size(); i++) { HRegionInfo hri = tableRegions.get(i); long regionSize = getRegionSize(hri); - totalSizeMb += regionSize; + if (regionSize > 0) { + totalSizeMb += regionSize; + } } double avgRegionSize = totalSizeMb / (double) tableRegions.size(); @@ -180,7 +182,7 @@ public class SimpleRegionNormalizer implements RegionNormalizer { if (mergeEnabled) { HRegionInfo hri2 = tableRegions.get(candidateIdx+1); long regionSize2 = getRegionSize(hri2); - if (regionSize + regionSize2 < avgRegionSize) { + if (regionSize > 0 && regionSize2 > 0 && regionSize + regionSize2 < avgRegionSize) { LOG.info("Table " + table + ", small region size: " + regionSize + " plus its neighbor size: " + regionSize2 + ", less than the avg size " + avgRegionSize + ", merging them"); @@ -204,6 +206,10 @@ public class SimpleRegionNormalizer implements RegionNormalizer { getRegionServerOfRegion(hri); RegionLoad regionLoad = masterServices.getServerManager().getLoad(sn). 
getRegionsLoad().get(hri.getRegionName()); + if (regionLoad == null) { + LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad"); + return -1; + } return regionLoad.getStorefileSizeMB(); } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java index f2ee97f..1214268 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java @@ -56,14 +56,6 @@ public final class MasterDDLOperationHelper { private MasterDDLOperationHelper() {} /** - * Check whether online schema change is allowed from config - **/ - public static boolean isOnlineSchemaChangeAllowed(final MasterProcedureEnv env) { - return env.getMasterServices().getConfiguration() - .getBoolean("hbase.online.schema.update.enable", false); - } - - /** * Check whether a table is modifiable - exists and either offline or online with config set * @param env MasterProcedureEnv * @param tableName name of the table @@ -75,13 +67,6 @@ public final class MasterDDLOperationHelper { if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) { throw new TableNotFoundException(tableName); } - - // We only execute this procedure with table online if online schema change config is set. - if (!env.getMasterServices().getTableStateManager() - .isTableState(tableName, TableState.State.DISABLED) - && !MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) { - throw new TableNotDisabledException(tableName); - } } /** diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java index 5f37720..d4791fe 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java @@ -20,12 +20,15 @@ package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; import java.util.ArrayDeque; +import java.util.Arrays; +import java.util.HashMap; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; @@ -103,6 +106,10 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { } private void doAdd(final Procedure proc, final boolean addFront) { + doAdd(proc, addFront, true); + } + + private void doAdd(final Procedure proc, final boolean addFront, final boolean notify) { schedLock.lock(); try { if (isTableProcedure(proc)) { @@ -117,7 +124,9 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { throw new UnsupportedOperationException( "RQs for non-table/non-server procedures are not implemented yet"); } - schedWaitCond.signal(); + if (notify) { + schedWaitCond.signal(); + } } finally { schedLock.unlock(); } @@ -125,12 +134,28 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { private > void doAdd(final FairQueue fairq, final Queue 
queue, final Procedure proc, final boolean addFront) { + if (proc.isSuspended()) return; + queue.add(proc, addFront); + if (!(queue.isSuspended() || queue.hasExclusiveLock())) { + // the queue is not suspended or removed from the fairq (run-queue) + // because someone has an xlock on it. + // so, if the queue is not-linked we should add it if (queue.size() == 1 && !IterableList.isLinked(queue)) { fairq.add(queue); } queueSize++; + } else if (proc.hasParent() && queue.isLockOwner(proc.getParentProcId())) { + assert addFront : "expected to add a child in the front"; + assert !queue.isSuspended() : "unexpected suspended state for the queue"; + // our (proc) parent has the xlock, + // so the queue is not in the fairq (run-queue) + // add it back to let the child run (inherit the lock) + if (!IterableList.isLinked(queue)) { + fairq.add(queue); + } + queueSize++; } } @@ -140,7 +165,7 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { } @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP") - Procedure poll(long waitNsec) { + protected Procedure poll(long waitNsec) { Procedure pollResult = null; schedLock.lock(); try { @@ -185,7 +210,16 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { this.queueSize--; if (rq.isEmpty() || rq.requireExclusiveLock(pollResult)) { removeFromRunQueue(fairq, rq); + } else if (pollResult.hasParent() && rq.isLockOwner(pollResult.getParentProcId())) { + // if the rq is in the fairq because of runnable child + // check if the next procedure is still a child. + // if not, remove the rq from the fairq and go back to the xlock state + Procedure nextProc = rq.peek(); + if (nextProc != null && nextProc.getParentProcId() != pollResult.getParentProcId()) { + removeFromRunQueue(fairq, rq); + } } + return pollResult; } @@ -300,18 +334,25 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { } public boolean waitEvent(ProcedureEvent event, Procedure procedure, boolean suspendQueue) { + return waitEvent(event, /* lockEvent= */false, procedure, suspendQueue); + } + + private boolean waitEvent(ProcedureEvent event, boolean lockEvent, + Procedure procedure, boolean suspendQueue) { synchronized (event) { if (event.isReady()) { + if (lockEvent) { + event.setReady(false); + } return false; } - // TODO: Suspend single procedure not implemented yet, fallback to suspending the queue - if (!suspendQueue) suspendQueue = true; - - if (isTableProcedure(procedure)) { - waitTableEvent(event, procedure, suspendQueue); + if (!suspendQueue) { + suspendProcedure(event, procedure); + } else if (isTableProcedure(procedure)) { + waitTableEvent(event, procedure); } else if (isServerProcedure(procedure)) { - waitServerEvent(event, procedure, suspendQueue); + waitServerEvent(event, procedure); } else { // TODO: at the moment we only have Table and Server procedures // if you are implementing a non-table/non-server procedure, you have two options: create @@ -324,17 +365,16 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { return true; } - private void waitTableEvent(ProcedureEvent event, Procedure procedure, boolean suspendQueue) { + private void waitTableEvent(ProcedureEvent event, Procedure procedure) { final TableName tableName = getTableName(procedure); final boolean isDebugEnabled = LOG.isDebugEnabled(); schedLock.lock(); try { TableQueue queue = getTableQueue(tableName); + queue.addFront(procedure); if (queue.isSuspended()) return; - // TODO: if !suspendQueue - if (isDebugEnabled) { 
LOG.debug("Suspend table queue " + tableName); } @@ -346,7 +386,7 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { } } - private void waitServerEvent(ProcedureEvent event, Procedure procedure, boolean suspendQueue) { + private void waitServerEvent(ProcedureEvent event, Procedure procedure) { final ServerName serverName = getServerName(procedure); final boolean isDebugEnabled = LOG.isDebugEnabled(); @@ -354,10 +394,9 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { try { // TODO: This will change once we have the new AM ServerQueue queue = getServerQueue(serverName); + queue.addFront(procedure); if (queue.isSuspended()) return; - // TODO: if !suspendQueue - if (isDebugEnabled) { LOG.debug("Suspend server queue " + serverName); } @@ -399,6 +438,10 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { addToRunQueue(serverRunQueue, queue); } + while (event.hasWaitingProcedures()) { + wakeProcedure(event.popWaitingProcedure(false)); + } + if (queueSize > 1) { schedWaitCond.signalAll(); } else if (queueSize > 0) { @@ -410,7 +453,41 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { } } - public static class ProcedureEvent { + private void suspendProcedure(BaseProcedureEvent event, Procedure procedure) { + procedure.suspend(); + event.suspendProcedure(procedure); + } + + private void wakeProcedure(Procedure procedure) { + procedure.resume(); + doAdd(procedure, /* addFront= */ true, /* notify= */false); + } + + private static abstract class BaseProcedureEvent { + private ArrayDeque waitingProcedures = null; + + protected void suspendProcedure(Procedure proc) { + if (waitingProcedures == null) { + waitingProcedures = new ArrayDeque(); + } + waitingProcedures.addLast(proc); + } + + protected boolean hasWaitingProcedures() { + return waitingProcedures != null; + } + + protected Procedure popWaitingProcedure(boolean popFront) { + // it will be nice to use IterableList on a procedure and avoid allocations... + Procedure proc = popFront ? 
waitingProcedures.removeFirst() : waitingProcedures.removeLast(); + if (waitingProcedures.isEmpty()) { + waitingProcedures = null; + } + return proc; + } + } + + public static class ProcedureEvent extends BaseProcedureEvent { private final String description; private Queue waitingServers = null; @@ -585,9 +662,47 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { } } + private static class RegionEvent extends BaseProcedureEvent { + private final HRegionInfo regionInfo; + private long exclusiveLockProcIdOwner = Long.MIN_VALUE; + + public RegionEvent(HRegionInfo regionInfo) { + this.regionInfo = regionInfo; + } + + public boolean hasExclusiveLock() { + return exclusiveLockProcIdOwner != Long.MIN_VALUE; + } + + public boolean isLockOwner(long procId) { + return exclusiveLockProcIdOwner == procId; + } + + public boolean tryExclusiveLock(long procIdOwner) { + assert procIdOwner != Long.MIN_VALUE; + if (hasExclusiveLock()) return false; + exclusiveLockProcIdOwner = procIdOwner; + return true; + } + + private void releaseExclusiveLock() { + exclusiveLockProcIdOwner = Long.MIN_VALUE; + } + + public HRegionInfo getRegionInfo() { + return regionInfo; + } + + @Override + public String toString() { + return String.format("region %s event", regionInfo.getRegionNameAsString()); + } + } + public static class TableQueue extends QueueImpl { private final NamespaceQueue namespaceQueue; + private HashMap regionEventMap; private TableLock tableLock = null; public TableQueue(TableName tableName, NamespaceQueue namespaceQueue, int priority) { @@ -601,7 +716,41 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { @Override public synchronized boolean isAvailable() { - return super.isAvailable() && !namespaceQueue.hasExclusiveLock(); + // if there are no items in the queue, or the namespace is locked. + // we can't execute operation on this table + if (isEmpty() || namespaceQueue.hasExclusiveLock()) { + return false; + } + + if (hasExclusiveLock()) { + // if we have an exclusive lock already taken + // only child of the lock owner can be executed + Procedure availProc = peek(); + return availProc != null && availProc.hasParent() && + isLockOwner(availProc.getParentProcId()); + } + + // no xlock + return true; + } + + public synchronized RegionEvent getRegionEvent(final HRegionInfo regionInfo) { + if (regionEventMap == null) { + regionEventMap = new HashMap(); + } + RegionEvent event = regionEventMap.get(regionInfo); + if (event == null) { + event = new RegionEvent(regionInfo); + regionEventMap.put(regionInfo, event); + } + return event; + } + + public synchronized void removeRegionEvent(final RegionEvent event) { + regionEventMap.remove(event.getRegionInfo()); + if (regionEventMap.isEmpty()) { + regionEventMap = null; + } } // TODO: We can abort pending/in-progress operation if the new call is @@ -630,6 +779,13 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { return !tpi.getTableName().equals(TableName.NAMESPACE_TABLE_NAME); case READ: return false; + // region operations are using the shared-lock on the table + // and then they will grab an xlock on the region. 
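The TableQueue changes above let a child procedure run while its parent holds the table's exclusive lock, treating the child as inheriting that lock. A reduced sketch of that availability rule follows, assuming a single flat queue; Proc and LockInheritanceSketch are hypothetical names, not types from the patch.

import java.util.ArrayDeque;

// Illustrative procedure holder with only the fields the scheduling rule needs.
final class Proc {
  final long procId;
  final Long parentProcId;   // null when the procedure has no parent
  Proc(long procId, Long parentProcId) { this.procId = procId; this.parentProcId = parentProcId; }
  boolean hasParent() { return parentProcId != null; }
}

// Reduced model of the rule the patched TableQueue.isAvailable() applies: once a
// procedure owns the exclusive lock, only its children are runnable from the queue.
final class LockInheritanceSketch {
  private final ArrayDeque<Proc> queue = new ArrayDeque<Proc>();
  private long exclusiveLockOwner = Long.MIN_VALUE;   // MIN_VALUE means "not locked"

  void add(Proc p) { queue.addLast(p); }

  boolean tryExclusiveLock(long procId) {
    if (exclusiveLockOwner != Long.MIN_VALUE) return false;
    exclusiveLockOwner = procId;
    return true;
  }

  boolean isAvailable() {
    if (queue.isEmpty()) return false;
    if (exclusiveLockOwner != Long.MIN_VALUE) {
      Proc head = queue.peekFirst();
      return head.hasParent() && head.parentProcId == exclusiveLockOwner;
    }
    return true;
  }
}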
+ case SPLIT: + case MERGE: + case ASSIGN: + case UNASSIGN: + return false; default: break; } @@ -883,6 +1039,100 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { } // ============================================================================ + // Region Locking Helpers + // ============================================================================ + public boolean waitRegion(final Procedure procedure, final HRegionInfo regionInfo) { + return waitRegions(procedure, regionInfo.getTable(), regionInfo); + } + + public boolean waitRegions(final Procedure procedure, final TableName table, + final HRegionInfo... regionInfo) { + Arrays.sort(regionInfo); + + final TableQueue queue; + if (procedure.hasParent()) { + // the assumption is that the parent procedure have already the table xlock + queue = getTableQueueWithLock(table); + } else { + // acquire the table shared-lock + queue = tryAcquireTableQueueSharedLock(procedure, table); + if (queue == null) return false; + } + + // acquire region xlocks or wait + boolean hasLock = true; + final RegionEvent[] event = new RegionEvent[regionInfo.length]; + synchronized (queue) { + for (int i = 0; i < regionInfo.length; ++i) { + assert regionInfo[i].getTable().equals(table); + event[i] = queue.getRegionEvent(regionInfo[i]); + if (!event[i].tryExclusiveLock(procedure.getProcId())) { + suspendProcedure(event[i], procedure); + hasLock = false; + while (i-- > 0) { + event[i].releaseExclusiveLock(); + } + break; + } + } + } + + if (!hasLock && !procedure.hasParent()) { + releaseTableSharedLock(procedure, table); + } + return hasLock; + } + + public void wakeRegion(final Procedure procedure, final HRegionInfo regionInfo) { + wakeRegions(procedure, regionInfo.getTable(), regionInfo); + } + + public void wakeRegions(final Procedure procedure,final TableName table, + final HRegionInfo... regionInfo) { + Arrays.sort(regionInfo); + + final TableQueue queue = getTableQueueWithLock(table); + + int numProcs = 0; + final Procedure[] nextProcs = new Procedure[regionInfo.length]; + synchronized (queue) { + for (int i = 0; i < regionInfo.length; ++i) { + assert regionInfo[i].getTable().equals(table); + RegionEvent event = queue.getRegionEvent(regionInfo[i]); + event.releaseExclusiveLock(); + if (event.hasWaitingProcedures()) { + // release one procedure at the time since regions has an xlock + nextProcs[numProcs++] = event.popWaitingProcedure(true); + } else { + queue.removeRegionEvent(event); + } + } + } + + // awake procedures if any + schedLock.lock(); + try { + for (int i = numProcs - 1; i >= 0; --i) { + wakeProcedure(nextProcs[i]); + } + + if (numProcs > 1) { + schedWaitCond.signalAll(); + } else if (numProcs > 0) { + schedWaitCond.signal(); + } + + if (!procedure.hasParent()) { + // release the table shared-lock. 
+ // (if we have a parent, it is holding an xlock so we didn't take the shared-lock) + releaseTableSharedLock(procedure, table); + } + } finally { + schedLock.unlock(); + } + } + + // ============================================================================ // Namespace Locking Helpers // ============================================================================ /** @@ -1080,6 +1330,10 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet { return sharedLock == 1; } + public synchronized boolean isLockOwner(long procId) { + return exclusiveLockProcIdOwner == procId; + } + public synchronized boolean tryExclusiveLock(long procIdOwner) { assert procIdOwner != Long.MIN_VALUE; if (isLocked()) return false; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 3f76df3..6c65718 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -300,11 +300,6 @@ public class ModifyTableProcedure if (env.getMasterServices().getTableStateManager() .isTableState(getTableName(), TableState.State.ENABLED)) { - // We only execute this procedure with table online if online schema change config is set. - if (!MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) { - throw new TableNotDisabledException(getTableName()); - } - if (modifiedHTableDescriptor.getRegionReplication() != unmodifiedHTableDescriptor .getRegionReplication()) { throw new IOException("REGION_REPLICATION change is not supported for enabled tables"); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java index cc088f3..deaf406 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; public interface TableProcedureInterface { public enum TableOperationType { CREATE, DELETE, DISABLE, EDIT, ENABLE, READ, + SPLIT, MERGE, ASSIGN, UNASSIGN, /* region operations */ }; /** diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java index cfe76ae..29b7e8a 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java @@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.HFileLink; @@ -90,10 +89,10 @@ public class PartitionedMobCompactor extends MobCompactor { protected int compactionBatchSize; protected int compactionKVMax; - private Path tempPath; - private Path bulkloadPath; - private CacheConfig compactionCacheConfig; - private Tag 
tableNameTag; + private final Path tempPath; + private final Path bulkloadPath; + private final CacheConfig compactionCacheConfig; + private final Tag tableNameTag; private Encryption.Context cryptoContext = Encryption.Context.NONE; public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName, @@ -137,13 +136,12 @@ public class PartitionedMobCompactor extends MobCompactor { * @param candidates All the candidates. * @param allFiles Whether add all mob files into the compaction. * @return A compaction request. - * @throws IOException + * @throws IOException if IO failure is encountered */ protected PartitionedMobCompactionRequest select(List candidates, boolean allFiles) throws IOException { - Collection allDelFiles = new ArrayList(); - Map filesToCompact = - new HashMap(); + Collection allDelFiles = new ArrayList<>(); + Map filesToCompact = new HashMap<>(); int selectedFileCount = 0; int irrelevantFileCount = 0; for (FileStatus file : candidates) { @@ -202,17 +200,17 @@ public class PartitionedMobCompactor extends MobCompactor { * * @param request The compaction request. * @return The paths of new mob files generated in the compaction. - * @throws IOException + * @throws IOException if IO failure is encountered */ protected List performCompaction(PartitionedMobCompactionRequest request) throws IOException { // merge the del files - List delFilePaths = new ArrayList(); + List delFilePaths = new ArrayList<>(); for (FileStatus delFile : request.delFiles) { delFilePaths.add(delFile.getPath()); } List newDelPaths = compactDelFiles(request, delFilePaths); - List newDelFiles = new ArrayList(); + List newDelFiles = new ArrayList<>(); List paths = null; try { for (Path newDelPath : newDelPaths) { @@ -247,7 +245,7 @@ public class PartitionedMobCompactor extends MobCompactor { * @param request The compaction request. * @param delFiles The del files. * @return The paths of new mob files after compactions. - * @throws IOException + * @throws IOException if IO failure is encountered */ protected List compactMobFiles(final PartitionedMobCompactionRequest request, final List delFiles) throws IOException { @@ -256,24 +254,23 @@ public class PartitionedMobCompactor extends MobCompactor { LOG.info("No partitions of mob files"); return Collections.emptyList(); } - List paths = new ArrayList(); - Connection c = ConnectionFactory.createConnection(conf); + List paths = new ArrayList<>(); + final Connection c = ConnectionFactory.createConnection(conf); final Table table = c.getTable(tableName); try { - Map>> results = - new HashMap>>(); + Map>> results = new HashMap<>(); // compact the mob files by partitions in parallel. for (final CompactionPartition partition : partitions) { results.put(partition.getPartitionId(), pool.submit(new Callable>() { @Override public List call() throws Exception { LOG.info("Compacting mob files for partition " + partition.getPartitionId()); - return compactMobFilePartition(request, partition, delFiles, table); + return compactMobFilePartition(request, partition, delFiles, c, table); } })); } // compact the partitions in parallel. 
- List failedPartitions = new ArrayList(); + List failedPartitions = new ArrayList<>(); for (Entry>> result : results.entrySet()) { try { paths.addAll(result.getValue().get()); @@ -291,7 +288,7 @@ public class PartitionedMobCompactor extends MobCompactor { try { table.close(); } catch (IOException e) { - LOG.error("Failed to close the HTable", e); + LOG.error("Failed to close the Table", e); } } return paths; @@ -302,13 +299,16 @@ public class PartitionedMobCompactor extends MobCompactor { * @param request The compaction request. * @param partition A compaction partition. * @param delFiles The del files. - * @param table The current table. - * @return The paths of new mob files after compactions. - * @throws IOException + * @param connection to use + * @param table The current table. @return The paths of new mob files after compactions. + * @throws IOException if IO failure is encountered */ private List compactMobFilePartition(PartitionedMobCompactionRequest request, - CompactionPartition partition, List delFiles, Table table) throws IOException { - List newFiles = new ArrayList(); + CompactionPartition partition, + List delFiles, + Connection connection, + Table table) throws IOException { + List newFiles = new ArrayList<>(); List files = partition.listFiles(); int offset = 0; Path bulkloadPathOfPartition = new Path(bulkloadPath, partition.getPartitionId().toString()); @@ -328,7 +328,7 @@ public class PartitionedMobCompactor extends MobCompactor { // clean the bulkload directory to avoid loading old files. fs.delete(bulkloadPathOfPartition, true); // add the selected mob files and del files into filesToCompact - List filesToCompact = new ArrayList(); + List filesToCompact = new ArrayList<>(); for (int i = offset; i < batch + offset; i++) { StoreFile sf = new StoreFile(fs, files.get(i).getPath(), conf, compactionCacheConfig, BloomType.NONE); @@ -336,7 +336,7 @@ public class PartitionedMobCompactor extends MobCompactor { } filesToCompact.addAll(delFiles); // compact the mob files in a batch. - compactMobFilesInBatch(request, partition, table, filesToCompact, batch, + compactMobFilesInBatch(request, partition, connection, table, filesToCompact, batch, bulkloadPathOfPartition, bulkloadColumnPath, newFiles); // move to the next batch. offset += batch; @@ -364,19 +364,23 @@ public class PartitionedMobCompactor extends MobCompactor { * Compacts a partition of selected small mob files and all the del files in a batch. * @param request The compaction request. * @param partition A compaction partition. + * @param connection To use for transport * @param table The current table. * @param filesToCompact The files to be compacted. * @param batch The number of mob files to be compacted in a batch. * @param bulkloadPathOfPartition The directory where the bulkload column of the current - * partition is saved. + * partition is saved. * @param bulkloadColumnPath The directory where the bulkload files of current partition - * are saved. + * are saved. * @param newFiles The paths of new mob files after compactions. 
- * @throws IOException + * @throws IOException if IO failure is encountered */ private void compactMobFilesInBatch(PartitionedMobCompactionRequest request, - CompactionPartition partition, Table table, List filesToCompact, int batch, - Path bulkloadPathOfPartition, Path bulkloadColumnPath, List newFiles) + CompactionPartition partition, + Connection connection, Table table, + List filesToCompact, int batch, + Path bulkloadPathOfPartition, Path bulkloadColumnPath, + List newFiles) throws IOException { // open scanner to the selected mob files and del files. StoreScanner scanner = createScanner(filesToCompact, ScanType.COMPACT_DROP_DELETES); @@ -392,7 +396,7 @@ public class PartitionedMobCompactor extends MobCompactor { long mobCells = 0; try { writer = MobUtils.createWriter(conf, fs, column, partition.getPartitionId().getDate(), - tempPath, Long.MAX_VALUE, column.getCompactionCompression(), partition.getPartitionId() + tempPath, Long.MAX_VALUE, column.getCompactionCompressionType(), partition.getPartitionId() .getStartKey(), compactionCacheConfig, cryptoContext); filePath = writer.getPath(); byte[] fileName = Bytes.toBytes(filePath.getName()); @@ -400,8 +404,8 @@ public class PartitionedMobCompactor extends MobCompactor { refFileWriter = MobUtils.createRefFileWriter(conf, fs, column, bulkloadColumnPath, fileInfo .getSecond().longValue(), compactionCacheConfig, cryptoContext); refFilePath = refFileWriter.getPath(); - List cells = new ArrayList(); - boolean hasMore = false; + List cells = new ArrayList<>(); + boolean hasMore; ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); do { @@ -428,7 +432,7 @@ public class PartitionedMobCompactor extends MobCompactor { // commit mob file MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig); // bulkload the ref file - bulkloadRefFile(table, bulkloadPathOfPartition, filePath.getName()); + bulkloadRefFile(connection, table, bulkloadPathOfPartition, filePath.getName()); newFiles.add(new Path(mobFamilyDir, filePath.getName())); } else { // remove the new files @@ -450,10 +454,10 @@ public class PartitionedMobCompactor extends MobCompactor { /** * Compacts the del files in batches which avoids opening too many files. * @param request The compaction request. - * @param delFilePaths + * @param delFilePaths Del file paths to compact * @return The paths of new del files after merging or the original files if no merging * is necessary. - * @throws IOException + * @throws IOException if IO failure is encountered */ protected List compactDelFiles(PartitionedMobCompactionRequest request, List delFilePaths) throws IOException { @@ -462,14 +466,14 @@ public class PartitionedMobCompactor extends MobCompactor { } // when there are more del files than the number that is allowed, merge it firstly. int offset = 0; - List paths = new ArrayList(); + List paths = new ArrayList<>(); while (offset < delFilePaths.size()) { // get the batch int batch = compactionBatchSize; if (delFilePaths.size() - offset < compactionBatchSize) { batch = delFilePaths.size() - offset; } - List batchedDelFiles = new ArrayList(); + List batchedDelFiles = new ArrayList<>(); if (batch == 1) { // only one file left, do not compact it, directly add it to the new files. paths.add(delFilePaths.get(offset)); @@ -493,7 +497,7 @@ public class PartitionedMobCompactor extends MobCompactor { * @param request The compaction request. * @param delFiles The del files. * @return The path of new del file after merging. 
- * @throws IOException + * @throws IOException if IO failure is encountered */ private Path compactDelFilesInBatch(PartitionedMobCompactionRequest request, List delFiles) throws IOException { @@ -504,11 +508,11 @@ public class PartitionedMobCompactor extends MobCompactor { try { writer = MobUtils.createDelFileWriter(conf, fs, column, MobUtils.formatDate(new Date(request.selectionTime)), tempPath, Long.MAX_VALUE, - column.getCompactionCompression(), HConstants.EMPTY_START_ROW, compactionCacheConfig, + column.getCompactionCompressionType(), HConstants.EMPTY_START_ROW, compactionCacheConfig, cryptoContext); filePath = writer.getPath(); - List cells = new ArrayList(); - boolean hasMore = false; + List cells = new ArrayList<>(); + boolean hasMore; ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); do { @@ -544,7 +548,7 @@ public class PartitionedMobCompactor extends MobCompactor { * @param filesToCompact The files to be compacted. * @param scanType The scan type. * @return The store scanner. - * @throws IOException + * @throws IOException if IO failure is encountered */ private StoreScanner createScanner(List filesToCompact, ScanType scanType) throws IOException { @@ -554,24 +558,29 @@ public class PartitionedMobCompactor extends MobCompactor { scan.setMaxVersions(column.getMaxVersions()); long ttl = HStore.determineTTLFromFamily(column); ScanInfo scanInfo = new ScanInfo(conf, column, ttl, 0, CellComparator.COMPARATOR); - StoreScanner scanner = new StoreScanner(scan, scanInfo, scanType, null, scanners, 0L, + return new StoreScanner(scan, scanInfo, scanType, null, scanners, 0L, HConstants.LATEST_TIMESTAMP); - return scanner; } /** * Bulkloads the current file. + * + * @param connection to use to get admin/RegionLocator * @param table The current table. * @param bulkloadDirectory The path of bulkload directory. * @param fileName The current file name. - * @throws IOException + * @throws IOException if IO failure is encountered */ - private void bulkloadRefFile(Table table, Path bulkloadDirectory, String fileName) + private void bulkloadRefFile(Connection connection, Table table, Path bulkloadDirectory, + String fileName) throws IOException { // bulkload the ref file try { LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf); - bulkload.doBulkLoad(bulkloadDirectory, (HTable)table); + bulkload.doBulkLoad(bulkloadDirectory, + connection.getAdmin(), + table, + connection.getRegionLocator(table.getName())); } catch (Exception e) { // delete the committed mob file deletePath(new Path(mobFamilyDir, fileName)); @@ -587,7 +596,7 @@ public class PartitionedMobCompactor extends MobCompactor { * @param writer The mob file writer. * @param maxSeqId Maximum sequence id. * @param mobCellsCount The number of mob cells. - * @throws IOException + * @throws IOException if IO failure is encountered */ private void closeMobFileWriter(StoreFileWriter writer, long maxSeqId, long mobCellsCount) throws IOException { @@ -606,7 +615,7 @@ public class PartitionedMobCompactor extends MobCompactor { * @param writer The ref file writer. * @param maxSeqId Maximum sequence id. * @param bulkloadTime The timestamp at which the bulk load file is created. 
- * @throws IOException + * @throws IOException if IO failure is encountered */ private void closeRefFileWriter(StoreFileWriter writer, long maxSeqId, long bulkloadTime) throws IOException { @@ -626,7 +635,7 @@ public class PartitionedMobCompactor extends MobCompactor { * Gets the max seqId and number of cells of the store files. * @param storeFiles The store files. * @return The pair of the max seqId and number of cells of the store files. - * @throws IOException + * @throws IOException if IO failure is encountered */ private Pair getFileInfo(List storeFiles) throws IOException { long maxSeqId = 0; @@ -639,7 +648,7 @@ public class PartitionedMobCompactor extends MobCompactor { maxKeyCount += Bytes.toLong(count); } } - return new Pair(Long.valueOf(maxSeqId), Long.valueOf(maxKeyCount)); + return new Pair<>(maxSeqId, maxKeyCount); } /** @@ -670,8 +679,7 @@ public class PartitionedMobCompactor extends MobCompactor { private FileStatus getFileStatus(Path path) throws IOException { try { if (path != null) { - FileStatus file = fs.getFileStatus(path); - return file; + return fs.getFileStatus(path); } } catch (FileNotFoundException e) { LOG.warn("The file " + path + " can not be found", e); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java index c3724fc..0f27e0e 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java @@ -64,7 +64,7 @@ public abstract class AbstractMemStore implements MemStore { (2 * Bytes.SIZEOF_LONG)); public final static long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD + - 2 * (ClassSize.ATOMIC_LONG + ClassSize.TIMERANGE_TRACKER + + (ClassSize.ATOMIC_LONG + ClassSize.TIMERANGE_TRACKER + ClassSize.CELL_SKIPLIST_SET + ClassSize.CONCURRENT_SKIPLISTMAP)); @@ -99,7 +99,7 @@ public abstract class AbstractMemStore implements MemStore { * @param onlyIfMoreRecent a flag that marks whether to update the sequence id no matter what or * only if it is greater than the previous sequence id */ - public abstract void updateLowestUnflushedSequenceIdInWal(boolean onlyIfMoreRecent); + public abstract void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent); /** * Write an update @@ -162,17 +162,9 @@ public abstract class AbstractMemStore implements MemStore { } /** - * An override on snapshot so the no arg version of the method implies zero seq num, - * like for cases without wal - */ - public MemStoreSnapshot snapshot() { - return snapshot(0); - } - - /** * The passed snapshot was successfully persisted; it can be let go. * @param id Id of the snapshot to clean out. - * @see MemStore#snapshot(long) + * @see MemStore#snapshot() */ @Override public void clearSnapshot(long id) throws UnexpectedStateException { @@ -201,18 +193,6 @@ public abstract class AbstractMemStore implements MemStore { } /** - * On flush, how much memory we will clear from the active cell set. - * - * @return size of data that is going to be flushed from active set - */ - @Override - public long getFlushableSize() { - long snapshotSize = getSnapshot().getSize(); - return snapshotSize > 0 ? snapshotSize : keySize(); - } - - - /** * @return a list containing a single memstore scanner. 
*/ @Override @@ -230,7 +210,7 @@ public abstract class AbstractMemStore implements MemStore { StringBuffer buf = new StringBuffer(); int i = 1; try { - for (Segment segment : getListOfSegments()) { + for (Segment segment : getSegments()) { buf.append("Segment (" + i + ") " + segment.toString() + "; "); i++; } @@ -471,9 +451,6 @@ public abstract class AbstractMemStore implements MemStore { * Returns an ordered list of segments from most recent to oldest in memstore * @return an ordered list of segments from most recent to oldest in memstore */ - protected abstract List getListOfSegments() throws IOException; + protected abstract List getSegments() throws IOException; - public long getActiveSize() { - return getActive().getSize(); - } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java index 65375b8..be2bd91 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.UUID; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -61,7 +62,7 @@ implements RowProcessor { @Override public String getName() { - return this.getClass().getSimpleName().toLowerCase(); + return this.getClass().getSimpleName().toLowerCase(Locale.ROOT); } @Override diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java new file mode 100644 index 0000000..7aaece6 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -0,0 +1,406 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import com.google.common.annotations.VisibleForTesting; +import java.io.IOException; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.wal.WAL; + +/** + * A memstore implementation which supports in-memory compaction. + * A compaction pipeline is added between the active set and the snapshot data structures; + * it consists of a list of kv-sets that are subject to compaction. + * Like the snapshot, all pipeline components are read-only; updates only affect the active set. + * To ensure this property we take advantage of the existing blocking mechanism -- the active set + * is pushed to the pipeline while holding the region's updatesLock in exclusive mode. + * Periodically, a compaction is applied in the background to all pipeline components resulting + * in a single read-only component. The ``old'' components are discarded when no scanner is reading + * them. + */ +@InterfaceAudience.Private +public class CompactingMemStore extends AbstractMemStore { + public final static long DEEP_OVERHEAD_PER_PIPELINE_ITEM = ClassSize.align( + ClassSize.TIMERANGE_TRACKER + ClassSize.TIMERANGE + + ClassSize.CELL_SKIPLIST_SET + ClassSize.CONCURRENT_SKIPLISTMAP); + // Default fraction of in-memory-flush size w.r.t. 
flush-to-disk size + public static final String IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY = + "hbase.memestore.inmemoryflush.threshold.factor"; + private static final double IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT = 0.25; + + private static final Log LOG = LogFactory.getLog(CompactingMemStore.class); + private Store store; + private RegionServicesForStores regionServices; + private CompactionPipeline pipeline; + private MemStoreCompactor compactor; + // the threshold on active size for in-memory flush + private long inmemoryFlushSize; + private final AtomicBoolean inMemoryFlushInProgress = new AtomicBoolean(false); + @VisibleForTesting + private final AtomicBoolean allowCompaction = new AtomicBoolean(true); + + public CompactingMemStore(Configuration conf, CellComparator c, + HStore store, RegionServicesForStores regionServices) throws IOException { + super(conf, c); + this.store = store; + this.regionServices = regionServices; + this.pipeline = new CompactionPipeline(getRegionServices()); + this.compactor = new MemStoreCompactor(this); + initInmemoryFlushSize(conf); + } + + private void initInmemoryFlushSize(Configuration conf) { + long memstoreFlushSize = getRegionServices().getMemstoreFlushSize(); + int numStores = getRegionServices().getNumStores(); + if (numStores <= 1) { + // Family number might also be zero in some of our unit test case + numStores = 1; + } + inmemoryFlushSize = memstoreFlushSize / numStores; + // multiply by a factor + double factor = conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, + IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT); + inmemoryFlushSize *= factor; + LOG.debug("Setting in-memory flush size threshold to " + inmemoryFlushSize); + } + + public static long getSegmentSize(Segment segment) { + return segment.getSize() - DEEP_OVERHEAD_PER_PIPELINE_ITEM; + } + + public static long getSegmentsSize(List list) { + long res = 0; + for (Segment segment : list) { + res += getSegmentSize(segment); + } + return res; + } + + /** + * @return Total memory occupied by this MemStore. + * This is not thread safe and the memstore may be changed while computing its size. + * It is the responsibility of the caller to make sure this doesn't happen. + */ + @Override + public long size() { + long res = 0; + for (Segment item : getSegments()) { + res += item.getSize(); + } + return res; + } + + /** + * This method is called when it is clear that the flush to disk is completed. + * The store may do any post-flush actions at this point. + * One example is to update the WAL with sequence number that is known only at the store level. + */ + @Override public void finalizeFlush() { + updateLowestUnflushedSequenceIdInWAL(false); + } + + @Override public boolean isSloppy() { + return true; + } + + /** + * Push the current active memstore segment into the pipeline + * and create a snapshot of the tail of current compaction pipeline + * Snapshot must be cleared by call to {@link #clearSnapshot}. + * {@link #clearSnapshot(long)}. + * @return {@link MemStoreSnapshot} + */ + @Override + public MemStoreSnapshot snapshot() { + MutableSegment active = getActive(); + // If snapshot currently has entries, then flusher failed or didn't call + // cleanup. Log a warning. + if (!getSnapshot().isEmpty()) { + LOG.warn("Snapshot called again without clearing previous. " + + "Doing nothing. 
Another ongoing flush or did we fail last attempt?"); + } else { + LOG.info("FLUSHING TO DISK: region "+ getRegionServices().getRegionInfo() + .getRegionNameAsString() + "store: "+ getFamilyName()); + stopCompaction(); + pushActiveToPipeline(active); + snapshotId = EnvironmentEdgeManager.currentTime(); + pushTailToSnapshot(); + } + return new MemStoreSnapshot(snapshotId, getSnapshot()); + } + + /** + * On flush, how much memory we will clear. + * @return size of data that is going to be flushed + */ + @Override public long getFlushableSize() { + long snapshotSize = getSnapshot().getSize(); + if(snapshotSize == 0) { + //if snapshot is empty the tail of the pipeline is flushed + snapshotSize = pipeline.getTailSize(); + } + return snapshotSize > 0 ? snapshotSize : keySize(); + } + + @Override + public void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfGreater) { + long minSequenceId = pipeline.getMinSequenceId(); + if(minSequenceId != Long.MAX_VALUE) { + byte[] encodedRegionName = getRegionServices().getRegionInfo().getEncodedNameAsBytes(); + byte[] familyName = getFamilyNameInByte(); + WAL WAL = getRegionServices().getWAL(); + if (WAL != null) { + WAL.updateStore(encodedRegionName, familyName, minSequenceId, onlyIfGreater); + } + } + } + + @Override + public List getSegments() { + List pipelineList = pipeline.getSegments(); + List list = new LinkedList(); + list.add(getActive()); + list.addAll(pipelineList); + list.add(getSnapshot()); + return list; + } + + public void setInMemoryFlushInProgress(boolean inMemoryFlushInProgress) { + this.inMemoryFlushInProgress.set(inMemoryFlushInProgress); + } + + public void swapCompactedSegments(VersionedSegmentsList versionedList, ImmutableSegment result) { + pipeline.swap(versionedList, result); + } + + public boolean hasCompactibleSegments() { + return !pipeline.isEmpty(); + } + + public VersionedSegmentsList getCompactibleSegments() { + return pipeline.getVersionedList(); + } + + public long getSmallestReadPoint() { + return store.getSmallestReadPoint(); + } + + public Store getStore() { + return store; + } + + public String getFamilyName() { + return Bytes.toString(getFamilyNameInByte()); + } + + @Override + /* + * Scanners are ordered from 0 (oldest) to newest in increasing order. + */ + protected List getListOfScanners(long readPt) throws IOException { + List pipelineList = pipeline.getSegments(); + long order = pipelineList.size(); + LinkedList list = new LinkedList(); + list.add(getActive().getSegmentScanner(readPt, order+1)); + for (Segment item : pipelineList) { + list.add(item.getSegmentScanner(readPt, order)); + order--; + } + list.add(getSnapshot().getSegmentScanner(readPt, order)); + return list; + } + + /** + * Check whether anything need to be done based on the current active set size. + * The method is invoked upon every addition to the active set. + * For CompactingMemStore, flush the active set to the read-only memory if it's + * size is above threshold + */ + @Override + protected void checkActiveSize() { + if (shouldFlushInMemory()) { + /* The thread is dispatched to flush-in-memory. This cannot be done + * on the same thread, because for flush-in-memory we require updatesLock + * in exclusive mode while this method (checkActiveSize) is invoked holding updatesLock + * in the shared mode. 
*/ + InMemoryFlushRunnable runnable = new InMemoryFlushRunnable(); + LOG.info("Dispatching the MemStore in-memory flush for store " + store.getColumnFamilyName()); + getPool().execute(runnable); + // guard against queuing same old compactions over and over again + inMemoryFlushInProgress.set(true); + } + } + + // internally used method, externally visible only for tests + // when invoked directly from tests it must be verified that the caller doesn't hold updatesLock, + // otherwise there is a deadlock + @VisibleForTesting + void flushInMemory() throws IOException { + // Phase I: Update the pipeline + getRegionServices().blockUpdates(); + try { + MutableSegment active = getActive(); + LOG.info("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline, " + + "and initiating compaction."); + pushActiveToPipeline(active); + } finally { + getRegionServices().unblockUpdates(); + } + // Phase II: Compact the pipeline + try { + if (allowCompaction.get()) { + // setting the inMemoryFlushInProgress flag again for the case this method is invoked + // directly (only in tests) in the common path setting from true to true is idempotent + inMemoryFlushInProgress.set(true); + // Speculative compaction execution, may be interrupted if flush is forced while + // compaction is in progress + compactor.startCompaction(); + } + } catch (IOException e) { + LOG.warn("Unable to run memstore compaction. region " + + getRegionServices().getRegionInfo().getRegionNameAsString() + + "store: "+ getFamilyName(), e); + } + } + + private byte[] getFamilyNameInByte() { + return store.getFamily().getName(); + } + + private ThreadPoolExecutor getPool() { + return getRegionServices().getInMemoryCompactionPool(); + } + + private boolean shouldFlushInMemory() { + if(getActive().getSize() > inmemoryFlushSize) { + // size above flush threshold + return (allowCompaction.get() && !inMemoryFlushInProgress.get()); + } + return false; + } + + /** + * The request to cancel the compaction asynchronous task (caused by in-memory flush) + * The compaction may still happen if the request was sent too late + * Non-blocking request + */ + private void stopCompaction() { + if (inMemoryFlushInProgress.get()) { + compactor.stopCompact(); + inMemoryFlushInProgress.set(false); + } + } + + private void pushActiveToPipeline(MutableSegment active) { + if (!active.isEmpty()) { + long delta = DEEP_OVERHEAD_PER_PIPELINE_ITEM - DEEP_OVERHEAD; + active.setSize(active.getSize() + delta); + pipeline.pushHead(active); + resetCellSet(); + } + } + + private void pushTailToSnapshot() { + ImmutableSegment tail = pipeline.pullTail(); + if (!tail.isEmpty()) { + setSnapshot(tail); + long size = getSegmentSize(tail); + setSnapshotSize(size); + } + } + + private RegionServicesForStores getRegionServices() { + return regionServices; + } + + /** + * The in-memory-flusher thread performs the flush asynchronously. + * There is at most one thread per memstore instance. + * It takes the updatesLock exclusively, pushes active into the pipeline, releases updatesLock + * and compacts the pipeline. + */ + private class InMemoryFlushRunnable implements Runnable { + + @Override public void run() { + try { + flushInMemory(); + } catch (IOException e) { + LOG.warn("Unable to run memstore compaction. 
region " + + getRegionServices().getRegionInfo().getRegionNameAsString() + + "store: "+ getFamilyName(), e); + } + } + } + + //---------------------------------------------------------------------- + //methods for tests + //---------------------------------------------------------------------- + boolean isMemStoreFlushingInMemory() { + return inMemoryFlushInProgress.get(); + } + + void disableCompaction() { + allowCompaction.set(false); + } + + void enableCompaction() { + allowCompaction.set(true); + } + + /** + * @param cell Find the row that comes after this one. If null, we return the + * first. + * @return Next row or null if none found. + */ + Cell getNextRow(final Cell cell) { + Cell lowest = null; + List segments = getSegments(); + for (Segment segment : segments) { + if (lowest == null) { + lowest = getNextRow(cell, segment.getCellSet()); + } else { + lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet())); + } + } + return lowest; + } + + // debug method + private void debug() { + String msg = "active size="+getActive().getSize(); + msg += " threshold="+IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT* inmemoryFlushSize; + msg += " allow compaction is "+ (allowCompaction.get() ? "true" : "false"); + msg += " inMemoryFlushInProgress is "+ (inMemoryFlushInProgress.get() ? "true" : "false"); + LOG.debug(msg); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java new file mode 100644 index 0000000..e33ceae --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java @@ -0,0 +1,190 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * The compaction pipeline of a {@link CompactingMemStore}, is a FIFO queue of segments. + * It supports pushing a segment at the head of the pipeline and pulling a segment from the + * tail to flush to disk. + * It also supports swap operation to allow the compactor swap a subset of the segments with a new + * (compacted) one. This swap succeeds only if the version number passed with the list of segments + * to swap is the same as the current version of the pipeline. + * The pipeline version is updated whenever swapping segments or pulling the segment at the tail. 
+ */ +@InterfaceAudience.Private +public class CompactionPipeline { + private static final Log LOG = LogFactory.getLog(CompactionPipeline.class); + + private final RegionServicesForStores region; + private LinkedList pipeline; + private long version; + + private static final ImmutableSegment EMPTY_MEM_STORE_SEGMENT = SegmentFactory.instance() + .createImmutableSegment(null, + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM); + + public CompactionPipeline(RegionServicesForStores region) { + this.region = region; + this.pipeline = new LinkedList(); + this.version = 0; + } + + public boolean pushHead(MutableSegment segment) { + ImmutableSegment immutableSegment = SegmentFactory.instance(). + createImmutableSegment(segment); + synchronized (pipeline){ + return addFirst(immutableSegment); + } + } + + public ImmutableSegment pullTail() { + synchronized (pipeline){ + if(pipeline.isEmpty()) { + return EMPTY_MEM_STORE_SEGMENT; + } + return removeLast(); + } + } + + public VersionedSegmentsList getVersionedList() { + synchronized (pipeline){ + LinkedList segmentList = new LinkedList(pipeline); + VersionedSegmentsList res = new VersionedSegmentsList(segmentList, version); + return res; + } + } + + /** + * Swaps the versioned list at the tail of the pipeline with the new compacted segment. + * Swapping only if there were no changes to the suffix of the list while it was compacted. + * @param versionedList tail of the pipeline that was compacted + * @param segment new compacted segment + * @return true iff swapped tail with new compacted segment + */ + public boolean swap(VersionedSegmentsList versionedList, ImmutableSegment segment) { + if(versionedList.getVersion() != version) { + return false; + } + LinkedList suffix; + synchronized (pipeline){ + if(versionedList.getVersion() != version) { + return false; + } + suffix = versionedList.getStoreSegments(); + LOG.info("Swapping pipeline suffix with compacted item. 
" + +"Just before the swap the number of segments in pipeline is:" + +versionedList.getStoreSegments().size() + +", and the number of cells in new segment is:"+segment.getCellsCount()); + swapSuffix(suffix,segment); + } + if(region != null) { + // update the global memstore size counter + long suffixSize = CompactingMemStore.getSegmentsSize(suffix); + long newSize = CompactingMemStore.getSegmentSize(segment); + long delta = suffixSize - newSize; + long globalMemstoreSize = region.addAndGetGlobalMemstoreSize(-delta); + LOG.info("Suffix size: "+ suffixSize+" compacted item size: "+newSize+ + " globalMemstoreSize: "+globalMemstoreSize); + } + return true; + } + + public boolean isEmpty() { + return pipeline.isEmpty(); + } + + public List getSegments() { + synchronized (pipeline){ + List res = new LinkedList(pipeline); + return res; + } + } + + public long size() { + return pipeline.size(); + } + + public long getMinSequenceId() { + long minSequenceId = Long.MAX_VALUE; + if(!isEmpty()) { + minSequenceId = pipeline.getLast().getMinSequenceId(); + } + return minSequenceId; + } + + public long getTailSize() { + if(isEmpty()) return 0; + return CompactingMemStore.getSegmentSize(pipeline.peekLast()); + } + + private void swapSuffix(LinkedList suffix, ImmutableSegment segment) { + version++; + for(Segment itemInSuffix : suffix) { + itemInSuffix.close(); + } + pipeline.removeAll(suffix); + pipeline.addLast(segment); + } + + private ImmutableSegment removeLast() { + version++; + return pipeline.removeLast(); + } + + private boolean addFirst(ImmutableSegment segment) { + pipeline.add(0,segment); + return true; + } + + // debug method + private boolean validateSuffixList(LinkedList suffix) { + if(suffix.isEmpty()) { + // empty suffix is always valid + return true; + } + + Iterator pipelineBackwardIterator = pipeline.descendingIterator(); + Iterator suffixBackwardIterator = suffix.descendingIterator(); + ImmutableSegment suffixCurrent; + ImmutableSegment pipelineCurrent; + for( ; suffixBackwardIterator.hasNext(); ) { + if(!pipelineBackwardIterator.hasNext()) { + // a suffix longer than pipeline is invalid + return false; + } + suffixCurrent = suffixBackwardIterator.next(); + pipelineCurrent = pipelineBackwardIterator.next(); + if(suffixCurrent != pipelineCurrent) { + // non-matching suffix + return false; + } + } + // suffix matches pipeline suffix + return true; + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 3d65bca..cdc910e 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -77,10 +77,9 @@ public class DefaultMemStore extends AbstractMemStore { /** * Creates a snapshot of the current memstore. * Snapshot must be cleared by call to {@link #clearSnapshot(long)} - * @param flushOpSeqId the sequence id that is attached to the flush operation in the wal */ @Override - public MemStoreSnapshot snapshot(long flushOpSeqId) { + public MemStoreSnapshot snapshot() { // If snapshot currently has entries, then flusher failed or didn't call // cleanup. Log a warning. if (!getSnapshot().isEmpty()) { @@ -90,7 +89,7 @@ public class DefaultMemStore extends AbstractMemStore { this.snapshotId = EnvironmentEdgeManager.currentTime(); if (!getActive().isEmpty()) { ImmutableSegment immutableSegment = SegmentFactory.instance(). 
- createImmutableSegment(getConfiguration(), getActive()); + createImmutableSegment(getActive()); setSnapshot(immutableSegment); setSnapshotSize(keySize()); resetCellSet(); @@ -99,16 +98,30 @@ public class DefaultMemStore extends AbstractMemStore { return new MemStoreSnapshot(this.snapshotId, getSnapshot()); } + /** + * On flush, how much memory we will clear from the active cell set. + * + * @return size of data that is going to be flushed from active set + */ + @Override + public long getFlushableSize() { + long snapshotSize = getSnapshot().getSize(); + return snapshotSize > 0 ? snapshotSize : keySize(); + } + @Override + /* + * Scanners are ordered from 0 (oldest) to newest in increasing order. + */ protected List getListOfScanners(long readPt) throws IOException { List list = new ArrayList(2); - list.add(0, getActive().getSegmentScanner(readPt)); - list.add(1, getSnapshot().getSegmentScanner(readPt)); + list.add(0, getActive().getSegmentScanner(readPt, 1)); + list.add(1, getSnapshot().getSegmentScanner(readPt, 0)); return list; } @Override - protected List getListOfSegments() throws IOException { + protected List getSegments() throws IOException { List list = new ArrayList(2); list.add(0, getActive()); list.add(1, getSnapshot()); @@ -126,7 +139,7 @@ public class DefaultMemStore extends AbstractMemStore { getNextRow(cell, getSnapshot().getCellSet())); } - @Override public void updateLowestUnflushedSequenceIdInWal(boolean onlyIfMoreRecent) { + @Override public void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent) { } /** @@ -150,6 +163,10 @@ public class DefaultMemStore extends AbstractMemStore { public void finalizeFlush() { } + @Override public boolean isSloppy() { + return false; + } + /** * Code to help figure if our approximation of object heap sizes is close * enough. See hbase-900. Fills memstores then waits so user can heap diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java new file mode 100644 index 0000000..f4b51ba --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.ipc.FifoRpcScheduler; +import org.apache.hadoop.hbase.ipc.PriorityFunction; +import org.apache.hadoop.hbase.ipc.RpcScheduler; + +/** + * Factory to use when you want to use the {@link FifoRpcScheduler} + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class FifoRpcSchedulerFactory implements RpcSchedulerFactory { + @Override + public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) { + int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, + HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); + return new FifoRpcScheduler(conf, handlerCount); + } + + @Deprecated + @Override + public RpcScheduler create(Configuration conf, PriorityFunction priority) { + return create(conf, priority, null); + } +} \ No newline at end of file diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java new file mode 100644 index 0000000..362d0f9 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +/** + * A {@link FlushPolicy} that only flushes store larger a given threshold. If no store is large + * enough, then all stores will be flushed. 
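As a usage note for the factory above: a region server picks up its RpcSchedulerFactory from configuration. A minimal sketch of enabling the FIFO scheduler; the exact config key is not shown in this patch and should be treated as an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FifoSchedulerConfigExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key for plugging in an RpcSchedulerFactory; verify against your version.
        conf.set("hbase.region.server.rpc.scheduler.factory.class",
            "org.apache.hadoop.hbase.regionserver.FifoRpcSchedulerFactory");
      }
    }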
+ */ +public class FlushAllLargeStoresPolicy extends FlushLargeStoresPolicy{ + + private static final Log LOG = LogFactory.getLog(FlushAllLargeStoresPolicy.class); + + @Override + protected void configureForRegion(HRegion region) { + super.configureForRegion(region); + int familyNumber = region.getTableDesc().getFamilies().size(); + if (familyNumber <= 1) { + // No need to parse and set flush size lower bound if only one family + // Family number might also be zero in some of our unit test case + return; + } + this.flushSizeLowerBound = getFlushSizeLowerBound(region); + } + + @Override + public Collection selectStoresToFlush() { + // no need to select stores if only one family + if (region.getTableDesc().getFamilies().size() == 1) { + return region.stores.values(); + } + // start selection + Collection stores = region.stores.values(); + Set specificStoresToFlush = new HashSet(); + for (Store store : stores) { + if (shouldFlush(store)) { + specificStoresToFlush.add(store); + } + } + if (!specificStoresToFlush.isEmpty()) return specificStoresToFlush; + + // Didn't find any CFs which were above the threshold for selection. + if (LOG.isDebugEnabled()) { + LOG.debug("Since none of the CFs were above the size, flushing all."); + } + return stores; + } + + @Override + protected boolean shouldFlush(Store store) { + return (super.shouldFlush(store) || region.shouldFlushStore(store)); + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java index b4d47c7..49cb747 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java @@ -17,10 +17,6 @@ */ package org.apache.hadoop.hbase.regionserver; -import java.util.Collection; -import java.util.HashSet; -import java.util.Set; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -31,7 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; * enough, then all stores will be flushed. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class FlushLargeStoresPolicy extends FlushPolicy { +public abstract class FlushLargeStoresPolicy extends FlushPolicy { private static final Log LOG = LogFactory.getLog(FlushLargeStoresPolicy.class); @@ -41,20 +37,13 @@ public class FlushLargeStoresPolicy extends FlushPolicy { public static final String HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN = "hbase.hregion.percolumnfamilyflush.size.lower.bound.min"; - private static final long DEFAULT_HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN = + public static final long DEFAULT_HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN = 1024 * 1024 * 16L; - private long flushSizeLowerBound = -1; + protected long flushSizeLowerBound = -1; - @Override - protected void configureForRegion(HRegion region) { - super.configureForRegion(region); + protected long getFlushSizeLowerBound(HRegion region) { int familyNumber = region.getTableDesc().getFamilies().size(); - if (familyNumber <= 1) { - // No need to parse and set flush size lower bound if only one family - // Family number might also be zero in some of our unit test case - return; - } // For multiple families, lower bound is the "average flush size" by default // unless setting in configuration is larger. 
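In concrete numbers, the lower-bound rule above works out as follows; the 128 MB region flush size and 16 MB minimum used here are the usual defaults, quoted only for illustration:

    // Illustrative arithmetic mirroring getFlushSizeLowerBound() above.
    public class FlushLowerBoundExample {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // region flush size, e.g. 128 MB
        int familyCount = 4;
        long configuredMin = 16L * 1024 * 1024;      // ...size.lower.bound.min default

        // Default per-family lower bound is the "average flush size" ...
        long lowerBound = memstoreFlushSize / familyCount;   // 32 MB here
        // ... unless the configured minimum is larger.
        lowerBound = Math.max(lowerBound, configuredMin);

        System.out.println("per-family flush lower bound = " + lowerBound + " bytes");
        // A column family is selected for flushing once its memstore exceeds this
        // bound; if no family qualifies, all stores are flushed.
      }
    }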
long flushSizeLowerBound = region.getMemstoreFlushSize() / familyNumber; @@ -85,44 +74,19 @@ public class FlushLargeStoresPolicy extends FlushPolicy { } } - this.flushSizeLowerBound = flushSizeLowerBound; + return flushSizeLowerBound; } - private boolean shouldFlush(Store store) { + protected boolean shouldFlush(Store store) { if (store.getMemStoreSize() > this.flushSizeLowerBound) { if (LOG.isDebugEnabled()) { LOG.debug("Flush Column Family " + store.getColumnFamilyName() + " of " + - region.getRegionInfo().getEncodedName() + " because memstoreSize=" + - store.getMemStoreSize() + " > lower bound=" + this.flushSizeLowerBound); + region.getRegionInfo().getEncodedName() + " because memstoreSize=" + + store.getMemStoreSize() + " > lower bound=" + this.flushSizeLowerBound); } return true; } - return region.shouldFlushStore(store); - } - - @Override - public Collection selectStoresToFlush() { - // no need to select stores if only one family - if (region.getTableDesc().getFamilies().size() == 1) { - return region.stores.values(); - } - // start selection - Collection stores = region.stores.values(); - Set specificStoresToFlush = new HashSet(); - for (Store store : stores) { - if (shouldFlush(store)) { - specificStoresToFlush.add(store); - } - } - // Didn't find any CFs which were above the threshold for selection. - if (specificStoresToFlush.isEmpty()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Since none of the CFs were above the size, flushing all."); - } - return stores; - } else { - return specificStoresToFlush; - } + return false; } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java new file mode 100644 index 0000000..2921f23 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.util.Collection; +import java.util.HashSet; + +/** + * A {@link FlushPolicy} that only flushes store larger than a given threshold. If no store is large + * enough, then all stores will be flushed. + * Gives priority to selecting regular stores first, and only if no other + * option, selects sloppy stores which normaly require more memory. + */ +public class FlushNonSloppyStoresFirstPolicy extends FlushLargeStoresPolicy { + + private Collection regularStores = new HashSet<>(); + private Collection sloppyStores = new HashSet<>(); + + /** + * @return the stores need to be flushed. 
+ */ + @Override public Collection selectStoresToFlush() { + Collection specificStoresToFlush = new HashSet(); + for(Store store : regularStores) { + if(shouldFlush(store) || region.shouldFlushStore(store)) { + specificStoresToFlush.add(store); + } + } + if(!specificStoresToFlush.isEmpty()) return specificStoresToFlush; + for(Store store : sloppyStores) { + if(shouldFlush(store)) { + specificStoresToFlush.add(store); + } + } + if(!specificStoresToFlush.isEmpty()) return specificStoresToFlush; + return region.stores.values(); + } + + @Override + protected void configureForRegion(HRegion region) { + super.configureForRegion(region); + this.flushSizeLowerBound = getFlushSizeLowerBound(region); + for(Store store : region.stores.values()) { + if(store.getMemStore().isSloppy()) { + sloppyStores.add(store); + } else { + regularStores.add(store); + } + } + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java index e80b696..b93594e 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java @@ -41,7 +41,7 @@ public class FlushPolicyFactory { public static final String HBASE_FLUSH_POLICY_KEY = "hbase.regionserver.flush.policy"; private static final Class DEFAULT_FLUSH_POLICY_CLASS = - FlushLargeStoresPolicy.class; + FlushAllLargeStoresPolicy.class; /** * Create the FlushPolicy configured for the given table. diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index bfa1f80..8634e37 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -514,6 +514,10 @@ public class HMobStore extends HStore { @Override public void finalizeFlush() { } + @Override public MemStore getMemStore() { + return null; + } + public void updateCellsCountCompactedToMob(long count) { cellsCountCompactedToMob += count; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index e7a99a9..9c966cd 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -19,6 +19,19 @@ package org.apache.hadoop.hbase.regionserver; import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Optional; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.io.Closeables; +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Message; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; +import com.google.protobuf.TextFormat; import java.io.EOFException; import java.io.FileNotFoundException; import java.io.IOException; @@ -182,19 +195,6 @@ import org.apache.hadoop.util.StringUtils; import org.apache.htrace.Trace; import org.apache.htrace.TraceScope; -import com.google.common.annotations.VisibleForTesting; -import 
com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.io.Closeables; -import com.google.protobuf.ByteString; -import com.google.protobuf.Descriptors; -import com.google.protobuf.Message; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; -import com.google.protobuf.TextFormat; @SuppressWarnings("deprecation") @InterfaceAudience.Private @@ -923,11 +923,16 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi }); } boolean allStoresOpened = false; + boolean hasSloppyStores = false; try { for (int i = 0; i < htableDescriptor.getFamilies().size(); i++) { Future future = completionService.take(); HStore store = future.get(); this.stores.put(store.getFamily().getName(), store); + MemStore memStore = store.getMemStore(); + if(memStore != null && memStore.isSloppy()) { + hasSloppyStores = true; + } long storeMaxSequenceId = store.getMaxSequenceId(); maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), @@ -941,6 +946,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } } allStoresOpened = true; + if(hasSloppyStores) { + htableDescriptor.setFlushPolicyClassName(FlushNonSloppyStoresFirstPolicy.class + .getName()); + LOG.info("Setting FlushNonSloppyStoresFirstPolicy for the region=" + this); + } } catch (InterruptedException e) { throw (InterruptedIOException)new InterruptedIOException().initCause(e); } catch (ExecutionException e) { @@ -1107,7 +1117,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if (this.rsAccounting != null) { rsAccounting.addAndGetGlobalMemstoreSize(memStoreSize); } - return this.memstoreSize.addAndGet(memStoreSize); + long size = this.memstoreSize.addAndGet(memStoreSize); + // This is extremely bad if we make memstoreSize negative. Log as much info on the offending + // caller as possible. (memStoreSize might be a negative value already -- freeing memory) + if (size < 0) { + LOG.error("Asked to modify this region's (" + this.toString() + + ") memstoreSize to a negative value which is incorrect. Current memstoreSize=" + + (size-memStoreSize) + ", delta=" + memStoreSize, new Exception()); + } + return size; } @Override @@ -1457,22 +1475,30 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi LOG.debug("Updates disabled for region " + this); // Don't flush the cache if we are aborting if (!abort && canFlush) { + int failedfFlushCount = 0; int flushCount = 0; - while (this.memstoreSize.get() > 0) { + long tmp = 0; + long remainingSize = this.memstoreSize.get(); + while (remainingSize > 0) { try { - if (flushCount++ > 0) { - int actualFlushes = flushCount - 1; - if (actualFlushes > 5) { - // If we tried 5 times and are unable to clear memory, abort - // so we do not lose data - throw new DroppedSnapshotException("Failed clearing memory after " + - actualFlushes + " attempts on region: " + - Bytes.toStringBinary(getRegionInfo().getRegionName())); - } - LOG.info("Running extra flush, " + actualFlushes + - " (carrying snapshot?) " + this); - } internalFlushcache(status); + if(flushCount >0) { + LOG.info("Running extra flush, " + flushCount + + " (carrying snapshot?) 
" + this); + } + flushCount++; + tmp = this.memstoreSize.get(); + if (tmp >= remainingSize) { + failedfFlushCount++; + } + remainingSize = tmp; + if (failedfFlushCount > 5) { + // If we failed 5 times and are unable to clear memory, abort + // so we do not lose data + throw new DroppedSnapshotException("Failed clearing memory after " + + flushCount + " attempts on region: " + + Bytes.toStringBinary(getRegionInfo().getRegionName())); + } } catch (IOException ioe) { status.setStatus("Failed flush " + this + ", putting online again"); synchronized (writestate) { @@ -2349,7 +2375,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi perCfExtras = new StringBuilder(); for (Store store: storesToFlush) { perCfExtras.append("; ").append(store.getColumnFamilyName()); - perCfExtras.append("=").append(StringUtils.byteDesc(store.getMemStoreSize())); + perCfExtras.append("=").append(StringUtils.byteDesc(store.getFlushableSize())); } } LOG.info("Flushing " + + storesToFlush.size() + "/" + stores.size() + @@ -2925,8 +2951,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } initialized = true; } - long addedSize = doMiniBatchMutate(batchOp); - long newSize = this.addAndGetGlobalMemstoreSize(addedSize); + doMiniBatchMutate(batchOp); + long newSize = this.getMemstoreSize(); requestFlushIfNeeded(newSize); } } finally { @@ -3007,6 +3033,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi int cellCount = 0; /** Keep track of the locks we hold so we can release them in finally clause */ List acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.operations.length); + long addedSize = 0; try { // STEP 1. Try to acquire as many locks as we can, and ensure we acquire at least one. int numReadyToWrite = 0; @@ -3107,6 +3134,35 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive); if (coprocessorHost.preBatchMutate(miniBatchOp)) { return 0L; + } else { + for (int i = firstIndex; i < lastIndexExclusive; i++) { + if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) { + // lastIndexExclusive was incremented above. + continue; + } + // we pass (i - firstIndex) below since the call expects a relative index + Mutation[] cpMutations = miniBatchOp.getOperationsFromCoprocessors(i - firstIndex); + if (cpMutations == null) { + continue; + } + // Else Coprocessor added more Mutations corresponding to the Mutation at this index. + for (int j = 0; j < cpMutations.length; j++) { + Mutation cpMutation = cpMutations[j]; + Map> cpFamilyMap = cpMutation.getFamilyCellMap(); + checkAndPrepareMutation(cpMutation, replay, cpFamilyMap, now); + + // Acquire row locks. If not, the whole batch will fail. + acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), true)); + + if (cpMutation.getDurability() == Durability.SKIP_WAL) { + recordMutationWithoutWal(cpFamilyMap); + } + + // Returned mutations from coprocessor correspond to the Mutation at index i. We can + // directly add the cells from those mutations to the familyMaps of this mutation. + mergeFamilyMaps(familyMaps[i], cpFamilyMap); // will get added to the memstore later + } + } } } @@ -3188,7 +3244,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } // STEP 5. 
Write back to memstore - long addedSize = 0; for (int i = firstIndex; i < lastIndexExclusive; i++) { if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) { continue; @@ -3249,6 +3304,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } finally { // Call complete rather than completeAndWait because we probably had error if walKey != null if (writeEntry != null) mvcc.complete(writeEntry); + this.addAndGetGlobalMemstoreSize(addedSize); if (locked) { this.updatesLock.readLock().unlock(); } @@ -3283,9 +3339,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // call the coprocessor hook to do any finalization steps // after the put is done MiniBatchOperationInProgress miniBatchOp = - new MiniBatchOperationInProgress(batchOp.getMutationsForCoprocs(), - batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, - lastIndexExclusive); + new MiniBatchOperationInProgress(batchOp.getMutationsForCoprocs(), + batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive); coprocessorHost.postBatchMutateIndispensably(miniBatchOp, success); } @@ -3293,6 +3348,18 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } } + private void mergeFamilyMaps(Map> familyMap, + Map> toBeMerged) { + for (Map.Entry> entry : toBeMerged.entrySet()) { + List cells = familyMap.get(entry.getKey()); + if (cells == null) { + familyMap.put(entry.getKey(), entry.getValue()); + } else { + cells.addAll(entry.getValue()); + } + } + } + private void appendCurrentNonces(final Mutation mutation, final boolean replay, final WALEdit walEdit, final long now, final long currentNonceGroup, final long currentNonce) throws IOException { @@ -3321,18 +3388,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi familyMaps[lastIndexExclusive] = familyMap; try { - if (mutation instanceof Put) { - // Check the families in the put. If bad, skip this one. - if (batchOp.isInReplay()) { - removeNonExistentColumnFamilyForReplay(familyMap); - } else { - checkFamilies(familyMap.keySet()); - } - checkTimestamps(mutation.getFamilyCellMap(), now); - } else { - prepareDelete((Delete)mutation); - } - checkRow(mutation.getRow(), "doMiniBatchMutation"); + checkAndPrepareMutation(mutation, batchOp.isInReplay(), familyMap, now); } catch (NoSuchColumnFamilyException nscf) { LOG.warn("No such column family in batch mutation", nscf); batchOp.retCodeDetails[lastIndexExclusive] = new OperationStatus( @@ -3352,6 +3408,23 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return skip; } + private void checkAndPrepareMutation(Mutation mutation, boolean replay, + final Map> familyMap, final long now) + throws IOException { + if (mutation instanceof Put) { + // Check the families in the put. If bad, skip this one. 
+ if (replay) { + removeNonExistentColumnFamilyForReplay(familyMap); + } else { + checkFamilies(familyMap.keySet()); + } + checkTimestamps(mutation.getFamilyCellMap(), now); + } else { + prepareDelete((Delete)mutation); + } + checkRow(mutation.getRow(), "doMiniBatchMutation"); + } + /** * During replay, there could exist column families which are removed between region server * failure and replay @@ -4008,6 +4081,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi int period = this.conf.getInt("hbase.hstore.report.period", 300000); long lastReport = EnvironmentEdgeManager.currentTime(); + if (coprocessorHost != null) { + coprocessorHost.preReplayWALs(this.getRegionInfo(), edits); + } + while ((entry = reader.next()) != null) { WALKey key = entry.getKey(); WALEdit val = entry.getEdit(); @@ -4126,6 +4203,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi coprocessorHost.postWALRestore(this.getRegionInfo(), key, val); } } + + if (coprocessorHost != null) { + coprocessorHost.postReplayWALs(this.getRegionInfo(), edits); + } } catch (EOFException eof) { Path p = WALSplitter.moveAsideBadEditsFile(fs, edits); msg = "Encountered EOF. Most likely due to Master failure during " + diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 76eba61..279affa 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -712,7 +712,7 @@ public class HRegionServer extends HasThread implements } /** - * Create a 'smarter' HConnection, one that is capable of by-passing RPC if the request is to + * Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to * the local server. Safe to use going to local or remote server. * Create this instance in a method can be intercepted and mocked in tests. * @throws IOException @@ -1074,7 +1074,7 @@ public class HRegionServer extends HasThread implements } catch (IOException e) { // Although the {@link Closeable} interface throws an {@link // IOException}, in reality, the implementation would never do that. - LOG.warn("Attempt to close server's short circuit HConnection failed.", e); + LOG.warn("Attempt to close server's short circuit ClusterConnection failed.", e); } } @@ -1786,7 +1786,7 @@ public class HRegionServer extends HasThread implements // Create the log splitting worker and start it // set a smaller retries to fast fail otherwise splitlogworker could be blocked for - // quite a while inside HConnection layer. The worker won't be available for other + // quite a while inside Connection layer. The worker won't be available for other // tasks even after current task is preempted after a split task times out. Configuration sinkConf = HBaseConfiguration.create(conf); sinkConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 8dfa0e0..2d1b9a6 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -230,8 +230,15 @@ public class HStore implements Store { // to clone it? 
scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator); String className = conf.get(MEMSTORE_CLASS_NAME, DefaultMemStore.class.getName()); - this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] { - Configuration.class, CellComparator.class }, new Object[] { conf, this.comparator }); + if (family.isInMemoryCompaction()) { + className = CompactingMemStore.class.getName(); + this.memstore = new CompactingMemStore(conf, this.comparator, this, + this.getHRegion().getRegionServicesForStores()); + } else { + this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] { + Configuration.class, CellComparator.class }, new Object[] { conf, this.comparator }); + } + LOG.info("Memstore class name is " + className); this.offPeakHours = OffPeakHours.getInstance(conf); // Setting up cache configuration for this family @@ -2149,7 +2156,7 @@ public class HStore implements Store { @Override public void prepare() { // passing the current sequence number of the wal - to allow bookkeeping in the memstore - this.snapshot = memstore.snapshot(cacheFlushSeqNum); + this.snapshot = memstore.snapshot(); this.cacheFlushCount = snapshot.getCellsCount(); this.cacheFlushSize = snapshot.getSize(); committedFiles = new ArrayList(1); @@ -2476,6 +2483,10 @@ public class HStore implements Store { memstore.finalizeFlush(); } + @Override public MemStore getMemStore() { + return memstore; + } + private void clearCompactedfiles(final List filesToRemove) throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("Clearing the compacted file " + filesToRemove + " from this store"); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java index 70a608d..13d9fbf 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java @@ -64,8 +64,4 @@ public class ImmutableSegment extends Segment { return this.timeRange.getMin(); } - @Override - protected void updateMetaInfo(Cell toAdd, long s) { - throw new IllegalAccessError("This is an immutable segment"); - } } \ No newline at end of file diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index ea72b7f..00d49d1 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -42,19 +42,10 @@ public interface MemStore extends HeapSize { MemStoreSnapshot snapshot(); /** - * Creates a snapshot of the current memstore. Snapshot must be cleared by call to - * {@link #clearSnapshot(long)}. - * @param flushOpSeqId the current sequence number of the wal; to be attached to the flushed - * segment - * @return {@link MemStoreSnapshot} - */ - MemStoreSnapshot snapshot(long flushOpSeqId); - - /** * Clears the current snapshot of the Memstore. * @param id * @throws UnexpectedStateException - * @see #snapshot(long) + * @see #snapshot() */ void clearSnapshot(long id) throws UnexpectedStateException; @@ -144,4 +135,7 @@ public interface MemStore extends HeapSize { * One example is to update the wal with sequence number that is known only at the store level. 
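With the HStore change above, the memstore implementation is chosen per column family: families flagged for in-memory compaction get a CompactingMemStore, everything else keeps the class named by MEMSTORE_CLASS_NAME. A hedged sketch of flagging a family at table-creation time; the setter name setInMemoryCompaction is an assumption inferred from the isInMemoryCompaction() getter used above and is not part of this patch:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionFamilyExample {
      public static void main(String[] args) {
        HColumnDescriptor cf = new HColumnDescriptor(Bytes.toBytes("d"));
        cf.setInMemoryCompaction(true); // assumed setter; marks this family for CompactingMemStore
        HTableDescriptor table = new HTableDescriptor(TableName.valueOf("demo"));
        table.addFamily(cf);
        // pass 'table' to Admin.createTable(...) as usual
      }
    }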
*/ void finalizeFlush(); + + /* Return true if the memstore may need some extra memory space*/ + boolean isSloppy(); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java new file mode 100644 index 0000000..88e067e --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java @@ -0,0 +1,197 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.Scan; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * The ongoing MemStore Compaction manager, dispatches a solo running compaction + * and interrupts the compaction if requested. + * The MemStoreScanner is used to traverse the compaction pipeline. The MemStoreScanner + * is included in internal store scanner, where all compaction logic is implemented. + * Threads safety: It is assumed that the compaction pipeline is immutable, + * therefore no special synchronization is required. + */ +class MemStoreCompactor { + + private static final Log LOG = LogFactory.getLog(MemStoreCompactor.class); + private CompactingMemStore compactingMemStore; + private MemStoreScanner scanner; // scanner for pipeline only + // scanner on top of MemStoreScanner that uses ScanQueryMatcher + private StoreScanner compactingScanner; + + // smallest read point for any ongoing MemStore scan + private long smallestReadPoint; + + // a static version of the segment list from the pipeline + private VersionedSegmentsList versionedList; + private final AtomicBoolean isInterrupted = new AtomicBoolean(false); + + public MemStoreCompactor(CompactingMemStore compactingMemStore) { + this.compactingMemStore = compactingMemStore; + } + + /** + * The request to dispatch the compaction asynchronous task. + * The method returns true if compaction was successfully dispatched, or false if there + * is already an ongoing compaction or nothing to compact. 
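The compactor is cancelled cooperatively: stopCompact() only flips the AtomicBoolean above, and the compaction loop checks it between batches of cells (see compactSegments further down). A small standalone sketch of that cancellation pattern, using only JDK types:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Cooperative cancellation via an AtomicBoolean, the same mechanism
    // stopCompact()/isInterrupted use above.
    public class CooperativeCancelExample {
      private final AtomicBoolean interrupted = new AtomicBoolean(false);

      public void runBatches(int batches) {
        for (int i = 0; i < batches && !interrupted.get(); i++) {
          // ... process one batch of cells ...
        }
        // on exit (finished or cancelled) release resources and reset the flag
        interrupted.set(false);
      }

      public void cancel() {
        interrupted.compareAndSet(false, true); // non-blocking; work may still finish
      }

      public static void main(String[] args) throws InterruptedException {
        CooperativeCancelExample worker = new CooperativeCancelExample();
        ExecutorService pool = Executors.newSingleThreadExecutor();
        pool.submit(() -> worker.runBatches(1_000_000));
        worker.cancel();               // request stop; the worker notices between batches
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
      }
    }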
+ */ + public boolean startCompaction() throws IOException { + if (!compactingMemStore.hasCompactibleSegments()) return false; // no compaction on empty + + List scanners = new ArrayList(); + // get the list of segments from the pipeline + versionedList = compactingMemStore.getCompactibleSegments(); + // the list is marked with specific version + + // create the list of scanners with maximally possible read point, meaning that + // all KVs are going to be returned by the pipeline traversing + for (Segment segment : versionedList.getStoreSegments()) { + scanners.add(segment.getSegmentScanner(Long.MAX_VALUE)); + } + scanner = + new MemStoreScanner(compactingMemStore, scanners, Long.MAX_VALUE, + MemStoreScanner.Type.COMPACT_FORWARD); + + smallestReadPoint = compactingMemStore.getSmallestReadPoint(); + compactingScanner = createScanner(compactingMemStore.getStore()); + + LOG.info("Starting the MemStore in-memory compaction for store " + + compactingMemStore.getStore().getColumnFamilyName()); + + doCompaction(); + return true; + } + + /** + * The request to cancel the compaction asynchronous task + * The compaction may still happen if the request was sent too late + * Non-blocking request + */ + public void stopCompact() { + isInterrupted.compareAndSet(false, true); + } + + + /** + * Close the scanners and clear the pointers in order to allow good + * garbage collection + */ + private void releaseResources() { + isInterrupted.set(false); + scanner.close(); + scanner = null; + compactingScanner.close(); + compactingScanner = null; + versionedList = null; + } + + /** + * The worker thread performs the compaction asynchronously. + * The solo (per compactor) thread only reads the compaction pipeline. + * There is at most one thread per memstore instance. + */ + private void doCompaction() { + + ImmutableSegment result = SegmentFactory.instance() // create the scanner + .createImmutableSegment( + compactingMemStore.getConfiguration(), compactingMemStore.getComparator(), + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM); + + // the compaction processing + try { + // Phase I: create the compacted MutableCellSetSegment + compactSegments(result); + + // Phase II: swap the old compaction pipeline + if (!isInterrupted.get()) { + compactingMemStore.swapCompactedSegments(versionedList, result); + // update the wal so it can be truncated and not get too long + compactingMemStore.updateLowestUnflushedSequenceIdInWAL(true); // only if greater + } + } catch (Exception e) { + LOG.debug("Interrupting the MemStore in-memory compaction for store " + compactingMemStore + .getFamilyName()); + Thread.currentThread().interrupt(); + return; + } finally { + releaseResources(); + compactingMemStore.setInMemoryFlushInProgress(false); + } + + } + + /** + * Creates the scanner for compacting the pipeline. 
+ * + * @return the scanner + */ + private StoreScanner createScanner(Store store) throws IOException { + + Scan scan = new Scan(); + scan.setMaxVersions(); //Get all available versions + + StoreScanner internalScanner = + new StoreScanner(store, store.getScanInfo(), scan, Collections.singletonList(scanner), + ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, HConstants.OLDEST_TIMESTAMP); + + return internalScanner; + } + + /** + * Updates the given single Segment using the internal store scanner, + * who in turn uses ScanQueryMatcher + */ + private void compactSegments(Segment result) throws IOException { + + List kvs = new ArrayList(); + // get the limit to the size of the groups to be returned by compactingScanner + int compactionKVMax = compactingMemStore.getConfiguration().getInt( + HConstants.COMPACTION_KV_MAX, + HConstants.COMPACTION_KV_MAX_DEFAULT); + + ScannerContext scannerContext = + ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + + boolean hasMore; + do { + hasMore = compactingScanner.next(kvs, scannerContext); + if (!kvs.isEmpty()) { + for (Cell c : kvs) { + // The scanner is doing all the elimination logic + // now we just copy it to the new segment + Cell newKV = result.maybeCloneWithAllocator(c); + result.internalAdd(newKV); + + } + kvs.clear(); + } + } while (hasMore && (!isInterrupted.get())); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java index 2b12dec..cdbecac 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java @@ -18,20 +18,22 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; /** * Wraps together the mutations which are applied as a batch to the region and their operation - * status and WALEdits. + * status and WALEdits. * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#preBatchMutate( * ObserverContext, MiniBatchOperationInProgress) * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#postBatchMutate( * ObserverContext, MiniBatchOperationInProgress) * @param T Pair<Mutation, Integer> pair of Mutations and associated rowlock ids . */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate("Coprocessors") public class MiniBatchOperationInProgress { private final T[] operations; + private Mutation[][] operationsFromCoprocessors; private final OperationStatus[] retCodeDetails; private final WALEdit[] walEditsFromCoprocessors; private final int firstIndex; @@ -63,7 +65,7 @@ public class MiniBatchOperationInProgress { /** * Sets the status code for the operation(Mutation) at the specified position. - * By setting this status, {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} + * By setting this status, {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} * can make HRegion to skip Mutations. * @param index * @param opStatus @@ -103,4 +105,25 @@ public class MiniBatchOperationInProgress { } return this.firstIndex + index; } + + /** + * Add more Mutations corresponding to the Mutation at the given index to be committed atomically + * in the same batch. These mutations are applied to the WAL and applied to the memstore as well. 
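As a usage illustration of addOperationsFromCP (documented above and continued below): a region observer can register extra mutations per original batch index from preBatchMutate, and they are committed atomically with the batch. The "idx" family and qualifier below are hypothetical; the key points are registering per index and reusing the original cell's timestamp:

    import java.io.IOException;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch of a coprocessor mirroring every Put into a secondary "idx" family.
    public class ShadowCellObserver extends BaseRegionObserver {

      private static final byte[] IDX_FAMILY = Bytes.toBytes("idx");   // hypothetical family
      private static final byte[] IDX_QUALIFIER = Bytes.toBytes("q");  // hypothetical qualifier

      @Override
      public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> ctx,
          MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
        for (int i = 0; i < miniBatchOp.size(); i++) {
          Mutation m = miniBatchOp.getOperation(i);
          if (!(m instanceof Put) || m.getFamilyCellMap().isEmpty()) {
            continue;
          }
          // Reuse the first original cell's timestamp and value, per the contract above.
          Cell first = m.getFamilyCellMap().firstEntry().getValue().get(0);
          Put shadow = new Put(m.getRow());
          shadow.addColumn(IDX_FAMILY, IDX_QUALIFIER, first.getTimestamp(),
              CellUtil.cloneValue(first));
          // Registered against index i: written to the WAL and memstore with the batch.
          miniBatchOp.addOperationsFromCP(i, new Mutation[] { shadow });
        }
      }
    }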
+ * The timestamp of the cells in the given Mutations MUST be obtained from the original mutation. + * + * @param index the index that corresponds to the original mutation index in the batch + * @param newOperations the Mutations to add + */ + public void addOperationsFromCP(int index, Mutation[] newOperations) { + if (this.operationsFromCoprocessors == null) { + // lazy allocation to save on object allocation in case this is not used + this.operationsFromCoprocessors = new Mutation[operations.length][]; + } + this.operationsFromCoprocessors[getAbsoluteIndex(index)] = newOperations; + } + + public Mutation[] getOperationsFromCoprocessors(int index) { + return operationsFromCoprocessors == null ? null : + operationsFromCoprocessors[getAbsoluteIndex(index)]; + } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java index 1947a1b..995ea93 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java @@ -60,7 +60,7 @@ MultiRowMutationProcessorResponse> { public boolean readOnly() { return false; } - + @Override public MultiRowMutationProcessorResponse getResult() { return MultiRowMutationProcessorResponse.getDefaultInstance(); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index ebd85bd..e09e0e3 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1033,7 +1033,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } priority = createPriority(); String name = rs.getProcessName() + "/" + initialIsa.toString(); - // Set how many times to retry talking to another server over HConnection. + // Set how many times to retry talking to another server over Connection. ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG); try { rpcServer = new RpcServer(rs, name, getServices(), diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 25e0200..f6ccaa1 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -1402,6 +1402,36 @@ public class RegionCoprocessorHost } /** + * @param info the RegionInfo for this region + * @param edits the file of recovered edits + * @throws IOException Exception + */ + public void preReplayWALs(final HRegionInfo info, final Path edits) throws IOException { + execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + @Override + public void call(RegionObserver oserver, ObserverContext ctx) + throws IOException { + oserver.preReplayWALs(ctx, info, edits); + } + }); + } + + /** + * @param info the RegionInfo for this region + * @param edits the file of recovered edits + * @throws IOException Exception + */ + public void postReplayWALs(final HRegionInfo info, final Path edits) throws IOException { + execOperation(coprocessors.isEmpty() ? 
null : new RegionOperation() { + @Override + public void call(RegionObserver oserver, ObserverContext ctx) + throws IOException { + oserver.postReplayWALs(ctx, info, edits); + } + }); + } + + /** * @param info * @param logKey * @param logEdit diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java index d3c35b3..72f7bf5 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java @@ -18,8 +18,15 @@ */ package org.apache.hadoop.hbase.regionserver; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.StealJobQueue; +import org.apache.hadoop.hbase.wal.WAL; /** * Services a Store needs from a Region. @@ -32,6 +39,20 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; @InterfaceStability.Evolving public class RegionServicesForStores { + private static final int POOL_SIZE = 10; + private static final ThreadPoolExecutor INMEMORY_COMPACTION_POOL = + new ThreadPoolExecutor(POOL_SIZE, POOL_SIZE, 60, TimeUnit.SECONDS, + new StealJobQueue().getStealFromQueue(), + new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(r); + t.setName(Thread.currentThread().getName() + + "-inmemoryCompactions-" + + System.currentTimeMillis()); + return t; + } + }); private final HRegion region; public RegionServicesForStores(HRegion region) { @@ -39,15 +60,37 @@ public class RegionServicesForStores { } public void blockUpdates() { - this.region.blockUpdates(); + region.blockUpdates(); } public void unblockUpdates() { - this.region.unblockUpdates(); + region.unblockUpdates(); } public long addAndGetGlobalMemstoreSize(long size) { - return this.region.addAndGetGlobalMemstoreSize(size); + return region.addAndGetGlobalMemstoreSize(size); + } + + public HRegionInfo getRegionInfo() { + return region.getRegionInfo(); } + public WAL getWAL() { + return region.getWAL(); + } + + public ThreadPoolExecutor getInMemoryCompactionPool() { return INMEMORY_COMPACTION_POOL; } + + public long getMemstoreFlushSize() { + return region.getMemstoreFlushSize(); + } + + public int getNumStores() { + return region.getStores().size(); + } + + // methods for tests + long getGlobalMemstoreTotalSize() { + return region.getMemstoreSize(); + } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java index f554781..7bc59da 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.ipc.RpcScheduler; @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) @InterfaceStability.Evolving public interface RpcSchedulerFactory { - /** * Constructs a {@link org.apache.hadoop.hbase.ipc.RpcScheduler}. 
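The new preReplayWALs/postReplayWALs host hooks above bracket the replay of a recovered-edits file, giving coprocessors one callback per file rather than one per WAL entry. A hedged sketch of an observer using them; the parameter types are inferred from the host-side calls in this patch and may not match the final RegionObserver signature exactly:

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    // Hedged sketch: signatures assumed from the RegionCoprocessorHost calls above.
    public class ReplayTimingObserver extends BaseRegionObserver {
      private static final Log LOG = LogFactory.getLog(ReplayTimingObserver.class);
      private volatile long replayStart;

      @Override
      public void preReplayWALs(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
          HRegionInfo info, Path edits) throws IOException {
        replayStart = System.currentTimeMillis();
        LOG.info("Replaying recovered edits " + edits + " for " + info.getEncodedName());
      }

      @Override
      public void postReplayWALs(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
          HRegionInfo info, Path edits) throws IOException {
        LOG.info("Replayed " + edits + " in "
            + (System.currentTimeMillis() - replayStart) + " ms");
      }
    }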
*/ @@ -39,5 +38,4 @@ public interface RpcSchedulerFactory { @Deprecated RpcScheduler create(Configuration conf, PriorityFunction priority); - -} +} \ No newline at end of file diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java index dcad5a0..6435232 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java @@ -23,6 +23,7 @@ import java.util.SortedSet; import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; @@ -40,8 +41,11 @@ import org.apache.hadoop.hbase.util.ByteRange; */ @InterfaceAudience.Private public abstract class Segment { + + private static final Log LOG = LogFactory.getLog(Segment.class); private volatile CellSet cellSet; private final CellComparator comparator; + private long minSequenceId; private volatile MemStoreLAB memStoreLAB; protected final AtomicLong size; protected volatile boolean tagsPresent; @@ -51,6 +55,7 @@ public abstract class Segment { long size) { this.cellSet = cellSet; this.comparator = comparator; + this.minSequenceId = Long.MAX_VALUE; this.memStoreLAB = memStoreLAB; this.size = new AtomicLong(size); this.tagsPresent = false; @@ -60,6 +65,7 @@ public abstract class Segment { protected Segment(Segment segment) { this.cellSet = segment.getCellSet(); this.comparator = segment.getComparator(); + this.minSequenceId = segment.getMinSequenceId(); this.memStoreLAB = segment.getMemStoreLAB(); this.size = new AtomicLong(segment.getSize()); this.tagsPresent = segment.isTagsPresent(); @@ -75,6 +81,14 @@ public abstract class Segment { } /** + * Creates the scanner for the given read point, and a specific order in a list + * @return a scanner for the given read point + */ + public SegmentScanner getSegmentScanner(long readPoint, long order) { + return new SegmentScanner(this, readPoint, order); + } + + /** * Returns whether the segment has any cells * @return whether the segment has any cells */ @@ -183,6 +197,10 @@ public abstract class Segment { size.addAndGet(delta); } + public long getMinSequenceId() { + return minSequenceId; + } + public TimeRangeTracker getTimeRangeTracker() { return this.timeRangeTracker; } @@ -231,10 +249,18 @@ public abstract class Segment { return s; } - /** - * Only mutable Segments implement this. - */ - protected abstract void updateMetaInfo(Cell toAdd, long s); + protected void updateMetaInfo(Cell toAdd, long s) { + getTimeRangeTracker().includeTimestamp(toAdd); + size.addAndGet(s); + minSequenceId = Math.min(minSequenceId, toAdd.getSequenceId()); + // In no tags case this NoTagsKeyValue.getTagsLength() is a cheap call. + // When we use ACL CP or Visibility CP which deals with Tags during + // mutation, the TagRewriteCell.getTagsLength() is a cheaper call. We do not + // parse the byte[] to identify the tags length. 
+ if(toAdd.getTagsLength() > 0) { + tagsPresent = true; + } + } /** * Returns a subset of the segment cell set, which starts with the given cell diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java index 394ffa1..7ac80ae 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java @@ -44,16 +44,16 @@ public final class SegmentFactory { final CellComparator comparator, long size) { MemStoreLAB memStoreLAB = getMemStoreLAB(conf); MutableSegment segment = generateMutableSegment(conf, comparator, memStoreLAB, size); - return createImmutableSegment(conf, segment); + return createImmutableSegment(segment); } public ImmutableSegment createImmutableSegment(CellComparator comparator, long size) { MutableSegment segment = generateMutableSegment(null, comparator, null, size); - return createImmutableSegment(null, segment); + return createImmutableSegment(segment); } - public ImmutableSegment createImmutableSegment(final Configuration conf, MutableSegment segment) { + public ImmutableSegment createImmutableSegment(MutableSegment segment) { return new ImmutableSegment(segment); } public MutableSegment createMutableSegment(final Configuration conf, diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java index 45f72d83..a04c1da 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java @@ -62,6 +62,7 @@ public class SegmentScanner implements KeyValueScanner { /** * @param scannerOrder see {@link KeyValueScanner#getScannerOrder()}. + * Scanners are ordered from 0 (oldest) to newest in increasing order. */ protected SegmentScanner(Segment segment, long readPoint, long scannerOrder) { this.segment = segment; @@ -84,7 +85,6 @@ public class SegmentScanner implements KeyValueScanner { throw new RuntimeException("current is invalid: read point is "+readPoint+", " + "while current sequence id is " +current.getSequenceId()); } - return current; } @@ -172,9 +172,8 @@ public class SegmentScanner implements KeyValueScanner { */ @Override public boolean seekToPreviousRow(Cell cell) throws IOException { - boolean keepSeeking = false; + boolean keepSeeking; Cell key = cell; - do { Cell firstKeyOnRow = CellUtil.createFirstOnRow(key); SortedSet cellHead = segment.headSet(firstKeyOnRow); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java index 1f496b4..92462c8 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java @@ -27,11 +27,11 @@ import org.apache.hadoop.hbase.ipc.PriorityFunction; import org.apache.hadoop.hbase.ipc.RpcScheduler; import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler; -/** Constructs a {@link SimpleRpcScheduler}. */ +/** Constructs a {@link SimpleRpcScheduler}. 
+ */ @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) @InterfaceStability.Evolving public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory { - @Override @Deprecated public RpcScheduler create(Configuration conf, PriorityFunction priority) { @@ -41,8 +41,7 @@ public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory { @Override public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) { int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, - HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); - + HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT); return new SimpleRpcScheduler( conf, handlerCount, @@ -54,5 +53,4 @@ public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory { server, HConstants.QOS_THRESHOLD); } - } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index b77a33b..3419937 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -520,4 +520,5 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf */ void finalizeFlush(); + MemStore getMemStore(); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index abade0e..4955ffe 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -253,9 +253,9 @@ public class StoreFileScanner implements KeyValueScanner { while(enforceMVCC && cur != null && (cur.getSequenceId() > readPt)) { - hfs.next(); + boolean hasNext = hfs.next(); setCurrentCell(hfs.getCell()); - if (this.stopSkippingKVsIfNextRow + if (hasNext && this.stopSkippingKVsIfNextRow && getComparator().compareRows(cur, startKV) > 0) { return false; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java new file mode 100644 index 0000000..9d7a723 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java @@ -0,0 +1,54 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.util.LinkedList; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * A list of segment managers coupled with the version of the memstore (version at the time it was + * created). + * This structure helps to guarantee that the compaction pipeline updates after the compaction is + * updated in a consistent (atomic) way. + * Specifically, swapping some of the elements in a compaction pipeline with a new compacted + * element is permitted only if the pipeline version is the same as the version attached to the + * elements. + * + */ +@InterfaceAudience.Private +public class VersionedSegmentsList { + + private final LinkedList storeSegments; + private final long version; + + public VersionedSegmentsList( + LinkedList storeSegments, long version) { + this.storeSegments = storeSegments; + this.version = version; + } + + public LinkedList getStoreSegments() { + return storeSegments; + } + + public long getVersion() { + return version; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index ae48f6c..3aafc23 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.regionserver.wal; import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.WAL_FILE_NAME_DELIMITER; +import com.google.common.annotations.VisibleForTesting; + import java.io.IOException; import java.io.InterruptedIOException; import java.lang.management.ManagementFactory; @@ -41,8 +43,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; -import com.google.common.annotations.VisibleForTesting; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -772,6 +772,21 @@ public abstract class AbstractFSWAL implements WAL { LOG.info("Closed WAL: " + toString()); } + /** + * updates the sequence number of a specific store. 
+ * depending on the flag: replaces current seq number if the given seq id is bigger, + * or even if it is lower than existing one + * @param encodedRegionName + * @param familyName + * @param sequenceid + * @param onlyIfGreater + */ + @Override public void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, + boolean onlyIfGreater) { + sequenceIdAccounting.updateStore(encodedRegionName,familyName,sequenceid,onlyIfGreater); + } + + protected SyncFuture getSyncFuture(final long sequence, Span span) { SyncFuture syncFuture = this.syncFuturesByHandler.get(Thread.currentThread()); if (syncFuture == null) { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index a465ea9..cdf5757 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -17,15 +17,6 @@ */ package org.apache.hadoop.hbase.regionserver.wal; -import com.google.common.annotations.VisibleForTesting; -import com.lmax.disruptor.BlockingWaitStrategy; -import com.lmax.disruptor.EventHandler; -import com.lmax.disruptor.ExceptionHandler; -import com.lmax.disruptor.LifecycleAware; -import com.lmax.disruptor.TimeoutException; -import com.lmax.disruptor.dsl.Disruptor; -import com.lmax.disruptor.dsl.ProducerType; - import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; @@ -39,6 +30,15 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.annotations.VisibleForTesting; +import com.lmax.disruptor.BlockingWaitStrategy; +import com.lmax.disruptor.EventHandler; +import com.lmax.disruptor.ExceptionHandler; +import com.lmax.disruptor.LifecycleAware; +import com.lmax.disruptor.TimeoutException; +import com.lmax.disruptor.dsl.Disruptor; +import com.lmax.disruptor.dsl.ProducerType; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java index 6e10f3c..53c501f 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.regionserver.wal; +import com.google.common.collect.Maps; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -32,8 +33,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.Bytes; -import com.google.common.collect.Maps; - /** * Accounting of sequence ids per region and then by column family. 
So we can our accounting * current, call startCacheFlush and then finishedCacheFlush or abortCacheFlush so this instance @@ -163,6 +162,38 @@ class SequenceIdAccounting { } } + /** + * Update the store sequence id, e.g., upon executing in-memory compaction + */ + void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceId, + boolean onlyIfGreater) { + if(sequenceId == null) return; + Long highest = this.highestSequenceIds.get(encodedRegionName); + if(highest == null || sequenceId > highest) { + this.highestSequenceIds.put(encodedRegionName,sequenceId); + } + synchronized (this.tieLock) { + ConcurrentMap m = getOrCreateLowestSequenceIds(encodedRegionName); + boolean replaced = false; + while (!replaced) { + Long oldSeqId = m.get(familyName); + if (oldSeqId == null) { + m.put(familyName, sequenceId); + replaced = true; + } else if (onlyIfGreater) { + if (sequenceId > oldSeqId) { + replaced = m.replace(familyName, oldSeqId, sequenceId); + } else { + return; + } + } else { // replace even if sequence id is not greater than oldSeqId + m.put(familyName, sequenceId); + return; + } + } + } + } + ConcurrentMap getOrCreateLowestSequenceIds(byte[] encodedRegionName) { // Intentionally, this access is done outside of this.regionSequenceIdLock. Done per append. ConcurrentMap m = this.lowestUnflushedSequenceIds.get(encodedRegionName); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java index 9d5e052..3eb85bd 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java @@ -18,6 +18,8 @@ */ package org.apache.hadoop.hbase.regionserver.wal; +import com.google.protobuf.ServiceException; + import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; @@ -27,7 +29,6 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; @@ -36,7 +37,9 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionServerCallable; import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; @@ -50,8 +53,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WAL.Entry; -import com.google.protobuf.ServiceException; - /** * This class is responsible for replaying the edits coming from a failed region server. *
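Editor's note: the new SequenceIdAccounting.updateStore() above keeps the per-family lowest unflushed sequence id current (for example after an in-memory compaction), either advancing it only when the new id is higher (onlyIfGreater) or overwriting it unconditionally. Below is a minimal, standalone sketch of that compare-and-replace loop — a toy single-map model, not the HBase class itself; the empty-entry case uses putIfAbsent here, which is slightly stricter than the plain put() in the patch.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Toy model of the onlyIfGreater logic: one map of family -> lowest unflushed sequence id.
public class UpdateStoreSketch {
  private final ConcurrentMap<String, Long> lowestUnflushed = new ConcurrentHashMap<>();

  void updateStore(String family, long sequenceId, boolean onlyIfGreater) {
    while (true) {
      Long old = lowestUnflushed.get(family);
      if (old == null) {
        // No entry yet; putIfAbsent keeps a concurrent first writer from being overwritten.
        if (lowestUnflushed.putIfAbsent(family, sequenceId) == null) {
          return;
        }
        continue; // lost the race, re-read and retry
      }
      if (!onlyIfGreater) {
        lowestUnflushed.put(family, sequenceId); // replace unconditionally
        return;
      }
      if (sequenceId <= old) {
        return; // existing id is already as high or higher, nothing to do
      }
      if (lowestUnflushed.replace(family, old, sequenceId)) {
        return; // atomic compare-and-replace succeeded
      }
      // Another writer changed the entry between get() and replace(); retry.
    }
  }

  public static void main(String[] args) {
    UpdateStoreSketch s = new UpdateStoreSketch();
    s.updateStore("cf", 10L, true);
    s.updateStore("cf", 5L, true);   // ignored: 5 <= 10
    s.updateStore("cf", 20L, true);  // advances to 20
    s.updateStore("cf", 5L, false);  // unconditional overwrite back to 5
    System.out.println(s.lowestUnflushed); // prints {cf=5}
  }
}
```

The replace(key, expected, newValue) call is what keeps the onlyIfGreater path safe against concurrent writers: if another thread changed the entry between the read and the replace, the loop simply re-reads and tries again.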
@@ -65,22 +66,22 @@ public class WALEditsReplaySink { private static final int MAX_BATCH_SIZE = 1024; private final Configuration conf; - private final HConnection conn; + private final ClusterConnection conn; private final TableName tableName; private final MetricsWALEditsReplay metrics; private final AtomicLong totalReplayedEdits = new AtomicLong(); private final boolean skipErrors; private final int replayTimeout; - private RpcControllerFactory rpcControllerFactory; + private final RpcControllerFactory rpcControllerFactory; /** * Create a sink for WAL log entries replay - * @param conf - * @param tableName - * @param conn - * @throws IOException + * @param conf configuration + * @param tableName of table to replay edits of + * @param conn connection to use + * @throws IOException on IO failure */ - public WALEditsReplaySink(Configuration conf, TableName tableName, HConnection conn) + public WALEditsReplaySink(Configuration conf, TableName tableName, ClusterConnection conn) throws IOException { this.conf = conf; this.metrics = new MetricsWALEditsReplay(); @@ -95,8 +96,8 @@ public class WALEditsReplaySink { /** * Replay an array of actions of the same region directly into the newly assigned Region Server - * @param entries - * @throws IOException + * @param entries to replay + * @throws IOException on IO failure */ public void replayEntries(List> entries) throws IOException { if (entries.size() == 0) { @@ -105,7 +106,7 @@ public class WALEditsReplaySink { int batchSize = entries.size(); Map> entriesByRegion = - new HashMap>(); + new HashMap<>(); HRegionLocation loc = null; Entry entry = null; List regionEntries = null; @@ -186,7 +187,7 @@ public class WALEditsReplaySink { private HRegionInfo regionInfo; private List entries; - ReplayServerCallable(final HConnection connection, final TableName tableName, + ReplayServerCallable(final Connection connection, final TableName tableName, final HRegionLocation regionLoc, final HRegionInfo regionInfo, final List entries) { super(connection, tableName, null); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 78e3e00..28340b5 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.replication.regionserver; +import com.google.common.annotations.VisibleForTesting; + import java.io.IOException; import java.net.ConnectException; import java.net.SocketTimeoutException; @@ -33,7 +35,6 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -43,8 +44,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HConnection; import 
org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; @@ -70,7 +71,7 @@ import org.apache.hadoop.ipc.RemoteException; public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoint { private static final Log LOG = LogFactory.getLog(HBaseInterClusterReplicationEndpoint.class); - private HConnection conn; + private ClusterConnection conn; private Configuration conf; @@ -104,7 +105,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi // TODO: This connection is replication specific or we should make it particular to // replication and make replication specific settings such as compression or codec to use // passing Cells. - this.conn = (HConnection) ConnectionFactory.createConnection(this.conf); + this.conn = (ClusterConnection) ConnectionFactory.createConnection(this.conf); this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.metrics = context.getMetrics(); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java index 3611608..b0fd176 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java @@ -562,10 +562,10 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { if (cause instanceof IOException) { // The table can be disabled or dropped at this time. For disabled tables, we have no // cheap mechanism to detect this case because meta does not contain this information. - // HConnection.isTableDisabled() is a zk call which we cannot do for every replay RPC. - // So instead we start the replay RPC with retries and - // check whether the table is dropped or disabled which might cause - // SocketTimeoutException, or RetriesExhaustedException or similar if we get IOE. + // ClusterConnection.isTableDisabled() is a zk call which we cannot do for every replay + // RPC. So instead we start the replay RPC with retries and check whether the table is + // dropped or disabled which might cause SocketTimeoutException, or + // RetriesExhaustedException or similar if we get IOE. 
if (cause instanceof TableNotFoundException || connection.isTableDisabled(tableName)) { if (LOG.isTraceEnabled()) { LOG.trace("Skipping " + entries.size() + " entries in table " + tableName diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index fa5e222..d55472d 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -48,16 +48,17 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; import org.apache.hadoop.hbase.regionserver.ReplicationSinkService; import org.apache.hadoop.hbase.regionserver.ReplicationSourceService; -import org.apache.hadoop.hbase.wal.WALKey; -import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; -import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueues; +import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments; import org.apache.hadoop.hbase.replication.ReplicationTracker; +import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; +import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner; import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner; +import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.zookeeper.KeeperException; @@ -127,7 +128,8 @@ public class Replication extends WALActionsListener.Base implements if (replication) { try { this.replicationQueues = - ReplicationFactory.getReplicationQueues(server.getZooKeeper(), this.conf, this.server); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, this.server, + server.getZooKeeper())); this.replicationQueues.init(this.server.getServerName().toString()); this.replicationPeers = ReplicationFactory.getReplicationPeers(server.getZooKeeper(), this.conf, this.server); @@ -135,7 +137,7 @@ public class Replication extends WALActionsListener.Base implements this.replicationTracker = ReplicationFactory.getReplicationTracker(server.getZooKeeper(), this.replicationPeers, this.conf, this.server, this.server); - } catch (ReplicationException e) { + } catch (Exception e) { throw new IOException("Failed replication handler create", e); } UUID clusterId = null; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java index 0469f9b..0ad0175 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java @@ -17,22 +17,25 @@ */ package org.apache.hadoop.hbase.replication.regionserver; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + import java.io.IOException; import 
java.util.Collections; import java.util.List; import java.util.Map; import java.util.Random; -import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; + /** * Maintains a collection of peers to replicate to, and randomly selects a @@ -57,7 +60,7 @@ public class ReplicationSinkManager { static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.1f; - private final HConnection conn; + private final Connection conn; private final String peerClusterId; @@ -89,7 +92,7 @@ public class ReplicationSinkManager { * @param conf HBase configuration, used for determining replication source ratio and bad peer * threshold */ - public ReplicationSinkManager(HConnection conn, String peerClusterId, + public ReplicationSinkManager(ClusterConnection conn, String peerClusterId, HBaseReplicationEndpoint endpoint, Configuration conf) { this.conn = conn; this.peerClusterId = peerClusterId; @@ -116,7 +119,7 @@ public class ReplicationSinkManager { throw new IOException("No replication sinks are available"); } ServerName serverName = sinks.get(random.nextInt(sinks.size())); - return new SinkPeer(serverName, conn.getAdmin(serverName)); + return new SinkPeer(serverName, ((ClusterConnection) conn).getAdmin(serverName)); } /** diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index b585513..e9330f4 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -115,6 +115,7 @@ public class ReplicationSourceManager implements ReplicationListener { private final ThreadPoolExecutor executor; private final Random rand; + private final boolean replicationForBulkLoadDataEnabled; /** @@ -166,6 +167,9 @@ public class ReplicationSourceManager implements ReplicationListener { this.executor.setThreadFactory(tfb.build()); this.rand = new Random(); this.latestPaths = Collections.synchronizedSet(new HashSet()); + replicationForBulkLoadDataEnabled = + conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, + HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); } /** @@ -227,9 +231,6 @@ public class ReplicationSourceManager implements ReplicationListener { * old region server wal queues */ protected void init() throws IOException, ReplicationException { - boolean replicationForBulkLoadDataEnabled = - conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, - HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT); for (String id : this.replicationPeers.getPeerIds()) { addSource(id); if (replicationForBulkLoadDataEnabled) { @@ -315,9 +316,6 @@ public class ReplicationSourceManager implements ReplicationListener { */ public void join() { this.executor.shutdown(); - if (this.sources.size() == 0) { - this.replicationQueues.removeAllQueues(); - } for 
(ReplicationSourceInterface source : this.sources) { source.terminate("Region server is closing"); } @@ -582,6 +580,7 @@ public class ReplicationSourceManager implements ReplicationListener { @Override public void peerRemoved(String peerId) { removePeer(peerId); + this.replicationQueues.removePeerFromHFileRefs(peerId); } @Override @@ -591,6 +590,9 @@ public class ReplicationSourceManager implements ReplicationListener { boolean added = this.replicationPeers.peerAdded(id); if (added) { addSource(id); + if (replicationForBulkLoadDataEnabled) { + this.replicationQueues.addPeerToHFileRefs(id); + } } } catch (Exception e) { LOG.error("Error while adding a new peer", e); @@ -624,7 +626,7 @@ public class ReplicationSourceManager implements ReplicationListener { @Override public void run() { - if (this.rq.isThisOurZnode(rsZnode)) { + if (this.rq.isThisOurRegionServer(rsZnode)) { return; } // Wait a bit before transferring the queues, we may be shutting down. diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java index 450db64..b0cf70b 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.security; import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.IOException; +import java.util.Locale; import java.util.Map; import javax.security.auth.callback.Callback; @@ -53,7 +54,7 @@ public class HBaseSaslRpcServer { public static void init(Configuration conf) { saslProps = SaslUtil.initSaslProperties(conf.get("hbase.rpc.protection", - QualityOfProtection.AUTHENTICATION.name().toLowerCase())); + QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT))); } public static Map getSaslProps() { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java index f0723c2..2d98919 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java @@ -497,11 +497,19 @@ public class AccessControlLists { List perms = new ArrayList(); - for (Map.Entry entry : allPerms.entries()) { - UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()), - entry.getValue().getTableName(), entry.getValue().getFamily(), - entry.getValue().getQualifier(), entry.getValue().getActions()); - perms.add(up); + if(isNamespaceEntry(entryName)) { // Namespace + for (Map.Entry entry : allPerms.entries()) { + UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()), + entry.getValue().getNamespace(), entry.getValue().getActions()); + perms.add(up); + } + } else { // Table + for (Map.Entry entry : allPerms.entries()) { + UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()), + entry.getValue().getTableName(), entry.getValue().getFamily(), + entry.getValue().getQualifier(), entry.getValue().getActions()); + perms.add(up); + } } return perms; } @@ -620,11 +628,11 @@ public class AccessControlLists { } public static boolean isNamespaceEntry(String entryName) { - return entryName.charAt(0) == NAMESPACE_PREFIX; + return entryName != null && entryName.charAt(0) == NAMESPACE_PREFIX; } public static boolean 
isNamespaceEntry(byte[] entryName) { - return entryName[0] == NAMESPACE_PREFIX; + return entryName != null && entryName.length !=0 && entryName[0] == NAMESPACE_PREFIX; } public static String toNamespaceEntry(String namespace) { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 9ad7242..360b0f5 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -19,10 +19,17 @@ package org.apache.hadoop.hbase.tool; +import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT; +import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT; + +import com.google.common.collect.Lists; + import java.io.Closeable; import java.io.IOException; +import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; @@ -31,12 +38,12 @@ import java.util.Map; import java.util.Random; import java.util.Set; import java.util.TreeSet; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -77,20 +84,29 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.RegionSplitter; +import org.apache.hadoop.hbase.zookeeper.EmptyWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.client.ConnectStringParser; +import org.apache.zookeeper.data.Stat; /** * HBase Canary Tool, that that can be used to do * "canary monitoring" of a running HBase cluster. * - * Here are two modes + * Here are three modes * 1. region mode - Foreach region tries to get one row per column family * and outputs some information about failure or latency. * * 2. regionserver mode - Foreach regionserver tries to get one row from one table * selected randomly and outputs some information about failure or latency. + * + * 3. zookeeper mode - for each zookeeper instance, selects a zNode and + * outputs some information about failure or latency. 
*/ public final class Canary implements Tool { // Sink interface used by the canary to outputs information @@ -187,6 +203,55 @@ public final class Canary implements Tool { } } + public static class ZookeeperStdOutSink extends StdOutSink implements ExtendedSink { + @Override public void publishReadFailure(String zNode, String server) { + incReadFailureCount(); + LOG.error(String.format("Read from zNode:%s on zookeeper instance:%s", zNode, server)); + } + + @Override public void publishReadTiming(String znode, String server, long msTime) { + LOG.info(String.format("Read from zNode:%s on zookeeper instance:%s in %dms", + znode, server, msTime)); + } + } + + static class ZookeeperTask implements Callable { + private final Connection connection; + private final String host; + private String znode; + private final int timeout; + private ZookeeperStdOutSink sink; + + public ZookeeperTask(Connection connection, String host, String znode, int timeout, + ZookeeperStdOutSink sink) { + this.connection = connection; + this.host = host; + this.znode = znode; + this.timeout = timeout; + this.sink = sink; + } + + @Override public Void call() throws Exception { + ZooKeeper zooKeeper = null; + try { + zooKeeper = new ZooKeeper(host, timeout, EmptyWatcher.instance); + Stat exists = zooKeeper.exists(znode, false); + StopWatch stopwatch = new StopWatch(); + stopwatch.start(); + zooKeeper.getData(znode, false, exists); + stopwatch.stop(); + sink.publishReadTiming(znode, host, stopwatch.getTime()); + } catch (KeeperException | InterruptedException e) { + sink.publishReadFailure(znode, host); + } finally { + if (zooKeeper != null) { + zooKeeper.close(); + } + } + return null; + } + } + /** * For each column family of the region tries to get one row and outputs the latency, or the * failure. 
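Editor's note: the new zookeeper canary mode described above probes every quorum member — ZookeeperMonitor builds one ZookeeperTask per host, and each task times a read of zookeeper.znode.parent. Below is a self-contained sketch of that single-host probe outside the Canary plumbing; the host, znode and session timeout are illustrative placeholders, not values taken from the patch.

```java
import org.apache.commons.lang.time.StopWatch;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Times a single znode read against one ZooKeeper quorum member.
public class ZkReadProbe {
  public static void main(String[] args) throws Exception {
    String host = "zk1.example.com:2181"; // hypothetical quorum member
    String znode = "/hbase";              // typical zookeeper.znode.parent
    int sessionTimeoutMs = 90_000;        // placeholder session timeout

    ZooKeeper zk = new ZooKeeper(host, sessionTimeoutMs, event -> { /* ignore watch events */ });
    try {
      Stat stat = zk.exists(znode, false);
      StopWatch watch = new StopWatch();
      watch.start();
      zk.getData(znode, false, stat);    // the timed read
      watch.stop();
      System.out.println("Read " + znode + " from " + host + " in " + watch.getTime() + "ms");
    } catch (KeeperException | InterruptedException e) {
      System.err.println("Read of " + znode + " from " + host + " failed: " + e);
    } finally {
      zk.close();
    }
  }
}
```

As in ZookeeperTask, a failed read is reported as a failure for that host rather than as a latency sample; the patch fans these probes out across the quorum through the Canary's shared executor.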
@@ -461,6 +526,7 @@ public final class Canary implements Tool { private long timeout = DEFAULT_TIMEOUT; private boolean failOnError = true; private boolean regionServerMode = false; + private boolean zookeeperMode = false; private boolean regionServerAllRegions = false; private boolean writeSniffing = false; private boolean treatFailureAsError = false; @@ -521,6 +587,8 @@ public final class Canary implements Tool { System.err.println("-interval needs a numeric value argument."); printUsageAndExit(); } + } else if (cmd.equals("-zookeeper")) { + this.zookeeperMode = true; } else if(cmd.equals("-regionserver")) { this.regionServerMode = true; } else if(cmd.equals("-allRegions")) { @@ -577,6 +645,13 @@ public final class Canary implements Tool { System.err.println("-allRegions can only be specified in regionserver mode."); printUsageAndExit(); } + if (this.zookeeperMode) { + if (this.regionServerMode || this.regionServerAllRegions || this.writeSniffing) { + System.err.println("-zookeeper is exclusive and cannot be combined with " + + "other modes."); + printUsageAndExit(); + } + } return index; } @@ -661,6 +736,8 @@ public final class Canary implements Tool { System.err.println(" which means to enable regionserver mode"); System.err.println(" -allRegions Tries all regions on a regionserver,"); System.err.println(" only works in regionserver mode."); + System.err.println(" -zookeeper Tries to grab zookeeper.znode.parent "); + System.err.println(" on each zookeeper instance"); System.err.println(" -daemon Continuous check at defined intervals."); System.err.println(" -interval Interval between checks (sec)"); System.err.println(" -e Use table/regionserver as regular expression"); @@ -699,6 +776,10 @@ public final class Canary implements Tool { new RegionServerMonitor(connection, monitorTargets, this.useRegExp, (ExtendedSink) this.sink, this.executor, this.regionServerAllRegions, this.treatFailureAsError); + } else if (this.zookeeperMode) { + monitor = + new ZookeeperMonitor(connection, monitorTargets, this.useRegExp, + (ZookeeperStdOutSink) this.sink, this.executor, this.treatFailureAsError); } else { monitor = new RegionMonitor(connection, monitorTargets, this.useRegExp, this.sink, this.executor, @@ -1039,6 +1120,62 @@ public final class Canary implements Tool { } return executor.invokeAll(tasks); } + + // monitor for zookeeper mode + private static class ZookeeperMonitor extends Monitor { + private List hosts; + private final String znode; + private final int timeout; + + protected ZookeeperMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, + ExtendedSink sink, ExecutorService executor, boolean treatFailureAsError) { + super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError); + Configuration configuration = connection.getConfiguration(); + znode = + configuration.get(ZOOKEEPER_ZNODE_PARENT, + DEFAULT_ZOOKEEPER_ZNODE_PARENT); + timeout = configuration + .getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); + ConnectStringParser parser = + new ConnectStringParser(ZKConfig.getZKQuorumServersString(configuration)); + hosts = Lists.newArrayList(); + for (InetSocketAddress server : parser.getServerAddresses()) { + hosts.add(server.toString()); + } + } + + @Override public void run() { + List tasks = Lists.newArrayList(); + for (final String host : hosts) { + tasks.add(new ZookeeperTask(connection, host, znode, timeout, getSink())); + } + try { + for (Future future : this.executor.invokeAll(tasks)) { + try { + future.get(); + } 
catch (ExecutionException e) { + LOG.error("Sniff zookeeper failed!", e); + this.errorCode = ERROR_EXIT_CODE; + } + } + } catch (InterruptedException e) { + this.errorCode = ERROR_EXIT_CODE; + Thread.currentThread().interrupt(); + LOG.error("Sniff zookeeper interrupted!", e); + } + this.done = true; + } + + + private ZookeeperStdOutSink getSink() { + if (!(sink instanceof ZookeeperStdOutSink)) { + throw new RuntimeException("Can only write to zookeeper sink"); + } + return ((ZookeeperStdOutSink) sink); + } + } + + // a monitor for regionserver mode private static class RegionServerMonitor extends Monitor { @@ -1106,7 +1243,9 @@ public final class Canary implements Tool { String serverName = entry.getKey(); AtomicLong successes = new AtomicLong(0); successMap.put(serverName, successes); - if (this.allRegions) { + if (entry.getValue().isEmpty()) { + LOG.error(String.format("Regionserver not serving any regions - %s", serverName)); + } else if (this.allRegions) { for (HRegionInfo region : entry.getValue()) { tasks.add(new RegionServerTask(this.connection, serverName, @@ -1182,6 +1321,13 @@ public final class Canary implements Tool { table.close(); } + //get any live regionservers not serving any regions + for (ServerName rs : this.admin.getClusterStatus().getServers()) { + String rsName = rs.getHostname(); + if (!rsAndRMap.containsKey(rsName)) { + rsAndRMap.put(rsName, Collections.emptyList()); + } + } } catch (IOException e) { String msg = "Get HTables info failed"; LOG.error(msg, e); @@ -1245,7 +1391,7 @@ public final class Canary implements Tool { new GenericOptionsParser(conf, args); int numThreads = conf.getInt("hbase.canary.threads.num", MAX_THREADS_NUM); - LOG.info("Number of exection threads " + numThreads); + LOG.info("Number of execution threads " + numThreads); ExecutorService executor = new ScheduledThreadPoolExecutor(numThreads); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index ea43820..ace45ec 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; +import java.util.Locale; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; @@ -53,7 +54,7 @@ public class CompressionTest { private static final Log LOG = LogFactory.getLog(CompressionTest.class); public static boolean testCompression(String codec) { - codec = codec.toLowerCase(); + codec = codec.toLowerCase(Locale.ROOT); Compression.Algorithm a; @@ -109,7 +110,7 @@ public class CompressionTest { System.err.println( "Usage: CompressionTest " + - StringUtils.join( Compression.Algorithm.values(), "|").toLowerCase() + + StringUtils.join( Compression.Algorithm.values(), "|").toLowerCase(Locale.ROOT) + "\n" + "For example:\n" + " hbase " + CompressionTest.class + " file:///tmp/testfile gz\n"); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java index 1eeeee0..a860f20 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.Lock; +import 
org.apache.commons.logging.Log; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ScheduledChore; @@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.log4j.Logger; +import org.apache.commons.logging.LogFactory; /** * A utility to store user specific HConnections in memory. @@ -47,7 +48,7 @@ import org.apache.log4j.Logger; */ @InterfaceAudience.Private public class ConnectionCache { - private static final Logger LOG = Logger.getLogger(ConnectionCache.class); + private static final Log LOG = LogFactory.getLog(ConnectionCache.class); private final Map connections = new ConcurrentHashMap(); @@ -60,6 +61,7 @@ public class ConnectionCache { private final ThreadLocal effectiveUserNames = new ThreadLocal() { + @Override protected String initialValue() { return realUserName; } @@ -207,7 +209,7 @@ public class ConnectionCache { return false; } if (connection.isAborted() || connection.isClosed()) { - LOG.info("Unexpected: cached HConnection is aborted/closed, removed from cache"); + LOG.info("Unexpected: cached Connection is aborted/closed, removed from cache"); connections.remove(userName); return false; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java index c1947a2..9013bab 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/DirectMemoryUtils.java @@ -25,6 +25,7 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.nio.ByteBuffer; import java.util.List; +import java.util.Locale; import javax.management.JMException; import javax.management.MBeanServer; @@ -87,7 +88,7 @@ public class DirectMemoryUtils { long multiplier = 1; //for the byte case. 
for (String s : arguments) { if (s.contains("-XX:MaxDirectMemorySize=")) { - String memSize = s.toLowerCase() + String memSize = s.toLowerCase(Locale.ROOT) .replace("-xx:maxdirectmemorysize=", "").trim(); if (memSize.contains("k")) { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 18c86c8..ed02162 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -35,6 +35,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.ConcurrentHashMap; @@ -121,7 +122,7 @@ public abstract class FSUtils { */ public static void setStoragePolicy(final FileSystem fs, final Configuration conf, final Path path, final String policyKey, final String defaultPolicy) { - String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase(); + String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase(Locale.ROOT); if (storagePolicy.equals(defaultPolicy)) { if (LOG.isTraceEnabled()) { LOG.trace("default policy of " + defaultPolicy + " requested, exiting early."); @@ -1912,7 +1913,7 @@ public abstract class FSUtils { return false; } - if (!regionName.toLowerCase().matches("[0-9a-f]+")) { + if (!regionName.toLowerCase(Locale.ROOT).matches("[0-9a-f]+")) { return false; } return true; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index eec5ea4..e3ebe64 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -17,6 +17,16 @@ */ package org.apache.hadoop.hbase.util; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.collect.Multimap; +import com.google.common.collect.Ordering; +import com.google.common.collect.TreeMultimap; +import com.google.protobuf.ServiceException; + import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; @@ -34,6 +44,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import java.util.Set; @@ -54,15 +65,6 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import com.google.common.collect.Multimap; -import com.google.common.collect.Ordering; -import com.google.common.collect.TreeMultimap; -import com.google.protobuf.ServiceException; import org.apache.commons.io.IOUtils; import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.StringUtils; @@ -103,7 +105,6 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import 
org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -412,7 +413,7 @@ public class HBaseFsck extends Configured implements Closeable { * This method maintains a lock using a file. If the creation fails we return null * * @return FSDataOutputStream object corresponding to the newly opened lock file - * @throws IOException + * @throws IOException if IO failure occurs */ private FSDataOutputStream checkAndMarkRunningHbck() throws IOException { RetryCounter retryCounter = lockFileRetryCounterFactory.create(); @@ -547,10 +548,10 @@ public class HBaseFsck extends Configured implements Closeable { errors.print("Number of requests: " + status.getRequestsCount()); errors.print("Number of regions: " + status.getRegionsCount()); - Map rits = status.getRegionsInTransition(); + Set rits = status.getRegionsInTransition(); errors.print("Number of regions in transition: " + rits.size()); if (details) { - for (RegionState state: rits.values()) { + for (RegionState state: rits) { errors.print(" " + state.toDescriptiveString()); } } @@ -722,7 +723,7 @@ public class HBaseFsck extends Configured implements Closeable { checkAndFixTableLocks(); checkAndFixReplication(); - + // Remove the hbck lock unlockHbck(); @@ -3283,7 +3284,7 @@ public class HBaseFsck extends Configured implements Closeable { checker.fixExpiredTableLocks(); } } - + private void checkAndFixReplication() throws IOException { ReplicationChecker checker = new ReplicationChecker(getConf(), zkw, connection, errors); checker.checkUnDeletedQueues(); @@ -3980,13 +3981,13 @@ public class HBaseFsck extends Configured implements Closeable { * Contact a region server and get all information from it */ static class WorkItemRegion implements Callable { - private HBaseFsck hbck; - private ServerName rsinfo; - private ErrorReporter errors; - private HConnection connection; + private final HBaseFsck hbck; + private final ServerName rsinfo; + private final ErrorReporter errors; + private final ClusterConnection connection; WorkItemRegion(HBaseFsck hbck, ServerName info, - ErrorReporter errors, HConnection connection) { + ErrorReporter errors, ClusterConnection connection) { this.hbck = hbck; this.rsinfo = info; this.errors = errors; @@ -4067,7 +4068,7 @@ public class HBaseFsck extends Configured implements Closeable { errors.progress(); String encodedName = regionDir.getPath().getName(); // ignore directories that aren't hexadecimal - if (!encodedName.toLowerCase().matches("[0-9a-f]+")) { + if (!encodedName.toLowerCase(Locale.ROOT).matches("[0-9a-f]+")) { continue; } @@ -4214,7 +4215,7 @@ public class HBaseFsck extends Configured implements Closeable { public boolean shouldDisableSplitAndMerge() { return fixAny || disableSplitAndMerge; } - + /** * Set summary mode. * Print only summary of the tables and status (OK or INCONSISTENT) @@ -4246,7 +4247,7 @@ public class HBaseFsck extends Configured implements Closeable { fixTableLocks = shouldFix; fixAny |= shouldFix; } - + /** * Set replication fix mode. 
*/ @@ -4517,7 +4518,7 @@ public class HBaseFsck extends Configured implements Closeable { out.println(""); out.println(" Replication options"); out.println(" -fixReplication Deletes replication queues for removed peers"); - + out.flush(); errors.reportError(ERROR_CODE.WRONG_USAGE, sw.toString()); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index e681789..eaf8d54 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -18,6 +18,11 @@ */ package org.apache.hadoop.hbase.util; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Random; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -33,7 +38,6 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.RegionState; @@ -41,12 +45,6 @@ import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.zookeeper.KeeperException; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Random; - /** * This class contains helper methods that repair parts of hbase's filesystem * contents. @@ -64,7 +62,7 @@ public class HBaseFsckRepair { * @param region Region to undeploy * @param servers list of Servers to undeploy from */ - public static void fixMultiAssignment(HConnection connection, HRegionInfo region, + public static void fixMultiAssignment(Connection connection, HRegionInfo region, List servers) throws IOException, KeeperException, InterruptedException { HRegionInfo actualRegion = new HRegionInfo(region); @@ -124,10 +122,14 @@ public class HBaseFsckRepair { long expiration = timeout + EnvironmentEdgeManager.currentTime(); while (EnvironmentEdgeManager.currentTime() < expiration) { try { - Map rits= - admin.getClusterStatus().getRegionsInTransition(); - - if (rits.keySet() != null && !rits.keySet().contains(region.getEncodedName())) { + boolean inTransition = false; + for (RegionState rs: admin.getClusterStatus().getRegionsInTransition()) { + if (rs.getRegion().equals(region)) { + inTransition = true; + break; + } + } + if (!inTransition) { // yay! no longer RIT return; } @@ -149,7 +151,7 @@ public class HBaseFsckRepair { * (default 120s) to close the region. This bypasses the active hmaster. 
*/ @SuppressWarnings("deprecation") - public static void closeRegionSilentlyAndWait(HConnection connection, + public static void closeRegionSilentlyAndWait(Connection connection, ServerName server, HRegionInfo region) throws IOException, InterruptedException { long timeout = connection.getConfiguration() .getLong("hbase.hbck.close.timeout", 120000); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java index 53c5cef..e86d32a 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java @@ -37,10 +37,10 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Table; @@ -103,10 +103,10 @@ class HMerge { final TableName tableName, final boolean testMasterRunning) throws IOException { boolean masterIsRunning = false; - HConnection hConnection = null; + ClusterConnection hConnection = null; if (testMasterRunning) { try { - hConnection = (HConnection) ConnectionFactory.createConnection(conf); + hConnection = (ClusterConnection) ConnectionFactory.createConnection(conf); masterIsRunning = hConnection.isMasterRunning(); } finally { if (hConnection != null) { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java index 6efb10c..1688874 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; -import java.io.InterruptedIOException; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; @@ -30,42 +29,44 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.hadoop.hbase.ipc.AsyncRpcClient; /** - * Provides ability to create multiple HConnection instances and allows to process a batch of - * actions using HConnection.processBatchCallback() + * Provides ability to create multiple Connection instances and allows to process a batch of + * actions using CHTable.doBatchWithCallback() */ @InterfaceAudience.Private 
public class MultiHConnection { private static final Log LOG = LogFactory.getLog(MultiHConnection.class); - private HConnection[] hConnections; - private final Object hConnectionsLock = new Object(); - private int noOfConnections; + private Connection[] connections; + private final Object connectionsLock = new Object(); + private final int noOfConnections; private ExecutorService batchPool; /** - * Create multiple HConnection instances and initialize a thread pool executor + * Create multiple Connection instances and initialize a thread pool executor * @param conf configuration - * @param noOfConnections total no of HConnections to create - * @throws IOException + * @param noOfConnections total no of Connections to create + * @throws IOException if IO failure occurs */ public MultiHConnection(Configuration conf, int noOfConnections) throws IOException { this.noOfConnections = noOfConnections; - synchronized (this.hConnectionsLock) { - hConnections = new HConnection[noOfConnections]; + synchronized (this.connectionsLock) { + connections = new Connection[noOfConnections]; for (int i = 0; i < noOfConnections; i++) { - HConnection conn = (HConnection) ConnectionFactory.createConnection(conf); - hConnections[i] = conn; + Connection conn = ConnectionFactory.createConnection(conf); + connections[i] = conn; } } createBatchPool(conf); @@ -75,9 +76,9 @@ public class MultiHConnection { * Close the open connections and shutdown the batchpool */ public void close() { - synchronized (hConnectionsLock) { - if (hConnections != null) { - for (Connection conn : hConnections) { + synchronized (connectionsLock) { + if (connections != null) { + for (Connection conn : connections) { if (conn != null) { try { conn.close(); @@ -88,7 +89,7 @@ public class MultiHConnection { } } } - hConnections = null; + connections = null; } } if (this.batchPool != null && !this.batchPool.isShutdown()) { @@ -109,28 +110,21 @@ public class MultiHConnection { * @param actions the actions * @param tableName table name * @param results the results array - * @param callback - * @throws IOException + * @param callback to run when results are in + * @throws IOException If IO failure occurs */ @SuppressWarnings("deprecation") public void processBatchCallback(List actions, TableName tableName, Object[] results, Batch.Callback callback) throws IOException { // Currently used by RegionStateStore - // A deprecated method is used as multiple threads accessing RegionStateStore do a single put - // and htable is not thread safe. Alternative would be to create an Htable instance for each - // put but that is not very efficient. - // See HBASE-11610 for more details. 
- try { - hConnections[ThreadLocalRandom.current().nextInt(noOfConnections)].processBatchCallback( - actions, tableName, this.batchPool, results, callback); - } catch (InterruptedException e) { - throw new InterruptedIOException(e.getMessage()); - } + ClusterConnection conn = + (ClusterConnection) connections[ThreadLocalRandom.current().nextInt(noOfConnections)]; + + HTable.doBatchWithCallback(actions, results, callback, conn, batchPool, tableName); } - // Copied from ConnectionImplementation.getBatchPool() - // We should get rid of this when HConnection.processBatchCallback is un-deprecated and provides + // We should get rid of this when Connection.processBatchCallback is un-deprecated and provides // an API to manage a batch pool private void createBatchPool(Configuration conf) { // Use the same config for keep alive as in ConnectionImplementation.getBatchPool(); @@ -140,7 +134,7 @@ public class MultiHConnection { } long keepAliveTime = conf.getLong("hbase.multihconnection.threads.keepalivetime", 60); LinkedBlockingQueue workQueue = - new LinkedBlockingQueue(maxThreads + new LinkedBlockingQueue<>(maxThreads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); ThreadPoolExecutor tpe = diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index f661e0d..fcd265d 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -31,6 +31,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.concurrent.Callable; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; @@ -975,7 +976,7 @@ public class RegionMover extends AbstractHBaseTool { if (cmd.hasOption('t')) { rmbuilder.timeout(Integer.parseInt(cmd.getOptionValue('t'))); } - this.loadUnload = cmd.getOptionValue("o").toLowerCase(); + this.loadUnload = cmd.getOptionValue("o").toLowerCase(Locale.ROOT); } @Override diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java index 983d49c..e6b746c 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerCommandLine.java @@ -22,6 +22,7 @@ import java.lang.management.ManagementFactory; import java.lang.management.RuntimeMXBean; import java.util.Arrays; import java.util.HashSet; +import java.util.Locale; import java.util.Map.Entry; import java.util.Set; @@ -103,8 +104,8 @@ public abstract class ServerCommandLine extends Configured implements Tool { nextEnv: for (Entry entry : System.getenv().entrySet()) { - String key = entry.getKey().toLowerCase(); - String value = entry.getValue().toLowerCase(); + String key = entry.getKey().toLowerCase(Locale.ROOT); + String value = entry.getValue().toLowerCase(Locale.ROOT); // exclude variables which may contain skip words for(String skipWord : skipWords) { if (key.contains(skipWord) || value.contains(skipWord)) diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java index bf44a50..e472558 100644 --- 
hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java @@ -32,7 +32,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeers; @@ -51,16 +51,21 @@ import org.apache.zookeeper.KeeperException; @InterfaceAudience.Private public class ReplicationChecker { private static final Log LOG = LogFactory.getLog(ReplicationChecker.class); - private ErrorReporter errorReporter; - private ReplicationQueuesClient queuesClient; - private ReplicationPeers replicationPeers; - private ReplicationQueueDeletor queueDeletor; + private final ZooKeeperWatcher zkw; + private final ErrorReporter errorReporter; + private final ReplicationQueuesClient queuesClient; + private final ReplicationPeers replicationPeers; + private final ReplicationQueueDeletor queueDeletor; // replicator with its queueIds for removed peers - private Map> undeletedQueueIds = new HashMap>(); - - public ReplicationChecker(Configuration conf, ZooKeeperWatcher zkw, HConnection connection, + private final Map> undeletedQueueIds = new HashMap<>(); + // replicator with its undeleted queueIds for removed peers in hfile-refs queue + private Set undeletedHFileRefsQueueIds = new HashSet<>(); + private final String hfileRefsZNode; + + public ReplicationChecker(Configuration conf, ZooKeeperWatcher zkw, ClusterConnection connection, ErrorReporter errorReporter) throws IOException { try { + this.zkw = zkw; this.errorReporter = errorReporter; this.queuesClient = ReplicationFactory.getReplicationQueuesClient(zkw, conf, connection); this.queuesClient.init(); @@ -71,6 +76,13 @@ public class ReplicationChecker { } catch (ReplicationException e) { throw new IOException("failed to construct ReplicationChecker", e); } + + String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication"); + String replicationZNode = ZKUtil.joinZNode(this.zkw.baseZNode, replicationZNodeName); + String hfileRefsZNodeName = + conf.get(ReplicationStateZKBase.ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY, + ReplicationStateZKBase.ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT); + hfileRefsZNode = ZKUtil.joinZNode(replicationZNode, hfileRefsZNodeName); } public boolean hasUnDeletedQueues() { @@ -79,7 +91,7 @@ public class ReplicationChecker { } public void checkUnDeletedQueues() throws IOException { - Set peerIds = new HashSet(this.replicationPeers.getAllPeerIds()); + Set peerIds = new HashSet<>(this.replicationPeers.getAllPeerIds()); try { List replicators = this.queuesClient.getListOfReplicators(); for (String replicator : replicators) { @@ -103,13 +115,37 @@ public class ReplicationChecker { } catch (KeeperException ke) { throw new IOException(ke); } + + checkUnDeletedHFileRefsQueues(peerIds); + } + + private void checkUnDeletedHFileRefsQueues(Set peerIds) throws IOException { + try { + if (-1 == ZKUtil.checkExists(zkw, hfileRefsZNode)) { + return; + } + List listOfPeers = this.queuesClient.getAllPeersFromHFileRefsQueue(); + Set peers = new HashSet<>(listOfPeers); + peers.removeAll(peerIds); + if (!peers.isEmpty()) { 
+ undeletedHFileRefsQueueIds.addAll(peers); + String msg = + "Undeleted replication hfile-refs queue for removed peer found: " + + undeletedHFileRefsQueueIds + " under hfile-refs node " + hfileRefsZNode; + errorReporter.reportError(HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, + msg); + } + } catch (KeeperException e) { + throw new IOException("Failed to get list of all peers from hfile-refs znode " + + hfileRefsZNode, e); + } } - + private static class ReplicationQueueDeletor extends ReplicationStateZKBase { public ReplicationQueueDeletor(ZooKeeperWatcher zk, Configuration conf, Abortable abortable) { super(zk, conf, abortable); } - + public void removeQueue(String replicator, String queueId) throws IOException { String queueZnodePath = ZKUtil.joinZNode(ZKUtil.joinZNode(this.queuesZNode, replicator), queueId); @@ -122,7 +158,7 @@ public class ReplicationChecker { } } } - + public void fixUnDeletedQueues() throws IOException { for (Entry> replicatorAndQueueIds : undeletedQueueIds.entrySet()) { String replicator = replicatorAndQueueIds.getKey(); @@ -130,5 +166,20 @@ public class ReplicationChecker { queueDeletor.removeQueue(replicator, queueId); } } + fixUnDeletedHFileRefsQueue(); + } + + private void fixUnDeletedHFileRefsQueue() throws IOException { + for (String hfileRefsQueueId : undeletedHFileRefsQueueIds) { + String node = ZKUtil.joinZNode(hfileRefsZNode, hfileRefsQueueId); + try { + ZKUtil.deleteNodeRecursively(this.zkw, node); + LOG.info("Successfully deleted hfile-refs queue " + hfileRefsQueueId + " from path " + + hfileRefsZNode); + } catch (KeeperException e) { + throw new IOException("Failed to delete hfile-refs queue " + hfileRefsQueueId + + " from path " + hfileRefsZNode); + } + } } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java index 10fe04c..b5ddd00 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java @@ -25,19 +25,19 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.util.FSUtils; // imports for things that haven't moved from regionserver.wal yet. import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.hadoop.hbase.util.FSUtils; /** * No-op implementation of {@link WALProvider} used when the WAL is disabled. 
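At its core, the hfile-refs part of the check added above reduces to a set difference: queue ids found under the hfile-refs znode minus the peer ids that still exist. A self-contained sketch of just that decision, with the ZooKeeper access replaced by plain collections (class and variable names are illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class HFileRefsOrphanCheckSketch {
  /** Returns the hfile-refs queue ids that belong to peers which no longer exist. */
  static Set<String> findUndeletedHFileRefsQueues(Set<String> existingPeerIds,
      List<String> peersUnderHFileRefsZNode) {
    Set<String> orphans = new HashSet<>(peersUnderHFileRefsZNode);
    orphans.removeAll(existingPeerIds); // whatever is left has no live peer -> report and fix
    return orphans;
  }

  public static void main(String[] args) {
    Set<String> peers = new HashSet<>(Arrays.asList("1", "2"));
    List<String> underZNode = Arrays.asList("1", "2", "3");
    // Prints [3]: peer 3 was removed but its hfile-refs queue is still in ZooKeeper.
    System.out.println(findUndeletedHFileRefsQueues(peers, underZNode));
  }
}

The new fixUnDeletedHFileRefsQueue() in the hunk above then deletes the corresponding znodes recursively for exactly those orphaned queue ids.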
@@ -170,6 +170,10 @@ class DisabledWALProvider implements WALProvider { } @Override + public void updateStore(byte[] encodedRegionName, byte[] familyName, + Long sequenceid, boolean onlyIfGreater) { return; } + + @Override public void sync() { if (!this.listeners.isEmpty()) { for (WALActionsListener listener : this.listeners) { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java index 051ce54..af63b0b 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.wal; +import com.google.common.annotations.VisibleForTesting; import java.io.Closeable; import java.io.IOException; import java.util.Set; @@ -35,8 +36,6 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; -import com.google.common.annotations.VisibleForTesting; - /** * A Write Ahead Log (WAL) provides service for reading and writing WAL edits. This interface provides * APIs for WAL users (such as RegionServer) to use the WAL (do append, sync, etc). @@ -118,6 +117,18 @@ public interface WAL { long append(HRegionInfo info, WALKey key, WALEdit edits, boolean inMemstore) throws IOException; /** + * Updates the sequence number of a specific store. + * Depending on the flag, the current sequence number is replaced only if the given one is + * bigger, or unconditionally even if it is lower than the existing one. + * @param encodedRegionName + * @param familyName + * @param sequenceid + * @param onlyIfGreater + */ + void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, + boolean onlyIfGreater); + + /** * Sync what we have in the WAL.
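A tiny stand-alone model of the updateStore() contract introduced above, with the per-store sequence ids kept in a plain map. The real WAL implementations track this state internally; the class and key layout here are illustrative only.

import java.util.HashMap;
import java.util.Map;

public class UpdateStoreSemanticsSketch {
  // (encodedRegionName, familyName) -> last known sequence id, flattened to a string key here.
  private final Map<String, Long> storeSequenceIds = new HashMap<>();

  void updateStore(String encodedRegionName, String familyName, Long sequenceId,
      boolean onlyIfGreater) {
    String key = encodedRegionName + "/" + familyName;
    Long current = storeSequenceIds.get(key);
    if (onlyIfGreater && current != null && current >= sequenceId) {
      return; // keep the existing, higher (or equal) sequence id
    }
    // no entry yet, the new id is higher, or the caller asked for an unconditional replace
    storeSequenceIds.put(key, sequenceId);
  }

  public static void main(String[] args) {
    UpdateStoreSemanticsSketch wal = new UpdateStoreSemanticsSketch();
    wal.updateStore("region-a", "cf", 10L, true);
    wal.updateStore("region-a", "cf", 5L, true);  // ignored: 5 < 10 and onlyIfGreater=true
    wal.updateStore("region-a", "cf", 5L, false); // applied: unconditional replace
    System.out.println(wal.storeSequenceIds);     // {region-a/cf=5}
  }
}

With onlyIfGreater set, the call is a no-op unless the new id advances the store; with it cleared, the value is overwritten even if it moves backwards.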
* @throws IOException */ diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index d84d3dc..3e27834 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -76,11 +76,11 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.TableState; @@ -1694,29 +1694,28 @@ public class WALSplitter { private static final double BUFFER_THRESHOLD = 0.35; private static final String KEY_DELIMITER = "#"; - private long waitRegionOnlineTimeOut; + private final long waitRegionOnlineTimeOut; private final Set recoveredRegions = Collections.synchronizedSet(new HashSet()); - private final Map writers = - new ConcurrentHashMap(); + private final Map writers = new ConcurrentHashMap<>(); // online encoded region name -> region location map private final Map onlineRegions = new ConcurrentHashMap(); - private Map tableNameToHConnectionMap = Collections - .synchronizedMap(new TreeMap()); + private final Map tableNameToHConnectionMap = Collections + .synchronizedMap(new TreeMap()); /** * Map key -> value layout * {@literal :

-> Queue} */ - private Map>> serverToBufferQueueMap = - new ConcurrentHashMap>>(); - private List thrown = new ArrayList(); + private final Map>> serverToBufferQueueMap = + new ConcurrentHashMap<>(); + private final List thrown = new ArrayList<>(); // The following sink is used in distrubitedLogReplay mode for entries of regions in a disabling // table. It's a limitation of distributedLogReplay. Because log replay needs a region is // assigned and online before it can replay wal edits while regions of disabling/disabled table // won't be assigned by AM. We can retire this code after HBASE-8234. - private LogRecoveredEditsOutputSink logRecoveredEditsOutputSink; + private final LogRecoveredEditsOutputSink logRecoveredEditsOutputSink; private boolean hasEditsInDisablingOrDisabledTables = false; public LogReplayOutputSink(PipelineController controller, EntryBuffers entryBuffers, @@ -1809,8 +1808,8 @@ public class WALSplitter { HRegionLocation loc = null; String locKey = null; List cells = edit.getCells(); - List skippedCells = new ArrayList(); - HConnection hconn = this.getConnectionByTableName(table); + List skippedCells = new ArrayList<>(); + ClusterConnection cconn = this.getConnectionByTableName(table); for (Cell cell : cells) { byte[] row = CellUtil.cloneRow(cell); @@ -1838,7 +1837,7 @@ public class WALSplitter { try { loc = - locateRegionAndRefreshLastFlushedSequenceId(hconn, table, row, + locateRegionAndRefreshLastFlushedSequenceId(cconn, table, row, encodeRegionNameStr); // skip replaying the compaction if the region is gone if (isCompactionEntry && !encodeRegionNameStr.equalsIgnoreCase( @@ -1912,13 +1911,13 @@ public class WALSplitter { * destination region is online for replay. * @throws IOException */ - private HRegionLocation locateRegionAndRefreshLastFlushedSequenceId(HConnection hconn, + private HRegionLocation locateRegionAndRefreshLastFlushedSequenceId(ClusterConnection cconn, TableName table, byte[] row, String originalEncodedRegionName) throws IOException { // fetch location from cache HRegionLocation loc = onlineRegions.get(originalEncodedRegionName); if(loc != null) return loc; // fetch location from hbase:meta directly without using cache to avoid hit old dead server - loc = hconn.getRegionLocation(table, row, true); + loc = cconn.getRegionLocation(table, row, true); if (loc == null) { throw new IOException("Can't locate location for row:" + Bytes.toString(row) + " of table:" + table); @@ -1931,7 +1930,7 @@ public class WALSplitter { if (tmpLoc != null) return tmpLoc; } - Long lastFlushedSequenceId = -1l; + Long lastFlushedSequenceId = -1L; AtomicBoolean isRecovering = new AtomicBoolean(true); loc = waitUntilRegionOnline(loc, row, this.waitRegionOnlineTimeOut, isRecovering); if (!isRecovering.get()) { @@ -2010,11 +2009,11 @@ public class WALSplitter { while (endTime > EnvironmentEdgeManager.currentTime()) { try { // Try and get regioninfo from the hosting server. 
- HConnection hconn = getConnectionByTableName(tableName); + ClusterConnection cconn = getConnectionByTableName(tableName); if(reloadLocation) { - loc = hconn.getRegionLocation(tableName, row, true); + loc = cconn.getRegionLocation(tableName, row, true); } - BlockingInterface remoteSvr = hconn.getAdmin(loc.getServerName()); + BlockingInterface remoteSvr = cconn.getAdmin(loc.getServerName()); HRegionInfo region = loc.getRegionInfo(); try { GetRegionInfoRequest request = @@ -2146,12 +2145,12 @@ public class WALSplitter { // close connections synchronized (this.tableNameToHConnectionMap) { - for (Map.Entry entry : + for (Map.Entry entry : this.tableNameToHConnectionMap.entrySet()) { - HConnection hconn = entry.getValue(); + ClusterConnection cconn = entry.getValue(); try { - hconn.clearRegionCache(); - hconn.close(); + cconn.clearRegionCache(); + cconn.close(); } catch (IOException ioe) { result.add(ioe); } @@ -2165,7 +2164,7 @@ public class WALSplitter { @Override public Map getOutputCounts() { - TreeMap ret = new TreeMap(Bytes.BYTES_COMPARATOR); + TreeMap ret = new TreeMap<>(Bytes.BYTES_COMPARATOR); synchronized (writers) { for (Map.Entry entry : writers.entrySet()) { ret.put(Bytes.toBytes(entry.getKey()), entry.getValue().editsWritten); @@ -2215,7 +2214,7 @@ public class WALSplitter { throw new IOException("Invalid location string:" + loc + " found. Replay aborted."); } - HConnection hconn = getConnectionByTableName(tableName); + ClusterConnection hconn = getConnectionByTableName(tableName); synchronized (writers) { ret = writers.get(loc); if (ret == null) { @@ -2226,18 +2225,18 @@ public class WALSplitter { return ret; } - private HConnection getConnectionByTableName(final TableName tableName) throws IOException { - HConnection hconn = this.tableNameToHConnectionMap.get(tableName); - if (hconn == null) { + private ClusterConnection getConnectionByTableName(final TableName tableName) throws IOException { + ClusterConnection cconn = this.tableNameToHConnectionMap.get(tableName); + if (cconn == null) { synchronized (this.tableNameToHConnectionMap) { - hconn = this.tableNameToHConnectionMap.get(tableName); - if (hconn == null) { - hconn = (HConnection) ConnectionFactory.createConnection(conf); - this.tableNameToHConnectionMap.put(tableName, hconn); + cconn = this.tableNameToHConnectionMap.get(tableName); + if (cconn == null) { + cconn = (ClusterConnection) ConnectionFactory.createConnection(conf); + this.tableNameToHConnectionMap.put(tableName, cconn); } } } - return hconn; + return cconn; } private TableName getTableFromLocationStr(String loc) { /** @@ -2258,7 +2257,7 @@ public class WALSplitter { private final static class RegionServerWriter extends SinkWriter { final WALEditsReplaySink sink; - RegionServerWriter(final Configuration conf, final TableName tableName, final HConnection conn) + RegionServerWriter(final Configuration conf, final TableName tableName, final ClusterConnection conn) throws IOException { this.sink = new WALEditsReplaySink(conf, tableName, conn); } diff --git hbase-server/src/main/resources/hbase-webapps/master/table.jsp hbase-server/src/main/resources/hbase-webapps/master/table.jsp index ee2a7ba..cc13972 100644 --- hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -32,10 +32,6 @@ import="org.owasp.esapi.ESAPI" import="org.apache.hadoop.conf.Configuration" import="org.apache.hadoop.util.StringUtils" - import="org.apache.hadoop.hbase.client.HTable" - 
import="org.apache.hadoop.hbase.client.Admin" - import="org.apache.hadoop.hbase.client.CompactionState" - import="org.apache.hadoop.hbase.client.RegionLocator" import="org.apache.hadoop.hbase.HRegionInfo" import="org.apache.hadoop.hbase.HRegionLocation" import="org.apache.hadoop.hbase.ServerName" @@ -50,9 +46,9 @@ import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos" import="org.apache.hadoop.hbase.TableName" import="org.apache.hadoop.hbase.HColumnDescriptor" - import="org.apache.hadoop.hbase.client.RegionReplicaUtil" import="org.apache.hadoop.hbase.HBaseConfiguration" import="org.apache.hadoop.hbase.TableNotFoundException"%> +<%@ page import="org.apache.hadoop.hbase.client.*" %> <% HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); Configuration conf = master.getConfiguration(); @@ -64,7 +60,7 @@ final boolean reverseOrder = (reverse==null||!reverse.equals("false")); String showWholeKey = request.getParameter("showwhole"); final boolean showWhole = (showWholeKey!=null && showWholeKey.equals("true")); - HTable table = null; + Table table; String tableHeader; boolean withReplica = false; ServerName rl = metaTableLocator.getMetaRegionLocation(master.getZooKeeper()); @@ -141,7 +137,7 @@ <% if ( fqtn != null ) { try { - table = (HTable) master.getConnection().getTable(TableName.valueOf(fqtn)); + table = master.getConnection().getTable(TableName.valueOf(fqtn)); if (table.getTableDescriptor().getRegionReplication() > 1) { tableHeader = "

Table Regions

"; withReplica = true; diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 010e184..bfa14cb 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -70,7 +70,6 @@ import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; @@ -144,7 +143,10 @@ import org.apache.zookeeper.ZooKeeper.States; * Not all methods work with the real cluster. * Depends on log4j being on classpath and * hbase-site.xml for logging and test-run configuration. It does not set - * logging levels nor make changes to configuration parameters. + * logging levels. + * In the configuration properties, default values for master-info-port and + * region-server-port are overridden such that a random port will be assigned (thus + * avoiding port contention if another local HBase instance is already running). *

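From a test's point of view, the port handling described in the paragraph above behaves as sketched below. This is a hedged illustration: the 3333 port and the class name are made up, while the constants are the real HConstants keys used in the hunk further down, and the printed values mirror what the new testOverridingOfDefaultPorts asserts.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;

public class MiniClusterPortsSketch {
  public static void main(String[] args) {
    // Default behaviour after this change: stock ports are rewritten to -1, so the mini
    // cluster binds random ports and cannot collide with a locally running HBase.
    HBaseTestingUtility randomPorts = new HBaseTestingUtility();
    // Should print -1 given stock hbase-default.xml values.
    System.out.println(randomPorts.getConfiguration().getInt(HConstants.MASTER_INFO_PORT, 0));

    // Opting out: any non-default value set up front is left untouched.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt(HConstants.MASTER_INFO_PORT, 3333); // hypothetical fixed port for this test
    HBaseTestingUtility fixedPort = new HBaseTestingUtility(conf);
    System.out.println(fixedPort.getConfiguration().getInt(HConstants.MASTER_INFO_PORT, 0)); // 3333
  }
}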
To preserve test data directories, pass the system property "hbase.testing.preserve.testdir" * setting it to true. */ @@ -214,12 +216,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** This is for unit tests parameterized with a two booleans. */ public static final List BOOLEAN_PARAMETERIZED = Arrays.asList(new Object[][] { - { new Boolean(false) }, - { new Boolean(true) } + {false}, + {true} }); /** This is for unit tests parameterized with a single boolean. */ - public static final List MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination() ; + public static final List MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination(); /** Compression algorithms to use in testing */ public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS ={ Compression.Algorithm.NONE, Compression.Algorithm.GZ @@ -264,7 +266,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * testing. */ private static List bloomAndCompressionCombinations() { - List configurations = new ArrayList(); + List configurations = new ArrayList<>(); for (Compression.Algorithm comprAlgo : HBaseTestingUtility.COMPRESSION_ALGORITHMS) { for (BloomType bloomType : BloomType.values()) { @@ -278,7 +280,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * Create combination of memstoreTS and tags */ private static List memStoreTSAndTagsCombination() { - List configurations = new ArrayList(); + List configurations = new ArrayList<>(); configurations.add(new Object[] { false, false }); configurations.add(new Object[] { false, true }); configurations.add(new Object[] { true, false }); @@ -287,7 +289,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } public static List memStoreTSTagsAndOffheapCombination() { - List configurations = new ArrayList(); + List configurations = new ArrayList<>(); configurations.add(new Object[] { false, false, true }); configurations.add(new Object[] { false, false, false }); configurations.add(new Object[] { false, true, true }); @@ -311,6 +313,20 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { // a hbase checksum verification failure will cause unit tests to fail ChecksumUtil.generateExceptionForChecksumFailureForTest(true); + + // prevent contention for ports if other hbase thread(s) already running + if (conf != null) { + if (conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT) + == HConstants.DEFAULT_MASTER_INFOPORT) { + conf.setInt(HConstants.MASTER_INFO_PORT, -1); + LOG.debug("Config property " + HConstants.MASTER_INFO_PORT + " changed to -1"); + } + if (conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT) + == HConstants.DEFAULT_REGIONSERVER_PORT) { + conf.setInt(HConstants.REGIONSERVER_PORT, -1); + LOG.debug("Config property " + HConstants.REGIONSERVER_PORT + " changed to -1"); + } + } } /** @@ -358,8 +374,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Returns this classes's instance of {@link Configuration}. Be careful how - * you use the returned Configuration since {@link HConnection} instances - * can be shared. The Map of HConnections is keyed by the Configuration. If + * you use the returned Configuration since {@link Connection} instances + * can be shared. The Map of Connections is keyed by the Configuration. If * say, a Connection was being used against a cluster that had been shutdown, * see {@link #shutdownMiniCluster()}, then the Connection will no longer * be wholesome. 
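As the javadoc goes on to advise, tests should copy the shared Configuration and manage their own Connection rather than leaning on the utility's cached one. A minimal sketch, assuming a test-side helper method (the method and class names are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class FreshConnectionSketch {
  // 'util' would be the test's HBaseTestingUtility instance.
  static void useTable(HBaseTestingUtility util, TableName tn) throws Exception {
    // Copy the shared Configuration so this Connection is not tied to the utility's
    // internally cached one, which becomes unusable after shutdownMiniCluster().
    Configuration copy = new Configuration(util.getConfiguration());
    try (Connection conn = ConnectionFactory.createConnection(copy);
         Table table = conn.getTable(tn)) {
      // ... use 'table' for the duration of the test ...
    }
  }
}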
Rather than use the return direct, its usually best to @@ -543,7 +559,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { //file system, the tests should use getBaseTestDir, otherwise, we can use //the working directory, and create a unique sub dir there FileSystem fs = getTestFileSystem(); - Path newDataTestDir = null; + Path newDataTestDir; if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) { File dataTestDir = new File(getDataTestDir().toString()); if (deleteOnExit()) dataTestDir.deleteOnExit(); @@ -1085,7 +1101,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { s.close(); t.close(); - getHBaseAdmin(); // create immediately the hbaseAdmin + getAdmin(); // create immediately the hbaseAdmin LOG.info("Minicluster is up"); // Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is @@ -1317,7 +1333,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public Table createTable(TableName tableName, String[] families) throws IOException { - List fams = new ArrayList(families.length); + List fams = new ArrayList<>(families.length); for (String family : families) { fams.add(Bytes.toBytes(family)); } @@ -1331,7 +1347,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(TableName tableName, byte[] family) + public Table createTable(TableName tableName, byte[] family) throws IOException{ return createTable(tableName, new byte[][]{family}); } @@ -1344,7 +1360,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createMultiRegionTable(TableName tableName, byte[] family, int numRegions) + public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions) throws IOException { if (numRegions < 3) throw new IOException("Must create at least 3 regions"); byte[] startKey = Bytes.toBytes("aaaaa"); @@ -1361,7 +1377,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(TableName tableName, byte[][] families) + public Table createTable(TableName tableName, byte[][] families) throws IOException { return createTable(tableName, families, (byte[][]) null); } @@ -1373,7 +1389,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createMultiRegionTable(TableName tableName, byte[][] families) throws IOException { + public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException { return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE); } @@ -1385,12 +1401,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. 
* @throws IOException */ - public HTable createTable(TableName tableName, byte[][] families, byte[][] splitKeys) + public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys) throws IOException { return createTable(tableName, families, splitKeys, new Configuration(getConfiguration())); } - public HTable createTable(TableName tableName, byte[][] families, + public Table createTable(TableName tableName, byte[][] families, int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException{ HTableDescriptor desc = new HTableDescriptor(tableName); @@ -1399,11 +1415,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { .setMaxVersions(numVersions); desc.addFamily(hcd); } - getHBaseAdmin().createTable(desc, startKey, endKey, numRegions); + getAdmin().createTable(desc, startKey, endKey, numRegions); // HBaseAdmin only waits for regions to appear in hbase:meta we // should wait until they are assigned waitUntilAllRegionsAssigned(tableName); - return (HTable) getConnection().getTable(tableName); + return getConnection().getTable(tableName); } /** @@ -1414,9 +1430,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c) + public Table createTable(HTableDescriptor htd, byte[][] families, Configuration c) throws IOException { - return createTable(htd, families, (byte[][]) null, c); + return createTable(htd, families, null, c); } /** @@ -1428,7 +1444,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(HTableDescriptor htd, byte[][] families, byte[][] splitKeys, + public Table createTable(HTableDescriptor htd, byte[][] families, byte[][] splitKeys, Configuration c) throws IOException { for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family); @@ -1438,11 +1454,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { hcd.setBloomFilterType(BloomType.NONE); htd.addFamily(hcd); } - getHBaseAdmin().createTable(htd, splitKeys); + getAdmin().createTable(htd, splitKeys); // HBaseAdmin only waits for regions to appear in hbase:meta // we should wait until they are assigned waitUntilAllRegionsAssigned(htd.getTableName()); - return (HTable) getConnection().getTable(htd.getTableName()); + return getConnection().getTable(htd.getTableName()); } /** @@ -1452,13 +1468,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(HTableDescriptor htd, byte[][] splitRows) + public Table createTable(HTableDescriptor htd, byte[][] splitRows) throws IOException { - getHBaseAdmin().createTable(htd, splitRows); + getAdmin().createTable(htd, splitRows); // HBaseAdmin only waits for regions to appear in hbase:meta // we should wait until they are assigned waitUntilAllRegionsAssigned(htd.getTableName()); - return (HTable) getConnection().getTable(htd.getTableName()); + return getConnection().getTable(htd.getTableName()); } /** @@ -1470,7 +1486,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. 
* @throws IOException */ - public HTable createTable(TableName tableName, byte[][] families, byte[][] splitKeys, + public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys, final Configuration c) throws IOException { return createTable(new HTableDescriptor(tableName), families, splitKeys, c); } @@ -1483,7 +1499,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(TableName tableName, byte[] family, int numVersions) + public Table createTable(TableName tableName, byte[] family, int numVersions) throws IOException { return createTable(tableName, new byte[][]{family}, numVersions); } @@ -1496,7 +1512,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(TableName tableName, byte[][] families, int numVersions) + public Table createTable(TableName tableName, byte[][] families, int numVersions) throws IOException { return createTable(tableName, families, numVersions, (byte[][]) null); } @@ -1510,17 +1526,18 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(TableName tableName, byte[][] families, int numVersions, + public Table createTable(TableName tableName, byte[][] families, int numVersions, byte[][] splitKeys) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions); desc.addFamily(hcd); } - getHBaseAdmin().createTable(desc, splitKeys); - // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned + getAdmin().createTable(desc, splitKeys); + // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are + // assigned waitUntilAllRegionsAssigned(tableName); - return (HTable) getConnection().getTable(tableName); + return getConnection().getTable(tableName); } /** @@ -1531,7 +1548,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createMultiRegionTable(TableName tableName, byte[][] families, int numVersions) + public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions) throws IOException { return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE); } @@ -1545,7 +1562,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. 
* @throws IOException */ - public HTable createTable(TableName tableName, byte[][] families, + public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { @@ -1554,13 +1571,14 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { .setBlocksize(blockSize); desc.addFamily(hcd); } - getHBaseAdmin().createTable(desc); - // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned + getAdmin().createTable(desc); + // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are + // assigned waitUntilAllRegionsAssigned(tableName); - return (HTable) getConnection().getTable(tableName); + return getConnection().getTable(tableName); } - public HTable createTable(TableName tableName, byte[][] families, + public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize, String cpName) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { @@ -1572,10 +1590,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { if(cpName != null) { desc.addCoprocessor(cpName); } - getHBaseAdmin().createTable(desc); - // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned + getAdmin().createTable(desc); + // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are + // assigned waitUntilAllRegionsAssigned(tableName); - return (HTable) getConnection().getTable(tableName); + return getConnection().getTable(tableName); } /** @@ -1586,7 +1605,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(TableName tableName, byte[][] families, + public Table createTable(TableName tableName, byte[][] families, int[] numVersions) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); @@ -1597,10 +1616,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { desc.addFamily(hcd); i++; } - getHBaseAdmin().createTable(desc); - // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned + getAdmin().createTable(desc); + // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are + // assigned waitUntilAllRegionsAssigned(tableName); - return (HTable) getConnection().getTable(tableName); + return getConnection().getTable(tableName); } /** @@ -1611,15 +1631,16 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. 
* @throws IOException */ - public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows) + public Table createTable(TableName tableName, byte[] family, byte[][] splitRows) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); HColumnDescriptor hcd = new HColumnDescriptor(family); desc.addFamily(hcd); - getHBaseAdmin().createTable(desc, splitRows); - // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned + getAdmin().createTable(desc, splitRows); + // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are + // assigned waitUntilAllRegionsAssigned(tableName); - return (HTable) getConnection().getTable(tableName); + return getConnection().getTable(tableName); } /** @@ -1629,7 +1650,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createMultiRegionTable(TableName tableName, byte[] family) throws IOException { + public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException { return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE); } @@ -1679,11 +1700,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public void deleteTable(TableName tableName) throws IOException { try { - getHBaseAdmin().disableTable(tableName); + getAdmin().disableTable(tableName); } catch (TableNotEnabledException e) { LOG.debug("Table: " + tableName + " already disabled, so just deleting it."); } - getHBaseAdmin().deleteTable(tableName); + getAdmin().deleteTable(tableName); } /** @@ -1829,10 +1850,27 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey, boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException { + return createLocalHRegionWithInMemoryFlags(tableName,startKey, stopKey, isReadOnly, + durability, wal, null, families); + } + + public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey, + byte[] stopKey, + boolean isReadOnly, Durability durability, WAL wal, boolean[] compactedMemStore, + byte[]... families) + throws IOException { HTableDescriptor htd = new HTableDescriptor(tableName); htd.setReadOnly(isReadOnly); + int i=0; for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family); + if(compactedMemStore != null && i < compactedMemStore.length) { + hcd.setInMemoryCompaction(true); + } else { + hcd.setInMemoryCompaction(false); + + } + i++; // Set default to be three versions. 
hcd.setMaxVersions(Integer.MAX_VALUE); htd.addFamily(hcd); @@ -1852,8 +1890,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return HTable to that new table * @throws IOException */ - public HTable deleteTableData(TableName tableName) throws IOException { - HTable table = (HTable) getConnection().getTable(tableName); + public Table deleteTableData(TableName tableName) throws IOException { + Table table = getConnection().getTable(tableName); Scan scan = new Scan(); ResultScanner resScan = table.getScanner(scan); for(Result res : resScan) { @@ -1872,13 +1910,14 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param preserveRegions keep the existing split points * @return HTable for the new table */ - public HTable truncateTable(final TableName tableName, final boolean preserveRegions) throws IOException { - Admin admin = getHBaseAdmin(); + public Table truncateTable(final TableName tableName, final boolean preserveRegions) throws + IOException { + Admin admin = getAdmin(); if (!admin.isTableDisabled(tableName)) { admin.disableTable(tableName); } admin.truncateTable(tableName, preserveRegions); - return (HTable) getConnection().getTable(tableName); + return getConnection().getTable(tableName); } /** @@ -1890,7 +1929,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param tableName table which must exist. * @return HTable for the new table */ - public HTable truncateTable(final TableName tableName) throws IOException { + public Table truncateTable(final TableName tableName) throws IOException { return truncateTable(tableName, false); } @@ -1947,7 +1986,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return Count of rows loaded. * @throws IOException */ - public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException { + public int loadTable(final Table t, final byte[][] f, byte[] value, + boolean writeToWAL) throws IOException { List puts = new ArrayList<>(); for (byte[] row : HBaseTestingUtility.ROWS) { Put put = new Put(row); @@ -2005,7 +2045,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } if (count != expectedCount) { String row = new String(new byte[] {b1,b2,b3}); - throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount); + throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " + + "instead of " + expectedCount); } } } @@ -2515,7 +2556,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * Create a stubbed out RegionServerService, mainly for getting FS. 
* This version is used by TestTokenAuthentication */ - public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException { + public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws + IOException { final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher()); rss.setFileSystem(getTestFileSystem()); rss.setRpcServer(rpc); @@ -2758,7 +2800,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @throws IOException */ public void closeRegion(byte[] regionName) throws IOException { - getHBaseAdmin().closeRegion(regionName, null); + getAdmin().closeRegion(regionName, null); } /** @@ -2794,7 +2836,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public HRegion getSplittableRegion(TableName tableName, int maxAttempts) { List regions = getHBaseCluster().getRegions(tableName); int regCount = regions.size(); - Set attempted = new HashSet(); + Set attempted = new HashSet<>(); int idx; int attempts = 0; do { @@ -3124,7 +3166,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append its hard-coded to 5 and * makes tests linger. Here is the exception you'll see: *

-   * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
+   * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block
+   * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
+   * blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683
+   * failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
    * 
* @param stream A DFSClient.DFSOutputStream. * @param max @@ -3155,6 +3200,21 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } /** + * Uses directly the assignment manager to assign the region. + * and waits until the specified region has completed assignment. + * @param tableName the table name + * @throws IOException + * @throw InterruptedException + * @return true if the region is assigned false otherwise. + */ + public boolean assignRegion(final HRegionInfo regionInfo) + throws IOException, InterruptedException { + final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager(); + am.assign(regionInfo); + return am.waitForAssignment(regionInfo); + } + + /** * Wait until all regions for a table in hbase:meta have a non-empty * info:server, up to a configuable timeout value (default is 60 seconds) * This means all regions have been deployed, @@ -3691,7 +3751,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { final long timeout) throws IOException, InterruptedException { long timeoutTime = System.currentTimeMillis() + timeout; while (true) { - List regions = getHBaseAdmin().getOnlineRegions(server); + List regions = getAdmin().getOnlineRegions(server); if (regions.contains(hri)) return; long now = System.currentTimeMillis(); if (now > timeoutTime) break; @@ -3710,7 +3770,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { final long timeout) throws IOException, InterruptedException { long timeoutTime = System.currentTimeMillis() + timeout; while (true) { - List regions = getHBaseAdmin().getOnlineRegions(server); + List regions = getAdmin().getOnlineRegions(server); if (regions.contains(hri)) { List rsThreads = getHBaseCluster().getLiveRegionServerThreads(); @@ -3808,7 +3868,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { @Override public boolean evaluate() throws IOException { - return getHBaseAdmin().tableExists(tableName) && getHBaseAdmin().isTableEnabled(tableName); + return getAdmin().tableExists(tableName) && getAdmin().isTableEnabled(tableName); } }; } @@ -3825,7 +3885,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { @Override public boolean evaluate() throws IOException { - return getHBaseAdmin().isTableDisabled(tableName); + return getAdmin().isTableDisabled(tableName); } }; } @@ -3842,10 +3902,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { @Override public boolean evaluate() throws IOException { - boolean tableAvailable = getHBaseAdmin().isTableAvailable(tableName); + boolean tableAvailable = getAdmin().isTableAvailable(tableName); if (tableAvailable) { try { - Canary.sniff(getHBaseAdmin(), tableName); + Canary.sniff(getAdmin(), tableName); } catch (Exception e) { throw new IOException("Canary sniff failed for table " + tableName, e); } @@ -3858,14 +3918,21 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Wait until no regions in transition. * @param timeout How long to wait. - * @throws Exception + * @throws IOException */ - public void waitUntilNoRegionsInTransition( - final long timeout) throws Exception { + public void waitUntilNoRegionsInTransition(final long timeout) throws IOException { waitFor(timeout, predicateNoRegionsInTransition()); } /** + * Wait until no regions in transition. 
(time limit 15min) + * @throws IOException + */ + public void waitUntilNoRegionsInTransition() throws IOException { + waitUntilNoRegionsInTransition(15 * 60000); + } + + /** * Wait until labels is ready in VisibilityLabelsCache. * @param timeoutMillis * @param labels @@ -3912,7 +3979,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return the list of column descriptors */ public static List generateColumnDescriptors(final String prefix) { - List htds = new ArrayList(); + List htds = new ArrayList<>(); long familyId = 0; for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) { for (DataBlockEncoding encodingType: DataBlockEncoding.values()) { @@ -3936,7 +4003,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public static Compression.Algorithm[] getSupportedCompressionAlgorithms() { String[] allAlgos = HFile.getSupportedCompressionAlgorithms(); - List supportedAlgos = new ArrayList(); + List supportedAlgos = new ArrayList<>(); for (String algoName : allAlgos) { try { Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName); @@ -3956,7 +4023,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { scan.setReversed(true); scan.addFamily(family); try (RegionScanner scanner = r.getScanner(scan)) { - List cells = new ArrayList(1); + List cells = new ArrayList<>(1); scanner.next(cells); if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) { return null; diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index 002bdb2..f788bed 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -86,6 +86,9 @@ public class MiniHBaseCluster extends HBaseCluster { throws IOException, InterruptedException { super(conf); conf.set(HConstants.MASTER_PORT, "0"); + if (conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1) { + conf.set(HConstants.MASTER_INFO_PORT, "0"); + } // Hadoop 2 CompatibilityFactory.getInstance(MetricsAssertHelper.class).init(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 02b994a..7a68359 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -31,6 +31,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.LinkedList; +import java.util.Locale; import java.util.Map; import java.util.Queue; import java.util.Random; @@ -310,8 +311,8 @@ public class PerformanceEvaluation extends Configured implements Tool { static boolean checkTable(Admin admin, TestOptions opts) throws IOException { TableName tableName = TableName.valueOf(opts.tableName); boolean needsDelete = false, exists = admin.tableExists(tableName); - boolean isReadCmd = opts.cmdName.toLowerCase().contains("read") - || opts.cmdName.toLowerCase().contains("scan"); + boolean isReadCmd = opts.cmdName.toLowerCase(Locale.ROOT).contains("read") + || opts.cmdName.toLowerCase(Locale.ROOT).contains("scan"); if (!exists && isReadCmd) { throw new IllegalStateException( "Must specify an existing table for read commands. 
Run a write command first."); @@ -2143,8 +2144,7 @@ public class PerformanceEvaluation extends Configured implements Tool { // total size in GB specified opts.totalRows = (int) opts.size * rowsPerGB; opts.perClientRunRows = opts.totalRows / opts.numClientThreads; - } else if (opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows) { - // number of rows specified + } else { opts.totalRows = opts.perClientRunRows * opts.numClientThreads; opts.size = opts.totalRows / rowsPerGB; } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java index 10c9d86..15d4bac 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java @@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; @@ -224,7 +225,7 @@ public class TestHBaseTestingUtility { // kill another active zk server currentActivePort = cluster2.killCurrentActiveZooKeeperServer(); assertTrue(currentActivePort >= defaultClientPort); - assertTrue(cluster2.getClientPort() == currentActivePort); + assertTrue(cluster2.getClientPort() == currentActivePort); assertEquals(2, cluster2.getBackupZooKeeperServerNum()); assertEquals(3, cluster2.getZooKeeperServerNum()); @@ -418,4 +419,28 @@ public class TestHBaseTestingUtility { assertNotEquals(port1, port2); Mockito.verify(random, Mockito.times(3)).nextInt(Mockito.any(Integer.class)); } + + @Test + public void testOverridingOfDefaultPorts() { + + // confirm that default port properties being overridden to "-1" + Configuration defaultConfig = HBaseConfiguration.create(); + defaultConfig.setInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); + defaultConfig.setInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT); + HBaseTestingUtility htu = new HBaseTestingUtility(defaultConfig); + assertEquals(-1, htu.getConfiguration().getInt(HConstants.MASTER_INFO_PORT, 0)); + assertEquals(-1, htu.getConfiguration().getInt(HConstants.REGIONSERVER_PORT, 0)); + + // confirm that nonDefault (custom) port settings are NOT overridden + Configuration altConfig = HBaseConfiguration.create(); + final int nonDefaultMasterInfoPort = 3333; + final int nonDefaultRegionServerPort = 4444; + altConfig.setInt(HConstants.MASTER_INFO_PORT, nonDefaultMasterInfoPort); + altConfig.setInt(HConstants.REGIONSERVER_PORT, nonDefaultRegionServerPort); + htu = new HBaseTestingUtility(altConfig); + assertEquals(nonDefaultMasterInfoPort, + htu.getConfiguration().getInt(HConstants.MASTER_INFO_PORT, 0)); + assertEquals(nonDefaultRegionServerPort + , htu.getConfiguration().getInt(HConstants.REGIONSERVER_PORT, 0)); + } } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java index d8363d4..3c10ddc 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import 
org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.MemStore; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.Store; @@ -208,6 +209,10 @@ public class TestIOFencing { @Override public void finalizeFlush() { } + + @Override public MemStore getMemStore() { + return null; + } } private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestIPv6NIOServerSocketChannel.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestIPv6NIOServerSocketChannel.java index a598bff..d9df3e8 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/TestIPv6NIOServerSocketChannel.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestIPv6NIOServerSocketChannel.java @@ -24,6 +24,7 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.nio.channels.ServerSocketChannel; +import java.util.Locale; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -124,7 +125,7 @@ public class TestIPv6NIOServerSocketChannel { //java.net.SocketException: Address family not supported by protocol family //or java.net.SocketException: Protocol family not supported Assert.assertFalse(ex.getClass().isInstance(BindException.class)); - Assert.assertTrue(ex.getMessage().toLowerCase().contains("protocol family")); + Assert.assertTrue(ex.getMessage().toLowerCase(Locale.ROOT).contains("protocol family")); LOG.info("Received expected exception:"); LOG.info(ex); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java index 227db6f..affa9b3 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java @@ -170,7 +170,7 @@ public class TestMetaTableAccessorNoCluster { return builder.build(); } }).thenReturn(ScanResponse.newBuilder().setMoreResults(false).build()); - // Associate a spied-upon HConnection with UTIL.getConfiguration. Need + // Associate a spied-upon Connection with UTIL.getConfiguration. Need // to shove this in here first so it gets picked up all over; e.g. by // HTable. 
connection = HConnectionTestingUtility.getSpiedConnection(UTIL.getConfiguration()); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java index 4c4697d..3bbd359 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java @@ -84,6 +84,8 @@ public class TestRegionRebalancing { @Before public void before() throws Exception { UTIL.getConfiguration().set("hbase.master.loadbalancer.class", this.balancerName); + // set minCostNeedBalance to 0, make sure balancer run + UTIL.getConfiguration().setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.0f); UTIL.startMiniCluster(1); this.desc = new HTableDescriptor(TableName.valueOf("test")); this.desc.addFamily(new HColumnDescriptor(FAMILY_NAME)); @@ -103,39 +105,39 @@ public class TestRegionRebalancing { admin.createTable(this.desc, Arrays.copyOfRange(HBaseTestingUtility.KEYS, 1, HBaseTestingUtility.KEYS.length)); this.regionLocator = connection.getRegionLocator(this.desc.getTableName()); - + MetaTableAccessor.fullScanMetaAndPrint(admin.getConnection()); - + assertEquals("Test table should have right number of regions", HBaseTestingUtility.KEYS.length, this.regionLocator.getStartKeys().length); - + // verify that the region assignments are balanced to start out assertRegionsAreBalanced(); - + // add a region server - total of 2 LOG.info("Started second server=" + UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName()); UTIL.getHBaseCluster().getMaster().balance(); assertRegionsAreBalanced(); - + // On a balanced cluster, calling balance() should return true assert(UTIL.getHBaseCluster().getMaster().balance() == true); - + // if we add a server, then the balance() call should return true // add a region server - total of 3 LOG.info("Started third server=" + UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName()); assert(UTIL.getHBaseCluster().getMaster().balance() == true); assertRegionsAreBalanced(); - + // kill a region server - total of 2 LOG.info("Stopped third server=" + UTIL.getHBaseCluster().stopRegionServer(2, false)); UTIL.getHBaseCluster().waitOnRegionServer(2); waitOnCrashProcessing(); UTIL.getHBaseCluster().getMaster().balance(); assertRegionsAreBalanced(); - + // start two more region servers - total of 4 LOG.info("Readding third server=" + UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName()); @@ -253,10 +255,7 @@ public class TestRegionRebalancing { Thread.sleep(200); } catch (InterruptedException e) {} } - RegionStates regionStates = UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); - while (!regionStates.getRegionsInTransition().isEmpty()) { - Threads.sleep(100); - } + UTIL.waitUntilNoRegionsInTransition(); } } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java deleted file mode 100644 index 76e5842..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java +++ /dev/null @@ -1,379 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Increment; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.RowMutations; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.filter.BinaryComparator; -import org.apache.hadoop.hbase.filter.CompareFilter; -import org.apache.hadoop.hbase.filter.RowFilter; -import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import java.io.IOException; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertEquals; - -@Category(MediumTests.class) -public class TestRegionServerMetrics { - private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final TableName TABLE_NAME = TableName.valueOf("test"); - private static final byte[] CF1 = "c1".getBytes(); - private static final byte[] CF2 = "c2".getBytes(); - - private static final byte[] ROW1 = "a".getBytes(); - private static final byte[] ROW2 = "b".getBytes(); - private static final byte[] ROW3 = "c".getBytes(); - private static final byte[] COL1 = "q1".getBytes(); - private static final byte[] COL2 = "q2".getBytes(); - private static final byte[] COL3 = "q3".getBytes(); - private static final byte[] VAL1 = "v1".getBytes(); - private static final byte[] VAL2 = "v2".getBytes(); - private static final byte[] VAL3 = Bytes.toBytes(0L); - - private static final int MAX_TRY = 20; - private static final int SLEEP_MS = 100; - private static final int TTL = 1; - - private static Admin admin; - private static Collection serverNames; - private static Table table; - private static List tableRegions; - - private static Map requestsMap = new HashMap<>(); - private static Map requestsMapPrev = new HashMap<>(); - - @BeforeClass - public static void setUpOnce() throws Exception { - TEST_UTIL.startMiniCluster(); - admin = TEST_UTIL.getAdmin(); - serverNames = admin.getClusterStatus().getServers(); - table = createTable(); - putData(); - tableRegions = admin.getTableRegions(TABLE_NAME); - - for (Metric metric : Metric.values()) { - requestsMap.put(metric, 0L); - requestsMapPrev.put(metric, 0L); - } - } - - private static Table createTable() throws IOException { - HTableDescriptor td = new HTableDescriptor(TABLE_NAME); - HColumnDescriptor cd1 = new 
HColumnDescriptor(CF1); - td.addFamily(cd1); - HColumnDescriptor cd2 = new HColumnDescriptor(CF2); - cd2.setTimeToLive(TTL); - td.addFamily(cd2); - - admin.createTable(td); - return TEST_UTIL.getConnection().getTable(TABLE_NAME); - } - - private static void testReadRequests(long resultCount, - long expectedReadRequests, long expectedFilteredReadRequests) - throws IOException, InterruptedException { - updateMetricsMap(); - System.out.println("requestsMapPrev = " + requestsMapPrev); - System.out.println("requestsMap = " + requestsMap); - - assertEquals(expectedReadRequests, - requestsMap.get(Metric.REGION_READ) - requestsMapPrev.get(Metric.REGION_READ)); - assertEquals(expectedReadRequests, - requestsMap.get(Metric.SERVER_READ) - requestsMapPrev.get(Metric.SERVER_READ)); - assertEquals(expectedFilteredReadRequests, - requestsMap.get(Metric.FILTERED_REGION_READ) - - requestsMapPrev.get(Metric.FILTERED_REGION_READ)); - assertEquals(expectedFilteredReadRequests, - requestsMap.get(Metric.FILTERED_SERVER_READ) - - requestsMapPrev.get(Metric.FILTERED_SERVER_READ)); - assertEquals(expectedReadRequests, resultCount); - } - - private static void updateMetricsMap() throws IOException, InterruptedException { - for (Metric metric : Metric.values()) { - requestsMapPrev.put(metric, requestsMap.get(metric)); - } - - ServerLoad serverLoad = null; - RegionLoad regionLoadOuter = null; - boolean metricsUpdated = false; - for (int i = 0; i < MAX_TRY; i++) { - for (ServerName serverName : serverNames) { - serverLoad = admin.getClusterStatus().getLoad(serverName); - - Map regionsLoad = serverLoad.getRegionsLoad(); - for (HRegionInfo tableRegion : tableRegions) { - RegionLoad regionLoad = regionsLoad.get(tableRegion.getRegionName()); - if (regionLoad != null) { - regionLoadOuter = regionLoad; - for (Metric metric : Metric.values()) { - if (getReadRequest(serverLoad, regionLoad, metric) > requestsMapPrev.get(metric)) { - for (Metric metricInner : Metric.values()) { - requestsMap.put(metricInner, getReadRequest(serverLoad, regionLoad, metricInner)); - } - metricsUpdated = true; - break; - } - } - } - } - } - if (metricsUpdated) { - break; - } - Thread.sleep(SLEEP_MS); - } - if (!metricsUpdated) { - for (Metric metric : Metric.values()) { - requestsMap.put(metric, getReadRequest(serverLoad, regionLoadOuter, metric)); - } - } - } - - private static long getReadRequest(ServerLoad serverLoad, RegionLoad regionLoad, Metric metric) { - switch (metric) { - case REGION_READ: - return regionLoad.getReadRequestsCount(); - case SERVER_READ: - return serverLoad.getReadRequestsCount(); - case FILTERED_REGION_READ: - return regionLoad.getFilteredReadRequestsCount(); - case FILTERED_SERVER_READ: - return serverLoad.getFilteredReadRequestsCount(); - default: - throw new IllegalStateException(); - } - } - - private static void putData() throws IOException { - Put put; - - put = new Put(ROW1); - put.addColumn(CF1, COL1, VAL1); - put.addColumn(CF1, COL2, VAL2); - put.addColumn(CF1, COL3, VAL3); - table.put(put); - put = new Put(ROW2); - put.addColumn(CF1, COL1, VAL2); // put val2 instead of val1 - put.addColumn(CF1, COL2, VAL2); - table.put(put); - put = new Put(ROW3); - put.addColumn(CF1, COL1, VAL1); - put.addColumn(CF1, COL2, VAL2); - table.put(put); - } - - private static void putTTLExpiredData() throws IOException, InterruptedException { - Put put; - - put = new Put(ROW1); - put.addColumn(CF2, COL1, VAL1); - put.addColumn(CF2, COL2, VAL2); - table.put(put); - - Thread.sleep(TTL * 1000); - - put = new Put(ROW2); - 
put.addColumn(CF2, COL1, VAL1); - put.addColumn(CF2, COL2, VAL2); - table.put(put); - - put = new Put(ROW3); - put.addColumn(CF2, COL1, VAL1); - put.addColumn(CF2, COL2, VAL2); - table.put(put); - } - - @AfterClass - public static void tearDownOnce() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testReadRequestsCountNotFiltered() throws Exception { - int resultCount; - Scan scan; - Append append; - Put put; - Increment increment; - Get get; - - // test for scan - scan = new Scan(); - try (ResultScanner scanner = table.getScanner(scan)) { - resultCount = 0; - for (Result ignore : scanner) { - resultCount++; - } - testReadRequests(resultCount, 3, 0); - } - - // test for scan - scan = new Scan(ROW2, ROW3); - try (ResultScanner scanner = table.getScanner(scan)) { - resultCount = 0; - for (Result ignore : scanner) { - resultCount++; - } - testReadRequests(resultCount, 1, 0); - } - - // test for get - get = new Get(ROW2); - Result result = table.get(get); - resultCount = result.isEmpty() ? 0 : 1; - testReadRequests(resultCount, 1, 0); - - // test for increment - increment = new Increment(ROW1); - increment.addColumn(CF1, COL3, 1); - result = table.increment(increment); - resultCount = result.isEmpty() ? 0 : 1; - testReadRequests(resultCount, 1, 0); - - // test for checkAndPut - put = new Put(ROW1); - put.addColumn(CF1, COL2, VAL2); - boolean checkAndPut = - table.checkAndPut(ROW1, CF1, COL2, CompareFilter.CompareOp.EQUAL, VAL2, put); - resultCount = checkAndPut ? 1 : 0; - testReadRequests(resultCount, 1, 0); - - // test for append - append = new Append(ROW1); - append.add(CF1, COL2, VAL2); - result = table.append(append); - resultCount = result.isEmpty() ? 0 : 1; - testReadRequests(resultCount, 1, 0); - - // test for checkAndMutate - put = new Put(ROW1); - put.addColumn(CF1, COL1, VAL1); - RowMutations rm = new RowMutations(ROW1); - rm.add(put); - boolean checkAndMutate = - table.checkAndMutate(ROW1, CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1, rm); - resultCount = checkAndMutate ? 1 : 0; - testReadRequests(resultCount, 1, 0); - } - - @Test - public void testReadRequestsCountWithFilter() throws Exception { - int resultCount; - Scan scan; - - // test for scan - scan = new Scan(); - scan.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1)); - try (ResultScanner scanner = table.getScanner(scan)) { - resultCount = 0; - for (Result ignore : scanner) { - resultCount++; - } - testReadRequests(resultCount, 2, 1); - } - - // test for scan - scan = new Scan(); - scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1))); - try (ResultScanner scanner = table.getScanner(scan)) { - resultCount = 0; - for (Result ignore : scanner) { - resultCount++; - } - testReadRequests(resultCount, 1, 2); - } - - // test for scan - scan = new Scan(ROW2, ROW3); - scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1))); - try (ResultScanner scanner = table.getScanner(scan)) { - resultCount = 0; - for (Result ignore : scanner) { - resultCount++; - } - testReadRequests(resultCount, 0, 1); - } - - // fixme filtered get should not increase readRequestsCount -// Get get = new Get(ROW2); -// get.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1)); -// Result result = table.get(get); -// resultCount = result.isEmpty() ? 
0 : 1; -// testReadRequests(resultCount, 0, 1); - } - - @Test - public void testReadRequestsCountWithDeletedRow() throws Exception { - try { - Delete delete = new Delete(ROW3); - table.delete(delete); - - Scan scan = new Scan(); - try (ResultScanner scanner = table.getScanner(scan)) { - int resultCount = 0; - for (Result ignore : scanner) { - resultCount++; - } - testReadRequests(resultCount, 2, 1); - } - } finally { - Put put = new Put(ROW3); - put.addColumn(CF1, COL1, VAL1); - put.addColumn(CF1, COL2, VAL2); - table.put(put); - } - } - - @Test - public void testReadRequestsCountWithTTLExpiration() throws Exception { - putTTLExpiredData(); - - Scan scan = new Scan(); - scan.addFamily(CF2); - try (ResultScanner scanner = table.getScanner(scan)) { - int resultCount = 0; - for (Result ignore : scanner) { - resultCount++; - } - testReadRequests(resultCount, 2, 1); - } - } - - private enum Metric {REGION_READ, SERVER_READ, FILTERED_REGION_READ, FILTERED_SERVER_READ} -} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TimedOutTestsListener.java hbase-server/src/test/java/org/apache/hadoop/hbase/TimedOutTestsListener.java index 9b3784d..5beeace 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/TimedOutTestsListener.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/TimedOutTestsListener.java @@ -27,6 +27,7 @@ import java.lang.management.ThreadMXBean; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.Date; +import java.util.Locale; import java.util.Map; import org.junit.runner.notification.Failure; @@ -93,7 +94,7 @@ public class TimedOutTestsListener extends RunListener { thread.getPriority(), thread.getId(), Thread.State.WAITING.equals(thread.getState()) ? - "in Object.wait()" : thread.getState().name().toLowerCase(), + "in Object.wait()" : thread.getState().name().toLowerCase(Locale.ROOT), Thread.State.WAITING.equals(thread.getState()) ? "WAITING (on object monitor)" : thread.getState())); for (StackTraceElement stackTraceElement : e.getValue()) { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index 24ef5b2..c8ccd2a 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -37,17 +37,17 @@ import org.mockito.Mockito; public class HConnectionTestingUtility { /* * Not part of {@link HBaseTestingUtility} because this class is not - * in same package as {@link HConnection}. Would have to reveal ugly + * in same package as {@link ClusterConnection}. Would have to reveal ugly * {@link ConnectionImplementation} innards to HBaseTestingUtility to give it access. */ /** - * Get a Mocked {@link HConnection} that goes with the passed conf + * Get a Mocked {@link ClusterConnection} that goes with the passed conf * configuration instance. Minimally the mock will return * conf when {@link ClusterConnection#getConfiguration()} is invoked. * Be sure to shutdown the connection when done by calling * {@link Connection#close()} else it will stick around; this is probably not what you want. 
* @param conf configuration - * @return HConnection object for conf + * @return ClusterConnection object for conf * @throws ZooKeeperConnectionException */ public static ClusterConnection getMockedConnection(final Configuration conf) @@ -126,7 +126,7 @@ public class HConnectionTestingUtility { RpcRetryingCallerFactory.instantiate(conf, RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR, null)); Mockito.when(c.getRpcControllerFactory()).thenReturn(Mockito.mock(RpcControllerFactory.class)); - HTableInterface t = Mockito.mock(HTableInterface.class); + Table t = Mockito.mock(Table.class); Mockito.when(c.getTable((TableName)Mockito.any())).thenReturn(t); ResultScanner rs = Mockito.mock(ResultScanner.class); Mockito.when(t.getScanner((Scan)Mockito.any())).thenReturn(rs); @@ -139,7 +139,7 @@ public class HConnectionTestingUtility { * Be sure to shutdown the connection when done by calling * {@link Connection#close()} else it will stick around; this is probably not what you want. * @param conf configuration - * @return HConnection object for conf + * @return ClusterConnection object for conf * @throws ZooKeeperConnectionException * @see @link * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index ce48032..fd55f66 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -85,7 +85,6 @@ public class TestAdmin1 { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true); TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6); @@ -503,8 +502,6 @@ public class TestAdmin1 { public void testOnlineChangeTableSchema() throws IOException, InterruptedException { final TableName tableName = TableName.valueOf("changeTableSchemaOnline"); - TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean( - "hbase.online.schema.update.enable", true); HTableDescriptor [] tables = admin.listTables(); int numTables = tables.length; TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); @@ -588,55 +585,16 @@ public class TestAdmin1 { assertFalse(this.admin.tableExists(tableName)); } - @Test (timeout=300000) - public void testShouldFailOnlineSchemaUpdateIfOnlineSchemaIsNotEnabled() - throws Exception { - final TableName tableName = TableName.valueOf("changeTableSchemaOnlineFailure"); - TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean( - "hbase.online.schema.update.enable", false); - HTableDescriptor[] tables = admin.listTables(); - int numTables = tables.length; - TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); - tables = this.admin.listTables(); - assertEquals(numTables + 1, tables.length); - - // FIRST, do htabledescriptor changes. - HTableDescriptor htd = this.admin.getTableDescriptor(tableName); - // Make a copy and assert copy is good. - HTableDescriptor copy = new HTableDescriptor(htd); - assertTrue(htd.equals(copy)); - // Now amend the copy. Introduce differences. 
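For context on the HConnectionTestingUtility change above, which swaps the mocked HTableInterface for the Table interface: the following is a minimal, self-contained sketch of that mocking pattern, assuming only Mockito and the HBase client interfaces. It illustrates the idea rather than reproducing the project's helper; the class and method names are invented for the example.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.mockito.Mockito;

public class MockedTableSketch {
  // Builds a mocked ClusterConnection whose getTable() hands back a mocked Table,
  // and whose Table hands back a mocked ResultScanner for any Scan.
  static ClusterConnection mockConnectionWithTable() throws IOException {
    ClusterConnection conn = Mockito.mock(ClusterConnection.class);
    Table table = Mockito.mock(Table.class);
    Mockito.when(conn.getTable((TableName) Mockito.any())).thenReturn(table);
    ResultScanner scanner = Mockito.mock(ResultScanner.class);
    Mockito.when(table.getScanner((Scan) Mockito.any())).thenReturn(scanner);
    return conn;
  }
}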
- long newFlushSize = htd.getMemStoreFlushSize() / 2; - if (newFlushSize <=0) { - newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2; - } - copy.setMemStoreFlushSize(newFlushSize); - final String key = "anyoldkey"; - assertTrue(htd.getValue(key) == null); - copy.setValue(key, key); - boolean expectedException = false; - try { - admin.modifyTable(tableName, copy); - } catch (TableNotDisabledException re) { - expectedException = true; - } - assertTrue("Online schema update should not happen.", expectedException); - - // Reset the value for the other tests - TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean( - "hbase.online.schema.update.enable", true); - } - protected void verifyRoundRobinDistribution(ClusterConnection c, RegionLocator regionLocator, int expectedRegions) throws IOException { int numRS = c.getCurrentNrHRS(); List regions = regionLocator.getAllRegionLocations(); - Map> server2Regions = new HashMap>(); + Map> server2Regions = new HashMap<>(); for (HRegionLocation loc : regions) { ServerName server = loc.getServerName(); List regs = server2Regions.get(server); if (regs == null) { - regs = new ArrayList(); + regs = new ArrayList<>(); server2Regions.put(server, regs); } regs.add(loc.getRegionInfo()); @@ -1176,7 +1134,7 @@ public class TestAdmin1 { byte[][] splitRows = new byte[2][]; splitRows[0] = new byte[]{(byte)'4'}; splitRows[1] = new byte[]{(byte)'7'}; - TEST_UTIL.getHBaseAdmin().createTable(desc, splitRows); + TEST_UTIL.getAdmin().createTable(desc, splitRows); List oldRegions; do { oldRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName); @@ -1203,7 +1161,7 @@ public class TestAdmin1 { // the element at index 1 would be a replica (since the metareader gives us ordered // regions). Try splitting that region via the split API . Should fail try { - TEST_UTIL.getHBaseAdmin().splitRegion(regions.get(1).getFirst().getRegionName()); + TEST_UTIL.getAdmin().splitRegion(regions.get(1).getFirst().getRegionName()); } catch (IllegalArgumentException ex) { gotException = true; } @@ -1222,7 +1180,7 @@ public class TestAdmin1 { gotException = false; // Try merging a replica with another. Should fail. 
try { - TEST_UTIL.getHBaseAdmin().mergeRegions(regions.get(1).getFirst().getEncodedNameAsBytes(), + TEST_UTIL.getAdmin().mergeRegions(regions.get(1).getFirst().getEncodedNameAsBytes(), regions.get(2).getFirst().getEncodedNameAsBytes(), true); } catch (IllegalArgumentException m) { gotException = true; @@ -1233,7 +1191,8 @@ public class TestAdmin1 { DispatchMergingRegionsRequest request = RequestConverter .buildDispatchMergingRegionsRequest(regions.get(1).getFirst().getEncodedNameAsBytes(), regions.get(2).getFirst().getEncodedNameAsBytes(), true); - TEST_UTIL.getHBaseAdmin().getConnection().getMaster().dispatchMergingRegions(null, request); + ((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster() + .dispatchMergingRegions(null, request); } catch (ServiceException m) { Throwable t = m.getCause(); do { @@ -1252,8 +1211,8 @@ public class TestAdmin1 { moveRegionAndWait(regions.get(2).getFirst(), regions.get(1).getSecond()); } try { - AdminService.BlockingInterface admin = TEST_UTIL.getHBaseAdmin().getConnection() - .getAdmin(regions.get(1).getSecond()); + AdminService.BlockingInterface admin = ((ClusterConnection) TEST_UTIL.getAdmin() + .getConnection()).getAdmin(regions.get(1).getSecond()); ProtobufUtil.mergeRegions(null, admin, regions.get(1).getFirst(), regions.get(2).getFirst(), true, null); } catch (MergeRegionException mm) { @@ -1266,7 +1225,7 @@ public class TestAdmin1 { throws InterruptedException, MasterNotRunningException, ZooKeeperConnectionException, IOException { HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); - TEST_UTIL.getHBaseAdmin().move( + TEST_UTIL.getAdmin().move( destRegion.getEncodedNameAsBytes(), Bytes.toBytes(destServer.getServerName())); while (true) { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index ff53c49..d088fc4 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -83,7 +83,6 @@ public class TestAdmin2 { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true); TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java index f94ed2f..ba75d6e 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java @@ -165,7 +165,7 @@ public class TestBlockEvictionFromClient { @Test public void testBlockEvictionWithParallelScans() throws Exception { - HTable table = null; + Table table = null; try { latch = new CountDownLatch(1); TableName tableName = TableName.valueOf("testBlockEvictionWithParallelScans"); @@ -173,7 +173,7 @@ public class TestBlockEvictionFromClient { table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName()); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = 
locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions( regionName); @@ -253,7 +253,7 @@ public class TestBlockEvictionFromClient { @Test public void testParallelGetsAndScans() throws IOException, InterruptedException { - HTable table = null; + Table table = null; try { latch = new CountDownLatch(2); // Check if get() returns blocks on its close() itself @@ -264,7 +264,7 @@ public class TestBlockEvictionFromClient { table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName()); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions( regionName); @@ -312,7 +312,7 @@ public class TestBlockEvictionFromClient { @Test public void testGetWithCellsInDifferentFiles() throws IOException, InterruptedException { - HTable table = null; + Table table = null; try { latch = new CountDownLatch(1); // Check if get() returns blocks on its close() itself @@ -323,7 +323,7 @@ public class TestBlockEvictionFromClient { table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName()); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions( regionName); @@ -374,7 +374,7 @@ public class TestBlockEvictionFromClient { // TODO : check how block index works here public void testGetsWithMultiColumnsAndExplicitTracker() throws IOException, InterruptedException { - HTable table = null; + Table table = null; try { latch = new CountDownLatch(1); // Check if get() returns blocks on its close() itself @@ -385,7 +385,7 @@ public class TestBlockEvictionFromClient { table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName()); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions( regionName); @@ -462,7 +462,7 @@ public class TestBlockEvictionFromClient { @Test public void testGetWithMultipleColumnFamilies() throws IOException, InterruptedException { - HTable table = null; + Table table = null; try { latch = new CountDownLatch(1); // Check if get() returns blocks on its close() itself @@ -478,7 +478,7 @@ public class TestBlockEvictionFromClient { table = TEST_UTIL.createTable(tableName, fams, 1, 1024, CustomInnerRegionObserver.class.getName()); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions( regionName); @@ -557,12 +557,12 @@ public class 
TestBlockEvictionFromClient { @Test public void testBlockRefCountAfterSplits() throws IOException, InterruptedException { - HTable table = null; + Table table = null; try { TableName tableName = TableName.valueOf("testBlockRefCountAfterSplits"); table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); @@ -608,7 +608,7 @@ public class TestBlockEvictionFromClient { @Test public void testMultiGets() throws IOException, InterruptedException { - HTable table = null; + Table table = null; try { latch = new CountDownLatch(2); // Check if get() returns blocks on its close() itself @@ -619,7 +619,7 @@ public class TestBlockEvictionFromClient { table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName()); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions( regionName); @@ -687,7 +687,7 @@ public class TestBlockEvictionFromClient { } @Test public void testScanWithMultipleColumnFamilies() throws IOException, InterruptedException { - HTable table = null; + Table table = null; try { latch = new CountDownLatch(1); // Check if get() returns blocks on its close() itself @@ -702,7 +702,7 @@ public class TestBlockEvictionFromClient { table = TEST_UTIL.createTable(tableName, fams, 1, 1024, CustomInnerRegionObserver.class.getName()); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions( regionName); @@ -791,7 +791,7 @@ public class TestBlockEvictionFromClient { @Test public void testParallelGetsAndScanWithWrappedRegionScanner() throws IOException, InterruptedException { - HTable table = null; + Table table = null; try { latch = new CountDownLatch(2); // Check if get() returns blocks on its close() itself @@ -802,7 +802,7 @@ public class TestBlockEvictionFromClient { table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName()); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions( regionName); @@ -859,7 +859,7 @@ public class TestBlockEvictionFromClient { private void testScanWithCompactionInternals(String tableNameStr, boolean reversed) throws IOException, InterruptedException { - HTable table = null; + Table table = null; try { latch = new CountDownLatch(1); compactionLatch = new CountDownLatch(1); @@ -868,7 +868,7 @@ public class TestBlockEvictionFromClient { table = 
TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName()); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions( regionName); @@ -977,7 +977,7 @@ public class TestBlockEvictionFromClient { public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush() throws IOException, InterruptedException { // do flush and scan in parallel - HTable table = null; + Table table = null; try { latch = new CountDownLatch(1); compactionLatch = new CountDownLatch(1); @@ -987,7 +987,7 @@ public class TestBlockEvictionFromClient { table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName()); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions( regionName); @@ -1107,7 +1107,7 @@ public class TestBlockEvictionFromClient { @Test public void testScanWithException() throws IOException, InterruptedException { - HTable table = null; + Table table = null; try { latch = new CountDownLatch(1); exceptionLatch = new CountDownLatch(1); @@ -1117,7 +1117,7 @@ public class TestBlockEvictionFromClient { table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName()); // get the block cache and region - RegionLocator locator = table.getRegionLocator(); + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions( regionName); @@ -1225,7 +1225,7 @@ public class TestBlockEvictionFromClient { } } - private void insertData(HTable table) throws IOException { + private void insertData(Table table) throws IOException { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, data); table.put(put); @@ -1238,7 +1238,7 @@ public class TestBlockEvictionFromClient { table.put(put); } - private ScanThread[] initiateScan(HTable table, boolean reverse) throws IOException, + private ScanThread[] initiateScan(Table table, boolean reverse) throws IOException, InterruptedException { ScanThread[] scanThreads = new ScanThread[NO_OF_THREADS]; for (int i = 0; i < NO_OF_THREADS; i++) { @@ -1250,7 +1250,7 @@ public class TestBlockEvictionFromClient { return scanThreads; } - private GetThread[] initiateGet(HTable table, boolean tracker, boolean multipleCFs) + private GetThread[] initiateGet(Table table, boolean tracker, boolean multipleCFs) throws IOException, InterruptedException { GetThread[] getThreads = new GetThread[NO_OF_THREADS]; for (int i = 0; i < NO_OF_THREADS; i++) { @@ -1262,7 +1262,7 @@ public class TestBlockEvictionFromClient { return getThreads; } - private MultiGetThread[] initiateMultiGet(HTable table) + private MultiGetThread[] initiateMultiGet(Table table) throws IOException, InterruptedException { MultiGetThread[] multiGetThreads = new MultiGetThread[NO_OF_THREADS]; for (int i = 0; i < NO_OF_THREADS; i++) { @@ -1337,9 
+1337,9 @@ public class TestBlockEvictionFromClient { } private static class MultiGetThread extends Thread { - private final HTable table; + private final Table table; private final List gets = new ArrayList(); - public MultiGetThread(HTable table) { + public MultiGetThread(Table table) { this.table = table; } @Override @@ -1357,11 +1357,11 @@ public class TestBlockEvictionFromClient { } private static class GetThread extends Thread { - private final HTable table; + private final Table table; private final boolean tracker; private final boolean multipleCFs; - public GetThread(HTable table, boolean tracker, boolean multipleCFs) { + public GetThread(Table table, boolean tracker, boolean multipleCFs) { this.table = table; this.tracker = tracker; this.multipleCFs = multipleCFs; @@ -1376,7 +1376,7 @@ public class TestBlockEvictionFromClient { } } - private void initiateGet(HTable table) throws IOException { + private void initiateGet(Table table) throws IOException { Get get = new Get(ROW); if (tracker) { // Change this @@ -1421,10 +1421,10 @@ public class TestBlockEvictionFromClient { } private static class ScanThread extends Thread { - private final HTable table; + private final Table table; private final boolean reverse; - public ScanThread(HTable table, boolean reverse) { + public ScanThread(Table table, boolean reverse) { this.table = table; this.reverse = reverse; } @@ -1438,7 +1438,7 @@ public class TestBlockEvictionFromClient { } } - private void initiateScan(HTable table) throws IOException { + private void initiateScan(Table table) throws IOException { Scan scan = new Scan(); if (reverse) { scan.setReversed(true); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java index e82b1c7..baec37e 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java @@ -61,7 +61,7 @@ public class TestClientPushback { private static final TableName tableName = TableName.valueOf("client-pushback"); private static final byte[] family = Bytes.toBytes("f"); private static final byte[] qualifier = Bytes.toBytes("q"); - private static long flushSizeBytes = 1024; + private static final long flushSizeBytes = 1024; @BeforeClass public static void setupCluster() throws Exception{ @@ -91,7 +91,7 @@ public class TestClientPushback { Configuration conf = UTIL.getConfiguration(); ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf); - HTable table = (HTable) conn.getTable(tableName); + Table table = conn.getTable(tableName); HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0); Region region = rs.getOnlineRegions(tableName).get(0); @@ -130,13 +130,13 @@ public class TestClientPushback { // Reach into the connection and submit work directly to AsyncProcess so we can // monitor how long the submission was delayed via a callback - List ops = new ArrayList(1); + List ops = new ArrayList<>(1); ops.add(p); final CountDownLatch latch = new CountDownLatch(1); final AtomicLong endTime = new AtomicLong(); long startTime = EnvironmentEdgeManager.currentTime(); - table.mutator.ap.submit(tableName, ops, true, new Batch.Callback() { + ((HTable) table).mutator.ap.submit(tableName, ops, true, new Batch.Callback() { @Override public void update(byte[] region, byte[] row, Result result) { endTime.set(EnvironmentEdgeManager.currentTime()); @@ -172,7 +172,7 @@ 
public class TestClientPushback { public void testMutateRowStats() throws IOException { Configuration conf = UTIL.getConfiguration(); ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf); - HTable table = (HTable) conn.getTable(tableName); + Table table = conn.getTable(tableName); HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0); Region region = rs.getOnlineRegions(tableName).get(0); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java index 36276fa..109e416 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java @@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue; import java.net.SocketAddress; import java.net.SocketTimeoutException; +import java.net.UnknownHostException; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; @@ -142,7 +143,7 @@ public class TestClientTimeouts { // Return my own instance, one that does random timeouts @Override public BlockingRpcChannel createBlockingRpcChannel(ServerName sn, - User ticket, int rpcTimeout) { + User ticket, int rpcTimeout) throws UnknownHostException { return new RandomTimeoutBlockingRpcChannel(this, sn, ticket, rpcTimeout); } } @@ -157,7 +158,7 @@ public class TestClientTimeouts { private static AtomicInteger invokations = new AtomicInteger(); RandomTimeoutBlockingRpcChannel(final RpcClientImpl rpcClient, final ServerName sn, - final User ticket, final int rpcTimeout) { + final User ticket, final int rpcTimeout) throws UnknownHostException { super(rpcClient, sn, ticket, rpcTimeout); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java index aeb82f4..65a67d0 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java @@ -61,7 +61,6 @@ public class TestCloneSnapshotFromClient { protected static void setupConfiguration() { TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); - TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true); TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10); TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java new file mode 100644 index 0000000..af7b652 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.fail; + +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.net.UnknownHostException; + +/** + * Tests that we fail fast when hostname resolution is not working and do not cache + * unresolved InetSocketAddresses. + */ +@Category({MediumTests.class, ClientTests.class}) +public class TestConnectionImplementation { + private static HBaseTestingUtility testUtil; + private static ConnectionImplementation conn; + + @BeforeClass + public static void setupBeforeClass() throws Exception { + testUtil = HBaseTestingUtility.createLocalHTU(); + testUtil.startMiniCluster(); + conn = (ConnectionImplementation) testUtil.getConnection(); + } + + @AfterClass + public static void teardownAfterClass() throws Exception { + conn.close(); + testUtil.shutdownMiniCluster(); + } + + @Test(expected = UnknownHostException.class) + public void testGetAdminBadHostname() throws Exception { + // verify that we can get an instance with the cluster hostname + ServerName master = testUtil.getHBaseCluster().getMaster().getServerName(); + try { + conn.getAdmin(master); + } catch (UnknownHostException uhe) { + fail("Obtaining admin to the cluster master should have succeeded"); + } + + // test that we fail to get an admin to an unresolvable hostname, which + // means it won't be cached + ServerName badHost = + ServerName.valueOf("unknownhost.example.com:" + HConstants.DEFAULT_MASTER_PORT, + System.currentTimeMillis()); + conn.getAdmin(badHost); + fail("Obtaining admin to unresolvable hostname should have failed"); + } + + @Test(expected = UnknownHostException.class) + public void testGetClientBadHostname() throws Exception { + // verify that we can get an instance with the cluster hostname + ServerName rs = testUtil.getHBaseCluster().getRegionServer(0).getServerName(); + try { + conn.getClient(rs); + } catch (UnknownHostException uhe) { + fail("Obtaining client to the cluster regionserver should have succeeded"); + } + + // test that we fail to get a client to an unresolvable hostname, which + // means it won't be cached + ServerName badHost = + ServerName.valueOf("unknownhost.example.com:" + HConstants.DEFAULT_REGIONSERVER_PORT, + System.currentTimeMillis()); + conn.getClient(badHost); + fail("Obtaining client to unresolvable hostname should have failed"); + } +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java index aea8a2b..69729f0 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionUtils.java @@ -56,6 +56,26 @@ public class 
TestConnectionUtils { } @Test + public void testAddJitter() { + long basePause = 10000; + long maxTimeExpected = (long) (basePause * 1.25f); + long minTimeExpected = (long) (basePause * 0.75f); + int testTries = 100; + + Set timeSet = new TreeSet(); + for (int i = 0; i < testTries; i++) { + long withJitter = ConnectionUtils.addJitter(basePause, 0.5f); + assertTrue(withJitter >= minTimeExpected); + assertTrue(withJitter <= maxTimeExpected); + // Add the long to the set + timeSet.add(withJitter); + } + + //Make sure that most are unique. some overlap will happen + assertTrue(timeSet.size() > (testTries * 0.90)); + } + + @Test public void testGetPauseTime() { long pauseTime; long baseTime = 100; diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index b35a58f..ca4b609 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -3809,51 +3809,6 @@ public class TestFromClientSide { } @Test - public void testRowsPutBufferedOneFlush() throws IOException { - final byte [] CONTENTS_FAMILY = Bytes.toBytes("contents"); - final byte [] SMALL_FAMILY = Bytes.toBytes("smallfam"); - final byte [] value = Bytes.toBytes("abcd"); - final int NB_BATCH_ROWS = 10; - Table t = TEST_UTIL.createTable(TableName.valueOf("testRowsPutBufferedOneFlush"), - new byte[][] { CONTENTS_FAMILY, SMALL_FAMILY }); - - // Only do this test if it is a HTable - if(t instanceof HTableInterface) { - HTable table = (HTable) t; - table.setAutoFlushTo(false); - ArrayList rowsUpdate = new ArrayList(); - for (int i = 0; i < NB_BATCH_ROWS * 10; i++) { - byte[] row = Bytes.toBytes("row" + i); - Put put = new Put(row); - put.setDurability(Durability.SKIP_WAL); - put.addColumn(CONTENTS_FAMILY, null, value); - rowsUpdate.add(put); - } - table.put(rowsUpdate); - - Scan scan = new Scan(); - scan.addFamily(CONTENTS_FAMILY); - ResultScanner scanner = table.getScanner(scan); - int nbRows = 0; - for (@SuppressWarnings("unused") Result row : scanner) - nbRows++; - assertEquals(0, nbRows); - scanner.close(); - - table.flushCommits(); - - scan = new Scan(); - scan.addFamily(CONTENTS_FAMILY); - scanner = table.getScanner(scan); - nbRows = 0; - for (@SuppressWarnings("unused") Result row : scanner) - nbRows++; - assertEquals(NB_BATCH_ROWS * 10, nbRows); - table.close(); - } - } - - @Test public void testRowsPutBufferedManyManyFlushes() throws IOException { final byte[] CONTENTS_FAMILY = Bytes.toBytes("contents"); final byte[] SMALL_FAMILY = Bytes.toBytes("smallfam"); @@ -4036,7 +3991,7 @@ public class TestFromClientSide { /** * simple test that just executes parts of the client - * API that accept a pre-created HConnection instance + * API that accept a pre-created Connection instance * * @throws IOException */ @@ -4175,92 +4130,89 @@ public class TestFromClientSide { final byte[] beforeThirdRow = Bytes.toBytes("row33"); final byte[] beforeForthRow = Bytes.toBytes("row44"); - try (Table t = + try (Table table = TEST_UTIL.createTable(tableName, new byte[][] { HConstants.CATALOG_FAMILY, Bytes.toBytes("info2") }, 1, 1024); - RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - if (t instanceof HTableInterface) { - HTableInterface table = (HTableInterface) t; - - // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow - // in 
Store.rowAtOrBeforeFromStoreFile - String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); - Region region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); - Put put1 = new Put(firstRow); - Put put2 = new Put(secondRow); - Put put3 = new Put(thirdRow); - Put put4 = new Put(forthRow); - byte[] one = new byte[] { 1 }; - byte[] two = new byte[] { 2 }; - byte[] three = new byte[] { 3 }; - byte[] four = new byte[] { 4 }; - - put1.addColumn(HConstants.CATALOG_FAMILY, null, one); - put2.addColumn(HConstants.CATALOG_FAMILY, null, two); - put3.addColumn(HConstants.CATALOG_FAMILY, null, three); - put4.addColumn(HConstants.CATALOG_FAMILY, null, four); - table.put(put1); - table.put(put2); - table.put(put3); - table.put(put4); - region.flush(true); - - Result result; - - // Test before first that null is returned - result = getReverseScanResult(table, beforeFirstRow, - HConstants.CATALOG_FAMILY); - assertNull(result); - - // Test at first that first is returned - result = getReverseScanResult(table, firstRow, HConstants.CATALOG_FAMILY); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), firstRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); - - // Test in between first and second that first is returned - result = getReverseScanResult(table, beforeSecondRow, HConstants.CATALOG_FAMILY); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), firstRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); - - // Test at second make sure second is returned - result = getReverseScanResult(table, secondRow, HConstants.CATALOG_FAMILY); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), secondRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); - - // Test in second and third, make sure second is returned - result = getReverseScanResult(table, beforeThirdRow, HConstants.CATALOG_FAMILY); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), secondRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); - - // Test at third make sure third is returned - result = getReverseScanResult(table, thirdRow, HConstants.CATALOG_FAMILY); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), thirdRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); - - // Test in third and forth, make sure third is returned - result = getReverseScanResult(table, beforeForthRow, HConstants.CATALOG_FAMILY); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), thirdRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); - - // Test at forth make sure forth is returned - result = getReverseScanResult(table, forthRow, HConstants.CATALOG_FAMILY); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), forthRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); - - // Test after forth make sure forth is returned - result = getReverseScanResult(table, Bytes.add(forthRow, one), HConstants.CATALOG_FAMILY); - 
assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getRow(), forthRow)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); - } + RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { + + // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow + // in Store.rowAtOrBeforeFromStoreFile + String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); + Region region = + TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); + Put put1 = new Put(firstRow); + Put put2 = new Put(secondRow); + Put put3 = new Put(thirdRow); + Put put4 = new Put(forthRow); + byte[] one = new byte[] { 1 }; + byte[] two = new byte[] { 2 }; + byte[] three = new byte[] { 3 }; + byte[] four = new byte[] { 4 }; + + put1.addColumn(HConstants.CATALOG_FAMILY, null, one); + put2.addColumn(HConstants.CATALOG_FAMILY, null, two); + put3.addColumn(HConstants.CATALOG_FAMILY, null, three); + put4.addColumn(HConstants.CATALOG_FAMILY, null, four); + table.put(put1); + table.put(put2); + table.put(put3); + table.put(put4); + region.flush(true); + + Result result; + + // Test before first that null is returned + result = getReverseScanResult(table, beforeFirstRow, + HConstants.CATALOG_FAMILY); + assertNull(result); + + // Test at first that first is returned + result = getReverseScanResult(table, firstRow, HConstants.CATALOG_FAMILY); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), firstRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); + + // Test in between first and second that first is returned + result = getReverseScanResult(table, beforeSecondRow, HConstants.CATALOG_FAMILY); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), firstRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); + + // Test at second make sure second is returned + result = getReverseScanResult(table, secondRow, HConstants.CATALOG_FAMILY); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), secondRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); + + // Test in second and third, make sure second is returned + result = getReverseScanResult(table, beforeThirdRow, HConstants.CATALOG_FAMILY); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), secondRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), two)); + + // Test at third make sure third is returned + result = getReverseScanResult(table, thirdRow, HConstants.CATALOG_FAMILY); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), thirdRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); + + // Test in third and forth, make sure third is returned + result = getReverseScanResult(table, beforeForthRow, HConstants.CATALOG_FAMILY); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), thirdRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), three)); + + // Test at forth make sure forth is returned + result = getReverseScanResult(table, 
forthRow, HConstants.CATALOG_FAMILY); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), forthRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); + + // Test after forth make sure forth is returned + result = getReverseScanResult(table, Bytes.add(forthRow, one), HConstants.CATALOG_FAMILY); + assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); + assertTrue(Bytes.equals(result.getRow(), forthRow)); + assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), four)); } } @@ -5015,57 +4967,53 @@ public class TestFromClientSide { TableName TABLE = TableName.valueOf("testGetRegionsInRange"); Table t = TEST_UTIL.createMultiRegionTable(TABLE, new byte[][] { FAMILY }, 10); - if (t instanceof HTable){ - HTable table = (HTable) t; - - int numOfRegions = -1; - try (RegionLocator r = table.getRegionLocator()) { - numOfRegions = r.getStartKeys().length; - } - assertEquals(26, numOfRegions); - - // Get the regions in this range - List regionsList = getRegionsInRange(TABLE, startKey, endKey); - assertEquals(10, regionsList.size()); - - // Change the start key - startKey = Bytes.toBytes("fff"); - regionsList = getRegionsInRange(TABLE, startKey, endKey); - assertEquals(7, regionsList.size()); - - // Change the end key - endKey = Bytes.toBytes("nnn"); - regionsList = getRegionsInRange(TABLE, startKey, endKey); - assertEquals(8, regionsList.size()); - - // Empty start key - regionsList = getRegionsInRange(TABLE, HConstants.EMPTY_START_ROW, endKey); - assertEquals(13, regionsList.size()); - - // Empty end key - regionsList = getRegionsInRange(TABLE, startKey, HConstants.EMPTY_END_ROW); - assertEquals(21, regionsList.size()); - - // Both start and end keys empty - regionsList = getRegionsInRange(TABLE, HConstants.EMPTY_START_ROW, - HConstants.EMPTY_END_ROW); - assertEquals(26, regionsList.size()); - - // Change the end key to somewhere in the last block - endKey = Bytes.toBytes("zzz1"); - regionsList = getRegionsInRange(TABLE, startKey, endKey); - assertEquals(21, regionsList.size()); - - // Change the start key to somewhere in the first block - startKey = Bytes.toBytes("aac"); - regionsList = getRegionsInRange(TABLE, startKey, endKey); - assertEquals(26, regionsList.size()); - - // Make start and end key the same - startKey = endKey = Bytes.toBytes("ccc"); - regionsList = getRegionsInRange(TABLE, startKey, endKey); - assertEquals(1, regionsList.size()); + int numOfRegions = -1; + try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(TABLE)) { + numOfRegions = r.getStartKeys().length; } + assertEquals(26, numOfRegions); + + // Get the regions in this range + List regionsList = getRegionsInRange(TABLE, startKey, endKey); + assertEquals(10, regionsList.size()); + + // Change the start key + startKey = Bytes.toBytes("fff"); + regionsList = getRegionsInRange(TABLE, startKey, endKey); + assertEquals(7, regionsList.size()); + + // Change the end key + endKey = Bytes.toBytes("nnn"); + regionsList = getRegionsInRange(TABLE, startKey, endKey); + assertEquals(8, regionsList.size()); + + // Empty start key + regionsList = getRegionsInRange(TABLE, HConstants.EMPTY_START_ROW, endKey); + assertEquals(13, regionsList.size()); + + // Empty end key + regionsList = getRegionsInRange(TABLE, startKey, HConstants.EMPTY_END_ROW); + assertEquals(21, regionsList.size()); + + // Both start and end keys empty + regionsList = getRegionsInRange(TABLE, HConstants.EMPTY_START_ROW, + 
HConstants.EMPTY_END_ROW); + assertEquals(26, regionsList.size()); + + // Change the end key to somewhere in the last block + endKey = Bytes.toBytes("zzz1"); + regionsList = getRegionsInRange(TABLE, startKey, endKey); + assertEquals(21, regionsList.size()); + + // Change the start key to somewhere in the first block + startKey = Bytes.toBytes("aac"); + regionsList = getRegionsInRange(TABLE, startKey, endKey); + assertEquals(26, regionsList.size()); + + // Make start and end key the same + startKey = endKey = Bytes.toBytes("ccc"); + regionsList = getRegionsInRange(TABLE, startKey, endKey); + assertEquals(1, regionsList.size()); } private List getRegionsInRange(TableName tableName, byte[] startKey, diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index ddd5fa3..a918ce6 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -70,8 +70,6 @@ public class TestFromClientSide3 { */ @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setBoolean( - "hbase.online.schema.update.enable", true); TEST_UTIL.startMiniCluster(SLAVES); } @@ -121,7 +119,7 @@ public class TestFromClientSide3 { // connection needed for poll-wait HRegionLocation loc = locator.getRegionLocation(row, true); AdminProtos.AdminService.BlockingInterface server = - admin.getConnection().getAdmin(loc.getServerName()); + ((ClusterConnection) admin.getConnection()).getAdmin(loc.getServerName()); byte[] regName = loc.getRegionInfo().getRegionName(); for (int i = 0; i < nFlushes; i++) { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java index 550a4c9..3da8454 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java @@ -82,15 +82,12 @@ public class TestFromClientSideNoCodec { Bytes.equals(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), f, 0, f.length)); } - if(ht instanceof HTableInterface) { - HTableInterface hti = (HTableInterface) ht; - // Check getRowOrBefore - byte[] f = fs[0]; - Get get = new Get(row); - get.addFamily(f); - r = ht.get(get); - assertTrue(r.toString(), r.containsColumn(f, f)); - } + // Check getRowOrBefore + byte[] f = fs[0]; + Get get = new Get(row); + get.addFamily(f); + r = ht.get(get); + assertTrue(r.toString(), r.containsColumn(f, f)); // Check scan. 
ResultScanner scanner = ht.getScanner(new Scan()); int count = 0; diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index 289e6f2..4723fa8 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -176,45 +176,56 @@ public class TestHCM { TableName tableName = TableName.valueOf("testClusterConnection"); TEST_UTIL.createTable(tableName, FAM_NAM).close(); - HTable t = (HTable)con1.getTable(tableName, otherPool); - // make sure passing a pool to the getTable does not trigger creation of an internal pool - assertNull("Internal Thread pool should be null", - ((ConnectionImplementation) con1).getCurrentBatchPool()); - // table should use the pool passed - assertTrue(otherPool == t.getPool()); - t.close(); - - t = (HTable)con2.getTable(tableName); - // table should use the connectin's internal pool - assertTrue(otherPool == t.getPool()); - t.close(); - - t = (HTable)con2.getTable(tableName); - // try other API too - assertTrue(otherPool == t.getPool()); - t.close(); + Table table = con1.getTable(tableName, otherPool); - t = (HTable)con2.getTable(tableName); - // try other API too - assertTrue(otherPool == t.getPool()); - t.close(); + ExecutorService pool = null; - t = (HTable)con1.getTable(tableName); - ExecutorService pool = ((ConnectionImplementation)con1).getCurrentBatchPool(); - // make sure an internal pool was created - assertNotNull("An internal Thread pool should have been created", pool); - // and that the table is using it - assertTrue(t.getPool() == pool); - t.close(); - - t = (HTable)con1.getTable(tableName); - // still using the *same* internal pool - assertTrue(t.getPool() == pool); - t.close(); + if(table instanceof HTable) { + HTable t = (HTable) table; + // make sure passing a pool to the getTable does not trigger creation of an internal pool + assertNull("Internal Thread pool should be null", + ((ConnectionImplementation) con1).getCurrentBatchPool()); + // table should use the pool passed + assertTrue(otherPool == t.getPool()); + t.close(); + + t = (HTable) con2.getTable(tableName); + // table should use the connection's internal pool + assertTrue(otherPool == t.getPool()); + t.close(); + + t = (HTable) con2.getTable(tableName); + // try other API too + assertTrue(otherPool == t.getPool()); + t.close(); + + t = (HTable) con2.getTable(tableName); + // try other API too + assertTrue(otherPool == t.getPool()); + t.close(); + + t = (HTable) con1.getTable(tableName); + pool = ((ConnectionImplementation) con1).getCurrentBatchPool(); + // make sure an internal pool was created + assertNotNull("An internal Thread pool should have been created", pool); + // and that the table is using it + assertTrue(t.getPool() == pool); + t.close(); + + t = (HTable) con1.getTable(tableName); + // still using the *same* internal pool + assertTrue(t.getPool() == pool); + t.close(); + } else { + table.close(); + } con1.close(); + // if the pool was created on demand it should be closed upon connection close - assertTrue(pool.isShutdown()); + if(pool != null) { + assertTrue(pool.isShutdown()); + } con2.close(); // if the pool is passed, it is not closed @@ -223,7 +234,7 @@ } /** - * Naive test to check that HConnection#getAdmin returns a properly constructed HBaseAdmin object + * Naive test to check that Connection#getAdmin returns a properly constructed HBaseAdmin object * @throws
IOException Unable to construct admin */ @Test @@ -250,20 +261,14 @@ public class TestHCM { Table t = TEST_UTIL.createTable(tn, cf); TEST_UTIL.waitTableAvailable(tn); + TEST_UTIL.waitUntilNoRegionsInTransition(); - while(TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager(). - getRegionStates().isRegionsInTransition()){ - Thread.sleep(1); - } final ConnectionImplementation hci = (ConnectionImplementation)TEST_UTIL.getConnection(); try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tn)) { while (l.getRegionLocation(rk).getPort() != sn.getPort()) { TEST_UTIL.getHBaseAdmin().move(l.getRegionLocation(rk).getRegionInfo(). getEncodedNameAsBytes(), Bytes.toBytes(sn.toString())); - while (TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager(). - getRegionStates().isRegionsInTransition()) { - Thread.sleep(1); - } + TEST_UTIL.waitUntilNoRegionsInTransition(); hci.clearRegionCache(tn); } Assert.assertNotNull(hci.clusterStatusListener); @@ -322,30 +327,27 @@ public class TestHCM { public void testOperationTimeout() throws Exception { HTableDescriptor hdt = TEST_UTIL.createTableDescriptor("HCM-testOperationTimeout"); hdt.addCoprocessor(SleepAndFailFirstTime.class.getName()); - Table t = TEST_UTIL.createTable(hdt, new byte[][]{FAM_NAM}); - if (t instanceof HTable) { - HTable table = (HTable) t; - table.setRpcTimeout(Integer.MAX_VALUE); - // Check that it works if the timeout is big enough - table.setOperationTimeout(120 * 1000); + Table table = TEST_UTIL.createTable(hdt, new byte[][]{FAM_NAM}); + table.setRpcTimeout(Integer.MAX_VALUE); + // Check that it works if the timeout is big enough + table.setOperationTimeout(120 * 1000); + table.get(new Get(FAM_NAM)); + + // Resetting and retrying. Will fail this time, not enough time for the second try + SleepAndFailFirstTime.ct.set(0); + try { + table.setOperationTimeout(30 * 1000); table.get(new Get(FAM_NAM)); - - // Resetting and retrying. Will fail this time, not enough time for the second try - SleepAndFailFirstTime.ct.set(0); - try { - table.setOperationTimeout(30 * 1000); - table.get(new Get(FAM_NAM)); - Assert.fail("We expect an exception here"); - } catch (SocketTimeoutException e) { - // The client has a CallTimeout class, but it's not shared.We're not very clean today, - // in the general case you can expect the call to stop, but the exception may vary. - // In this test however, we're sure that it will be a socket timeout. - LOG.info("We received an exception, as expected ", e); - } catch (IOException e) { - Assert.fail("Wrong exception:" + e.getMessage()); - } finally { - table.close(); - } + Assert.fail("We expect an exception here"); + } catch (SocketTimeoutException e) { + // The client has a CallTimeout class, but it's not shared.We're not very clean today, + // in the general case you can expect the call to stop, but the exception may vary. + // In this test however, we're sure that it will be a socket timeout. 
+ LOG.info("We received an exception, as expected ", e); + } catch (IOException e) { + Assert.fail("Wrong exception:" + e.getMessage()); + } finally { + table.close(); } } @@ -356,11 +358,9 @@ public class TestHCM { Configuration c = new Configuration(TEST_UTIL.getConfiguration()); try (Table t = TEST_UTIL.createTable(hdt, new byte[][] { FAM_NAM }, c)) { - assert t instanceof HTable; - HTable table = (HTable) t; - table.setRpcTimeout(SleepCoprocessor.SLEEP_TIME / 2); - table.setOperationTimeout(SleepCoprocessor.SLEEP_TIME * 100); - table.get(new Get(FAM_NAM)); + t.setRpcTimeout(SleepCoprocessor.SLEEP_TIME / 2); + t.setOperationTimeout(SleepCoprocessor.SLEEP_TIME * 100); + t.get(new Get(FAM_NAM)); } } @@ -379,29 +379,26 @@ public class TestHCM { c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 4000); Connection connection = ConnectionFactory.createConnection(c); - Table t = connection.getTable(TableName.valueOf("HCM-testRpcRetryingCallerSleep")); - if (t instanceof HTable) { - HTable table = (HTable) t; - table.setOperationTimeout(8000); - // Check that it works. Because 2s + 3s * RETRY_BACKOFF[0] + 2s < 8s - table.get(new Get(FAM_NAM)); + Table table = connection.getTable(TableName.valueOf("HCM-testRpcRetryingCallerSleep")); + table.setOperationTimeout(8000); + // Check that it works. Because 2s + 3s * RETRY_BACKOFF[0] + 2s < 8s + table.get(new Get(FAM_NAM)); - // Resetting and retrying. - SleepAndFailFirstTime.ct.set(0); - try { - table.setOperationTimeout(6000); - // Will fail this time. After sleep, there are not enough time for second retry - // Beacuse 2s + 3s + 2s > 6s - table.get(new Get(FAM_NAM)); - Assert.fail("We expect an exception here"); - } catch (SocketTimeoutException e) { - LOG.info("We received an exception, as expected ", e); - } catch (IOException e) { - Assert.fail("Wrong exception:" + e.getMessage()); - } finally { - table.close(); - connection.close(); - } + // Resetting and retrying. + SleepAndFailFirstTime.ct.set(0); + try { + table.setOperationTimeout(6000); + // Will fail this time. 
After sleep, there are not enough time for second retry + // Beacuse 2s + 3s + 2s > 6s + table.get(new Get(FAM_NAM)); + Assert.fail("We expect an exception here"); + } catch (SocketTimeoutException e) { + LOG.info("We received an exception, as expected ", e); + } catch (IOException e) { + Assert.fail("Wrong exception:" + e.getMessage()); + } finally { + table.close(); + connection.close(); } } @@ -410,7 +407,7 @@ public class TestHCM { long pauseTime; long baseTime = 100; TableName tableName = TableName.valueOf("HCM-testCallableSleep"); - HTable table = TEST_UTIL.createTable(tableName, FAM_NAM); + Table table = TEST_UTIL.createTable(tableName, FAM_NAM); RegionServerCallable regionServerCallable = new RegionServerCallable( TEST_UTIL.getConnection(), tableName, ROW) { public Object call(int timeout) throws IOException { @@ -441,7 +438,7 @@ public class TestHCM { assertTrue(pauseTime <= (baseTime * HConstants.RETRY_BACKOFF[i] * 1.01f)); } - MasterCallable masterCallable = new MasterCallable((HConnection) TEST_UTIL.getConnection()) { + MasterCallable masterCallable = new MasterCallable(TEST_UTIL.getConnection()) { public Object call(int timeout) throws IOException { return null; } @@ -458,7 +455,7 @@ public class TestHCM { TableName tableName = TableName.valueOf("HCM-testConnectionClose" + allowsInterrupt); TEST_UTIL.createTable(tableName, FAM_NAM).close(); - boolean previousBalance = TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, true); + boolean previousBalance = TEST_UTIL.getAdmin().setBalancerRunning(false, true); Configuration c2 = new Configuration(TEST_UTIL.getConfiguration()); // We want to work on a separate connection. @@ -741,9 +738,7 @@ public class TestHCM { HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); // We can wait for all regions to be online, that makes log reading easier when debugging - while (master.getAssignmentManager().getRegionStates().isRegionsInTransition()) { - Thread.sleep(1); - } + TEST_UTIL.waitUntilNoRegionsInTransition(); // Now moving the region to the second server HRegionLocation toMove = conn.getCachedLocation(TABLE_NAME, ROW).getRegionLocation(); @@ -890,15 +885,21 @@ public class TestHCM { public void testConnectionManagement() throws Exception{ Table table0 = TEST_UTIL.createTable(TABLE_NAME1, FAM_NAM); Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - HTable table = (HTable) conn.getTable(TABLE_NAME1); + Table table = conn.getTable(TABLE_NAME1); table.close(); assertFalse(conn.isClosed()); - assertFalse(table.getPool().isShutdown()); - table = (HTable) conn.getTable(TABLE_NAME1); + if(table instanceof HTable) { + assertFalse(((HTable) table).getPool().isShutdown()); + } + table = conn.getTable(TABLE_NAME1); table.close(); - assertFalse(table.getPool().isShutdown()); + if(table instanceof HTable) { + assertFalse(((HTable) table).getPool().isShutdown()); + } conn.close(); - assertTrue(table.getPool().isShutdown()); + if(table instanceof HTable) { + assertTrue(((HTable) table).getPool().isShutdown()); + } table0.close(); } @@ -1001,7 +1002,7 @@ public class TestHCM { TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT)); // This should be enough to connect - HConnection conn = (HConnection) ConnectionFactory.createConnection(c); + ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(c); assertTrue(conn.isMasterRunning()); conn.close(); } @@ -1034,9 +1035,7 @@ public class TestHCM { HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); // We can wait 
for all regions to be online, that makes log reading easier when debugging - while (master.getAssignmentManager().getRegionStates().isRegionsInTransition()) { - Thread.sleep(1); - } + TEST_UTIL.waitUntilNoRegionsInTransition(); Put put = new Put(ROW_X); put.addColumn(FAM_NAM, ROW_X, ROW_X); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerFlushCache.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerFlushCache.java index 9be6b6c..8c54880 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerFlushCache.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerFlushCache.java @@ -128,7 +128,7 @@ public class TestHTableMultiplexerFlushCache { // Region cache (and not just tearing down the entire connection). TableName TABLE = TableName.valueOf("testOnRegionMove"); final int NUM_REGIONS = 10; - HTable htable = TEST_UTIL.createTable(TABLE, new byte[][] { FAMILY }, 3, + Table htable = TEST_UTIL.createTable(TABLE, new byte[][] { FAMILY }, 3, Bytes.toBytes("aaaaa"), Bytes.toBytes("zzzzz"), NUM_REGIONS); HTableMultiplexer multiplexer = new HTableMultiplexer(TEST_UTIL.getConfiguration(), diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java index 7170299..d8bc591 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestLeaseRenewal.java @@ -95,7 +95,7 @@ public class TestLeaseRenewal { @Test public void testLeaseRenewal() throws Exception { - HTable table = TEST_UTIL.createTable( + Table table = TEST_UTIL.createTable( TableName.valueOf("testLeaseRenewal"), FAMILY); Put p = new Put(ROW_BYTES); p.addColumn(FAMILY, COL_QUAL, VAL_BYTES); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java index 5e302d2..8e87ceb 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java @@ -448,7 +448,7 @@ public class TestMetaWithReplicas { // create in-memory state otherwise master won't assign TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() .getRegionStates().createRegionState(h); - TEST_UTIL.getMiniHBaseCluster().getMaster().assignRegion(h); + TEST_UTIL.assignRegion(h); HBaseFsckRepair.waitUntilAssigned(TEST_UTIL.getHBaseAdmin(), h); // check that problem exists HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java index dcf20e5..9d965f3 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java @@ -22,13 +22,16 @@ package org.apache.hadoop.hbase.client; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.mob.MobConstants; import 
org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.experimental.categories.Category; +import org.junit.rules.TestRule; /** * Test to verify that the cloned table is independent of the table from which it was cloned @@ -37,6 +40,10 @@ import org.junit.experimental.categories.Category; public class TestMobSnapshotCloneIndependence extends TestSnapshotCloneIndependence { private static final Log LOG = LogFactory.getLog(TestMobSnapshotCloneIndependence.class); + @ClassRule + public static final TestRule timeout = + CategoryBasedTimeout.forClass( TestMobSnapshotCloneIndependence.class); + /** * Setup the config for the cluster and start it * @throws Exception on failure diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index a79aa0a..48d7efc 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -79,8 +79,7 @@ public class TestReplicaWithCluster { */ public static class SlowMeCopro extends BaseRegionObserver { static final AtomicLong sleepTime = new AtomicLong(0); - static final AtomicReference cdl = - new AtomicReference(new CountDownLatch(0)); + static final AtomicReference cdl = new AtomicReference<>(new CountDownLatch(0)); public SlowMeCopro() { } @@ -336,7 +335,7 @@ public class TestReplicaWithCluster { // bulk load HFiles LOG.debug("Loading test data"); @SuppressWarnings("deprecation") - final HConnection conn = HTU.getHBaseAdmin().getConnection(); + final ClusterConnection conn = (ClusterConnection) HTU.getAdmin().getConnection(); RegionServerCallable callable = new RegionServerCallable( conn, hdt.getTableName(), TestHRegionServerBulkLoad.rowkey(0)) { @Override @@ -351,7 +350,7 @@ public class TestReplicaWithCluster { } }; RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(HTU.getConfiguration()); - RpcRetryingCaller caller = factory. 
newCaller(); + RpcRetryingCaller caller = factory.newCaller(); caller.callWithRetries(callable, 10000); // verify we can read them from the primary diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java index 9690a89..b3cbd33 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java @@ -204,7 +204,7 @@ public class TestReplicasClient { @Before public void before() throws IOException { - HTU.getHBaseAdmin().getConnection().clearRegionCache(); + ((ClusterConnection) HTU.getAdmin().getConnection()).clearRegionCache(); try { openRegion(hriPrimary); } catch (Exception ignored) { @@ -226,7 +226,7 @@ public class TestReplicasClient { } catch (Exception ignored) { } - HTU.getHBaseAdmin().getConnection().clearRegionCache(); + ((ClusterConnection) HTU.getAdmin().getConnection()).clearRegionCache(); } private HRegionServer getRS() { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java index d31df42..a3fc640 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java @@ -83,7 +83,6 @@ public class TestRestoreSnapshotFromClient { protected static void setupConf(Configuration conf) { TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); - TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true); TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10); TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java index 656dedc..34d3c91 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java @@ -132,7 +132,7 @@ public class TestRpcControllerFactory { TableName name = TableName.valueOf("testcustomcontroller"); UTIL.createTable(name, fam1).close(); - // change one of the connection properties so we get a new HConnection with our configuration + // change one of the connection properties so we get a new Connection with our configuration conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT + 1); Connection connection = ConnectionFactory.createConnection(conf); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java index 618717b..c93794d 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java @@ -66,7 +66,7 @@ public class TestShortCircuitConnection { UTIL.createTable(htd, null); HRegionServer regionServer = UTIL.getRSForFirstRegionInTable(tn); ClusterConnection connection = regionServer.getClusterConnection(); - 
HTableInterface tableIf = connection.getTable(tn); + Table tableIf = connection.getTable(tn); assertTrue(tableIf instanceof HTable); HTable table = (HTable) tableIf; assertTrue(table.getConnection() == connection); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java index 002e04e..2d4b4c9 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java @@ -18,13 +18,13 @@ package org.apache.hadoop.hbase.client; -import java.io.IOException; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -36,15 +36,20 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Threads; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; -import org.junit.Ignore; +import org.junit.ClassRule; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.junit.rules.TestRule; /** * Test to verify that the cloned table is independent of the table from which it was cloned @@ -53,18 +58,33 @@ import org.junit.experimental.categories.Category; public class TestSnapshotCloneIndependence { private static final Log LOG = LogFactory.getLog(TestSnapshotCloneIndependence.class); + @ClassRule + public static final TestRule timeout = + CategoryBasedTimeout.forClass(TestSnapshotCloneIndependence.class); + + @Rule + public TestName testName = new TestName(); + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); protected static final int NUM_RS = 2; private static final String STRING_TABLE_NAME = "test"; private static final String TEST_FAM_STR = "fam"; protected static final byte[] TEST_FAM = Bytes.toBytes(TEST_FAM_STR); - protected static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME); private static final int CLEANER_INTERVAL = 100; + private FileSystem fs; + private Path rootDir; + private Admin admin; + private TableName originalTableName; + private Table originalTable; + private TableName cloneTableName; + private int countOriginalTable; + String snapshotNameAsString; + byte[] snapshotName; + /** * Setup the config for the cluster and start it - * @throws Exception on fOailure */ @BeforeClass public static void setupCluster() throws Exception { @@ -77,9 +97,6 @@ public class TestSnapshotCloneIndependence { conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 15); // enable snapshot support conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); - // disable the ui - 
conf.setInt("hbase.regionsever.info.port", -1); - conf.setInt("hbase.master.info.port", -1); // change the flush size to a small amount, regulating number of store files conf.setInt("hbase.hregion.memstore.flush.size", 25000); // so make sure we get a compaction when doing a load, but keep around @@ -104,12 +121,25 @@ public class TestSnapshotCloneIndependence { @Before public void setup() throws Exception { - createTable(TABLE_NAME, TEST_FAM); + fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); + rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); + + admin = UTIL.getHBaseAdmin(); + originalTableName = TableName.valueOf("test" + testName.getMethodName()); + cloneTableName = TableName.valueOf("test-clone-" + originalTableName); + snapshotNameAsString = "snapshot_" + originalTableName; + snapshotName = Bytes.toBytes(snapshotNameAsString); + + originalTable = createTable(originalTableName, TEST_FAM); + loadData(originalTable, TEST_FAM); + countOriginalTable = countRows(originalTable); + System.out.println("Original table has: " + countOriginalTable + " rows"); } @After public void tearDown() throws Exception { - UTIL.deleteTable(TABLE_NAME); + UTIL.deleteTable(originalTableName); + UTIL.deleteTable(cloneTableName); SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin()); SnapshotTestingUtils.deleteArchiveDirectory(UTIL); } @@ -127,78 +157,77 @@ public class TestSnapshotCloneIndependence { * Verify that adding data to the cloned table will not affect the original, and vice-versa when * it is taken as an online snapshot. */ - @Ignore ("Flakey. Fix") @Test (timeout=300000) + @Test public void testOnlineSnapshotAppendIndependent() throws Exception { - runTestSnapshotAppendIndependent(true); + createAndCloneSnapshot(true); + runTestSnapshotAppendIndependent(); } /** * Verify that adding data to the cloned table will not affect the original, and vice-versa when * it is taken as an offline snapshot. */ - @Test (timeout=300000) - @Ignore + @Test public void testOfflineSnapshotAppendIndependent() throws Exception { - runTestSnapshotAppendIndependent(false); + createAndCloneSnapshot(false); + runTestSnapshotAppendIndependent(); } /** * Verify that adding metadata to the cloned table will not affect the original, and vice-versa * when it is taken as an online snapshot. */ - @Test (timeout=300000) + @Test public void testOnlineSnapshotMetadataChangesIndependent() throws Exception { - runTestSnapshotMetadataChangesIndependent(true); + createAndCloneSnapshot(true); + runTestSnapshotMetadataChangesIndependent(); } /** * Verify that adding netadata to the cloned table will not affect the original, and vice-versa * when is taken as an online snapshot. */ - @Test (timeout=300000) - @Ignore + @Test public void testOfflineSnapshotMetadataChangesIndependent() throws Exception { - runTestSnapshotMetadataChangesIndependent(false); + createAndCloneSnapshot(false); + runTestSnapshotMetadataChangesIndependent(); } /** * Verify that region operations, in this case splitting a region, are independent between the * cloned table and the original. */ - @Test (timeout=300000) - @Ignore + @Test public void testOfflineSnapshotRegionOperationsIndependent() throws Exception { - runTestRegionOperationsIndependent(false); + createAndCloneSnapshot(false); + runTestRegionOperationsIndependent(); } /** * Verify that region operations, in this case splitting a region, are independent between the * cloned table and the original. 
*/ - @Test (timeout=300000) + @Test public void testOnlineSnapshotRegionOperationsIndependent() throws Exception { - runTestRegionOperationsIndependent(true); + createAndCloneSnapshot(true); + runTestRegionOperationsIndependent(); } - @Test (timeout=300000) - @Ignore + @Test public void testOfflineSnapshotDeleteIndependent() throws Exception { - runTestSnapshotDeleteIndependent(false); + createAndCloneSnapshot(false); + runTestSnapshotDeleteIndependent(); } - @Ignore ("Flakey test") @Test (timeout=300000) + @Test public void testOnlineSnapshotDeleteIndependent() throws Exception { - runTestSnapshotDeleteIndependent(true); + createAndCloneSnapshot(true); + runTestSnapshotDeleteIndependent(); } private static void waitOnSplit(Connection c, final Table t, int originalCount) throws Exception { for (int i = 0; i < 200; i++) { - try { - Thread.sleep(500); - } catch (InterruptedException e) { - // Restore the interrupted status - Thread.currentThread().interrupt(); - } + Threads.sleepWithoutInterrupt(500); try (RegionLocator locator = c.getRegionLocator(t.getName())) { if (locator.getAllRegionLocations().size() > originalCount) { return; @@ -208,240 +237,125 @@ public class TestSnapshotCloneIndependence { throw new Exception("Split did not increase the number of regions"); } - /* - * Take a snapshot of a table, add data, and verify that this only - * affects one table + /** + * Takes a snapshot of originalTable and clones the snapshot to another table. + * If {@code online} is false, the original table is disabled while the snapshot is taken, so it + * is enabled again afterwards. * @param online - Whether the table is online or not during the snapshot */ - private void runTestSnapshotAppendIndependent(boolean online) throws Exception { - FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); - Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - - Admin admin = UTIL.getHBaseAdmin(); - final long startTime = System.currentTimeMillis(); - final TableName localTableName = - TableName.valueOf(STRING_TABLE_NAME + startTime); - - try (Table original = createTable(localTableName, TEST_FAM)) { - loadData(original, TEST_FAM); - final int origTableRowCount = countRows(original); - - // Take a snapshot - final String snapshotNameAsString = "snapshot_" + localTableName; - byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); - - SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR, - snapshotNameAsString, rootDir, fs, online); - - if (!online) { - tryDisable(admin, localTableName); - } - TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName); - admin.cloneSnapshot(snapshotName, cloneTableName); - - try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) { - - // Make sure that all the regions are available before starting - UTIL.waitUntilAllRegionsAssigned(cloneTableName); - - final int clonedTableRowCount = countRows(clonedTable); + private void createAndCloneSnapshot(boolean online) throws Exception { + SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, TEST_FAM_STR, + snapshotNameAsString, rootDir, fs, online); - Assert.assertEquals( - "The line counts of original and cloned tables do not match after clone. 
", - origTableRowCount, clonedTableRowCount); - - // Attempt to add data to the test - final String rowKey = "new-row-" + System.currentTimeMillis(); - - Put p = new Put(Bytes.toBytes(rowKey)); - p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString")); - original.put(p); - - // Verify that it is not present in the original table - Assert.assertEquals("The row count of the original table was not modified by the put", - origTableRowCount + 1, countRows(original)); - Assert.assertEquals( - "The row count of the cloned table changed as a result of addition to the original", - clonedTableRowCount, countRows(clonedTable)); - - p = new Put(Bytes.toBytes(rowKey)); - p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString")); - clonedTable.put(p); - - // Verify that the new family is not in the restored table's description - Assert.assertEquals( - "The row count of the original table was modified by the put to the clone", - origTableRowCount + 1, countRows(original)); - Assert.assertEquals("The row count of the cloned table was not modified by the put", - clonedTableRowCount + 1, countRows(clonedTable)); - } + // If offline, enable the table disabled by snapshot testing util. + if (!online) { + admin.enableTable(originalTableName); + UTIL.waitTableAvailable(originalTableName); } + + admin.cloneSnapshot(snapshotName, cloneTableName); + UTIL.waitUntilAllRegionsAssigned(cloneTableName); } - /* - * Take a snapshot of a table, do a split, and verify that this only affects one table - * @param online - Whether the table is online or not during the snapshot + /** + * Verify that adding data to either the original table or the cloned table does not affect the other. */ - private void runTestRegionOperationsIndependent(boolean online) throws Exception { - FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); - Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - - // Create a table - Admin admin = UTIL.getHBaseAdmin(); - final long startTime = System.currentTimeMillis(); - final TableName localTableName = - TableName.valueOf(STRING_TABLE_NAME + startTime); - Table original = createTable(localTableName, TEST_FAM); - loadData(original, TEST_FAM); - final int loadedTableCount = countRows(original); - System.out.println("Original table has: " + loadedTableCount + " rows"); - - final String snapshotNameAsString = "snapshot_" + localTableName; - - // Create a snapshot - SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR, - snapshotNameAsString, rootDir, fs, online); - - if (!online) { - tryDisable(admin, localTableName); + private void runTestSnapshotAppendIndependent() throws Exception { + try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) { + final int clonedTableRowCount = countRows(clonedTable); + + Assert.assertEquals( + "The line counts of original and cloned tables do not match after clone. 
", + countOriginalTable, clonedTableRowCount); + + // Attempt to add data to the test + Put p = new Put(Bytes.toBytes("new-row-" + System.currentTimeMillis())); + p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString")); + originalTable.put(p); + + // Verify that the new row is not in the restored table + Assert.assertEquals("The row count of the original table was not modified by the put", + countOriginalTable + 1, countRows(originalTable)); + Assert.assertEquals( + "The row count of the cloned table changed as a result of addition to the original", + clonedTableRowCount, countRows(clonedTable)); + + Put p2 = new Put(Bytes.toBytes("new-row-" + System.currentTimeMillis())); + p2.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString")); + clonedTable.put(p2); + + // Verify that the row is not added to the original table. + Assert.assertEquals( + "The row count of the original table was modified by the put to the clone", + countOriginalTable + 1, countRows(originalTable)); + Assert.assertEquals("The row count of the cloned table was not modified by the put", + clonedTableRowCount + 1, countRows(clonedTable)); } + } - TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName); - - // Clone the snapshot - byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); - admin.cloneSnapshot(snapshotName, cloneTableName); - + /** + * Do a split, and verify that this only affects one table + */ + private void runTestRegionOperationsIndependent() throws Exception { // Verify that region information is the same pre-split ((ClusterConnection) UTIL.getConnection()).clearRegionCache(); - List originalTableHRegions = admin.getTableRegions(localTableName); + List originalTableHRegions = admin.getTableRegions(originalTableName); final int originalRegionCount = originalTableHRegions.size(); final int cloneTableRegionCount = admin.getTableRegions(cloneTableName).size(); Assert.assertEquals( - "The number of regions in the cloned table is different than in the original table.", - originalRegionCount, cloneTableRegionCount); + "The number of regions in the cloned table is different than in the original table.", + originalRegionCount, cloneTableRegionCount); // Split a region on the parent table admin.splitRegion(originalTableHRegions.get(0).getRegionName()); - waitOnSplit(UTIL.getConnection(), original, originalRegionCount); + waitOnSplit(UTIL.getConnection(), originalTable, originalRegionCount); // Verify that the cloned table region is not split final int cloneTableRegionCount2 = admin.getTableRegions(cloneTableName).size(); Assert.assertEquals( - "The number of regions in the cloned table changed though none of its regions were split.", - cloneTableRegionCount, cloneTableRegionCount2); + "The number of regions in the cloned table changed though none of its regions were split.", + cloneTableRegionCount, cloneTableRegionCount2); } - /* - * Take a snapshot of a table, add metadata, and verify that this only - * affects one table - * @param online - Whether the table is online or not during the snapshot + /** + * Add metadata, and verify that this only affects one table */ - private void runTestSnapshotMetadataChangesIndependent(boolean online) throws Exception { - FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); - Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - - // Create a table - Admin admin = UTIL.getHBaseAdmin(); - final long startTime = 
System.currentTimeMillis(); - final TableName localTableName = - TableName.valueOf(STRING_TABLE_NAME + startTime); - Table original = createTable(localTableName, TEST_FAM); - loadData(original, TEST_FAM); - - final String snapshotNameAsString = "snapshot_" + localTableName; - - // Create a snapshot - SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR, - snapshotNameAsString, rootDir, fs, online); - - if (!online) { - tryDisable(admin, localTableName); - } - - TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName); - - // Clone the snapshot - byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); - admin.cloneSnapshot(snapshotName, cloneTableName); - + private void runTestSnapshotMetadataChangesIndependent() throws Exception { // Add a new column family to the original table byte[] TEST_FAM_2 = Bytes.toBytes("fam2"); HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM_2); - tryDisable(admin, localTableName); - admin.addColumnFamily(localTableName, hcd); + admin.disableTable(originalTableName); + admin.addColumnFamily(originalTableName, hcd); // Verify that it is not in the snapshot - admin.enableTable(localTableName); - UTIL.waitTableAvailable(localTableName); + admin.enableTable(originalTableName); + UTIL.waitTableAvailable(originalTableName); // get a description of the cloned table // get a list of its families // assert that the family is there - HTableDescriptor originalTableDescriptor = original.getTableDescriptor(); + HTableDescriptor originalTableDescriptor = originalTable.getTableDescriptor(); HTableDescriptor clonedTableDescriptor = admin.getTableDescriptor(cloneTableName); Assert.assertTrue("The original family was not found. There is something wrong. ", - originalTableDescriptor.hasFamily(TEST_FAM)); + originalTableDescriptor.hasFamily(TEST_FAM)); Assert.assertTrue("The original family was not found in the clone. There is something wrong. ", - clonedTableDescriptor.hasFamily(TEST_FAM)); + clonedTableDescriptor.hasFamily(TEST_FAM)); Assert.assertTrue("The new family was not found. ", - originalTableDescriptor.hasFamily(TEST_FAM_2)); + originalTableDescriptor.hasFamily(TEST_FAM_2)); Assert.assertTrue("The new family was not found. ", - !clonedTableDescriptor.hasFamily(TEST_FAM_2)); - } - - private void tryDisable(Admin admin, TableName localTableName) throws IOException { - int offlineRetry = 0; - while ( offlineRetry < 5 && admin.isTableEnabled(localTableName)) { - try { - admin.disableTable(localTableName); - } catch (IOException ioe) { - LOG.warn("Error disabling the table", ioe); - } - offlineRetry ++; - } + !clonedTableDescriptor.hasFamily(TEST_FAM_2)); } - /* - * Take a snapshot of a table, add data, and verify that deleting the snapshot does not affect - * either table. - * @param online - Whether the table is online or not during the snapshot + /** + * Verify that deleting the snapshot does not affect either table. 
*/ - private void runTestSnapshotDeleteIndependent(boolean online) throws Exception { - FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); - Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - - final Admin admin = UTIL.getHBaseAdmin(); - final long startTime = System.currentTimeMillis(); - final TableName localTableName = - TableName.valueOf(STRING_TABLE_NAME + startTime); - - try (Table original = createTable(localTableName, TEST_FAM)) { - loadData(original, TEST_FAM); - } - - // Take a snapshot - final String snapshotNameAsString = "snapshot_" + localTableName; - byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); - - SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR, - snapshotNameAsString, rootDir, fs, online); - - if (!online) { - tryDisable(admin, localTableName); - } - - TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName); - admin.cloneSnapshot(snapshotName, cloneTableName); - - UTIL.waitUntilAllRegionsAssigned(cloneTableName); - + private void runTestSnapshotDeleteIndependent() throws Exception { // Ensure the original table does not reference the HFiles anymore - admin.majorCompact(localTableName); + admin.majorCompact(originalTableName); // Deleting the snapshot used to break the cloned table by deleting in-use HFiles admin.deleteSnapshot(snapshotName); @@ -451,7 +365,7 @@ public class TestSnapshotCloneIndependence { Thread.sleep(5000); } while (!admin.listSnapshots(snapshotNameAsString).isEmpty()); - try (Table original = UTIL.getConnection().getTable(localTableName)) { + try (Table original = UTIL.getConnection().getTable(originalTableName)) { try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) { // Verify that all regions of both tables are readable final int origTableRowCount = countRows(original); @@ -462,7 +376,7 @@ public class TestSnapshotCloneIndependence { } protected Table createTable(final TableName table, byte[] family) throws Exception { - Table t = UTIL.createTable(table, family); + Table t = UTIL.createTable(table, family); // Wait for everything to be ready with the table UTIL.waitUntilAllRegionsAssigned(table); @@ -470,11 +384,11 @@ public class TestSnapshotCloneIndependence { return t; } - protected void loadData(final Table table, byte[]... families) throws Exception { - UTIL.loadTable(table, families); + public void loadData(final Table table, byte[]... families) throws Exception { + UTIL.loadTable(originalTable, TEST_FAM); } protected int countRows(final Table table, final byte[]... 
families) throws Exception { return UTIL.countRows(table, families); } -} \ No newline at end of file +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java index c3241c9..06a3c7e 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeer; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationQueues; +import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -160,7 +161,7 @@ public class TestReplicationAdmin { Configuration conf = TEST_UTIL.getConfiguration(); ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test HBaseAdmin", null); ReplicationQueues repQueues = - ReplicationFactory.getReplicationQueues(zkw, conf, null); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, null, zkw)); repQueues.init("server1"); // add queue for ID_ONE diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index 9e50e4e..dfd41a8 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -126,6 +126,8 @@ public class SimpleRegionObserver extends BaseRegionObserver { final AtomicInteger ctPostBulkLoadHFile = new AtomicInteger(0); final AtomicInteger ctPreBatchMutate = new AtomicInteger(0); final AtomicInteger ctPostBatchMutate = new AtomicInteger(0); + final AtomicInteger ctPreReplayWALs = new AtomicInteger(0); + final AtomicInteger ctPostReplayWALs = new AtomicInteger(0); final AtomicInteger ctPreWALRestore = new AtomicInteger(0); final AtomicInteger ctPostWALRestore = new AtomicInteger(0); final AtomicInteger ctPreWALRestoreDeprecated = new AtomicInteger(0); @@ -651,6 +653,18 @@ public class SimpleRegionObserver extends BaseRegionObserver { } @Override + public void preReplayWALs(ObserverContext env, + HRegionInfo info, Path edits) throws IOException { + ctPreReplayWALs.incrementAndGet(); + } + + @Override + public void postReplayWALs(ObserverContext env, + HRegionInfo info, Path edits) throws IOException { + ctPostReplayWALs.incrementAndGet(); + } + + @Override public void preWALRestore(ObserverContext env, HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { String tableName = logKey.getTablename().getNameAsString(); @@ -798,6 +812,14 @@ public class SimpleRegionObserver extends BaseRegionObserver { return ctPrePrepareDeleteTS.get() > 0; } + public boolean hadPreReplayWALs() { + return ctPreReplayWALs.get() > 0; + } + + public boolean hadPostReplayWALs() { + return ctPostReplayWALs.get() > 0; + } + public boolean hadPreWALRestore() { return ctPreWALRestore.get() > 0; } @@ -931,6 +953,14 @@ public class SimpleRegionObserver extends BaseRegionObserver { return ctPostIncrement.get(); } + public int 
getCtPreReplayWALs() { + return ctPreReplayWALs.get(); + } + + public int getCtPostReplayWALs() { + return ctPostReplayWALs.get(); + } + public int getCtPreWALRestore() { return ctPreWALRestore.get(); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java index 58cd0fb..66b5c60 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java @@ -29,6 +29,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -51,9 +53,8 @@ public class TestCoprocessorHost { return this.aborted; } } - @Test - public void testDoubleLoading() { + public void testDoubleLoadingAndPriorityValue() { final Configuration conf = HBaseConfiguration.create(); CoprocessorHost host = new CoprocessorHost(new TestAbortable()) { @@ -61,7 +62,7 @@ public class TestCoprocessorHost { @Override public CoprocessorEnvironment createEnvironment(Class implClass, - final Coprocessor instance, int priority, int sequence, Configuration conf) { + final Coprocessor instance, final int priority, int sequence, Configuration conf) { return new CoprocessorEnvironment() { final Coprocessor envInstance = instance; @@ -82,7 +83,7 @@ public class TestCoprocessorHost { @Override public int getPriority() { - return 0; + return priority; } @Override @@ -114,10 +115,19 @@ public class TestCoprocessorHost { }; final String key = "KEY"; final String coprocessor = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; - // Try and load coprocessor three times. - conf.setStrings(key, coprocessor, coprocessor, coprocessor); + // Try and load a coprocessor three times + conf.setStrings(key, coprocessor, coprocessor, coprocessor, SimpleRegionObserverV2.class.getName()); host.loadSystemCoprocessors(conf, key); - // Only one coprocessor loaded - Assert.assertEquals(1, host.coprocessors.size()); + // Two coprocessors(SimpleRegionObserver and SimpleRegionObserverV2) loaded + Assert.assertEquals(2, host.coprocessors.size()); + // Check the priority value + CoprocessorEnvironment simpleEnv = host.findCoprocessorEnvironment(SimpleRegionObserver.class.getName()); + CoprocessorEnvironment simpleEnv_v2 = host.findCoprocessorEnvironment(SimpleRegionObserverV2.class.getName()); + assertNotNull(simpleEnv); + assertNotNull(simpleEnv_v2); + assertEquals(Coprocessor.PRIORITY_SYSTEM, simpleEnv.getPriority()); + assertEquals(Coprocessor.PRIORITY_SYSTEM + 1, simpleEnv_v2.getPriority()); + } + public static class SimpleRegionObserverV2 extends SimpleRegionObserver { } } \ No newline at end of file diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java new file mode 100644 index 0000000..35ed531 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java @@ -0,0 +1,196 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.coprocessor; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Row; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.testclassification.CoprocessorTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * This test runs batch mutation with Increments which have custom TimeRange. + * Custom Observer records the TimeRange. + * We then verify that the recorded TimeRange has same bounds as the initial TimeRange. 
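The TestIncrementTimeRange test being added here exercises the client-side path for attaching a caller-supplied TimeRange to an Increment and then checking, through a coprocessor, that the same bounds reach the region. A minimal sketch of that call pattern, using the same illustrative table and column names as the test and assuming an already open Connection named 'conn' (not part of this patch); exception handling is omitted:

    // Sketch only: 'conn' is an existing org.apache.hadoop.hbase.client.Connection.
    try (Table t = conn.getTable(TableName.valueOf("test"))) {
      Increment inc = new Increment(Bytes.toBytes("aaa"));
      inc.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("col1"), 10L);
      // Restrict the read side of the increment to cells in [1, now + 10).
      inc.setTimeRange(1, System.currentTimeMillis() + 10);
      t.increment(inc);
      // A RegionObserver can read the increment's getTimeRange() in preIncrement,
      // which is what the MyObserver class below records and the assertions compare against.
    }
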
+ * See HBASE-15698 + */ +@Category({CoprocessorTests.class, MediumTests.class}) +public class TestIncrementTimeRange { + + private static final HBaseTestingUtility util = new HBaseTestingUtility(); + private static ManualEnvironmentEdge mee = new ManualEnvironmentEdge(); + + private static final TableName TEST_TABLE = TableName.valueOf("test"); + private static final byte[] TEST_FAMILY = Bytes.toBytes("f1"); + + private static final byte[] ROW_A = Bytes.toBytes("aaa"); + private static final byte[] ROW_B = Bytes.toBytes("bbb"); + private static final byte[] ROW_C = Bytes.toBytes("ccc"); + + private static final byte[] qualifierCol1 = Bytes.toBytes("col1"); + + private static final byte[] bytes1 = Bytes.toBytes(1); + private static final byte[] bytes2 = Bytes.toBytes(2); + private static final byte[] bytes3 = Bytes.toBytes(3); + + private Table hTableInterface; + private Table table; + + @BeforeClass + public static void setupBeforeClass() throws Exception { + util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + MyObserver.class.getName()); + util.startMiniCluster(); + EnvironmentEdgeManager.injectEdge(mee); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + util.shutdownMiniCluster(); + } + + @Before + public void before() throws Exception { + table = util.createTable(TEST_TABLE, TEST_FAMILY); + + Put puta = new Put(ROW_A); + puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1); + table.put(puta); + + Put putb = new Put(ROW_B); + putb.addColumn(TEST_FAMILY, qualifierCol1, bytes2); + table.put(putb); + + Put putc = new Put(ROW_C); + putc.addColumn(TEST_FAMILY, qualifierCol1, bytes3); + table.put(putc); + } + + @After + public void after() throws Exception { + try { + if (table != null) { + table.close(); + } + } finally { + try { + util.deleteTable(TEST_TABLE); + } catch (IOException ioe) { + } + } + } + + public static class MyObserver extends SimpleRegionObserver { + static TimeRange tr10 = null, tr2 = null; + @Override + public Result preIncrement(final ObserverContext e, + final Increment increment) throws IOException { + NavigableMap> map = increment.getFamilyCellMap(); + for (Map.Entry> entry : map.entrySet()) { + for (Cell cell : entry.getValue()) { + long incr = Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength()); + if (incr == 10) { + tr10 = increment.getTimeRange(); + } else if (incr == 2 && !increment.getTimeRange().isAllTime()) { + tr2 = increment.getTimeRange(); + } + } + } + return super.preIncrement(e, increment); + } + } + + @Test + public void testHTableInterfaceMethods() throws Exception { + hTableInterface = util.getConnection().getTable(TEST_TABLE); + checkHTableInterfaceMethods(); + } + + private void checkHTableInterfaceMethods() throws Exception { + long time = EnvironmentEdgeManager.currentTime(); + mee.setValue(time); + hTableInterface.put(new Put(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, Bytes.toBytes(1L))); + checkRowValue(ROW_A, Bytes.toBytes(1L)); + + time = EnvironmentEdgeManager.currentTime(); + mee.setValue(time); + TimeRange range10 = new TimeRange(1, time+10); + hTableInterface.increment(new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 10L) + .setTimeRange(range10.getMin(), range10.getMax())); + checkRowValue(ROW_A, Bytes.toBytes(11L)); + assertEquals(MyObserver.tr10.getMin(), range10.getMin()); + assertEquals(MyObserver.tr10.getMax(), range10.getMax()); + + time = EnvironmentEdgeManager.currentTime(); + mee.setValue(time); + TimeRange range2 = new TimeRange(1, 
time+20); + List actions = + Arrays.asList(new Row[] { new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L) + .setTimeRange(range2.getMin(), range2.getMax()), + new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L) + .setTimeRange(range2.getMin(), range2.getMax()) }); + Object[] results3 = new Object[actions.size()]; + Object[] results1 = results3; + hTableInterface.batch(actions, results1); + assertEquals(MyObserver.tr2.getMin(), range2.getMin()); + assertEquals(MyObserver.tr2.getMax(), range2.getMax()); + for (Object r2 : results1) { + assertTrue(r2 instanceof Result); + } + checkRowValue(ROW_A, Bytes.toBytes(15L)); + + hTableInterface.close(); + } + + private void checkRowValue(byte[] row, byte[] expectedValue) throws IOException { + Get get = new Get(row).addColumn(TEST_FAMILY, qualifierCol1); + Result result = hTableInterface.get(get); + byte[] actualValue = result.getValue(TEST_FAMILY, qualifierCol1); + assertArrayEquals(expectedValue, actualValue); + } +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index 73917cd..b4e93bd 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -51,11 +51,9 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.RegionPlan; -import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -1915,18 +1913,13 @@ public class TestMasterObserver { master.balanceSwitch(false); // wait for assignments to finish, if any - AssignmentManager mgr = master.getAssignmentManager(); - Collection transRegions = - mgr.getRegionStates().getRegionsInTransition().values(); - for (RegionState state : transRegions) { - mgr.getRegionStates().waitOnRegionToClearRegionsInTransition(state.getRegion()); - } + UTIL.waitUntilNoRegionsInTransition(); // move half the open regions from RS 0 to RS 1 HRegionServer rs = cluster.getRegionServer(0); byte[] destRS = Bytes.toBytes(cluster.getRegionServer(1).getServerName().toString()); //Make sure no regions are in transition now - waitForRITtoBeZero(master); + UTIL.waitUntilNoRegionsInTransition(); List openRegions = ProtobufUtil.getOnlineRegions(rs.getRSRpcServices()); int moveCnt = openRegions.size()/2; for (int i=0; i transRegions = - mgr.getRegionStates().getRegionsInTransition().values(); - for (RegionState state : transRegions) { - mgr.getRegionStates().waitOnRegionToClearRegionsInTransition(state.getRegion()); - } - } - @Test (timeout=180000) public void testTableDescriptorsEnumeration() throws Exception { MiniHBaseCluster cluster = UTIL.getHBaseCluster(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java new file 
mode 100644 index 0000000..98e930a --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java @@ -0,0 +1,282 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.coprocessor; + +import static org.junit.Assert.*; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; +import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WALKey; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +import com.google.common.collect.Lists; + +@Category(MediumTests.class) +public class TestRegionObserverForAddingMutationsFromCoprocessors { + + private static final Log LOG + = LogFactory.getLog(TestRegionObserverForAddingMutationsFromCoprocessors.class); + + private static HBaseTestingUtility util; + private static final byte[] dummy = Bytes.toBytes("dummy"); + private static final byte[] row1 = Bytes.toBytes("r1"); + private static final byte[] row2 = Bytes.toBytes("r2"); + private static final byte[] row3 = Bytes.toBytes("r3"); + private static final byte[] test = Bytes.toBytes("test"); + + @Rule + public TestName name = new TestName(); + private TableName tableName; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + Configuration conf = HBaseConfiguration.create(); + conf.set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, TestWALObserver.class.getName()); + util = new HBaseTestingUtility(conf); + util.startMiniCluster(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + 
util.shutdownMiniCluster(); + } + + @Before + public void setUp() throws Exception { + tableName = TableName.valueOf(name.getMethodName()); + } + + private void createTable(String coprocessor) throws IOException { + HTableDescriptor htd = new HTableDescriptor(tableName) + .addFamily(new HColumnDescriptor(dummy)) + .addFamily(new HColumnDescriptor(test)) + .addCoprocessor(coprocessor); + util.getAdmin().createTable(htd); + } + + /** + * Test various multiput operations. + * @throws Exception + */ + @Test + public void testMulti() throws Exception { + createTable(TestMultiMutationCoprocessor.class.getName()); + + try (Table t = util.getConnection().getTable(tableName)) { + t.put(new Put(row1).addColumn(test, dummy, dummy)); + assertRowCount(t, 3); + } + } + + /** + * Tests that added mutations from coprocessors end up in the WAL. + */ + @Test + public void testCPMutationsAreWrittenToWALEdit() throws Exception { + createTable(TestMultiMutationCoprocessor.class.getName()); + + try (Table t = util.getConnection().getTable(tableName)) { + t.put(new Put(row1).addColumn(test, dummy, dummy)); + assertRowCount(t, 3); + } + + assertNotNull(TestWALObserver.savedEdit); + assertEquals(4, TestWALObserver.savedEdit.getCells().size()); + } + + private static void assertRowCount(Table t, int expected) throws IOException { + try (ResultScanner scanner = t.getScanner(new Scan())) { + int i = 0; + for (Result r: scanner) { + LOG.info(r.toString()); + i++; + } + assertEquals(expected, i); + } + } + + @Test + public void testDeleteCell() throws Exception { + createTable(TestDeleteCellCoprocessor.class.getName()); + + try (Table t = util.getConnection().getTable(tableName)) { + t.put(Lists.newArrayList( + new Put(row1).addColumn(test, dummy, dummy), + new Put(row2).addColumn(test, dummy, dummy), + new Put(row3).addColumn(test, dummy, dummy) + )); + + assertRowCount(t, 3); + + t.delete(new Delete(test).addColumn(test, dummy)); // delete non-existing row + assertRowCount(t, 1); + } + } + + @Test + public void testDeleteFamily() throws Exception { + createTable(TestDeleteFamilyCoprocessor.class.getName()); + + try (Table t = util.getConnection().getTable(tableName)) { + t.put(Lists.newArrayList( + new Put(row1).addColumn(test, dummy, dummy), + new Put(row2).addColumn(test, dummy, dummy), + new Put(row3).addColumn(test, dummy, dummy) + )); + + assertRowCount(t, 3); + + t.delete(new Delete(test).addFamily(test)); // delete non-existing row + assertRowCount(t, 1); + } + } + + @Test + public void testDeleteRow() throws Exception { + createTable(TestDeleteRowCoprocessor.class.getName()); + + try (Table t = util.getConnection().getTable(tableName)) { + t.put(Lists.newArrayList( + new Put(row1).addColumn(test, dummy, dummy), + new Put(row2).addColumn(test, dummy, dummy), + new Put(row3).addColumn(test, dummy, dummy) + )); + + assertRowCount(t, 3); + + t.delete(new Delete(test).addColumn(test, dummy)); // delete non-existing row + assertRowCount(t, 1); + } + } + + public static class TestMultiMutationCoprocessor extends BaseRegionObserver { + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) throws IOException { + Mutation mut = miniBatchOp.getOperation(0); + List cells = mut.getFamilyCellMap().get(test); + Put[] puts = new Put[] { + new Put(row1).addColumn(test, dummy, cells.get(0).getTimestamp(), + Bytes.toBytes("cpdummy")), + new Put(row2).addColumn(test, dummy, cells.get(0).getTimestamp(), dummy), + new Put(row3).addColumn(test, dummy, cells.get(0).getTimestamp(), 
dummy), + }; + LOG.info("Putting:" + puts); + miniBatchOp.addOperationsFromCP(0, puts); + } + } + + public static class TestDeleteCellCoprocessor extends BaseRegionObserver { + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) throws IOException { + Mutation mut = miniBatchOp.getOperation(0); + + if (mut instanceof Delete) { + List cells = mut.getFamilyCellMap().get(test); + Delete[] deletes = new Delete[] { + // delete only 2 rows + new Delete(row1).addColumns(test, dummy, cells.get(0).getTimestamp()), + new Delete(row2).addColumns(test, dummy, cells.get(0).getTimestamp()), + }; + LOG.info("Deleting:" + Arrays.toString(deletes)); + miniBatchOp.addOperationsFromCP(0, deletes); + } + } + } + + public static class TestDeleteFamilyCoprocessor extends BaseRegionObserver { + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) throws IOException { + Mutation mut = miniBatchOp.getOperation(0); + + if (mut instanceof Delete) { + List cells = mut.getFamilyCellMap().get(test); + Delete[] deletes = new Delete[] { + // delete only 2 rows + new Delete(row1).addFamily(test, cells.get(0).getTimestamp()), + new Delete(row2).addFamily(test, cells.get(0).getTimestamp()), + }; + LOG.info("Deleting:" + Arrays.toString(deletes)); + miniBatchOp.addOperationsFromCP(0, deletes); + } + } + } + + public static class TestDeleteRowCoprocessor extends BaseRegionObserver { + @Override + public void preBatchMutate(ObserverContext c, + MiniBatchOperationInProgress miniBatchOp) throws IOException { + Mutation mut = miniBatchOp.getOperation(0); + + if (mut instanceof Delete) { + List cells = mut.getFamilyCellMap().get(test); + Delete[] deletes = new Delete[] { + // delete only 2 rows + new Delete(row1, cells.get(0).getTimestamp()), + new Delete(row2, cells.get(0).getTimestamp()), + }; + LOG.info("Deleting:" + Arrays.toString(deletes)); + miniBatchOp.addOperationsFromCP(0, deletes); + } + } + } + + public static class TestWALObserver extends BaseWALObserver { + static WALEdit savedEdit = null; + @Override + public void postWALWrite(ObserverContext ctx, + HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { + if (info.getTable().equals(TableName.valueOf("testCPMutationsAreWrittenToWALEdit"))) { + savedEdit = logEdit; + } + super.postWALWrite(ctx, info, logKey, logEdit); + } + } +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index abfadec..7bd4f93 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; @@ -376,7 +375,7 @@ public class TestRegionObserverInterface { @Test (timeout=300000) public void testHBASE14489() throws IOException { TableName tableName = TableName.valueOf("testHBASE14489"); - HTable table = util.createTable(tableName, new byte[][] { A }); + Table table = util.createTable(tableName, new byte[][] { 
A }); Put put = new Put(ROW); put.addColumn(A, A, A); table.put(put); @@ -607,7 +606,6 @@ public class TestRegionObserverInterface { } } - @Ignore // TODO: HBASE-13391 to fix flaky test @Test (timeout=300000) public void testRecovery() throws Exception { LOG.info(TestRegionObserverInterface.class.getName() +".testRecovery"); @@ -630,6 +628,9 @@ public class TestRegionObserverInterface { put.addColumn(C, C, C); table.put(put); + // put two times + table.put(put); + verifyMethodResult(SimpleRegionObserver.class, new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete"}, @@ -638,10 +639,11 @@ public class TestRegionObserverInterface { ); verifyMethodResult(SimpleRegionObserver.class, - new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut", - "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"}, + new String[] {"getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore", + "getCtPostWALRestore", "getCtPrePut", "getCtPostPut", + "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"}, tableName, - new Integer[] {0, 0, 1, 1, 0, 0}); + new Integer[] {0, 0, 0, 0, 2, 2, 0, 0}); cluster.killRegionServer(rs1.getRegionServer().getServerName()); Threads.sleep(1000); // Let the kill soak in. @@ -649,17 +651,17 @@ public class TestRegionObserverInterface { LOG.info("All regions assigned"); verifyMethodResult(SimpleRegionObserver.class, - new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut", - "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"}, + new String[] {"getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore", + "getCtPostWALRestore", "getCtPrePut", "getCtPostPut", + "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"}, tableName, - new Integer[]{1, 1, 0, 0, 0, 0}); + new Integer[]{1, 1, 2, 2, 0, 0, 0, 0}); } finally { util.deleteTable(tableName); table.close(); } } - @Ignore // TODO: HBASE-13391 to fix flaky test @Test (timeout=300000) public void testLegacyRecovery() throws Exception { LOG.info(TestRegionObserverInterface.class.getName() +".testLegacyRecovery"); @@ -682,6 +684,9 @@ public class TestRegionObserverInterface { put.addColumn(C, C, C); table.put(put); + // put two times + table.put(put); + verifyMethodResult(SimpleRegionObserver.Legacy.class, new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete"}, @@ -690,10 +695,11 @@ public class TestRegionObserverInterface { ); verifyMethodResult(SimpleRegionObserver.Legacy.class, - new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", - "getCtPostPut", "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"}, + new String[] {"getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore", + "getCtPostWALRestore", "getCtPrePut", "getCtPostPut", + "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"}, tableName, - new Integer[] {0, 0, 1, 1, 0, 0}); + new Integer[] {0, 0, 0, 0, 2, 2, 0, 0}); cluster.killRegionServer(rs1.getRegionServer().getServerName()); Threads.sleep(1000); // Let the kill soak in. 
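Both recovery tests in this file now assert on the new replay-WALs counters in addition to the per-edit WAL-restore counters: the expected values in these hunks are one preReplayWALs/postReplayWALs pair for the recovering region and two preWALRestore/postWALRestore calls, one per replayed put (the put above is applied twice). A minimal sketch of an observer that tracks the new hooks the same way the SimpleRegionObserver changes earlier in this patch do; the class, field and accessor names here are illustrative only:

    // Counts invocations of the WAL-replay coprocessor hooks exercised by these tests.
    public static class ReplayCountingObserver extends BaseRegionObserver {
      private final AtomicInteger preReplayWALsCalls = new AtomicInteger(0);
      private final AtomicInteger postReplayWALsCalls = new AtomicInteger(0);

      @Override
      public void preReplayWALs(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
          HRegionInfo info, Path edits) throws IOException {
        // Called before the recovered edits at 'edits' are replayed for the region.
        preReplayWALsCalls.incrementAndGet();
      }

      @Override
      public void postReplayWALs(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
          HRegionInfo info, Path edits) throws IOException {
        // Called after that replay completes.
        postReplayWALsCalls.incrementAndGet();
      }

      public int getCtPreReplayWALs() { return preReplayWALsCalls.get(); }
      public int getCtPostReplayWALs() { return postReplayWALsCalls.get(); }
    }
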
@@ -701,10 +707,11 @@ public class TestRegionObserverInterface { LOG.info("All regions assigned"); verifyMethodResult(SimpleRegionObserver.Legacy.class, - new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", - "getCtPostPut", "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"}, + new String[] {"getCtPreReplayWALs", "getCtPostReplayWALs", "getCtPreWALRestore", + "getCtPostWALRestore", "getCtPrePut", "getCtPostPut", + "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"}, tableName, - new Integer[]{1, 1, 0, 0, 1, 1}); + new Integer[]{1, 1, 2, 2, 0, 0, 2, 2}); } } finally { util.deleteTable(tableName); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 5c79d72..4a4b0e9 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -305,19 +305,15 @@ public class TestHeapSize { // DefaultMemStore Deep Overhead actual = DefaultMemStore.DEEP_OVERHEAD; expected = ClassSize.estimateBase(cl, false); - expected += (2 * ClassSize.estimateBase(AtomicLong.class, false)); - expected += (2 * ClassSize.estimateBase(CellSet.class, false)); - expected += (2 * ClassSize.estimateBase(ConcurrentSkipListMap.class, false)); - expected += (2 * ClassSize.estimateBase(TimeRangeTracker.class, false)); + expected += ClassSize.estimateBase(AtomicLong.class, false); + expected += ClassSize.estimateBase(CellSet.class, false); + expected += ClassSize.estimateBase(ConcurrentSkipListMap.class, false); + expected += ClassSize.estimateBase(TimeRangeTracker.class, false); if(expected != actual) { ClassSize.estimateBase(cl, true); ClassSize.estimateBase(AtomicLong.class, true); - ClassSize.estimateBase(AtomicLong.class, true); - ClassSize.estimateBase(CellSet.class, true); ClassSize.estimateBase(CellSet.class, true); ClassSize.estimateBase(ConcurrentSkipListMap.class, true); - ClassSize.estimateBase(ConcurrentSkipListMap.class, true); - ClassSize.estimateBase(TimeRangeTracker.class, true); ClassSize.estimateBase(TimeRangeTracker.class, true); assertEquals(expected, actual); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java index 6359bef..6cf4d68 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java @@ -103,7 +103,6 @@ public class TestChangingEncoding { conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024); // ((Log4JLogger)RpcServerImplementation.LOG).getLogger().setLevel(Level.TRACE); // ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.TRACE); - conf.setBoolean("hbase.online.schema.update.enable", true); TEST_UTIL.startMiniCluster(); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java index ceb945b..45cec78 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java @@ -263,7 +263,7 @@ public abstract class AbstractTestIPC { fail("Expected an exception to have been thrown!"); } catch (Exception e) { LOG.info("Caught expected exception: " + e.toString()); - 
assertTrue(StringUtils.stringifyException(e).contains("Injected fault")); + assertTrue(e.toString(), StringUtils.stringifyException(e).contains("Injected fault")); } finally { rpcServer.stop(); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java index fac3593..ca727e4 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java @@ -47,6 +47,7 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.NavigableMap; @@ -225,8 +226,8 @@ public abstract class MultiTableInputFormatTestBase { private void testScan(String start, String stop, String last) throws IOException, InterruptedException, ClassNotFoundException { String jobName = - "Scan" + (start != null ? start.toUpperCase() : "Empty") + "To" + - (stop != null ? stop.toUpperCase() : "Empty"); + "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 4d92b8a..12761d3 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -66,7 +66,6 @@ import org.apache.hadoop.hbase.TagUtil; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; @@ -515,11 +514,8 @@ public class TestHFileOutputFormat2 { if (shouldChangeRegions) { LOG.info("Changing regions in table"); admin.disableTable(table.getName()); - while(util.getMiniHBaseCluster().getMaster().getAssignmentManager(). 
- getRegionStates().isRegionsInTransition()) { - Threads.sleep(200); - LOG.info("Waiting on table to finish disabling"); - } + util.waitUntilNoRegionsInTransition(); + util.deleteTable(table.getName()); byte[][] newSplitKeys = generateRandomSplitKeys(14); table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys); @@ -614,7 +610,7 @@ public class TestHFileOutputFormat2 { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToCompression = getMockColumnFamiliesForCompression(numCfs); - Table table = Mockito.mock(HTable.class); + Table table = Mockito.mock(Table.class); setupMockColumnFamiliesForCompression(table, familyToCompression); HFileOutputFormat2.configureCompression(conf, table.getTableDescriptor()); @@ -685,7 +681,7 @@ public class TestHFileOutputFormat2 { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToBloomType = getMockColumnFamiliesForBloomType(numCfs); - Table table = Mockito.mock(HTable.class); + Table table = Mockito.mock(Table.class); setupMockColumnFamiliesForBloomType(table, familyToBloomType); HFileOutputFormat2.configureBloomType(table.getTableDescriptor(), conf); @@ -756,7 +752,7 @@ public class TestHFileOutputFormat2 { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToBlockSize = getMockColumnFamiliesForBlockSize(numCfs); - Table table = Mockito.mock(HTable.class); + Table table = Mockito.mock(Table.class); setupMockColumnFamiliesForBlockSize(table, familyToBlockSize); HFileOutputFormat2.configureBlockSize(table.getTableDescriptor(), conf); @@ -831,7 +827,7 @@ public class TestHFileOutputFormat2 { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToDataBlockEncoding = getMockColumnFamiliesForDataBlockEncoding(numCfs); - Table table = Mockito.mock(HTable.class); + Table table = Mockito.mock(Table.class); setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding); HTableDescriptor tableDescriptor = table.getTableDescriptor(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java index 6dc8566..9f2596c 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java @@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; +import java.util.Locale; import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; @@ -374,7 +375,7 @@ public class TestLoadIncrementalHFiles { // set real family name to upper case in purpose to simulate the case that // family name in HFiles is invalid HColumnDescriptor family = - new HColumnDescriptor(Bytes.toBytes(new String(FAMILY).toUpperCase())); + new HColumnDescriptor(Bytes.toBytes(new String(FAMILY).toUpperCase(Locale.ROOT))); htd.addFamily(family); try { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java index 0975fd2..66d7eb1 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java @@ -47,9 +47,9 @@ import 
org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -285,7 +285,7 @@ public class TestLoadIncrementalHFilesSplitRecovery { throws IOException { int i = attmptedCalls.incrementAndGet(); if (i == 1) { - Connection errConn = null; + Connection errConn; try { errConn = getMockedConnection(util.getConfiguration()); } catch (Exception e) { @@ -293,10 +293,10 @@ public class TestLoadIncrementalHFilesSplitRecovery { throw new RuntimeException("mocking cruft, should never happen"); } failedCalls.incrementAndGet(); - return super.tryAtomicRegionLoad((HConnection)errConn, tableName, first, lqis); + return super.tryAtomicRegionLoad(errConn, tableName, first, lqis); } - return super.tryAtomicRegionLoad((HConnection)conn, tableName, first, lqis); + return super.tryAtomicRegionLoad(conn, tableName, first, lqis); } }; try { @@ -316,9 +316,9 @@ public class TestLoadIncrementalHFilesSplitRecovery { } @SuppressWarnings("deprecation") - private HConnection getMockedConnection(final Configuration conf) + private ClusterConnection getMockedConnection(final Configuration conf) throws IOException, ServiceException { - HConnection c = Mockito.mock(HConnection.class); + ClusterConnection c = Mockito.mock(ClusterConnection.class); Mockito.when(c.getConfiguration()).thenReturn(conf); Mockito.doNothing().when(c).close(); // Make it so we return a particular location when asked. diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java index 46e1a0e..db5b57a 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.NavigableMap; @@ -176,8 +177,8 @@ public abstract class TestTableInputFormatScanBase { */ protected void testScanFromConfiguration(String start, String stop, String last) throws IOException, InterruptedException, ClassNotFoundException { - String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase() : "Empty") + - "To" + (stop != null ? stop.toUpperCase() : "Empty"); + String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + + "To" + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); c.set(TableInputFormat.INPUT_TABLE, TABLE_NAME.getNameAsString()); c.set(TableInputFormat.SCAN_COLUMN_FAMILY, Bytes.toString(INPUT_FAMILY)); @@ -213,8 +214,8 @@ public abstract class TestTableInputFormatScanBase { */ protected void testScan(String start, String stop, String last) throws IOException, InterruptedException, ClassNotFoundException { - String jobName = "Scan" + (start != null ? start.toUpperCase() : "Empty") + - "To" + (stop != null ? 
stop.toUpperCase() : "Empty"); + String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + + "To" + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); Scan scan = new Scan(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 95ef6d5..7d3d2e9 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -191,9 +191,8 @@ public class TestAssignmentManagerOnCluster { MetaTableAccessor.addRegionToMeta(meta, hri); HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - master.assignRegion(hri); AssignmentManager am = master.getAssignmentManager(); - am.waitForAssignment(hri); + TEST_UTIL.assignRegion(hri); RegionStates regionStates = am.getRegionStates(); ServerName serverName = regionStates.getRegionServerOfRegion(hri); @@ -248,7 +247,7 @@ public class TestAssignmentManagerOnCluster { final AssignmentManager am = master.getAssignmentManager(); RegionPlan plan = new RegionPlan(hri, null, deadServer); am.addPlan(hri.getEncodedName(), plan); - master.assignRegion(hri); + TEST_UTIL.assignRegion(hri); TEST_UTIL.waitFor(60000, new Waiter.Predicate() { @Override @@ -446,9 +445,8 @@ public class TestAssignmentManagerOnCluster { MetaTableAccessor.addRegionToMeta(meta, hri); HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - master.assignRegion(hri); AssignmentManager am = master.getAssignmentManager(); - assertTrue(am.waitForAssignment(hri)); + assertTrue(TEST_UTIL.assignRegion(hri)); ServerName sn = am.getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri, sn, 6000); @@ -495,9 +493,8 @@ public class TestAssignmentManagerOnCluster { MetaTableAccessor.addRegionToMeta(meta, hri); HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - master.assignRegion(hri); AssignmentManager am = master.getAssignmentManager(); - assertTrue(am.waitForAssignment(hri)); + assertTrue(TEST_UTIL.assignRegion(hri)); ServerName sn = am.getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri, sn, 6000); @@ -543,9 +540,8 @@ public class TestAssignmentManagerOnCluster { MyLoadBalancer.controledRegion = hri; HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - master.assignRegion(hri); AssignmentManager am = master.getAssignmentManager(); - assertFalse(am.waitForAssignment(hri)); + assertFalse(TEST_UTIL.assignRegion(hri)); RegionState state = am.getRegionStates().getRegionState(hri); assertEquals(RegionState.State.FAILED_OPEN, state.getState()); @@ -553,8 +549,7 @@ public class TestAssignmentManagerOnCluster { assertNull(state.getServerName()); MyLoadBalancer.controledRegion = null; - master.assignRegion(hri); - assertTrue(am.waitForAssignment(hri)); + assertTrue(TEST_UTIL.assignRegion(hri)); ServerName serverName = master.getAssignmentManager(). 
getRegionStates().getRegionServerOfRegion(hri); @@ -581,17 +576,15 @@ public class TestAssignmentManagerOnCluster { desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta, hri); - MyLoadBalancer.controledRegion = hri; - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); AssignmentManager am = master.getAssignmentManager(); + // round-robin assignment but balancer cannot find a plan // assignment should fail - am.assign(Arrays.asList(hri)); - + MyLoadBalancer.controledRegion = hri; // if bulk assignment cannot update region state to online // or failed_open this waits until timeout - assertFalse(am.waitForAssignment(hri)); + assertFalse(TEST_UTIL.assignRegion(hri)); RegionState state = am.getRegionStates().getRegionState(hri); assertEquals(RegionState.State.FAILED_OPEN, state.getState()); // Failed to open since no plan, so it's on no server @@ -599,8 +592,7 @@ public class TestAssignmentManagerOnCluster { // try again with valid plan MyLoadBalancer.controledRegion = null; - am.assign(Arrays.asList(hri)); - assertTrue(am.waitForAssignment(hri)); + assertTrue(TEST_UTIL.assignRegion(hri)); ServerName serverName = master.getAssignmentManager(). getRegionStates().getRegionServerOfRegion(hri); @@ -689,9 +681,8 @@ public class TestAssignmentManagerOnCluster { fs.create(regionDir, true); HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - master.assignRegion(hri); AssignmentManager am = master.getAssignmentManager(); - assertFalse(am.waitForAssignment(hri)); + assertFalse(TEST_UTIL.assignRegion(hri)); RegionState state = am.getRegionStates().getRegionState(hri); assertEquals(RegionState.State.FAILED_OPEN, state.getState()); @@ -702,8 +693,7 @@ public class TestAssignmentManagerOnCluster { // remove the blocking file, so that region can be opened fs.delete(regionDir, true); - master.assignRegion(hri); - assertTrue(am.waitForAssignment(hri)); + assertTrue(TEST_UTIL.assignRegion(hri)); ServerName serverName = master.getAssignmentManager(). 
getRegionStates().getRegionServerOfRegion(hri); @@ -768,9 +758,8 @@ public class TestAssignmentManagerOnCluster { MetaTableAccessor.addRegionToMeta(meta, hri); HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - master.assignRegion(hri); AssignmentManager am = master.getAssignmentManager(); - assertTrue(am.waitForAssignment(hri)); + assertTrue(TEST_UTIL.assignRegion(hri)); ServerName sn = am.getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri, sn, 6000); @@ -816,8 +805,9 @@ public class TestAssignmentManagerOnCluster { MyRegionObserver.postOpenEnabled.set(true); MyRegionObserver.postOpenCalled = false; HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + AssignmentManager am = master.getAssignmentManager(); // Region will be opened, but it won't complete - master.assignRegion(hri); + am.assign(hri); long end = EnvironmentEdgeManager.currentTime() + 20000; // Wait till postOpen is called while (!MyRegionObserver.postOpenCalled ) { @@ -826,7 +816,6 @@ public class TestAssignmentManagerOnCluster { Thread.sleep(300); } - AssignmentManager am = master.getAssignmentManager(); // Now let's unassign it, it should do nothing am.unassign(hri); RegionState state = am.getRegionStates().getRegionState(hri); @@ -887,12 +876,14 @@ public class TestAssignmentManagerOnCluster { // Assign the region master = (MyMaster)cluster.getMaster(); - master.assignRegion(hri); + AssignmentManager am = master.getAssignmentManager(); + + am.assign(hri); // Hold SSH before killing the hosting server master.enableSSH(false); - AssignmentManager am = master.getAssignmentManager(); + RegionStates regionStates = am.getRegionStates(); ServerName metaServer = regionStates.getRegionServerOfRegion( HRegionInfo.FIRST_META_REGIONINFO); @@ -1015,10 +1006,9 @@ public class TestAssignmentManagerOnCluster { // Assign the region master = (MyMaster)cluster.getMaster(); - master.assignRegion(hri); AssignmentManager am = master.getAssignmentManager(); RegionStates regionStates = am.getRegionStates(); - assertTrue(am.waitForAssignment(hri)); + assertTrue(TEST_UTIL.assignRegion(hri)); // Disable the table admin.disableTable(table); @@ -1056,9 +1046,9 @@ public class TestAssignmentManagerOnCluster { // Assign the region master = (MyMaster)cluster.getMaster(); - master.assignRegion(hri); - AssignmentManager am = master.getAssignmentManager(); + am.assign(hri); + RegionStates regionStates = am.getRegionStates(); ServerName metaServer = regionStates.getRegionServerOfRegion( HRegionInfo.FIRST_META_REGIONINFO); @@ -1129,9 +1119,9 @@ public class TestAssignmentManagerOnCluster { // Assign the region master = (MyMaster)cluster.getMaster(); - master.assignRegion(hri); - AssignmentManager am = master.getAssignmentManager(); + am.assign(hri); + RegionStates regionStates = am.getRegionStates(); ServerName metaServer = regionStates.getRegionServerOfRegion( HRegionInfo.FIRST_META_REGIONINFO); @@ -1196,9 +1186,8 @@ public class TestAssignmentManagerOnCluster { new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta, hri); HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - master.assignRegion(hri); AssignmentManager am = master.getAssignmentManager(); - am.waitForAssignment(hri); + TEST_UTIL.assignRegion(hri); RegionStates regionStates = am.getRegionStates(); ServerName serverName = regionStates.getRegionServerOfRegion(hri); // Assert the the region is actually open on the server diff --git 
hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index cc8e2d8..0ad74c2 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -134,8 +134,8 @@ public class TestCatalogJanitor { } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } - // Mock an HConnection and a AdminProtocol implementation. Have the - // HConnection return the HRI. Have the HRI return a few mocked up responses + // Mock an ClusterConnection and a AdminProtocol implementation. Have the + // ClusterConnection return the HRI. Have the HRI return a few mocked up responses // to make our test work. this.connection = HConnectionTestingUtility.getMockedConnectionAndDecorate(this.c, diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index 374c773..da0b9a6 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -185,6 +185,12 @@ public class TestMasterNoCluster { // of the 'remote' mocked up regionservers. CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager( TESTUTIL.getConfiguration()); + // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than + // the conf from the master; the conf will already have an ClusterConnection + // associate so the below mocking of a connection will fail. + final ClusterConnection mockedConnection = HConnectionTestingUtility.getMockedConnectionAndDecorate( + TESTUTIL.getConfiguration(), rs0, rs0, rs0.getServerName(), + HRegionInfo.FIRST_META_REGIONINFO); HMaster master = new HMaster(conf, cp) { InetAddress getRemoteInetAddress(final int port, final long serverStartCode) throws UnknownHostException { @@ -215,16 +221,12 @@ public class TestMasterNoCluster { @Override public ClusterConnection getConnection() { - // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than - // the conf from the master; the conf will already have an HConnection - // associate so the below mocking of a connection will fail. - try { - return HConnectionTestingUtility.getMockedConnectionAndDecorate( - TESTUTIL.getConfiguration(), rs0, rs0, rs0.getServerName(), - HRegionInfo.FIRST_META_REGIONINFO); - } catch (IOException e) { - return null; - } + return mockedConnection; + } + + @Override + public ClusterConnection getClusterConnection() { + return mockedConnection; } }; master.start(); @@ -291,7 +293,7 @@ public class TestMasterNoCluster { @Override public ClusterConnection getConnection() { // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than - // the conf from the master; the conf will already have an HConnection + // the conf from the master; the conf will already have a Connection // associate so the below mocking of a connection will fail. 
try { return HConnectionTestingUtility.getMockedConnectionAndDecorate( diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java index 135aae5..02f01c4 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java @@ -24,8 +24,8 @@ import java.io.IOException; import java.io.StringWriter; import java.util.HashSet; import java.util.List; -import java.util.NavigableMap; import java.util.Set; +import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -90,10 +90,8 @@ public class TestMasterStatusServlet { // Fake AssignmentManager and RIT AssignmentManager am = Mockito.mock(AssignmentManager.class); RegionStates rs = Mockito.mock(RegionStates.class); - NavigableMap regionsInTransition = - Maps.newTreeMap(); - regionsInTransition.put("r1", - new RegionState(FAKE_HRI, RegionState.State.CLOSING, 12345L, FAKE_HOST)); + Set regionsInTransition = new HashSet(); + regionsInTransition.add(new RegionState(FAKE_HRI, RegionState.State.CLOSING, 12345L, FAKE_HOST)); Mockito.doReturn(rs).when(am).getRegionStates(); Mockito.doReturn(regionsInTransition).when(rs).getRegionsInTransition(); Mockito.doReturn(am).when(master).getAssignmentManager(); @@ -165,19 +163,18 @@ public class TestMasterStatusServlet { RegionStates rs = Mockito.mock(RegionStates.class); // Add 100 regions as in-transition - NavigableMap regionsInTransition = - Maps.newTreeMap(); + TreeSet regionsInTransition = new TreeSet( + RegionStates.REGION_STATE_COMPARATOR); for (byte i = 0; i < 100; i++) { HRegionInfo hri = new HRegionInfo(FAKE_TABLE.getTableName(), new byte[]{i}, new byte[]{(byte) (i+1)}); - regionsInTransition.put(hri.getEncodedName(), + regionsInTransition.add( new RegionState(hri, RegionState.State.CLOSING, 12345L, FAKE_HOST)); } // Add hbase:meta in transition as well - regionsInTransition.put( - HRegionInfo.FIRST_META_REGIONINFO.getEncodedName(), + regionsInTransition.add( new RegionState(HRegionInfo.FIRST_META_REGIONINFO, - RegionState.State.CLOSING, 12345L, FAKE_HOST)); + RegionState.State.CLOSING, 123L, FAKE_HOST)); Mockito.doReturn(rs).when(am).getRegionStates(); Mockito.doReturn(regionsInTransition).when(rs).getRegionsInTransition(); Mockito.doReturn(regionsInTransition).when(rs).getRegionsInTransitionOrderedByTimestamp(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java index 692b5a0..7c41c0f 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java @@ -86,8 +86,8 @@ public class TestRestartCluster { LOG.info("\n\nStarting cluster the second time"); UTIL.restartHBaseCluster(3); - // Need to use a new 'Configuration' so we make a new HConnection. - // Otherwise we're reusing an HConnection that has gone stale because + // Need to use a new 'Configuration' so we make a new Connection. + // Otherwise we're reusing an Connection that has gone stale because // the shutdown of the cluster also called shut of the connection. 
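The reworded comment above is the operative point of this hunk: after UTIL.restartHBaseCluster() a Connection created against the old cluster is stale, so the test relies on a fresh Connection built from a fresh 'Configuration'. A minimal sketch of that pattern (names are illustrative; 'util' is the test's HBaseTestingUtility):

    // Build a brand-new Connection rather than reusing one created before the restart.
    static List<HRegionInfo> regionsAfterRestart(HBaseTestingUtility util) throws IOException {
      Configuration freshConf = new Configuration(util.getConfiguration());
      try (Connection freshConnection = ConnectionFactory.createConnection(freshConf)) {
        // Same call the test issues below, but against a connection that postdates the restart.
        return MetaTableAccessor.getAllRegions(freshConnection, false);
      }
    }
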
allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false); assertEquals(4, allRegions.size()); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java index 573fdcb..36f505b 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java @@ -88,7 +88,6 @@ public class TestTableLockManager { private static final CountDownLatch addColumn = new CountDownLatch(1); public void prepareMiniCluster() throws Exception { - TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true); TEST_UTIL.startMiniCluster(2); TEST_UTIL.createTable(TABLE_NAME, FAMILY); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java index 903f614..7ae0133 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java @@ -70,6 +70,7 @@ public class BalancerTestBase { conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 0.75f); conf.setFloat("hbase.regions.slop", 0.0f); conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0); + conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.0f); loadBalancer = new StochasticLoadBalancer(); loadBalancer.setConf(conf); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java index 8300f32..9caf264 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java @@ -93,6 +93,20 @@ public class TestStochasticLoadBalancer extends BalancerTestBase { } } + @Test + public void testNeedBalance() { + conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f); + loadBalancer.setConf(conf); + for (int[] mockCluster : clusterStateMocks) { + Map> servers = mockClusterServers(mockCluster); + List plans = loadBalancer.balanceCluster(servers); + assertNull(plans); + } + // reset config + conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.0f); + loadBalancer.setConf(conf); + } + /** * Test the load balancing algorithm. 
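The new testNeedBalance case added above pins down the 'hbase.master.balancer.stochastic.minCostNeedBalance' knob: when the cluster's current cost is below that threshold the stochastic balancer declines to balance and balanceCluster returns null, which is why BalancerTestBase and the TestStochasticLoadBalancer2 fixture below reset the value to 0.0f so the remaining cases still produce plans. A minimal standalone sketch of the knob, with 'buildMockClusterState()' standing in for the mock-cluster helpers the test base provides (that helper name is assumed, not part of the patch):

    // With a high threshold the balancer reports that no balancing is needed.
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);
    StochasticLoadBalancer balancer = new StochasticLoadBalancer();
    balancer.setConf(conf);
    Map<ServerName, List<HRegionInfo>> clusterState = buildMockClusterState();
    List<RegionPlan> plans = balancer.balanceCluster(clusterState);
    // Expected: plans == null, mirroring assertNull(plans) in testNeedBalance above.
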
* diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer2.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer2.java index 395eef2..44c806d 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer2.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer2.java @@ -21,6 +21,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.testclassification.FlakeyTests; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -28,13 +30,25 @@ import org.junit.experimental.categories.Category; public class TestStochasticLoadBalancer2 extends BalancerTestBase { private static final Log LOG = LogFactory.getLog(TestStochasticLoadBalancer2.class); - @Test (timeout = 800000) - public void testRegionReplicasOnMidCluster() { + @Before + public void before() { conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L); - conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0); - TestStochasticLoadBalancer.loadBalancer.setConf(conf); + conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec + conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f); + loadBalancer.setConf(conf); + } + + @After + public void after() { + // reset config to make sure balancer run + conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.0f); + loadBalancer.setConf(conf); + } + + @Test (timeout = 800000) + public void testRegionReplicasOnMidCluster() { int numNodes = 200; int numRegions = 40 * 200; int replication = 3; // 3 replicas per region @@ -45,11 +59,6 @@ public class TestStochasticLoadBalancer2 extends BalancerTestBase { @Test (timeout = 800000) public void testRegionReplicasOnLargeCluster() { - conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); - conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L); - conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec - conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0); - loadBalancer.setConf(conf); int numNodes = 1000; int numRegions = 20 * numNodes; // 20 * replication regions per RS int numRegionsPerServer = 19; // all servers except one @@ -62,8 +71,6 @@ public class TestStochasticLoadBalancer2 extends BalancerTestBase { public void testRegionReplicasOnMidClusterHighReplication() { conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 4000000L); conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec - conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0); - conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); loadBalancer.setConf(conf); int numNodes = 80; int numRegions = 6 * numNodes; @@ -75,10 +82,7 @@ public class TestStochasticLoadBalancer2 extends BalancerTestBase { @Test (timeout = 800000) public void testRegionReplicationOnMidClusterReplicationGreaterThanNumNodes() { - conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L); conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec - 
conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0); - conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); loadBalancer.setConf(conf); int numNodes = 40; int numRegions = 6 * 50; @@ -87,4 +91,4 @@ public class TestStochasticLoadBalancer2 extends BalancerTestBase { int numTables = 10; testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, false); } -} \ No newline at end of file +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index 47db32b..18950a2 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationQueues; +import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments; import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner; import org.apache.hadoop.hbase.replication.regionserver.Replication; @@ -94,7 +95,7 @@ public class TestLogsCleaner { Replication.decorateMasterConfiguration(conf); Server server = new DummyServer(); ReplicationQueues repQueues = - ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, server, server.getZooKeeper())); repQueues.init(server.getServerName().toString()); final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java index d4f23c8..e5f1e69 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueues; +import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments; import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl; import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner; @@ -87,8 +88,7 @@ public class TestReplicationHFileCleaner { Replication.decorateMasterConfiguration(conf); rp = ReplicationFactory.getReplicationPeers(server.getZooKeeper(), conf, server); rp.init(); - - rq = ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server); + rq = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, server, server.getZooKeeper())); rq.init(server.getServerName().toString()); try { fs = FileSystem.get(conf); @@ -111,6 +111,7 @@ public class TestReplicationHFileCleaner { public void setup() throws ReplicationException, IOException { root = TEST_UTIL.getDataTestDirOnTestFS(); 
rp.addPeer(peerId, new ReplicationPeerConfig().setClusterKey(TEST_UTIL.getClusterKey())); + rq.addPeerToHFileRefs(peerId); } @After diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java deleted file mode 100644 index 8a8c059..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java +++ /dev/null @@ -1,276 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master.handler; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; - -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.InvalidFamilyOperationException; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.wal.WALSplitter; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({MasterTests.class, LargeTests.class}) -public class TestTableDeleteFamilyHandler { - - private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final TableName TABLENAME = - TableName.valueOf("column_family_handlers"); - private static final byte[][] FAMILIES = new byte[][] { Bytes.toBytes("cf1"), - Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; - - /** - * Start up a mini cluster and put a small table of empty regions into it. 
- * - * @throws Exception - */ - @BeforeClass - public static void beforeAllTests() throws Exception { - TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true); - TEST_UTIL.startMiniCluster(2); - } - - @AfterClass - public static void afterAllTests() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Before - public void setup() throws IOException, InterruptedException { - // Create a table of three families. This will assign a region. - TEST_UTIL.createTable(TABLENAME, FAMILIES); - Table t = TEST_UTIL.getConnection().getTable(TABLENAME); - while(TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getRegionsInTransition().size() > 0) { - Thread.sleep(100); - } - // Create multiple regions in all the three column families - while(TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().getRegionsInTransition().size() > 0) { - Thread.sleep(100); - } - // Load the table with data for all families - TEST_UTIL.loadTable(t, FAMILIES); - - TEST_UTIL.flush(); - - t.close(); - - TEST_UTIL.ensureSomeRegionServersAvailable(2); - } - - @After - public void cleanup() throws Exception { - TEST_UTIL.deleteTable(TABLENAME); - } - - @Test - public void deleteColumnFamilyWithMultipleRegions() throws Exception { - Admin admin = TEST_UTIL.getHBaseAdmin(); - HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME); - - FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); - - // 1 - Check if table exists in descriptor - assertTrue(admin.isTableAvailable(TABLENAME)); - - // 2 - Check if all three families exist in descriptor - assertEquals(3, beforehtd.getColumnFamilies().length); - HColumnDescriptor[] families = beforehtd.getColumnFamilies(); - for (int i = 0; i < families.length; i++) { - assertTrue(families[i].getNameAsString().equals("cf" + (i + 1))); - } - - // 3 - Check if table exists in FS - Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME); - assertTrue(fs.exists(tableDir)); - - // 4 - Check if all the 3 column families exist in FS - FileStatus[] fileStatus = fs.listStatus(tableDir); - for (int i = 0; i < fileStatus.length; i++) { - if (fileStatus[i].isDirectory() == true) { - FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { - @Override - public boolean accept(Path p) { - if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) { - return false; - } - return true; - } - }); - int k = 1; - for (int j = 0; j < cf.length; j++) { - if (cf[j].isDirectory() == true - && cf[j].getPath().getName().startsWith(".") == false) { - assertEquals(cf[j].getPath().getName(), "cf" + k); - k++; - } - } - } - } - - // TEST - Disable and delete the column family - admin.disableTable(TABLENAME); - admin.deleteColumnFamily(TABLENAME, Bytes.toBytes("cf2")); - - // 5 - Check if only 2 column families exist in the descriptor - HTableDescriptor afterhtd = admin.getTableDescriptor(TABLENAME); - assertEquals(2, afterhtd.getColumnFamilies().length); - HColumnDescriptor[] newFamilies = afterhtd.getColumnFamilies(); - assertTrue(newFamilies[0].getNameAsString().equals("cf1")); - assertTrue(newFamilies[1].getNameAsString().equals("cf3")); - - // 6 - Check if the second column family is gone from the FS - fileStatus = fs.listStatus(tableDir); - for (int i = 0; i < fileStatus.length; i++) { - if (fileStatus[i].isDirectory() == true) { - FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { - @Override - public boolean accept(Path p) { - if 
(WALSplitter.isSequenceIdFile(p)) { - return false; - } - return true; - } - }); - for (int j = 0; j < cf.length; j++) { - if (cf[j].isDirectory() == true) { - assertFalse(cf[j].getPath().getName().equals("cf2")); - } - } - } - } - } - - @Test - public void deleteColumnFamilyTwice() throws Exception { - - Admin admin = TEST_UTIL.getHBaseAdmin(); - HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME); - String cfToDelete = "cf1"; - - FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); - - // 1 - Check if table exists in descriptor - assertTrue(admin.isTableAvailable(TABLENAME)); - - // 2 - Check if all the target column family exist in descriptor - HColumnDescriptor[] families = beforehtd.getColumnFamilies(); - Boolean foundCF = false; - int i; - for (i = 0; i < families.length; i++) { - if (families[i].getNameAsString().equals(cfToDelete)) { - foundCF = true; - break; - } - } - assertTrue(foundCF); - - // 3 - Check if table exists in FS - Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME); - assertTrue(fs.exists(tableDir)); - - // 4 - Check if all the target column family exist in FS - FileStatus[] fileStatus = fs.listStatus(tableDir); - foundCF = false; - for (i = 0; i < fileStatus.length; i++) { - if (fileStatus[i].isDirectory() == true) { - FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { - @Override - public boolean accept(Path p) { - if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) { - return false; - } - return true; - } - }); - for (int j = 0; j < cf.length; j++) { - if (cf[j].isDirectory() == true && cf[j].getPath().getName().equals(cfToDelete)) { - foundCF = true; - break; - } - } - } - if (foundCF) { - break; - } - } - assertTrue(foundCF); - - // TEST - Disable and delete the column family - if (admin.isTableEnabled(TABLENAME)) { - admin.disableTable(TABLENAME); - } - admin.deleteColumnFamily(TABLENAME, Bytes.toBytes(cfToDelete)); - - // 5 - Check if the target column family is gone from the FS - fileStatus = fs.listStatus(tableDir); - for (i = 0; i < fileStatus.length; i++) { - if (fileStatus[i].isDirectory() == true) { - FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { - @Override - public boolean accept(Path p) { - if (WALSplitter.isSequenceIdFile(p)) { - return false; - } - return true; - } - }); - for (int j = 0; j < cf.length; j++) { - if (cf[j].isDirectory() == true) { - assertFalse(cf[j].getPath().getName().equals(cfToDelete)); - } - } - } - } - - try { - // Test: delete again - admin.deleteColumnFamily(TABLENAME, Bytes.toBytes(cfToDelete)); - Assert.fail("Delete a non-exist column family should fail"); - } catch (InvalidFamilyOperationException e) { - // Expected. - } - } - -} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java deleted file mode 100644 index 920d9b5..0000000 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java +++ /dev/null @@ -1,292 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master.handler; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.Set; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.InvalidFamilyOperationException; -import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.master.MasterFileSystem; -import org.apache.hadoop.hbase.testclassification.LargeTests; -import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSTableDescriptors; -import org.apache.hadoop.hbase.util.FSUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -/** - * Verify that the HTableDescriptor is updated after - * addColumn(), deleteColumn() and modifyTable() operations. - */ -@Category({MasterTests.class, LargeTests.class}) -public class TestTableDescriptorModification { - - @Rule public TestName name = new TestName(); - private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static TableName TABLE_NAME = null; - private static final byte[] FAMILY_0 = Bytes.toBytes("cf0"); - private static final byte[] FAMILY_1 = Bytes.toBytes("cf1"); - - /** - * Start up a mini cluster and put a small table of empty regions into it. 
- * - * @throws Exception - */ - @BeforeClass - public static void beforeAllTests() throws Exception { - TEST_UTIL.startMiniCluster(1); - } - - @Before - public void setup() { - TABLE_NAME = TableName.valueOf(name.getMethodName()); - - } - - @AfterClass - public static void afterAllTests() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testModifyTable() throws IOException { - Admin admin = TEST_UTIL.getHBaseAdmin(); - // Create a table with one family - HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); - baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); - admin.createTable(baseHtd); - admin.disableTable(TABLE_NAME); - try { - // Verify the table descriptor - verifyTableDescriptor(TABLE_NAME, FAMILY_0); - - // Modify the table adding another family and verify the descriptor - HTableDescriptor modifiedHtd = new HTableDescriptor(TABLE_NAME); - modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_0)); - modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_1)); - admin.modifyTable(TABLE_NAME, modifiedHtd); - verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); - } finally { - admin.deleteTable(TABLE_NAME); - } - } - - @Test - public void testAddColumn() throws IOException { - Admin admin = TEST_UTIL.getHBaseAdmin(); - // Create a table with two families - HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); - baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); - admin.createTable(baseHtd); - admin.disableTable(TABLE_NAME); - try { - // Verify the table descriptor - verifyTableDescriptor(TABLE_NAME, FAMILY_0); - - // Modify the table removing one family and verify the descriptor - admin.addColumnFamily(TABLE_NAME, new HColumnDescriptor(FAMILY_1)); - verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); - } finally { - admin.deleteTable(TABLE_NAME); - } - } - - @Test - public void testAddSameColumnFamilyTwice() throws IOException { - Admin admin = TEST_UTIL.getHBaseAdmin(); - // Create a table with one families - HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); - baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); - admin.createTable(baseHtd); - admin.disableTable(TABLE_NAME); - try { - // Verify the table descriptor - verifyTableDescriptor(TABLE_NAME, FAMILY_0); - - // Modify the table removing one family and verify the descriptor - admin.addColumnFamily(TABLE_NAME, new HColumnDescriptor(FAMILY_1)); - verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); - - try { - // Add same column family again - expect failure - admin.addColumnFamily(TABLE_NAME, new HColumnDescriptor(FAMILY_1)); - Assert.fail("Delete a non-exist column family should fail"); - } catch (InvalidFamilyOperationException e) { - // Expected. 
- } - - } finally { - admin.deleteTable(TABLE_NAME); - } - } - - @Test - public void testModifyColumnFamily() throws IOException { - Admin admin = TEST_UTIL.getHBaseAdmin(); - - HColumnDescriptor cfDescriptor = new HColumnDescriptor(FAMILY_0); - int blockSize = cfDescriptor.getBlocksize(); - // Create a table with one families - HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); - baseHtd.addFamily(cfDescriptor); - admin.createTable(baseHtd); - admin.disableTable(TABLE_NAME); - try { - // Verify the table descriptor - verifyTableDescriptor(TABLE_NAME, FAMILY_0); - - int newBlockSize = 2 * blockSize; - cfDescriptor.setBlocksize(newBlockSize); - - // Modify colymn family - admin.modifyColumnFamily(TABLE_NAME, cfDescriptor); - - HTableDescriptor htd = admin.getTableDescriptor(TABLE_NAME); - HColumnDescriptor hcfd = htd.getFamily(FAMILY_0); - assertTrue(hcfd.getBlocksize() == newBlockSize); - } finally { - admin.deleteTable(TABLE_NAME); - } - } - - @Test - public void testModifyNonExistingColumnFamily() throws IOException { - Admin admin = TEST_UTIL.getHBaseAdmin(); - - HColumnDescriptor cfDescriptor = new HColumnDescriptor(FAMILY_1); - int blockSize = cfDescriptor.getBlocksize(); - // Create a table with one families - HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); - baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); - admin.createTable(baseHtd); - admin.disableTable(TABLE_NAME); - try { - // Verify the table descriptor - verifyTableDescriptor(TABLE_NAME, FAMILY_0); - - int newBlockSize = 2 * blockSize; - cfDescriptor.setBlocksize(newBlockSize); - - // Modify a column family that is not in the table. - try { - admin.modifyColumnFamily(TABLE_NAME, cfDescriptor); - Assert.fail("Modify a non-exist column family should fail"); - } catch (InvalidFamilyOperationException e) { - // Expected. - } - - } finally { - admin.deleteTable(TABLE_NAME); - } - } - - @Test - public void testDeleteColumn() throws IOException { - Admin admin = TEST_UTIL.getHBaseAdmin(); - // Create a table with two families - HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); - baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); - baseHtd.addFamily(new HColumnDescriptor(FAMILY_1)); - admin.createTable(baseHtd); - admin.disableTable(TABLE_NAME); - try { - // Verify the table descriptor - verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); - - // Modify the table removing one family and verify the descriptor - admin.deleteColumnFamily(TABLE_NAME, FAMILY_1); - verifyTableDescriptor(TABLE_NAME, FAMILY_0); - } finally { - admin.deleteTable(TABLE_NAME); - } - } - - @Test - public void testDeleteSameColumnFamilyTwice() throws IOException { - Admin admin = TEST_UTIL.getHBaseAdmin(); - // Create a table with two families - HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); - baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); - baseHtd.addFamily(new HColumnDescriptor(FAMILY_1)); - admin.createTable(baseHtd); - admin.disableTable(TABLE_NAME); - try { - // Verify the table descriptor - verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); - - // Modify the table removing one family and verify the descriptor - admin.deleteColumnFamily(TABLE_NAME, FAMILY_1); - verifyTableDescriptor(TABLE_NAME, FAMILY_0); - - try { - // Delete again - expect failure - admin.deleteColumnFamily(TABLE_NAME, FAMILY_1); - Assert.fail("Delete a non-exist column family should fail"); - } catch (Exception e) { - // Expected. 
- } - } finally { - admin.deleteTable(TABLE_NAME); - } - } - - private void verifyTableDescriptor(final TableName tableName, - final byte[]... families) throws IOException { - Admin admin = TEST_UTIL.getHBaseAdmin(); - - // Verify descriptor from master - HTableDescriptor htd = admin.getTableDescriptor(tableName); - verifyTableDescriptor(htd, tableName, families); - - // Verify descriptor from HDFS - MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); - Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); - TableDescriptor td = - FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); - verifyTableDescriptor(td.getHTableDescriptor(), tableName, families); - } - - private void verifyTableDescriptor(final HTableDescriptor htd, - final TableName tableName, final byte[]... families) { - Set htdFamilies = htd.getFamiliesKeys(); - assertEquals(tableName, htd.getTableName()); - assertEquals(families.length, htdFamilies.size()); - for (byte[] familyName: families) { - assertTrue("Expected family " + Bytes.toString(familyName), htdFamilies.contains(familyName)); - } - } -} \ No newline at end of file diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java index 6ec2e95..f8bc6ab 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java @@ -28,8 +28,8 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.TableNamespaceManager; import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; @@ -104,7 +104,7 @@ public class TestSimpleRegionNormalizerOnCluster { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); HMaster m = cluster.getMaster(); - try (HTable ht = TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILYNAME, 5)) { + try (Table ht = TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILYNAME, 5)) { // Need to get sorted list of regions here List generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(TABLENAME); Collections.sort(generatedRegions, new Comparator() { @@ -182,7 +182,7 @@ public class TestSimpleRegionNormalizerOnCluster { HMaster m = cluster.getMaster(); // create 5 regions with sizes to trigger merge of small regions - try (HTable ht = TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILYNAME, 5)) { + try (Table ht = TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILYNAME, 5)) { // Need to get sorted list of regions here List generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(TABLENAME); Collections.sort(generatedRegions, new Comparator() { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java new file mode 100644 index 0000000..bc11e53 --- /dev/null +++ 
hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedureFromClient.java @@ -0,0 +1,266 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.wal.WALSplitter; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MasterTests.class, LargeTests.class}) +public class TestDeleteColumnFamilyProcedureFromClient { + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static final TableName TABLENAME = + TableName.valueOf("column_family_handlers"); + private static final byte[][] FAMILIES = new byte[][] { Bytes.toBytes("cf1"), + Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; + + /** + * Start up a mini cluster and put a small table of empty regions into it. + * + * @throws Exception + */ + @BeforeClass + public static void beforeAllTests() throws Exception { + TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true); + TEST_UTIL.startMiniCluster(2); + } + + @AfterClass + public static void afterAllTests() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void setup() throws IOException, InterruptedException { + // Create a table of three families. This will assign a region. 
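In the setup that continues below, the duplicated busy-wait from the removed TestTableDeleteFamilyHandler (poll RegionStates every 100 ms until nothing is in transition, shown twice in the deleted file above) is replaced by a single call to TEST_UTIL.waitUntilNoRegionsInTransition(). A minimal before/after sketch, using only the calls that appear in this patch:

    // Before (removed handler test): hand-rolled polling loop.
    while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
        .getRegionStates().getRegionsInTransition().size() > 0) {
      Thread.sleep(100);
    }
    // After (procedure-based test below): one blocking helper on HBaseTestingUtility.
    TEST_UTIL.waitUntilNoRegionsInTransition();
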
+ TEST_UTIL.createTable(TABLENAME, FAMILIES); + Table t = TEST_UTIL.getConnection().getTable(TABLENAME); + TEST_UTIL.waitUntilNoRegionsInTransition(); + + // Load the table with data for all families + TEST_UTIL.loadTable(t, FAMILIES); + + TEST_UTIL.flush(); + + t.close(); + + TEST_UTIL.ensureSomeRegionServersAvailable(2); + } + + @After + public void cleanup() throws Exception { + TEST_UTIL.deleteTable(TABLENAME); + } + + @Test + public void deleteColumnFamilyWithMultipleRegions() throws Exception { + Admin admin = TEST_UTIL.getHBaseAdmin(); + HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME); + + FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); + + // 1 - Check if table exists in descriptor + assertTrue(admin.isTableAvailable(TABLENAME)); + + // 2 - Check if all three families exist in descriptor + assertEquals(3, beforehtd.getColumnFamilies().length); + HColumnDescriptor[] families = beforehtd.getColumnFamilies(); + for (int i = 0; i < families.length; i++) { + assertTrue(families[i].getNameAsString().equals("cf" + (i + 1))); + } + + // 3 - Check if table exists in FS + Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME); + assertTrue(fs.exists(tableDir)); + + // 4 - Check if all the 3 column families exist in FS + FileStatus[] fileStatus = fs.listStatus(tableDir); + for (int i = 0; i < fileStatus.length; i++) { + if (fileStatus[i].isDirectory() == true) { + FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { + @Override + public boolean accept(Path p) { + if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) { + return false; + } + return true; + } + }); + int k = 1; + for (int j = 0; j < cf.length; j++) { + if (cf[j].isDirectory() == true + && cf[j].getPath().getName().startsWith(".") == false) { + assertEquals(cf[j].getPath().getName(), "cf" + k); + k++; + } + } + } + } + + // TEST - Disable and delete the column family + admin.disableTable(TABLENAME); + admin.deleteColumnFamily(TABLENAME, Bytes.toBytes("cf2")); + + // 5 - Check if only 2 column families exist in the descriptor + HTableDescriptor afterhtd = admin.getTableDescriptor(TABLENAME); + assertEquals(2, afterhtd.getColumnFamilies().length); + HColumnDescriptor[] newFamilies = afterhtd.getColumnFamilies(); + assertTrue(newFamilies[0].getNameAsString().equals("cf1")); + assertTrue(newFamilies[1].getNameAsString().equals("cf3")); + + // 6 - Check if the second column family is gone from the FS + fileStatus = fs.listStatus(tableDir); + for (int i = 0; i < fileStatus.length; i++) { + if (fileStatus[i].isDirectory() == true) { + FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { + @Override + public boolean accept(Path p) { + if (WALSplitter.isSequenceIdFile(p)) { + return false; + } + return true; + } + }); + for (int j = 0; j < cf.length; j++) { + if (cf[j].isDirectory() == true) { + assertFalse(cf[j].getPath().getName().equals("cf2")); + } + } + } + } + } + + @Test + public void deleteColumnFamilyTwice() throws Exception { + Admin admin = TEST_UTIL.getHBaseAdmin(); + HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME); + String cfToDelete = "cf1"; + + FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); + + // 1 - Check if table exists in descriptor + assertTrue(admin.isTableAvailable(TABLENAME)); + + // 2 - Check if all the target column family exist in descriptor + HColumnDescriptor[] families = beforehtd.getColumnFamilies(); + Boolean foundCF = false; + for (int i = 0; i < families.length; i++) { + if 
(families[i].getNameAsString().equals(cfToDelete)) { + foundCF = true; + break; + } + } + assertTrue(foundCF); + + // 3 - Check if table exists in FS + Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME); + assertTrue(fs.exists(tableDir)); + + // 4 - Check if all the target column family exist in FS + FileStatus[] fileStatus = fs.listStatus(tableDir); + foundCF = false; + for (int i = 0; i < fileStatus.length; i++) { + if (fileStatus[i].isDirectory() == true) { + FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { + @Override + public boolean accept(Path p) { + if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) { + return false; + } + return true; + } + }); + for (int j = 0; j < cf.length; j++) { + if (cf[j].isDirectory() == true && cf[j].getPath().getName().equals(cfToDelete)) { + foundCF = true; + break; + } + } + } + if (foundCF) { + break; + } + } + assertTrue(foundCF); + + // TEST - Disable and delete the column family + if (admin.isTableEnabled(TABLENAME)) { + admin.disableTable(TABLENAME); + } + admin.deleteColumnFamily(TABLENAME, Bytes.toBytes(cfToDelete)); + + // 5 - Check if the target column family is gone from the FS + fileStatus = fs.listStatus(tableDir); + for (int i = 0; i < fileStatus.length; i++) { + if (fileStatus[i].isDirectory() == true) { + FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { + @Override + public boolean accept(Path p) { + if (WALSplitter.isSequenceIdFile(p)) { + return false; + } + return true; + } + }); + for (int j = 0; j < cf.length; j++) { + if (cf[j].isDirectory() == true) { + assertFalse(cf[j].getPath().getName().equals(cfToDelete)); + } + } + } + } + + try { + // Test: delete again + admin.deleteColumnFamily(TABLENAME, Bytes.toBytes(cfToDelete)); + Assert.fail("Delete a non-exist column family should fail"); + } catch (InvalidFamilyOperationException e) { + // Expected. + } + } +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java index 6098d40..25a3b36 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TestMobSnapshotCloneIndependence; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; @@ -54,6 +55,7 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.ModifyRegionUtils; import org.junit.After; import org.junit.Before; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -63,8 +65,10 @@ import org.mockito.Mockito; @Category({MasterTests.class, LargeTests.class}) public class TestMasterFailoverWithProcedures { private static final Log LOG = LogFactory.getLog(TestMasterFailoverWithProcedures.class); - @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
- withLookingForStuckThread(true).build(); + + @ClassRule + public static final TestRule timeout = + CategoryBasedTimeout.forClass(TestMasterFailoverWithProcedures.class); protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java index 12042d8..9c37404 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.master.procedure; - import java.io.IOException; import java.util.ArrayList; import java.util.HashSet; @@ -29,12 +28,15 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.TableLockManager; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler.ProcedureEvent; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; import org.junit.After; import org.junit.Before; @@ -60,7 +62,8 @@ public class TestMasterProcedureScheduler { @After public void tearDown() throws IOException { - assertEquals(0, queue.size()); + assertEquals("proc-queue expected to be empty", 0, queue.size()); + queue.clear(); } @Test @@ -346,6 +349,201 @@ public class TestMasterProcedureScheduler { assertEquals(4, procId); } + @Test + public void testVerifyRegionLocks() throws Exception { + final TableName tableName = TableName.valueOf("testtb"); + final HRegionInfo regionA = new HRegionInfo(tableName, Bytes.toBytes("a"), Bytes.toBytes("b")); + final HRegionInfo regionB = new HRegionInfo(tableName, Bytes.toBytes("b"), Bytes.toBytes("c")); + final HRegionInfo regionC = new HRegionInfo(tableName, Bytes.toBytes("c"), Bytes.toBytes("d")); + + queue.addBack(new TestTableProcedure(1, tableName, + TableProcedureInterface.TableOperationType.EDIT)); + queue.addBack(new TestRegionProcedure(2, tableName, + TableProcedureInterface.TableOperationType.MERGE, regionA, regionB)); + queue.addBack(new TestRegionProcedure(3, tableName, + TableProcedureInterface.TableOperationType.SPLIT, regionA)); + queue.addBack(new TestRegionProcedure(4, tableName, + TableProcedureInterface.TableOperationType.SPLIT, regionB)); + queue.addBack(new TestRegionProcedure(5, tableName, + TableProcedureInterface.TableOperationType.UNASSIGN, regionC)); + + // Fetch the 1st item and take the write lock + Procedure proc = queue.poll(); + assertEquals(1, proc.getProcId()); + assertEquals(true, queue.tryAcquireTableExclusiveLock(proc, tableName)); + + // everything is locked by the table operation + assertEquals(null, queue.poll(0)); + + // release the table lock + queue.releaseTableExclusiveLock(proc, tableName); + + // Fetch the 2nd item and the the lock on regionA and regionB + Procedure mergeProc = queue.poll(); + assertEquals(2, mergeProc.getProcId()); + assertEquals(true, queue.waitRegions(mergeProc, tableName, 
regionA, regionB)); + + // Fetch the 3rd item and the try to lock region A which will fail + // because already locked. this procedure will go in waiting. + // (this stuff will be explicit until we get rid of the zk-lock) + Procedure procA = queue.poll(); + assertEquals(3, procA.getProcId()); + assertEquals(false, queue.waitRegions(procA, tableName, regionA)); + + // Fetch the 4th item, same story as the 3rd + Procedure procB = queue.poll(); + assertEquals(4, procB.getProcId()); + assertEquals(false, queue.waitRegions(procB, tableName, regionB)); + + // Fetch the 5th item, since it is a non-locked region we are able to execute it + Procedure procC = queue.poll(); + assertEquals(5, procC.getProcId()); + assertEquals(true, queue.waitRegions(procC, tableName, regionC)); + + // 3rd and 4th are in the region suspended queue + assertEquals(null, queue.poll(0)); + + // Release region A-B from merge operation (procId=2) + queue.wakeRegions(mergeProc, tableName, regionA, regionB); + + // Fetch the 3rd item, now the lock on the region is available + procA = queue.poll(); + assertEquals(3, procA.getProcId()); + assertEquals(true, queue.waitRegions(procA, tableName, regionA)); + + // Fetch the 4th item, now the lock on the region is available + procB = queue.poll(); + assertEquals(4, procB.getProcId()); + assertEquals(true, queue.waitRegions(procB, tableName, regionB)); + + // release the locks on the regions + queue.wakeRegions(procA, tableName, regionA); + queue.wakeRegions(procB, tableName, regionB); + queue.wakeRegions(procC, tableName, regionC); + } + + @Test + public void testVerifySubProcRegionLocks() throws Exception { + final TableName tableName = TableName.valueOf("testVerifySubProcRegionLocks"); + final HRegionInfo regionA = new HRegionInfo(tableName, Bytes.toBytes("a"), Bytes.toBytes("b")); + final HRegionInfo regionB = new HRegionInfo(tableName, Bytes.toBytes("b"), Bytes.toBytes("c")); + final HRegionInfo regionC = new HRegionInfo(tableName, Bytes.toBytes("c"), Bytes.toBytes("d")); + + queue.addBack(new TestTableProcedure(1, tableName, + TableProcedureInterface.TableOperationType.ENABLE)); + + // Fetch the 1st item from the queue, "the root procedure" and take the table lock + Procedure rootProc = queue.poll(); + assertEquals(1, rootProc.getProcId()); + assertEquals(true, queue.tryAcquireTableExclusiveLock(rootProc, tableName)); + assertEquals(null, queue.poll(0)); + + // Execute the 1st step of the root-proc. + // we should get 3 sub-proc back, one for each region. + // (this step is done by the executor/rootProc, we are simulating it) + Procedure[] subProcs = new Procedure[] { + new TestRegionProcedure(1, 2, tableName, + TableProcedureInterface.TableOperationType.ASSIGN, regionA), + new TestRegionProcedure(1, 3, tableName, + TableProcedureInterface.TableOperationType.ASSIGN, regionB), + new TestRegionProcedure(1, 4, tableName, + TableProcedureInterface.TableOperationType.ASSIGN, regionC), + }; + + // at this point the rootProc is going in a waiting state + // and the sub-procedures will be added in the queue. 
+ // (this step is done by the executor, we are simulating it) + for (int i = subProcs.length - 1; i >= 0; --i) { + queue.addFront(subProcs[i]); + } + assertEquals(subProcs.length, queue.size()); + + // we should be able to fetch and execute all the sub-procs, + // since they are operating on different regions + for (int i = 0; i < subProcs.length; ++i) { + TestRegionProcedure regionProc = (TestRegionProcedure)queue.poll(0); + assertEquals(subProcs[i].getProcId(), regionProc.getProcId()); + assertEquals(true, queue.waitRegions(regionProc, tableName, regionProc.getRegionInfo())); + } + + // nothing else in the queue + assertEquals(null, queue.poll(0)); + + // release all the region locks + for (int i = 0; i < subProcs.length; ++i) { + TestRegionProcedure regionProc = (TestRegionProcedure)subProcs[i]; + queue.wakeRegions(regionProc, tableName, regionProc.getRegionInfo()); + } + + // nothing else in the queue + assertEquals(null, queue.poll(0)); + + // release the table lock (for the root procedure) + queue.releaseTableExclusiveLock(rootProc, tableName); + } + + @Test + public void testSuspendedTableQueue() throws Exception { + final TableName tableName = TableName.valueOf("testSuspendedQueue"); + + queue.addBack(new TestTableProcedure(1, tableName, + TableProcedureInterface.TableOperationType.EDIT)); + queue.addBack(new TestTableProcedure(2, tableName, + TableProcedureInterface.TableOperationType.EDIT)); + + Procedure proc = queue.poll(); + assertEquals(1, proc.getProcId()); + assertTrue(queue.tryAcquireTableExclusiveLock(proc, tableName)); + + // Suspend + // TODO: If we want to keep the zk-lock we need to retain the lock on suspend + ProcedureEvent event = new ProcedureEvent("testSuspendedTableQueueEvent"); + queue.waitEvent(event, proc, true); + queue.releaseTableExclusiveLock(proc, tableName); + assertEquals(null, queue.poll(0)); + + // Resume + queue.wake(event); + + proc = queue.poll(); + assertTrue(queue.tryAcquireTableExclusiveLock(proc, tableName)); + assertEquals(1, proc.getProcId()); + queue.releaseTableExclusiveLock(proc, tableName); + + proc = queue.poll(); + assertTrue(queue.tryAcquireTableExclusiveLock(proc, tableName)); + assertEquals(2, proc.getProcId()); + queue.releaseTableExclusiveLock(proc, tableName); + } + + @Test + public void testSuspendedProcedure() throws Exception { + final TableName tableName = TableName.valueOf("testSuspendedProcedure"); + + queue.addBack(new TestTableProcedure(1, tableName, + TableProcedureInterface.TableOperationType.READ)); + queue.addBack(new TestTableProcedure(2, tableName, + TableProcedureInterface.TableOperationType.READ)); + + Procedure proc = queue.poll(); + assertEquals(1, proc.getProcId()); + + // suspend + ProcedureEvent event = new ProcedureEvent("testSuspendedProcedureEvent"); + queue.waitEvent(event, proc); + + proc = queue.poll(); + assertEquals(2, proc.getProcId()); + assertEquals(null, queue.poll(0)); + + // resume + queue.wake(event); + + proc = queue.poll(); + assertEquals(1, proc.getProcId()); + assertEquals(null, queue.poll(0)); + } + /** * Verify that "write" operations for a single table are serialized, * but different tables can be executed in parallel. 
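The testSuspendedTableQueue and testSuspendedProcedure cases above pin down the new ProcedureEvent contract in MasterProcedureScheduler: a procedure that cannot make progress is parked on an event with waitEvent(...) and is not handed out by poll() again until wake(...) fires. A condensed sketch of that contract, using only scheduler calls shown in the tests above; the queue fixture, tableName and TestTableProcedure are assumed from the surrounding test class:

    queue.addBack(new TestTableProcedure(1, tableName,
        TableProcedureInterface.TableOperationType.READ));
    Procedure proc = queue.poll();                 // procedure 1 is runnable
    ProcedureEvent event = new ProcedureEvent("sketch");
    queue.waitEvent(event, proc);                  // park it on the event
    assertEquals(null, queue.poll(0));             // nothing runnable while suspended
    queue.wake(event);                             // event fires, procedure re-queued
    assertEquals(1, queue.poll().getProcId());     // and it is runnable again
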
@@ -522,6 +720,32 @@ public class TestMasterProcedureScheduler { } } + public static class TestRegionProcedure extends TestTableProcedure { + private final HRegionInfo[] regionInfo; + + public TestRegionProcedure() { + throw new UnsupportedOperationException("recovery should not be triggered here"); + } + + public TestRegionProcedure(long procId, TableName tableName, TableOperationType opType, + HRegionInfo... regionInfo) { + this(-1, procId, tableName, opType, regionInfo); + } + + public TestRegionProcedure(long parentProcId, long procId, TableName tableName, + TableOperationType opType, HRegionInfo... regionInfo) { + super(procId, tableName, opType); + this.regionInfo = regionInfo; + if (parentProcId > 0) { + setParentProcId(parentProcId); + } + } + + public HRegionInfo[] getRegionInfo() { + return regionInfo; + } + } + public static class TestNamespaceProcedure extends TestProcedure implements TableProcedureInterface { private final TableOperationType opType; diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java new file mode 100644 index 0000000..129ef4f --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDescriptorModificationFromClient.java @@ -0,0 +1,292 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Set; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.FSUtils; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +/** + * Verify that the HTableDescriptor is updated after + * addColumn(), deleteColumn() and modifyTable() operations. + */ +@Category({MasterTests.class, LargeTests.class}) +public class TestTableDescriptorModificationFromClient { + + @Rule public TestName name = new TestName(); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static TableName TABLE_NAME = null; + private static final byte[] FAMILY_0 = Bytes.toBytes("cf0"); + private static final byte[] FAMILY_1 = Bytes.toBytes("cf1"); + + /** + * Start up a mini cluster and put a small table of empty regions into it. 
+ * + * @throws Exception + */ + @BeforeClass + public static void beforeAllTests() throws Exception { + TEST_UTIL.startMiniCluster(1); + } + + @Before + public void setup() { + TABLE_NAME = TableName.valueOf(name.getMethodName()); + + } + + @AfterClass + public static void afterAllTests() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testModifyTable() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with one family + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + // Modify the table adding another family and verify the descriptor + HTableDescriptor modifiedHtd = new HTableDescriptor(TABLE_NAME); + modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_1)); + admin.modifyTable(TABLE_NAME, modifiedHtd); + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testAddColumn() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with two families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + // Modify the table removing one family and verify the descriptor + admin.addColumnFamily(TABLE_NAME, new HColumnDescriptor(FAMILY_1)); + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testAddSameColumnFamilyTwice() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with one families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + // Modify the table removing one family and verify the descriptor + admin.addColumnFamily(TABLE_NAME, new HColumnDescriptor(FAMILY_1)); + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + + try { + // Add same column family again - expect failure + admin.addColumnFamily(TABLE_NAME, new HColumnDescriptor(FAMILY_1)); + Assert.fail("Delete a non-exist column family should fail"); + } catch (InvalidFamilyOperationException e) { + // Expected. 
+ } + + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testModifyColumnFamily() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + + HColumnDescriptor cfDescriptor = new HColumnDescriptor(FAMILY_0); + int blockSize = cfDescriptor.getBlocksize(); + // Create a table with one families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(cfDescriptor); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + int newBlockSize = 2 * blockSize; + cfDescriptor.setBlocksize(newBlockSize); + + // Modify colymn family + admin.modifyColumnFamily(TABLE_NAME, cfDescriptor); + + HTableDescriptor htd = admin.getTableDescriptor(TABLE_NAME); + HColumnDescriptor hcfd = htd.getFamily(FAMILY_0); + assertTrue(hcfd.getBlocksize() == newBlockSize); + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testModifyNonExistingColumnFamily() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + + HColumnDescriptor cfDescriptor = new HColumnDescriptor(FAMILY_1); + int blockSize = cfDescriptor.getBlocksize(); + // Create a table with one families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + int newBlockSize = 2 * blockSize; + cfDescriptor.setBlocksize(newBlockSize); + + // Modify a column family that is not in the table. + try { + admin.modifyColumnFamily(TABLE_NAME, cfDescriptor); + Assert.fail("Modify a non-exist column family should fail"); + } catch (InvalidFamilyOperationException e) { + // Expected. + } + + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testDeleteColumn() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with two families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_1)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + + // Modify the table removing one family and verify the descriptor + admin.deleteColumnFamily(TABLE_NAME, FAMILY_1); + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testDeleteSameColumnFamilyTwice() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with two families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_1)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + + // Modify the table removing one family and verify the descriptor + admin.deleteColumnFamily(TABLE_NAME, FAMILY_1); + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + try { + // Delete again - expect failure + admin.deleteColumnFamily(TABLE_NAME, FAMILY_1); + Assert.fail("Delete a non-exist column family should fail"); + } catch (Exception e) { + // Expected. 
+ } + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + private void verifyTableDescriptor(final TableName tableName, + final byte[]... families) throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + + // Verify descriptor from master + HTableDescriptor htd = admin.getTableDescriptor(tableName); + verifyTableDescriptor(htd, tableName, families); + + // Verify descriptor from HDFS + MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); + Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); + TableDescriptor td = + FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); + verifyTableDescriptor(td.getHTableDescriptor(), tableName, families); + } + + private void verifyTableDescriptor(final HTableDescriptor htd, + final TableName tableName, final byte[]... families) { + Set htdFamilies = htd.getFamiliesKeys(); + assertEquals(tableName, htd.getTableName()); + assertEquals(families.length, htdFamilies.size()); + for (byte[] familyName: families) { + assertTrue("Expected family " + Bytes.toString(familyName), htdFamilies.contains(familyName)); + } + } +} \ No newline at end of file diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java index 94a2ed4..bea6cff 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java @@ -49,9 +49,6 @@ public class TestDefaultMobStoreFlusher { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0); - TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); - TEST_UTIL.startMiniCluster(1); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java index 267201f..e74b3de 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java @@ -53,9 +53,6 @@ public class TestExpiredMobFileCleaner { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0); - TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java index a376c96..1f8bcb2 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java @@ -55,9 +55,6 @@ public class TestMobDataBlockEncoding { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0); - TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); - TEST_UTIL.startMiniCluster(1); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java index 
c0ad2dd..863a855 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java @@ -113,8 +113,6 @@ public class TestMobCompactor { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0); - TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); TEST_UTIL.getConfiguration() .setLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, 5000); TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java index 0e0bdbe..7970d62 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java @@ -78,8 +78,6 @@ public class TestPartitionedMobCompactor { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0); - TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); TEST_UTIL.startMiniCluster(1); pool = createThreadPool(); @@ -396,4 +394,4 @@ public class TestPartitionedMobCompactor { conf.setInt(MobConstants.MOB_COMPACTION_BATCH_SIZE, MobConstants.DEFAULT_MOB_COMPACTION_BATCH_SIZE); } -} \ No newline at end of file +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java index 3023849..666e193 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java @@ -47,8 +47,6 @@ public class TestMobSweepJob { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0); - TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); TEST_UTIL.getConfiguration().set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, JavaSerialization.class.getName() + "," + WritableSerialization.class.getName()); TEST_UTIL.startMiniCluster(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepMapper.java hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepMapper.java index 9e95a39..5ae02e4 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepMapper.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepMapper.java @@ -54,8 +54,6 @@ public class TestMobSweepMapper { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0); - TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); TEST_UTIL.startMiniCluster(1); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java index 
8c24123..8cef4d9 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java @@ -77,9 +77,6 @@ public class TestMobSweepReducer { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0); - TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); TEST_UTIL.startMiniCluster(1); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index 9499366..aba72dd 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -24,6 +24,7 @@ import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.Locale; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; @@ -142,7 +143,7 @@ public class DataBlockEncodingTool { String s = super.toString(); StringBuilder sb = new StringBuilder(); sb.append(s.charAt(0)); - sb.append(s.substring(1).toLowerCase()); + sb.append(s.substring(1).toLowerCase(Locale.ROOT)); return sb.toString(); } } @@ -372,7 +373,7 @@ public class DataBlockEncodingTool { private void benchmarkDefaultCompression(int totalSize, byte[] rawBuffer) throws IOException { benchmarkAlgorithm(compressionAlgorithm, - compressionAlgorithmName.toUpperCase(), rawBuffer, 0, totalSize); + compressionAlgorithmName.toUpperCase(Locale.ROOT), rawBuffer, 0, totalSize); } /** @@ -526,7 +527,7 @@ public class DataBlockEncodingTool { * @throws IOException */ public void displayStatistics() throws IOException { - final String comprAlgo = compressionAlgorithmName.toUpperCase(); + final String comprAlgo = compressionAlgorithmName.toUpperCase(Locale.ROOT); long rawBytes = totalKeyLength + totalPrefixLength + totalValueLength; System.out.println("Raw data size:"); @@ -695,7 +696,7 @@ public class DataBlockEncodingTool { String compressionName = DEFAULT_COMPRESSION.getName(); if (cmd.hasOption(OPT_ENCODING_ALGORITHM)) { compressionName = - cmd.getOptionValue(OPT_ENCODING_ALGORITHM).toLowerCase(); + cmd.getOptionValue(OPT_ENCODING_ALGORITHM).toLowerCase(Locale.ROOT); } boolean doBenchmark = cmd.hasOption(OPT_MEASURE_THROUGHPUT); boolean doVerify = !cmd.hasOption(OPT_OMIT_CORRECTNESS_TEST); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java new file mode 100644 index 0000000..5c0e42b --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -0,0 +1,729 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.KeepDeletedCells; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueTestUtil; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdge; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Threads; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * compacted memstore test case + */ +@Category({RegionServerTests.class, MediumTests.class}) +public class TestCompactingMemStore extends TestDefaultMemStore { + + private static final Log LOG = LogFactory.getLog(TestCompactingMemStore.class); + private static MemStoreChunkPool chunkPool; + private HRegion region; + private RegionServicesForStores regionServicesForStores; + private HStore store; + + ////////////////////////////////////////////////////////////////////////////// + // Helpers + ////////////////////////////////////////////////////////////////////////////// + private static byte[] makeQualifier(final int i1, final int i2) { + return Bytes.toBytes(Integer.toString(i1) + ";" + + Integer.toString(i2)); + } + + @After + public void tearDown() throws Exception { + chunkPool.clearChunks(); + } + + @Before + public void setUp() throws Exception { + super.internalSetUp(); + Configuration conf = new Configuration(); + conf.setBoolean(SegmentFactory.USEMSLAB_KEY, true); + conf.setFloat(MemStoreChunkPool.CHUNK_POOL_MAXSIZE_KEY, 0.2f); + conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, 1000); + HBaseTestingUtility hbaseUtility = HBaseTestingUtility.createLocalHTU(conf); + HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); + this.region = hbaseUtility.createTestRegion("foobar", hcd); + this.regionServicesForStores = region.getRegionServicesForStores(); + this.store = new HStore(region, hcd, conf); + this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR, + store, regionServicesForStores); + chunkPool = MemStoreChunkPool.getPool(conf); + assertTrue(chunkPool != null); + } + + + /** + * A simple test which verifies the 3 possible states when scanning across 
snapshot. + * + * @throws IOException + * @throws CloneNotSupportedException + */ + @Test + public void testScanAcrossSnapshot2() throws IOException, CloneNotSupportedException { + // we are going to the scanning across snapshot with two kvs + // kv1 should always be returned before kv2 + final byte[] one = Bytes.toBytes(1); + final byte[] two = Bytes.toBytes(2); + final byte[] f = Bytes.toBytes("f"); + final byte[] q = Bytes.toBytes("q"); + final byte[] v = Bytes.toBytes(3); + + final KeyValue kv1 = new KeyValue(one, f, q, 10, v); + final KeyValue kv2 = new KeyValue(two, f, q, 10, v); + + // use case 1: both kvs in kvset + this.memstore.add(kv1.clone()); + this.memstore.add(kv2.clone()); + verifyScanAcrossSnapshot2(kv1, kv2); + + // use case 2: both kvs in snapshot + this.memstore.snapshot(); + verifyScanAcrossSnapshot2(kv1, kv2); + + // use case 3: first in snapshot second in kvset + this.memstore = new CompactingMemStore(HBaseConfiguration.create(), + CellComparator.COMPARATOR, store, regionServicesForStores); + this.memstore.add(kv1.clone()); + // As compaction is starting in the background the repetition + // of the k1 might be removed BUT the scanners created earlier + // should look on the OLD MutableCellSetSegment, so this should be OK... + this.memstore.snapshot(); + this.memstore.add(kv2.clone()); + verifyScanAcrossSnapshot2(kv1,kv2); + } + + /** + * Test memstore snapshots + * @throws IOException + */ + @Test + public void testSnapshotting() throws IOException { + final int snapshotCount = 5; + // Add some rows, run a snapshot. Do it a few times. + for (int i = 0; i < snapshotCount; i++) { + addRows(this.memstore); + runSnapshot(this.memstore, true); + assertEquals("History not being cleared", 0, this.memstore.getSnapshot().getCellsCount()); + } + } + + + ////////////////////////////////////////////////////////////////////////////// + // Get tests + ////////////////////////////////////////////////////////////////////////////// + + /** Test getNextRow from memstore + * @throws InterruptedException + */ + @Test + public void testGetNextRow() throws Exception { + addRows(this.memstore); + // Add more versions to make it a little more interesting. 
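+ // Sleeping 1 ms ensures the second batch of rows is written with strictly later timestamps.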
+ Thread.sleep(1); + addRows(this.memstore); + Cell closestToEmpty = ((CompactingMemStore)this.memstore).getNextRow(KeyValue.LOWESTKEY); + assertTrue(KeyValue.COMPARATOR.compareRows(closestToEmpty, + new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0); + for (int i = 0; i < ROW_COUNT; i++) { + Cell nr = ((CompactingMemStore)this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i), + System.currentTimeMillis())); + if (i + 1 == ROW_COUNT) { + assertEquals(nr, null); + } else { + assertTrue(KeyValue.COMPARATOR.compareRows(nr, + new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0); + } + } + // starting from each row, validate that the results contain the starting row + Configuration conf = HBaseConfiguration.create(); + for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) { + ScanInfo scanInfo = new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE, + KeepDeletedCells.FALSE, 0, this.memstore.getComparator()); + ScanType scanType = ScanType.USER_SCAN; + InternalScanner scanner = new StoreScanner(new Scan( + Bytes.toBytes(startRowId)), scanInfo, scanType, null, + memstore.getScanners(0)); + List<Cell> results = new ArrayList<Cell>(); + for (int i = 0; scanner.next(results); i++) { + int rowId = startRowId + i; + Cell left = results.get(0); + byte[] row1 = Bytes.toBytes(rowId); + assertTrue("Row name", + CellComparator.COMPARATOR.compareRows(left, row1, 0, row1.length) == 0); + assertEquals("Count of columns", QUALIFIER_COUNT, results.size()); + List<Cell> row = new ArrayList<Cell>(); + for (Cell kv : results) { + row.add(kv); + } + isExpectedRowWithoutTimestamps(rowId, row); + // Clear out set. Otherwise row results accumulate. + results.clear(); + } + } + } + + @Test + public void testGet_memstoreAndSnapShot() throws IOException { + byte[] row = Bytes.toBytes("testrow"); + byte[] fam = Bytes.toBytes("testfamily"); + byte[] qf1 = Bytes.toBytes("testqualifier1"); + byte[] qf2 = Bytes.toBytes("testqualifier2"); + byte[] qf3 = Bytes.toBytes("testqualifier3"); + byte[] qf4 = Bytes.toBytes("testqualifier4"); + byte[] qf5 = Bytes.toBytes("testqualifier5"); + byte[] val = Bytes.toBytes("testval"); + + //Setting up memstore + memstore.add(new KeyValue(row, fam, qf1, val)); + memstore.add(new KeyValue(row, fam, qf2, val)); + memstore.add(new KeyValue(row, fam, qf3, val)); + //Pushing to pipeline + ((CompactingMemStore)memstore).flushInMemory(); + assertEquals(0, memstore.getSnapshot().getCellsCount()); + //Creating a snapshot + memstore.snapshot(); + assertEquals(3, memstore.getSnapshot().getCellsCount()); + //Adding value to "new" memstore + assertEquals(0, memstore.getActive().getCellsCount()); + memstore.add(new KeyValue(row, fam, qf4, val)); + memstore.add(new KeyValue(row, fam, qf5, val)); + assertEquals(2, memstore.getActive().getCellsCount()); + } + + + //////////////////////////////////// + //Test for upsert with MSLAB + //////////////////////////////////// + + /** + * Test a pathological pattern that shows why we can't currently + * use the MSLAB for upsert workloads. This test inserts data + * in the following pattern: + * + * - row0001 through row1000 (fills up one 2M Chunk) + * - row0002 through row1001 (fills up another 2M chunk, leaves one reference + * to the first chunk) + * - row0003 through row1002 (another chunk, another dangling reference) + * + * This causes OOME pretty quickly if we use MSLAB for upsert + * since each 2M chunk is held onto by a single reference.
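+ * Note: the test only reports heap usage before and after the upserts; it makes no assertion about it.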
+ */ + @Test + public void testUpsertMSLAB() throws Exception { + + int ROW_SIZE = 2048; + byte[] qualifier = new byte[ROW_SIZE - 4]; + + MemoryMXBean bean = ManagementFactory.getMemoryMXBean(); + for (int i = 0; i < 3; i++) { System.gc(); } + long usageBefore = bean.getHeapMemoryUsage().getUsed(); + + long size = 0; + long ts=0; + + for (int newValue = 0; newValue < 1000; newValue++) { + for (int row = newValue; row < newValue + 1000; row++) { + byte[] rowBytes = Bytes.toBytes(row); + size += memstore.updateColumnValue(rowBytes, FAMILY, qualifier, newValue, ++ts); + } + } + System.out.println("Wrote " + ts + " vals"); + for (int i = 0; i < 3; i++) { System.gc(); } + long usageAfter = bean.getHeapMemoryUsage().getUsed(); + System.out.println("Memory used: " + (usageAfter - usageBefore) + + " (heapsize: " + memstore.heapSize() + + " size: " + size + ")"); + } + + //////////////////////////////////// + // Test for periodic memstore flushes + // based on time of oldest edit + //////////////////////////////////// + + /** + * Add keyvalues with a fixed memstoreTs, and checks that memstore size is decreased + * as older keyvalues are deleted from the memstore. + * + * @throws Exception + */ + @Test + public void testUpsertMemstoreSize() throws Exception { + long oldSize = memstore.size(); + + List l = new ArrayList(); + KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v"); + KeyValue kv2 = KeyValueTestUtil.create("r", "f", "q", 101, "v"); + KeyValue kv3 = KeyValueTestUtil.create("r", "f", "q", 102, "v"); + + kv1.setSequenceId(1); + kv2.setSequenceId(1); + kv3.setSequenceId(1); + l.add(kv1); + l.add(kv2); + l.add(kv3); + + this.memstore.upsert(l, 2);// readpoint is 2 + long newSize = this.memstore.size(); + assert (newSize > oldSize); + //The kv1 should be removed. + assert (memstore.getActive().getCellsCount() == 2); + + KeyValue kv4 = KeyValueTestUtil.create("r", "f", "q", 104, "v"); + kv4.setSequenceId(1); + l.clear(); + l.add(kv4); + this.memstore.upsert(l, 3); + assertEquals(newSize, this.memstore.size()); + //The kv2 should be removed. + assert (memstore.getActive().getCellsCount() == 2); + //this.memstore = null; + } + + /** + * Tests that the timeOfOldestEdit is updated correctly for the + * various edit operations in memstore. 
+ * @throws Exception + */ + @Test + public void testUpdateToTimeOfOldestEdit() throws Exception { + try { + EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest(); + EnvironmentEdgeManager.injectEdge(edge); + long t = memstore.timeOfOldestEdit(); + assertEquals(t, Long.MAX_VALUE); + + // test the case that the timeOfOldestEdit is updated after a KV add + memstore.add(KeyValueTestUtil.create("r", "f", "q", 100, "v")); + t = memstore.timeOfOldestEdit(); + assertTrue(t == 1234); + // The method will also assert + // the value is reset to Long.MAX_VALUE + t = runSnapshot(memstore, true); + + // test the case that the timeOfOldestEdit is updated after a KV delete + memstore.delete(KeyValueTestUtil.create("r", "f", "q", 100, "v")); + t = memstore.timeOfOldestEdit(); + assertTrue(t == 1234); + t = runSnapshot(memstore, true); + + // test the case that the timeOfOldestEdit is updated after a KV upsert + List l = new ArrayList(); + KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v"); + kv1.setSequenceId(100); + l.add(kv1); + memstore.upsert(l, 1000); + t = memstore.timeOfOldestEdit(); + assertTrue(t == 1234); + } finally { + EnvironmentEdgeManager.reset(); + } + } + + private long runSnapshot(final AbstractMemStore hmc, boolean useForce) + throws IOException { + // Save off old state. + long oldHistorySize = hmc.getSnapshot().getSize(); + long prevTimeStamp = hmc.timeOfOldestEdit(); + + hmc.snapshot(); + MemStoreSnapshot snapshot = hmc.snapshot(); + if (useForce) { + // Make some assertions about what just happened. + assertTrue("History size has not increased", oldHistorySize < snapshot.getSize()); + long t = hmc.timeOfOldestEdit(); + assertTrue("Time of oldest edit is not Long.MAX_VALUE", t == Long.MAX_VALUE); + hmc.clearSnapshot(snapshot.getId()); + } else { + long t = hmc.timeOfOldestEdit(); + assertTrue("Time of oldest edit didn't remain the same", t == prevTimeStamp); + } + return prevTimeStamp; + } + + private void isExpectedRowWithoutTimestamps(final int rowIndex, + List kvs) { + int i = 0; + for (Cell kv : kvs) { + byte[] expectedColname = makeQualifier(rowIndex, i++); + assertTrue("Column name", CellUtil.matchingQualifier(kv, expectedColname)); + // Value is column name as bytes. Usually result is + // 100 bytes in size at least. This is the default size + // for BytesWriteable. For comparison, convert bytes to + // String and trim to remove trailing null bytes. 
+ assertTrue("Content", CellUtil.matchingValue(kv, expectedColname)); + } + } + + @Test + public void testPuttingBackChunksAfterFlushing() throws IOException { + byte[] row = Bytes.toBytes("testrow"); + byte[] fam = Bytes.toBytes("testfamily"); + byte[] qf1 = Bytes.toBytes("testqualifier1"); + byte[] qf2 = Bytes.toBytes("testqualifier2"); + byte[] qf3 = Bytes.toBytes("testqualifier3"); + byte[] qf4 = Bytes.toBytes("testqualifier4"); + byte[] qf5 = Bytes.toBytes("testqualifier5"); + byte[] val = Bytes.toBytes("testval"); + + // Setting up memstore + memstore.add(new KeyValue(row, fam, qf1, val)); + memstore.add(new KeyValue(row, fam, qf2, val)); + memstore.add(new KeyValue(row, fam, qf3, val)); + + // Creating a snapshot + MemStoreSnapshot snapshot = memstore.snapshot(); + assertEquals(3, memstore.getSnapshot().getCellsCount()); + + // Adding value to "new" memstore + assertEquals(0, memstore.getActive().getCellsCount()); + memstore.add(new KeyValue(row, fam, qf4, val)); + memstore.add(new KeyValue(row, fam, qf5, val)); + assertEquals(2, memstore.getActive().getCellsCount()); + memstore.clearSnapshot(snapshot.getId()); + + int chunkCount = chunkPool.getPoolSize(); + assertTrue(chunkCount > 0); + + } + + @Test + public void testPuttingBackChunksWithOpeningScanner() + throws IOException { + byte[] row = Bytes.toBytes("testrow"); + byte[] fam = Bytes.toBytes("testfamily"); + byte[] qf1 = Bytes.toBytes("testqualifier1"); + byte[] qf2 = Bytes.toBytes("testqualifier2"); + byte[] qf3 = Bytes.toBytes("testqualifier3"); + byte[] qf4 = Bytes.toBytes("testqualifier4"); + byte[] qf5 = Bytes.toBytes("testqualifier5"); + byte[] qf6 = Bytes.toBytes("testqualifier6"); + byte[] qf7 = Bytes.toBytes("testqualifier7"); + byte[] val = Bytes.toBytes("testval"); + + // Setting up memstore + memstore.add(new KeyValue(row, fam, qf1, val)); + memstore.add(new KeyValue(row, fam, qf2, val)); + memstore.add(new KeyValue(row, fam, qf3, val)); + + // Creating a snapshot + MemStoreSnapshot snapshot = memstore.snapshot(); + assertEquals(3, memstore.getSnapshot().getCellsCount()); + + // Adding value to "new" memstore + assertEquals(0, memstore.getActive().getCellsCount()); + memstore.add(new KeyValue(row, fam, qf4, val)); + memstore.add(new KeyValue(row, fam, qf5, val)); + assertEquals(2, memstore.getActive().getCellsCount()); + + // opening scanner before clear the snapshot + List scanners = memstore.getScanners(0); + // Shouldn't putting back the chunks to pool,since some scanners are opening + // based on their data + memstore.clearSnapshot(snapshot.getId()); + + assertTrue(chunkPool.getPoolSize() == 0); + + // Chunks will be put back to pool after close scanners; + for (KeyValueScanner scanner : scanners) { + scanner.close(); + } + assertTrue(chunkPool.getPoolSize() > 0); + + // clear chunks + chunkPool.clearChunks(); + + // Creating another snapshot + + snapshot = memstore.snapshot(); + // Adding more value + memstore.add(new KeyValue(row, fam, qf6, val)); + memstore.add(new KeyValue(row, fam, qf7, val)); + // opening scanners + scanners = memstore.getScanners(0); + // close scanners before clear the snapshot + for (KeyValueScanner scanner : scanners) { + scanner.close(); + } + // Since no opening scanner, the chunks of snapshot should be put back to + // pool + memstore.clearSnapshot(snapshot.getId()); + assertTrue(chunkPool.getPoolSize() > 0); + } + + @Test + public void testPuttingBackChunksWithOpeningPipelineScanner() + throws IOException { + byte[] row = Bytes.toBytes("testrow"); + byte[] fam = 
Bytes.toBytes("testfamily"); + byte[] qf1 = Bytes.toBytes("testqualifier1"); + byte[] qf2 = Bytes.toBytes("testqualifier2"); + byte[] qf3 = Bytes.toBytes("testqualifier3"); + byte[] val = Bytes.toBytes("testval"); + + // Setting up memstore + memstore.add(new KeyValue(row, fam, qf1, 1, val)); + memstore.add(new KeyValue(row, fam, qf2, 1, val)); + memstore.add(new KeyValue(row, fam, qf3, 1, val)); + + // Creating a pipeline + ((CompactingMemStore)memstore).disableCompaction(); + ((CompactingMemStore)memstore).flushInMemory(); + + // Adding value to "new" memstore + assertEquals(0, memstore.getActive().getCellsCount()); + memstore.add(new KeyValue(row, fam, qf1, 2, val)); + memstore.add(new KeyValue(row, fam, qf2, 2, val)); + assertEquals(2, memstore.getActive().getCellsCount()); + + // pipeline bucket 2 + ((CompactingMemStore)memstore).flushInMemory(); + // opening scanner before force flushing + List scanners = memstore.getScanners(0); + // Shouldn't putting back the chunks to pool,since some scanners are opening + // based on their data + ((CompactingMemStore)memstore).enableCompaction(); + // trigger compaction + ((CompactingMemStore)memstore).flushInMemory(); + + // Adding value to "new" memstore + assertEquals(0, memstore.getActive().getCellsCount()); + memstore.add(new KeyValue(row, fam, qf3, 3, val)); + memstore.add(new KeyValue(row, fam, qf2, 3, val)); + memstore.add(new KeyValue(row, fam, qf1, 3, val)); + assertEquals(3, memstore.getActive().getCellsCount()); + + while (((CompactingMemStore)memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + } + + assertTrue(chunkPool.getPoolSize() == 0); + + // Chunks will be put back to pool after close scanners; + for (KeyValueScanner scanner : scanners) { + scanner.close(); + } + assertTrue(chunkPool.getPoolSize() > 0); + + // clear chunks + chunkPool.clearChunks(); + + // Creating another snapshot + + MemStoreSnapshot snapshot = memstore.snapshot(); + memstore.clearSnapshot(snapshot.getId()); + + snapshot = memstore.snapshot(); + // Adding more value + memstore.add(new KeyValue(row, fam, qf2, 4, val)); + memstore.add(new KeyValue(row, fam, qf3, 4, val)); + // opening scanners + scanners = memstore.getScanners(0); + // close scanners before clear the snapshot + for (KeyValueScanner scanner : scanners) { + scanner.close(); + } + // Since no opening scanner, the chunks of snapshot should be put back to + // pool + memstore.clearSnapshot(snapshot.getId()); + assertTrue(chunkPool.getPoolSize() > 0); + } + + ////////////////////////////////////////////////////////////////////////////// + // Compaction tests + ////////////////////////////////////////////////////////////////////////////// + @Test + public void testCompaction1Bucket() throws IOException { + + String[] keys1 = { "A", "A", "B", "C" }; //A1, A2, B3, C4 + + // test 1 bucket + addRowsByKeys(memstore, keys1); + assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize()); + + long size = memstore.getFlushableSize(); + ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore)memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(528, regionServicesForStores.getGlobalMemstoreTotalSize()); + + size = memstore.getFlushableSize(); + MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot + region.addAndGetGlobalMemstoreSize(-size); // simulate flusher + ImmutableSegment s = memstore.getSnapshot(); + 
assertEquals(3, s.getCellsCount()); + assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize()); + + memstore.clearSnapshot(snapshot.getId()); + } + + @Test + public void testCompaction2Buckets() throws IOException { + + String[] keys1 = { "A", "A", "B", "C" }; + String[] keys2 = { "A", "B", "D" }; + + addRowsByKeys(memstore, keys1); + assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize()); + + long size = memstore.getFlushableSize(); + ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore)memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(1000); + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(528, regionServicesForStores.getGlobalMemstoreTotalSize()); + + addRowsByKeys(memstore, keys2); + assertEquals(1056, regionServicesForStores.getGlobalMemstoreTotalSize()); + + size = memstore.getFlushableSize(); + ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore)memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize()); + + size = memstore.getFlushableSize(); + MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot + region.addAndGetGlobalMemstoreSize(-size); // simulate flusher + ImmutableSegment s = memstore.getSnapshot(); + assertEquals(4, s.getCellsCount()); + assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize()); + + memstore.clearSnapshot(snapshot.getId()); + } + + @Test + public void testCompaction3Buckets() throws IOException { + + String[] keys1 = { "A", "A", "B", "C" }; + String[] keys2 = { "A", "B", "D" }; + String[] keys3 = { "D", "B", "B" }; + + addRowsByKeys(memstore, keys1); + assertEquals(704, region.getMemstoreSize()); + + long size = memstore.getFlushableSize(); + ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact + + String tstStr = "\n\nFlushable size after first flush in memory:" + size + + ". Is MemmStore in compaction?:" + ((CompactingMemStore)memstore).isMemStoreFlushingInMemory(); + while (((CompactingMemStore)memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(528, regionServicesForStores.getGlobalMemstoreTotalSize()); + + addRowsByKeys(memstore, keys2); + + tstStr += " After adding second part of the keys. 
Memstore size: " + + region.getMemstoreSize() + ", Memstore Total Size: " + + regionServicesForStores.getGlobalMemstoreTotalSize() + "\n\n"; + + assertEquals(1056, regionServicesForStores.getGlobalMemstoreTotalSize()); + + ((CompactingMemStore)memstore).disableCompaction(); + size = memstore.getFlushableSize(); + ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline without compaction + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(1056, regionServicesForStores.getGlobalMemstoreTotalSize()); + + addRowsByKeys(memstore, keys3); + assertEquals(1584, regionServicesForStores.getGlobalMemstoreTotalSize()); + + ((CompactingMemStore)memstore).enableCompaction(); + size = memstore.getFlushableSize(); + ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact + while (((CompactingMemStore)memstore).isMemStoreFlushingInMemory()) { + Threads.sleep(10); + } + assertEquals(0, memstore.getSnapshot().getCellsCount()); + assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize()); + + size = memstore.getFlushableSize(); + MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot + region.addAndGetGlobalMemstoreSize(-size); // simulate flusher + ImmutableSegment s = memstore.getSnapshot(); + assertEquals(4, s.getCellsCount()); + assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize()); + + memstore.clearSnapshot(snapshot.getId()); + + //assertTrue(tstStr, false); + } + + private void addRowsByKeys(final AbstractMemStore hmc, String[] keys) { + byte[] fam = Bytes.toBytes("testfamily"); + byte[] qf = Bytes.toBytes("testqualifier"); + for (int i = 0; i < keys.length; i++) { + long timestamp = System.currentTimeMillis(); + Threads.sleep(1); // to make sure each kv gets a different ts + byte[] row = Bytes.toBytes(keys[i]); + byte[] val = Bytes.toBytes(keys[i] + i); + KeyValue kv = new KeyValue(row, fam, qf, timestamp, val); + hmc.add(kv); + LOG.debug("added kv: " + kv.getKeyString() + ", timestamp" + kv.getTimestamp()); + long size = AbstractMemStore.heapSizeChange(kv, true); + regionServicesForStores.addAndGetGlobalMemstoreSize(size); + } + } + + private class EnvironmentEdgeForMemstoreTest implements EnvironmentEdge { + long t = 1234; + + @Override + public long currentTime() { + return t; + } + public void setCurrentTimeMillis(long t) { + this.t = t; + } + } + +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java index 54dbe9b..68b0ba3 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCorruptedRegionStoreFile.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -92,13 +91,11 @@ public class TestCorruptedRegionStoreFile { table.put(put); if ((rowCount++ % ROW_PER_FILE) == 0) { - // flush it - ((HTable)table).flushCommits(); - UTIL.getHBaseAdmin().flush(tableName); + UTIL.getAdmin().flush(tableName); } } } finally { - UTIL.getHBaseAdmin().flush(tableName); + UTIL.getAdmin().flush(tableName); 
table.close(); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 5e6007d..0c4029d 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -21,11 +21,11 @@ package org.apache.hadoop.hbase.regionserver; import com.google.common.base.Joiner; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; @@ -48,7 +48,17 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WALFactory; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertNotNull; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.junit.rules.TestRule; import java.io.IOException; import java.lang.management.ManagementFactory; @@ -61,27 +71,38 @@ import java.util.concurrent.atomic.AtomicReference; /** memstore test case */ @Category({RegionServerTests.class, MediumTests.class}) -public class TestDefaultMemStore extends TestCase { +public class TestDefaultMemStore { private static final Log LOG = LogFactory.getLog(TestDefaultMemStore.class); - private DefaultMemStore memstore; - private static final int ROW_COUNT = 10; - private static final int QUALIFIER_COUNT = ROW_COUNT; - private static final byte [] FAMILY = Bytes.toBytes("column"); - private MultiVersionConcurrencyControl mvcc; - private AtomicLong startSeqNum = new AtomicLong(0); - - @Override + @Rule public TestName name = new TestName(); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
+ withLookingForStuckThread(true).build(); + protected AbstractMemStore memstore; + protected static final int ROW_COUNT = 10; + protected static final int QUALIFIER_COUNT = ROW_COUNT; + protected static final byte[] FAMILY = Bytes.toBytes("column"); + protected MultiVersionConcurrencyControl mvcc; + protected AtomicLong startSeqNum = new AtomicLong(0); + + private String getName() { + return this.name.getMethodName(); + } + + @Before public void setUp() throws Exception { - super.setUp(); - this.mvcc = new MultiVersionConcurrencyControl(); + internalSetUp(); this.memstore = new DefaultMemStore(); } + protected void internalSetUp() throws Exception { + this.mvcc = new MultiVersionConcurrencyControl(); + } + + @Test public void testPutSameKey() { - byte [] bytes = Bytes.toBytes(getName()); + byte[] bytes = Bytes.toBytes(getName()); KeyValue kv = new KeyValue(bytes, bytes, bytes, bytes); this.memstore.add(kv); - byte [] other = Bytes.toBytes("somethingelse"); + byte[] other = Bytes.toBytes("somethingelse"); KeyValue samekey = new KeyValue(bytes, bytes, bytes, other); this.memstore.add(samekey); Cell found = this.memstore.getActive().first(); @@ -93,6 +114,7 @@ public class TestDefaultMemStore extends TestCase { * Test memstore snapshot happening while scanning. * @throws IOException */ + @Test public void testScanAcrossSnapshot() throws IOException { int rowCount = addRows(this.memstore); List memstorescanners = this.memstore.getScanners(0); @@ -180,6 +202,7 @@ public class TestDefaultMemStore extends TestCase { * @throws IOException * @throws CloneNotSupportedException */ + @Test public void testScanAcrossSnapshot2() throws IOException, CloneNotSupportedException { // we are going to the scanning across snapshot with two kvs // kv1 should always be returned before kv2 @@ -209,7 +232,7 @@ public class TestDefaultMemStore extends TestCase { verifyScanAcrossSnapshot2(kv1, kv2); } - private void verifyScanAcrossSnapshot2(KeyValue kv1, KeyValue kv2) + protected void verifyScanAcrossSnapshot2(KeyValue kv1, KeyValue kv2) throws IOException { List memstorescanners = this.memstore.getScanners(mvcc.getReadPoint()); assertEquals(1, memstorescanners.size()); @@ -220,7 +243,7 @@ public class TestDefaultMemStore extends TestCase { assertNull(scanner.next()); } - private void assertScannerResults(KeyValueScanner scanner, KeyValue[] expected) + protected void assertScannerResults(KeyValueScanner scanner, KeyValue[] expected) throws IOException { scanner.seek(KeyValueUtil.createFirstOnRow(new byte[]{})); List returned = Lists.newArrayList(); @@ -238,6 +261,7 @@ public class TestDefaultMemStore extends TestCase { assertNull(scanner.peek()); } + @Test public void testMemstoreConcurrentControl() throws IOException { final byte[] row = Bytes.toBytes(1); final byte[] f = Bytes.toBytes("family"); @@ -280,6 +304,7 @@ public class TestDefaultMemStore extends TestCase { * the same timestamp, we still need to provide consistent reads * for the same scanner. */ + @Test public void testMemstoreEditsVisibilityWithSameKey() throws IOException { final byte[] row = Bytes.toBytes(1); final byte[] f = Bytes.toBytes("family"); @@ -334,6 +359,7 @@ public class TestDefaultMemStore extends TestCase { * the same timestamp, we still need to provide consistent reads * for the same scanner. 
*/ + @Test public void testMemstoreDeletesVisibilityWithSameKey() throws IOException { final byte[] row = Bytes.toBytes(1); final byte[] f = Bytes.toBytes("family"); @@ -438,6 +464,7 @@ public class TestDefaultMemStore extends TestCase { } } + @Test public void testReadOwnWritesUnderConcurrency() throws Throwable { int NUM_THREADS = 8; @@ -463,6 +490,7 @@ public class TestDefaultMemStore extends TestCase { * Test memstore snapshots * @throws IOException */ + @Test public void testSnapshotting() throws IOException { final int snapshotCount = 5; // Add some rows, run a snapshot. Do it a few times. @@ -473,6 +501,7 @@ public class TestDefaultMemStore extends TestCase { } } + @Test public void testMultipleVersionsSimple() throws Exception { DefaultMemStore m = new DefaultMemStore(new Configuration(), CellComparator.COMPARATOR); byte [] row = Bytes.toBytes("testRow"); @@ -500,53 +529,56 @@ public class TestDefaultMemStore extends TestCase { /** Test getNextRow from memstore * @throws InterruptedException */ + @Test public void testGetNextRow() throws Exception { addRows(this.memstore); // Add more versions to make it a little more interesting. Thread.sleep(1); addRows(this.memstore); - Cell closestToEmpty = this.memstore.getNextRow(KeyValue.LOWESTKEY); + Cell closestToEmpty = ((DefaultMemStore) this.memstore).getNextRow(KeyValue.LOWESTKEY); assertTrue(CellComparator.COMPARATOR.compareRows(closestToEmpty, - new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0); + new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0); for (int i = 0; i < ROW_COUNT; i++) { - Cell nr = this.memstore.getNextRow(new KeyValue(Bytes.toBytes(i), - System.currentTimeMillis())); + Cell nr = ((DefaultMemStore) this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i), + System.currentTimeMillis())); if (i + 1 == ROW_COUNT) { assertEquals(nr, null); } else { assertTrue(CellComparator.COMPARATOR.compareRows(nr, - new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0); + new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0); } } //starting from each row, validate results should contain the starting row Configuration conf = HBaseConfiguration.create(); for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) { ScanInfo scanInfo = new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE, - KeepDeletedCells.FALSE, 0, this.memstore.getComparator()); + KeepDeletedCells.FALSE, 0, this.memstore.getComparator()); ScanType scanType = ScanType.USER_SCAN; - InternalScanner scanner = new StoreScanner(new Scan( + try (InternalScanner scanner = new StoreScanner(new Scan( Bytes.toBytes(startRowId)), scanInfo, scanType, null, - memstore.getScanners(0)); - List results = new ArrayList(); - for (int i = 0; scanner.next(results); i++) { - int rowId = startRowId + i; - Cell left = results.get(0); - byte[] row1 = Bytes.toBytes(rowId); - assertTrue( - "Row name", - CellComparator.COMPARATOR.compareRows(left, row1, 0, row1.length) == 0); - assertEquals("Count of columns", QUALIFIER_COUNT, results.size()); - List row = new ArrayList(); - for (Cell kv : results) { - row.add(kv); + memstore.getScanners(0))) { + List results = new ArrayList(); + for (int i = 0; scanner.next(results); i++) { + int rowId = startRowId + i; + Cell left = results.get(0); + byte[] row1 = Bytes.toBytes(rowId); + assertTrue( + "Row name", + CellComparator.COMPARATOR.compareRows(left, row1, 0, row1.length) == 0); + assertEquals("Count of columns", QUALIFIER_COUNT, results.size()); + List row = new ArrayList(); + for (Cell kv 
: results) { + row.add(kv); + } + isExpectedRowWithoutTimestamps(rowId, row); + // Clear out set. Otherwise row results accumulate. + results.clear(); } - isExpectedRowWithoutTimestamps(rowId, row); - // Clear out set. Otherwise row results accumulate. - results.clear(); } } } + @Test public void testGet_memstoreAndSnapShot() throws IOException { byte [] row = Bytes.toBytes("testrow"); byte [] fam = Bytes.toBytes("testfamily"); @@ -558,9 +590,9 @@ public class TestDefaultMemStore extends TestCase { byte [] val = Bytes.toBytes("testval"); //Setting up memstore - memstore.add(new KeyValue(row, fam ,qf1, val)); - memstore.add(new KeyValue(row, fam ,qf2, val)); - memstore.add(new KeyValue(row, fam ,qf3, val)); + memstore.add(new KeyValue(row, fam, qf1, val)); + memstore.add(new KeyValue(row, fam, qf2, val)); + memstore.add(new KeyValue(row, fam, qf3, val)); //Creating a snapshot memstore.snapshot(); assertEquals(3, memstore.getSnapshot().getCellsCount()); @@ -574,6 +606,7 @@ public class TestDefaultMemStore extends TestCase { ////////////////////////////////////////////////////////////////////////////// // Delete tests ////////////////////////////////////////////////////////////////////////////// + @Test public void testGetWithDelete() throws IOException { byte [] row = Bytes.toBytes("testrow"); byte [] fam = Bytes.toBytes("testfamily"); @@ -584,7 +617,7 @@ public class TestDefaultMemStore extends TestCase { KeyValue put1 = new KeyValue(row, fam, qf1, ts1, val); long ts2 = ts1 + 1; KeyValue put2 = new KeyValue(row, fam, qf1, ts2, val); - long ts3 = ts2 +1; + long ts3 = ts2 + 1; KeyValue put3 = new KeyValue(row, fam, qf1, ts3, val); memstore.add(put1); memstore.add(put2); @@ -608,6 +641,7 @@ public class TestDefaultMemStore extends TestCase { } } + @Test public void testGetWithDeleteColumn() throws IOException { byte [] row = Bytes.toBytes("testrow"); byte [] fam = Bytes.toBytes("testfamily"); @@ -618,7 +652,7 @@ public class TestDefaultMemStore extends TestCase { KeyValue put1 = new KeyValue(row, fam, qf1, ts1, val); long ts2 = ts1 + 1; KeyValue put2 = new KeyValue(row, fam, qf1, ts2, val); - long ts3 = ts2 +1; + long ts3 = ts2 + 1; KeyValue put3 = new KeyValue(row, fam, qf1, ts3, val); memstore.add(put1); memstore.add(put2); @@ -636,15 +670,14 @@ public class TestDefaultMemStore extends TestCase { expected.add(put2); expected.add(put1); - assertEquals(4, memstore.getActive().getCellsCount()); int i = 0; - for (Cell cell: memstore.getActive().getCellSet()) { + for (Cell cell : memstore.getActive().getCellSet()) { assertEquals(expected.get(i++), cell); } } - + @Test public void testGetWithDeleteFamily() throws IOException { byte [] row = Bytes.toBytes("testrow"); byte [] fam = Bytes.toBytes("testfamily"); @@ -675,15 +708,14 @@ public class TestDefaultMemStore extends TestCase { expected.add(put4); expected.add(put3); - - assertEquals(5, memstore.getActive().getCellsCount()); int i = 0; - for (Cell cell: memstore.getActive().getCellSet()) { + for (Cell cell : memstore.getActive().getCellSet()) { assertEquals(expected.get(i++), cell); } } + @Test public void testKeepDeleteInmemstore() { byte [] row = Bytes.toBytes("testrow"); byte [] fam = Bytes.toBytes("testfamily"); @@ -697,6 +729,7 @@ public class TestDefaultMemStore extends TestCase { assertEquals(delete, memstore.getActive().first()); } + @Test public void testRetainsDeleteVersion() throws IOException { // add a put to memstore memstore.add(KeyValueTestUtil.create("row1", "fam", "a", 100, "dont-care")); @@ -709,6 +742,8 @@ public class 
TestDefaultMemStore extends TestCase { assertEquals(2, memstore.getActive().getCellsCount()); assertEquals(delete, memstore.getActive().first()); } + + @Test public void testRetainsDeleteColumn() throws IOException { // add a put to memstore memstore.add(KeyValueTestUtil.create("row1", "fam", "a", 100, "dont-care")); @@ -721,6 +756,8 @@ public class TestDefaultMemStore extends TestCase { assertEquals(2, memstore.getActive().getCellsCount()); assertEquals(delete, memstore.getActive().first()); } + + @Test public void testRetainsDeleteFamily() throws IOException { // add a put to memstore memstore.add(KeyValueTestUtil.create("row1", "fam", "a", 100, "dont-care")); @@ -751,6 +788,7 @@ public class TestDefaultMemStore extends TestCase { * This causes OOME pretty quickly if we use MSLAB for upsert * since each 2M chunk is held onto by a single reference. */ + @Test public void testUpsertMSLAB() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.setBoolean(SegmentFactory.USEMSLAB_KEY, true); @@ -793,6 +831,7 @@ public class TestDefaultMemStore extends TestCase { * as older keyvalues are deleted from the memstore. * @throws Exception */ + @Test public void testUpsertMemstoreSize() throws Exception { Configuration conf = HBaseConfiguration.create(); memstore = new DefaultMemStore(conf, CellComparator.COMPARATOR); @@ -832,6 +871,7 @@ public class TestDefaultMemStore extends TestCase { * various edit operations in memstore. * @throws Exception */ + @Test public void testUpdateToTimeOfOldestEdit() throws Exception { try { EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest(); @@ -874,6 +914,7 @@ public class TestDefaultMemStore extends TestCase { * false. * @throws Exception */ + @Test public void testShouldFlush() throws Exception { Configuration conf = new Configuration(); conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, 1000); @@ -883,7 +924,7 @@ public class TestDefaultMemStore extends TestCase { checkShouldFlush(conf, false); } - private void checkShouldFlush(Configuration conf, boolean expected) throws Exception { + protected void checkShouldFlush(Configuration conf, boolean expected) throws Exception { try { EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest(); EnvironmentEdgeManager.injectEdge(edge); @@ -898,7 +939,7 @@ public class TestDefaultMemStore extends TestCase { s.add(KeyValueTestUtil.create("r", "f", "q", 100, "v")); edge.setCurrentTimeMillis(1234 + 100); StringBuffer sb = new StringBuffer(); - assertTrue(region.shouldFlush(sb) == false); + assertTrue(!region.shouldFlush(sb)); edge.setCurrentTimeMillis(1234 + 10000); assertTrue(region.shouldFlush(sb) == expected); } finally { @@ -906,6 +947,7 @@ public class TestDefaultMemStore extends TestCase { } } + @Test public void testShouldFlushMeta() throws Exception { // write an edit in the META and ensure the shouldFlush (that the periodic memstore // flusher invokes) returns true after SYSTEM_CACHE_FLUSH_INTERVAL (even though @@ -954,7 +996,7 @@ public class TestDefaultMemStore extends TestCase { * @return How many rows we added. * @throws IOException */ - private int addRows(final MemStore hmc) { + protected int addRows(final AbstractMemStore hmc) { return addRows(hmc, HConstants.LATEST_TIMESTAMP); } @@ -964,10 +1006,10 @@ public class TestDefaultMemStore extends TestCase { * @return How many rows we added. 
* @throws IOException */ - private int addRows(final MemStore hmc, final long ts) { + protected int addRows(final MemStore hmc, final long ts) { for (int i = 0; i < ROW_COUNT; i++) { - long timestamp = ts == HConstants.LATEST_TIMESTAMP? - System.currentTimeMillis(): ts; + long timestamp = ts == HConstants.LATEST_TIMESTAMP ? + System.currentTimeMillis() : ts; for (int ii = 0; ii < QUALIFIER_COUNT; ii++) { byte [] row = Bytes.toBytes(i); byte [] qf = makeQualifier(i, ii); @@ -977,7 +1019,7 @@ public class TestDefaultMemStore extends TestCase { return ROW_COUNT; } - private long runSnapshot(final DefaultMemStore hmc) throws UnexpectedStateException { + private long runSnapshot(final AbstractMemStore hmc) throws UnexpectedStateException { // Save off old state. int oldHistorySize = hmc.getSnapshot().getCellsCount(); MemStoreSnapshot snapshot = hmc.snapshot(); @@ -993,7 +1035,7 @@ public class TestDefaultMemStore extends TestCase { private void isExpectedRowWithoutTimestamps(final int rowIndex, List kvs) { int i = 0; - for (Cell kv: kvs) { + for (Cell kv : kvs) { byte[] expectedColname = makeQualifier(rowIndex, i++); assertTrue("Column name", CellUtil.matchingQualifier(kv, expectedColname)); // Value is column name as bytes. Usually result is @@ -1023,7 +1065,6 @@ public class TestDefaultMemStore extends TestCase { } } - static void doScan(MemStore ms, int iteration) throws IOException { long nanos = System.nanoTime(); KeyValueScanner s = ms.getScanners(0).get(0); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java index 779fdca..fe2cb44 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java @@ -50,8 +50,6 @@ public class TestDeleteMobTable { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0); - TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); TEST_UTIL.startMiniCluster(1); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java index 82be1db..cee64e0 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java @@ -80,8 +80,6 @@ public class TestEncryptionKeyRotation { conf.setInt("hfile.format.version", 3); conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"); - // Enable online schema updates - conf.setBoolean("hbase.online.schema.update.enable", true); // Start the minicluster TEST_UTIL.startMiniCluster(1); @@ -229,7 +227,7 @@ public class TestEncryptionKeyRotation { } } } - + private static List findStorefilePaths(TableName tableName) throws Exception { List paths = new ArrayList(); for (Region region: diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 8cc04f7..93074e1 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ 
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -18,35 +18,10 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.apache.hadoop.hbase.HBaseTestingUtility.COLUMNS; -import static org.apache.hadoop.hbase.HBaseTestingUtility.FIRST_CHAR; -import static org.apache.hadoop.hbase.HBaseTestingUtility.LAST_CHAR; -import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY; -import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1; -import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2; -import static org.apache.hadoop.hbase.HBaseTestingUtility.fam3; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.protobuf.ByteString; - import java.io.IOException; import java.io.InterruptedIOException; import java.security.PrivilegedExceptionAction; @@ -79,6 +54,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ArrayBackedTag; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; @@ -114,6 +90,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TestMobSnapshotCloneIndependence; import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.filter.ColumnCountGetFilter; @@ -126,6 +103,7 @@ import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.master.procedure.TestMasterFailoverWithProcedures; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; @@ -170,16 +148,43 @@ import org.apache.hadoop.hbase.wal.WALSplitter; import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatcher; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import static org.apache.hadoop.hbase.HBaseTestingUtility.COLUMNS; +import 
static org.apache.hadoop.hbase.HBaseTestingUtility.FIRST_CHAR; +import static org.apache.hadoop.hbase.HBaseTestingUtility.LAST_CHAR; +import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY; +import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1; +import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2; +import static org.apache.hadoop.hbase.HBaseTestingUtility.fam3; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + /** * Basic stand-alone testing of HRegion. No clusters! * @@ -193,13 +198,16 @@ public class TestHRegion { // over in TestHRegionOnCluster. private static final Log LOG = LogFactory.getLog(TestHRegion.class); @Rule public TestName name = new TestName(); + @ClassRule + public static final TestRule timeout = + CategoryBasedTimeout.forClass(TestHRegion.class); private static final String COLUMN_FAMILY = "MyCF"; private static final byte [] COLUMN_FAMILY_BYTES = Bytes.toBytes(COLUMN_FAMILY); HRegion region = null; // Do not run unit tests in parallel (? Why not? It don't work? Why not? St.Ack) - private static HBaseTestingUtility TEST_UTIL; + protected static HBaseTestingUtility TEST_UTIL; public static Configuration CONF ; private String dir; private static FileSystem FILESYSTEM; @@ -244,7 +252,7 @@ public class TestHRegion { * Test that I can use the max flushed sequence id after the close. * @throws IOException */ - @Test (timeout = 100000) + @Test public void testSequenceId() throws IOException { HRegion region = initHRegion(tableName, name.getMethodName(), CONF, COLUMN_FAMILY_BYTES); assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId()); @@ -279,7 +287,7 @@ public class TestHRegion { * flushes for region close." 
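// A minimal illustrative sketch (not from the patch): the hunks around here replace per-method
// @Test(timeout=...) values with a single class-level rule; CategoryBasedTimeout is meant to
// derive the limit from the test's size category. The same shape using only stock JUnit, with
// the 180-second value chosen arbitrarily for the example:
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.TestRule;
import org.junit.rules.Timeout;

public class ClassLevelTimeoutSketch {
  // A single class-scoped rule bounds the whole run of this class,
  // so the per-method timeouts can be dropped.
  @ClassRule
  public static final TestRule timeout = Timeout.seconds(180);

  @Test
  public void someTest() {
    // No per-method timeout needed here.
  }
}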
* @throws IOException */ - @Test (timeout=60000) + @Test public void testCloseCarryingSnapshot() throws IOException { HRegion region = initHRegion(tableName, name.getMethodName(), CONF, COLUMN_FAMILY_BYTES); Store store = region.getStore(COLUMN_FAMILY_BYTES); @@ -305,7 +313,7 @@ public class TestHRegion { * This test is for verifying memstore snapshot size is correctly updated in case of rollback * See HBASE-10845 */ - @Test (timeout=60000) + @Test public void testMemstoreSnapshotSize() throws IOException { class MyFaultyFSLog extends FaultyFSLog { StoreFlushContext storeFlushCtx; @@ -413,6 +421,44 @@ public class TestHRegion { HBaseTestingUtility.closeRegionAndWAL(region); } + @Test + public void testMemstoreSizeAccountingWithFailedPostBatchMutate() throws IOException { + String testName = "testMemstoreSizeAccountingWithFailedPostBatchMutate"; + FileSystem fs = FileSystem.get(CONF); + Path rootDir = new Path(dir + testName); + FSHLog hLog = new FSHLog(fs, rootDir, testName, CONF); + HRegion region = initHRegion(tableName, null, null, name.getMethodName(), + CONF, false, Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES); + Store store = region.getStore(COLUMN_FAMILY_BYTES); + assertEquals(0, region.getMemstoreSize()); + + // Put one value + byte [] value = Bytes.toBytes(name.getMethodName()); + Put put = new Put(value); + put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value); + region.put(put); + long onePutSize = region.getMemstoreSize(); + assertTrue(onePutSize > 0); + + RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class); + doThrow(new IOException()) + .when(mockedCPHost).postBatchMutate(Mockito.>any()); + region.setCoprocessorHost(mockedCPHost); + + put = new Put(value); + put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("dfg"), value); + try { + region.put(put); + fail("Should have failed with IOException"); + } catch (IOException expected) { + } + assertEquals("memstoreSize should be incremented", onePutSize * 2, region.getMemstoreSize()); + assertEquals("flushable size should be incremented", onePutSize * 2, store.getFlushableSize()); + + region.setCoprocessorHost(null); + HBaseTestingUtility.closeRegionAndWAL(region); + } + /** * Test we do not lose data if we fail a flush and then close. * Part of HBase-10466. Tests the following from the issue description: @@ -428,7 +474,7 @@ public class TestHRegion { * if memstoreSize is not larger than 0." * @throws Exception */ - @Test (timeout=60000) + @Test public void testFlushSizeAccounting() throws Exception { final Configuration conf = HBaseConfiguration.create(CONF); final String callingMethod = name.getMethodName(); @@ -493,7 +539,7 @@ public class TestHRegion { FileSystem.closeAllForUGI(user.getUGI()); } - @Test (timeout=60000) + @Test public void testCloseWithFailingFlush() throws Exception { final Configuration conf = HBaseConfiguration.create(CONF); final String callingMethod = name.getMethodName(); @@ -1101,7 +1147,7 @@ public class TestHRegion { } } - @Test (timeout=60000) + @Test public void testFlushMarkersWALFail() throws Exception { // test the cases where the WAL append for flush markers fail. String method = name.getMethodName(); @@ -2449,7 +2495,7 @@ public class TestHRegion { // extract the key values out the memstore: // This is kinda hacky, but better than nothing... 
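// A minimal illustrative sketch (not from the patch): the new
// testMemstoreSizeAccountingWithFailedPostBatchMutate above stubs a mocked coprocessor host so
// that its post-batch hook throws, then checks that the size accounting survived. The same
// Mockito doThrow().when() pattern on a hypothetical Hook/Caller pair, reduced to plain JUnit:
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;

import java.io.IOException;
import org.junit.Test;

public class DoThrowSketch {
  interface Hook {
    void postBatch() throws IOException;
  }

  static class Caller {
    private final Hook hook;
    private int size = 0;
    Caller(Hook hook) { this.hook = hook; }
    void put() throws IOException {
      size++;          // bookkeeping happens before the hook, so it survives the failure
      hook.postBatch();
    }
    int size() { return size; }
  }

  @Test
  public void sizeIsKeptWhenHookFails() throws IOException {
    Hook hook = mock(Hook.class);
    // doThrow().when() is the stubbing form for void methods.
    doThrow(new IOException()).when(hook).postBatch();
    Caller caller = new Caller(hook);
    try {
      caller.put();
      fail("Should have failed with IOException");
    } catch (IOException expected) {
    }
    assertEquals(1, caller.size());
  }
}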
long now = System.currentTimeMillis(); - DefaultMemStore memstore = (DefaultMemStore) ((HStore) region.getStore(fam1)).memstore; + AbstractMemStore memstore = (AbstractMemStore)((HStore) region.getStore(fam1)).memstore; Cell firstCell = memstore.getActive().first(); assertTrue(firstCell.getTimestamp() <= now); now = firstCell.getTimestamp(); @@ -5145,7 +5191,7 @@ public class TestHRegion { * @return A region on which you must call * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. */ - private static HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf, + protected HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf, byte[]... families) throws IOException { return initHRegion(tableName, null, null, callingMethod, conf, false, families); } @@ -5154,12 +5200,12 @@ public class TestHRegion { * @return A region on which you must call * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. */ - private static HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf, + protected HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families) throws IOException { return initHRegion(tableName, null, null, callingMethod, conf, isReadOnly, families); } - public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families) throws IOException { Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log"); @@ -5173,7 +5219,7 @@ public class TestHRegion { * @return A region on which you must call * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. */ - public static HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... 
families) throws IOException { return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, @@ -5197,7 +5243,7 @@ public class TestHRegion { Bytes.toString(CellUtil.cloneValue(kv))); } - @Test (timeout=60000) + @Test public void testReverseScanner_FromMemStore_SingleCF_Normal() throws IOException { byte[] rowC = Bytes.toBytes("rowC"); @@ -5256,7 +5302,7 @@ public class TestHRegion { } } - @Test (timeout=60000) + @Test public void testReverseScanner_FromMemStore_SingleCF_LargerKey() throws IOException { byte[] rowC = Bytes.toBytes("rowC"); @@ -5316,7 +5362,7 @@ public class TestHRegion { } } - @Test (timeout=60000) + @Test public void testReverseScanner_FromMemStore_SingleCF_FullScan() throws IOException { byte[] rowC = Bytes.toBytes("rowC"); @@ -5373,7 +5419,7 @@ public class TestHRegion { } } - @Test (timeout=60000) + @Test public void testReverseScanner_moreRowsMayExistAfter() throws IOException { // case for "INCLUDE_AND_SEEK_NEXT_ROW & SEEK_NEXT_ROW" endless loop byte[] rowA = Bytes.toBytes("rowA"); @@ -5455,7 +5501,7 @@ public class TestHRegion { } } - @Test (timeout=60000) + @Test public void testReverseScanner_smaller_blocksize() throws IOException { // case to ensure no conflict with HFile index optimization byte[] rowA = Bytes.toBytes("rowA"); @@ -5539,7 +5585,7 @@ public class TestHRegion { } } - @Test (timeout=60000) + @Test public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() throws IOException { byte[] row0 = Bytes.toBytes("row0"); // 1 kv @@ -5708,7 +5754,7 @@ public class TestHRegion { } } - @Test (timeout=60000) + @Test public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() throws IOException { byte[] row1 = Bytes.toBytes("row1"); @@ -5789,7 +5835,7 @@ public class TestHRegion { /** * Test for HBASE-14497: Reverse Scan threw StackOverflow caused by readPt checking */ - @Test (timeout = 60000) + @Test public void testReverseScanner_StackOverflow() throws IOException { byte[] cf1 = Bytes.toBytes("CF1"); byte[][] families = {cf1}; @@ -5844,7 +5890,7 @@ public class TestHRegion { } } - @Test (timeout=60000) + @Test public void testSplitRegionWithReverseScan() throws IOException { TableName tableName = TableName.valueOf("testSplitRegionWithReverseScan"); byte [] qualifier = Bytes.toBytes("qualifier"); @@ -6045,6 +6091,7 @@ public class TestHRegion { final HTableDescriptor htd, final RegionServerServices rsServices) { super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices); } + @Override protected long getNextSequenceId(WAL wal) throws IOException { return 42; } @@ -6244,7 +6291,7 @@ public class TestHRegion { /** * Test RegionTooBusyException thrown when region is busy */ - @Test (timeout=24000) + @Test public void testRegionTooBusy() throws IOException { String method = "testRegionTooBusy"; TableName tableName = TableName.valueOf(method); @@ -6507,7 +6554,7 @@ public class TestHRegion { qual2, 0, qual2.length)); } - @Test(timeout = 60000) + @Test public void testBatchMutateWithWrongRegionException() throws Exception { final byte[] a = Bytes.toBytes("a"); final byte[] b = Bytes.toBytes("b"); @@ -6604,7 +6651,7 @@ public class TestHRegion { qual2, 0, qual2.length)); } - static HRegion initHRegion(TableName tableName, String callingMethod, + HRegion initHRegion(TableName tableName, String callingMethod, byte[]... 
families) throws IOException { return initHRegion(tableName, callingMethod, HBaseConfiguration.create(), families); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index 76b4134..bd5c91e 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.RegionServerCallable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -195,11 +195,11 @@ public class TestHRegionServerBulkLoad { Path hfile = new Path(dir, family(i)); byte[] fam = Bytes.toBytes(family(i)); createHFile(fs, hfile, fam, QUAL, val, 1000); - famPaths.add(new Pair(fam, hfile.toString())); + famPaths.add(new Pair<>(fam, hfile.toString())); } // bulk load HFiles - final HConnection conn = UTIL.getHBaseAdmin().getConnection(); + final ClusterConnection conn = (ClusterConnection) UTIL.getAdmin().getConnection(); RegionServerCallable callable = new RegionServerCallable(conn, tableName, Bytes.toBytes("aaa")) { @Override diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java new file mode 100644 index 0000000..be604af --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java @@ -0,0 +1,69 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CategoryBasedTimeout; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.TestMobSnapshotCloneIndependence; +import org.apache.hadoop.hbase.master.procedure.TestMasterFailoverWithProcedures; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; +import org.apache.hadoop.hbase.wal.WAL; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestRule; + +/** + * A test similar to TestHRegion, but with in-memory flush families. + * Also checks wal truncation after in-memory compaction. + */ +@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@SuppressWarnings("deprecation") +public class TestHRegionWithInMemoryFlush extends TestHRegion{ + // Do not spin up clusters in here. If you need to spin up a cluster, do it + // over in TestHRegionOnCluster. + private static final Log LOG = LogFactory.getLog(TestHRegionWithInMemoryFlush.class); + @ClassRule + public static final TestRule timeout = + CategoryBasedTimeout.forClass(TestHRegionWithInMemoryFlush.class); + + /** + * @return A region on which you must call + * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. + */ + @Override + public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey, + String callingMethod, Configuration conf, boolean isReadOnly, Durability durability, + WAL wal, byte[]... 
families) throws IOException { + boolean[] inMemory = new boolean[families.length]; + for(int i = 0; i < inMemory.length; i++) { + inMemory[i] = true; + } + return TEST_UTIL.createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, + isReadOnly, durability, wal, inMemory, families); + } + +} + diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java index f9ffc88..5cbca4b 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java @@ -85,8 +85,6 @@ public class TestMobStoreCompaction { @BeforeClass public static void setUpBeforeClass() throws Exception { - UTIL.getConfiguration().setInt("hbase.master.info.port", 0); - UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); UTIL.startMiniCluster(1); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java index 04ad4c9..6a4aceb 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java @@ -78,8 +78,6 @@ public class TestMobStoreScanner { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", 0); - TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); TEST_UTIL.getConfiguration().setInt("hbase.client.keyvalue.maxsize", 100 * 1024 * 1024); // TODO: AsyncFSWAL can not handle large edits right now, remove this after we fix the issue. 
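// A minimal illustrative sketch (not from the patch): the new TestHRegionWithInMemoryFlush above
// reuses every test in TestHRegion by overriding a single protected initHRegion(...) factory.
// The pattern in miniature, with hypothetical Fixture/BaseTest names:
import static org.junit.Assert.assertNotNull;
import org.junit.Test;

public class FactoryOverrideSketch {
  static class Fixture {
    final boolean inMemoryFlush;
    Fixture(boolean inMemoryFlush) { this.inMemoryFlush = inMemoryFlush; }
  }

  public static class BaseTest {
    // The single creation point every test goes through; subclasses may override it.
    protected Fixture createFixture() {
      return new Fixture(false);
    }

    @Test
    public void fixtureIsUsable() {
      assertNotNull(createFixture());
    }
  }

  // Inherits all @Test methods from BaseTest, but each now runs against the in-memory variant.
  public static class InMemoryTest extends BaseTest {
    @Override
    protected Fixture createFixture() {
      return new Fixture(true);
    }
  }
}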
TEST_UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, "filesystem"); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java index 848b678..1615b99 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java @@ -17,16 +17,7 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.Random; - +import com.google.common.hash.Hashing; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -59,7 +50,15 @@ import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; -import com.google.common.hash.Hashing; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Random; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; /** * This test verifies the correctness of the Per Column Family flushing strategy @@ -127,7 +126,7 @@ public class TestPerColumnFamilyFlush { // Set up the configuration Configuration conf = HBaseConfiguration.create(); conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 200 * 1024); - conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName()); + conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllLargeStoresPolicy.class.getName()); conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 100 * 1024); // Intialize the region @@ -336,7 +335,7 @@ public class TestPerColumnFamilyFlush { Configuration conf = TEST_UTIL.getConfiguration(); conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 20000); // Carefully chosen limits so that the memstore just flushes when we're done - conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName()); + conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllLargeStoresPolicy.class.getName()); conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 10000); final int numRegionServers = 4; try { @@ -450,7 +449,7 @@ public class TestPerColumnFamilyFlush { TableName tableName = TableName.valueOf("testFlushingWhenLogRolling"); Configuration conf = TEST_UTIL.getConfiguration(); conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 128 * 1024 * 1024); - conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName()); + conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllLargeStoresPolicy.class.getName()); long cfFlushSizeLowerBound = 2048; conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, cfFlushSizeLowerBound); @@ -607,7 +606,7 @@ public class TestPerColumnFamilyFlush { } LOG.info("==============Test with selective flush enabled==============="); - conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName()); + conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, 
FlushAllLargeStoresPolicy.class.getName()); // default value of per-cf flush lower bound is too big, set to a small enough value conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 0); try { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index cd4410f..f7182ba 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TestMobSnapshotCloneIndependence; import org.apache.hadoop.hbase.exceptions.MergeRegionException; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; @@ -64,6 +65,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -75,6 +77,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -91,13 +94,15 @@ import com.google.protobuf.ServiceException; * cluster where {@link TestRegionMergeTransaction} is tests against bare * {@link HRegion}. */ -@Category({RegionServerTests.class, LargeTests.class}) +@Category({RegionServerTests.class, MediumTests.class}) public class TestRegionMergeTransactionOnCluster { private static final Log LOG = LogFactory .getLog(TestRegionMergeTransactionOnCluster.class); @Rule public TestName name = new TestName(); - @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
- withLookingForStuckThread(true).build(); + @ClassRule + public static final TestRule timeout = + CategoryBasedTimeout.forClass(TestRegionMergeTransactionOnCluster.class); + private static final int NB_SERVERS = 3; private static final byte[] FAMILYNAME = Bytes.toBytes("fam"); @@ -544,7 +549,7 @@ public class TestRegionMergeTransactionOnCluster { if (enabled.get() && req.getTransition(0).getTransitionCode() == TransitionCode.READY_TO_MERGE && !resp.hasErrorMessage()) { RegionStates regionStates = myMaster.getAssignmentManager().getRegionStates(); - for (RegionState regionState: regionStates.getRegionsInTransition().values()) { + for (RegionState regionState: regionStates.getRegionsInTransition()) { // Find the merging_new region and remove it if (regionState.isMergingNew()) { regionStates.deleteRegion(regionState.getRegion()); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java index 639c148..c48fbec 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java @@ -24,6 +24,7 @@ import java.net.InetAddress; import java.net.NetworkInterface; import java.util.Enumeration; import java.util.List; +import java.util.Locale; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -92,7 +93,7 @@ public class TestRegionServerHostname { assertTrue(servers.size() == NUM_RS+1); for (String server : servers) { assertTrue("From zookeeper: " + server + " hostname: " + hostName, - server.startsWith(hostName.toLowerCase()+",")); + server.startsWith(hostName.toLowerCase(Locale.ROOT)+",")); } zkw.close(); } finally { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java index 2087097..89a82a7 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.regionserver; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.*; @@ -27,11 +29,17 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.log4j.Level; import org.apache.log4j.Logger; +import org.junit.After; import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Ignore; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.junit.rules.TestRule; import static org.junit.Assert.*; @@ -39,23 +47,35 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - @Category({RegionServerTests.class, MediumTests.class}) public class TestRegionServerMetrics { - private static MetricsAssertHelper metricsHelper; + private static final Log LOG = LogFactory.getLog(TestRegionServerMetrics.class); + + @Rule + public TestName testName = new TestName(); + + @ClassRule + public static TestRule timeout = 
CategoryBasedTimeout.forClass(TestRegionServerMetrics.class);
   static {
     Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.DEBUG);
   }
 
+  private static MetricsAssertHelper metricsHelper;
   private static MiniHBaseCluster cluster;
   private static HRegionServer rs;
   private static Configuration conf;
   private static HBaseTestingUtility TEST_UTIL;
+  private static Connection connection;
   private static MetricsRegionServer metricsRegionServer;
   private static MetricsRegionServerSource serverSource;
   private static final int NUM_SCAN_NEXT = 30;
   private static int numScanNext = 0;
+  private static byte[] cf = Bytes.toBytes("cf");
+  private static byte[] row = Bytes.toBytes("row");
+  private static byte[] qualifier = Bytes.toBytes("qual");
+  private static byte[] val = Bytes.toBytes("val");
+  private static Admin admin;
 
   @BeforeClass
   public static void startCluster() throws Exception {
@@ -65,12 +85,16 @@ public class TestRegionServerMetrics {
     conf.getLong("hbase.splitlog.max.resubmit", 0);
     // Make the failure test faster
     conf.setInt("zookeeper.recovery.retry", 0);
+    // testMobMetrics creates few hfiles and manages compaction manually.
+    conf.setInt("hbase.hstore.compactionThreshold", 100);
+    conf.setInt("hbase.hstore.compaction.max", 100);
     conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
     TEST_UTIL.startMiniCluster(1, 1);
     cluster = TEST_UTIL.getHBaseCluster();
-    cluster.waitForActiveAndReadyMaster();
+    admin = TEST_UTIL.getHBaseAdmin();
+    connection = TEST_UTIL.getConnection();
 
     while (cluster.getLiveRegionServerThreads().size() < 1) {
       Threads.sleep(100);
@@ -88,552 +112,374 @@ public class TestRegionServerMetrics {
     }
   }
 
-  @Test(timeout = 300000)
+  TableName tableName;
+  Table table;
+
+  @Before
+  public void beforeTestMethod() throws Exception {
+    metricsRegionServer.getRegionServerWrapper().forceRecompute();
+    tableName = TableName.valueOf(testName.getMethodName());
+    table = TEST_UTIL.createTable(tableName, cf);
+  }
+
+  @After
+  public void afterTestMethod() throws Exception {
+    admin.disableTable(tableName);
+    admin.deleteTable(tableName);
+  }
+
+  public void waitTableDeleted(TableName name, long timeoutInMillis) throws Exception {
+    long start = System.currentTimeMillis();
+    while (true) {
+      boolean found = false;
+      HTableDescriptor[] tables = admin.listTables();
+      for (HTableDescriptor htd : tables) {
+        // Compare table names with equals(), not ==.
+        if (htd.getNameAsString().equals(name.getNameAsString())) {
+          found = true;
+          break;
+        }
+      }
+      // Done once the table is gone, or when the timeout expires.
+      if (!found)
+        return;
+      if (System.currentTimeMillis() - start > timeoutInMillis)
+        return;
+      Thread.sleep(1000);
+    }
+  }
+
+  public void assertCounter(String metric, long expectedValue) {
+    metricsHelper.assertCounter(metric, expectedValue, serverSource);
+  }
+
+  public void assertGauge(String metric, long expectedValue) {
+    metricsHelper.assertGauge(metric, expectedValue, serverSource);
+  }
+
+  // Aggregates metrics from regions and assert given list of metrics and expected values.
+ public void assertRegionMetrics(String metric, long expectedValue) throws Exception { + try (RegionLocator locator = connection.getRegionLocator(tableName)) { + for ( HRegionLocation location: locator.getAllRegionLocations()) { + HRegionInfo hri = location.getRegionInfo(); + MetricsRegionAggregateSource agg = + rs.getRegion(hri.getRegionName()).getMetrics().getSource().getAggregateSource(); + String prefix = "namespace_" + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + + "_table_" + tableName.getNameAsString() + + "_region_" + hri.getEncodedName()+ + "_metric_"; + metricsHelper.assertCounter(prefix + metric, expectedValue, agg); + } + } + } + + public void doNPuts(int n, boolean batch) throws Exception { + if (batch) { + List puts = new ArrayList<>(); + for (int i = 0; i < n; i++) { + Put p = new Put(Bytes.toBytes("" + i + "row")).addColumn(cf, qualifier, val); + puts.add(p); + } + table.put(puts); + } else { + for (int i = 0; i < n; i++) { + Put p = new Put(row).addColumn(cf, qualifier, val); + table.put(p); + } + } + } + + public void doNGets(int n, boolean batch) throws Exception { + if (batch) { + List gets = new ArrayList<>(); + for (int i = 0; i < n; i++) { + gets.add(new Get(row)); + } + table.get(gets); + } else { + for (int i = 0; i < n; i++) { + table.get(new Get(row)); + } + } + } + + @Test public void testRegionCount() throws Exception { - String regionMetricsKey = "regionCount"; - long regions = metricsHelper.getGaugeLong(regionMetricsKey, serverSource); - // Creating a table should add one region - TEST_UTIL.createTable(TableName.valueOf("table"), Bytes.toBytes("cf")); - metricsHelper.assertGaugeGt(regionMetricsKey, regions, serverSource); + metricsHelper.assertGauge("regionCount", 1, serverSource); } @Test public void testLocalFiles() throws Exception { - metricsHelper.assertGauge("percentFilesLocal", 0, serverSource); - metricsHelper.assertGauge("percentFilesLocalSecondaryRegions", 0, serverSource); + assertGauge("percentFilesLocal", 0); + assertGauge("percentFilesLocalSecondaryRegions", 0); } @Test public void testRequestCount() throws Exception { - String tableNameString = "testRequestCount"; - TableName tName = TableName.valueOf(tableNameString); - byte[] cfName = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] initValue = Bytes.toBytes("Value"); - - TEST_UTIL.createTable(tName, cfName); - - Connection connection = TEST_UTIL.getConnection(); - connection.getTable(tName).close(); //wait for the table to come up. - // Do a first put to be sure that the connection is established, meta is there and so on. 
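// A minimal illustrative sketch (not from the patch): assertRegionMetrics above relies on the
// per-region metric naming scheme that also appears in the removed inline assertions,
// "namespace_<ns>_table_<table>_region_<encodedName>_metric_<name>". A tiny helper that builds
// such a key, with a made-up encoded region name:
public class RegionMetricPrefixSketch {
  static String regionMetricKey(String ns, String table, String encodedRegion, String metric) {
    return "namespace_" + ns
        + "_table_" + table
        + "_region_" + encodedRegion
        + "_metric_" + metric;
  }

  public static void main(String[] args) {
    // Prints: namespace_default_table_testGet_region_abcdef0123456789_metric_getNumOps
    System.out.println(regionMetricKey("default", "testGet", "abcdef0123456789", "getNumOps"));
  }
}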
- Table table = connection.getTable(tName); - Put p = new Put(row); - p.addColumn(cfName, qualifier, initValue); - table.put(p); + doNPuts(1, false); metricsRegionServer.getRegionServerWrapper().forceRecompute(); long requests = metricsHelper.getCounter("totalRequestCount", serverSource); long readRequests = metricsHelper.getCounter("readRequestCount", serverSource); long writeRequests = metricsHelper.getCounter("writeRequestCount", serverSource); - for (int i=0; i< 30; i++) { - table.put(p); - } + doNPuts(30, false); metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("totalRequestCount", requests + 30, serverSource); - metricsHelper.assertCounter("readRequestCount", readRequests, serverSource); - metricsHelper.assertCounter("writeRequestCount", writeRequests + 30, serverSource); + assertCounter("totalRequestCount", requests + 30); + assertCounter("readRequestCount", readRequests); + assertCounter("writeRequestCount", writeRequests + 30); - Get g = new Get(row); - for (int i=0; i< 10; i++) { - table.get(g); - } + doNGets(10, false); metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("totalRequestCount", requests + 40, serverSource); - metricsHelper.assertCounter("readRequestCount", readRequests + 10, serverSource); - metricsHelper.assertCounter("writeRequestCount", writeRequests + 30, serverSource); + assertCounter("totalRequestCount", requests + 40); + assertCounter("readRequestCount", readRequests + 10); + assertCounter("writeRequestCount", writeRequests + 30); - try (RegionLocator locator = connection.getRegionLocator(tName)) { - for ( HRegionLocation location: locator.getAllRegionLocations()) { - HRegionInfo i = location.getRegionInfo(); - MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName()) - .getMetrics() - .getSource() - .getAggregateSource(); - String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+ - "_table_"+tableNameString + - "_region_" + i.getEncodedName()+ - "_metric"; - metricsHelper.assertCounter(prefix + "_getNumOps", 10, agg); - metricsHelper.assertCounter(prefix + "_mutateCount", 31, agg); - } - } - List gets = new ArrayList(); - for (int i=0; i< 10; i++) { - gets.add(new Get(row)); - } - table.get(gets); + assertRegionMetrics("getNumOps", 10); + assertRegionMetrics("mutateCount", 31); + + doNGets(10, true); // true = batch metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("totalRequestCount", requests + 50, serverSource); - metricsHelper.assertCounter("readRequestCount", readRequests + 20, serverSource); - metricsHelper.assertCounter("writeRequestCount", writeRequests + 30, serverSource); + assertCounter("totalRequestCount", requests + 50); + assertCounter("readRequestCount", readRequests + 20); + assertCounter("writeRequestCount", writeRequests + 30); - List puts = new ArrayList<>(); - for (int i=0; i< 30; i++) { - puts.add(p); - } - table.put(puts); + doNPuts(30, true); metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("totalRequestCount", requests + 80, serverSource); - metricsHelper.assertCounter("readRequestCount", readRequests + 20, serverSource); - metricsHelper.assertCounter("writeRequestCount", writeRequests + 60, serverSource); - - table.close(); + assertCounter("totalRequestCount", requests + 80); + assertCounter("readRequestCount", readRequests + 20); + assertCounter("writeRequestCount", writeRequests + 60); } @Test public void testGet() throws Exception { 
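// A minimal illustrative sketch (not from the patch): the rewritten tests above lean on the
// @Before/@After lifecycle added earlier in this file, where a table named after the running
// test method is created before each test and dropped afterwards. A self-contained version of
// that fixture, with mini-cluster start/stop added for completeness:
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;

public class PerTestTableSketch {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final byte[] CF = Bytes.toBytes("cf");

  @Rule
  public TestName testName = new TestName();

  private TableName tableName;
  private Table table;

  @BeforeClass
  public static void startCluster() throws Exception {
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void stopCluster() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setUp() throws Exception {
    // Each test gets its own table, named after the test method.
    tableName = TableName.valueOf(testName.getMethodName());
    table = TEST_UTIL.createTable(tableName, CF);
  }

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.getAdmin().disableTable(tableName);
    TEST_UTIL.getAdmin().deleteTable(tableName);
  }

  @Test
  public void testSomething() throws Exception {
    // "table" already exists here and is cleaned up automatically afterwards.
  }
}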
- String tableNameString = "testGet"; - TableName tName = TableName.valueOf(tableNameString); - byte[] cfName = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] initValue = Bytes.toBytes("Value"); - - TEST_UTIL.createTable(tName, cfName); - - Connection connection = TEST_UTIL.getConnection(); - connection.getTable(tName).close(); //wait for the table to come up. - // Do a first put to be sure that the connection is established, meta is there and so on. - Table table = connection.getTable(tName); - Put p = new Put(row); - p.addColumn(cfName, qualifier, initValue); - table.put(p); - - Get g = new Get(row); - for (int i=0; i< 10; i++) { - table.get(g); - } - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - - try (RegionLocator locator = connection.getRegionLocator(tName)) { - for ( HRegionLocation location: locator.getAllRegionLocations()) { - HRegionInfo i = location.getRegionInfo(); - MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName()) - .getMetrics() - .getSource() - .getAggregateSource(); - String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+ - "_table_"+tableNameString + - "_region_" + i.getEncodedName()+ - "_metric"; - metricsHelper.assertCounter(prefix + "_getSizeNumOps", 10, agg); - metricsHelper.assertCounter(prefix + "_getNumOps", 10, agg); - } - metricsHelper.assertCounterGt("Get_num_ops", 10, serverSource); - } - table.close(); + doNPuts(1, false); + doNGets(10, false); + assertRegionMetrics("getNumOps", 10); + assertRegionMetrics("getSizeNumOps", 10); + metricsHelper.assertCounterGt("Get_num_ops", 10, serverSource); } @Test public void testMutationsWithoutWal() throws Exception { - TableName tableName = TableName.valueOf("testMutationsWithoutWal"); - byte[] cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("Value"); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - - Table t = TEST_UTIL.createTable(tableName, cf); - - Put p = new Put(row); - p.addColumn(cf, qualifier, val); - p.setDurability(Durability.SKIP_WAL); - - t.put(p); + Put p = new Put(row).addColumn(cf, qualifier, val) + .setDurability(Durability.SKIP_WAL); + table.put(p); metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertGauge("mutationsWithoutWALCount", 1, serverSource); + assertGauge("mutationsWithoutWALCount", 1); long minLength = row.length + cf.length + qualifier.length + val.length; metricsHelper.assertGaugeGt("mutationsWithoutWALSize", minLength, serverSource); - - t.close(); } @Test public void testStoreCount() throws Exception { - TableName tableName = TableName.valueOf("testStoreCount"); - byte[] cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("Value"); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - long stores = metricsHelper.getGaugeLong("storeCount", serverSource); - long storeFiles = metricsHelper.getGaugeLong("storeFileCount", serverSource); - //Force a hfile. 
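// A minimal illustrative sketch (not from the patch): testMutationsWithoutWal above counts
// edits that bypass the write-ahead log. The client-side switch is Durability.SKIP_WAL on the
// mutation; a small demo against an already-open Table handle (byte arrays are illustrative):
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  static void demo(Table table) throws Exception {
    byte[] row = Bytes.toBytes("row");
    byte[] cf = Bytes.toBytes("cf");
    byte[] qual = Bytes.toBytes("qual");
    byte[] val = Bytes.toBytes("val");

    // Default durability: the edit is persisted to the WAL before being acknowledged.
    table.put(new Put(row).addColumn(cf, qual, val));

    // SKIP_WAL trades durability for latency; unflushed edits are lost if the server dies,
    // and such mutations show up in the mutationsWithoutWALCount/Size gauges.
    Put noWal = new Put(row).addColumn(cf, qual, val);
    noWal.setDurability(Durability.SKIP_WAL);
    table.put(noWal);
  }
}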
- Table t = TEST_UTIL.createTable(tableName, cf); - Put p = new Put(row); - p.addColumn(cf, qualifier, val); - t.put(p); + doNPuts(1, false); TEST_UTIL.getHBaseAdmin().flush(tableName); metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertGauge("storeCount", stores +1, serverSource); - metricsHelper.assertGauge("storeFileCount", storeFiles + 1, serverSource); - - t.close(); + assertGauge("storeCount", 1); + assertGauge("storeFileCount", 1); } @Test public void testStoreFileAge() throws Exception { - TableName tableName = TableName.valueOf("testStoreFileAge"); - byte[] cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("Value"); - //Force a hfile. - Table t = TEST_UTIL.createTable(tableName, cf); - Put p = new Put(row); - p.addColumn(cf, qualifier, val); - t.put(p); + doNPuts(1, false); TEST_UTIL.getHBaseAdmin().flush(tableName); metricsRegionServer.getRegionServerWrapper().forceRecompute(); assertTrue(metricsHelper.getGaugeLong("maxStoreFileAge", serverSource) > 0); assertTrue(metricsHelper.getGaugeLong("minStoreFileAge", serverSource) > 0); assertTrue(metricsHelper.getGaugeLong("avgStoreFileAge", serverSource) > 0); - - t.close(); } @Test public void testCheckAndPutCount() throws Exception { - String tableNameString = "testCheckAndPutCount"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); byte[] valOne = Bytes.toBytes("Value"); byte[] valTwo = Bytes.toBytes("ValueTwo"); byte[] valThree = Bytes.toBytes("ValueThree"); - Table t = TEST_UTIL.createTable(tableName, cf); Put p = new Put(row); p.addColumn(cf, qualifier, valOne); - t.put(p); + table.put(p); Put pTwo = new Put(row); pTwo.addColumn(cf, qualifier, valTwo); - t.checkAndPut(row, cf, qualifier, valOne, pTwo); + table.checkAndPut(row, cf, qualifier, valOne, pTwo); Put pThree = new Put(row); pThree.addColumn(cf, qualifier, valThree); - t.checkAndPut(row, cf, qualifier, valOne, pThree); + table.checkAndPut(row, cf, qualifier, valOne, pThree); metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("checkMutateFailedCount", 1, serverSource); - metricsHelper.assertCounter("checkMutatePassedCount", 1, serverSource); - - t.close(); + assertCounter("checkMutateFailedCount", 1); + assertCounter("checkMutatePassedCount", 1); } @Test public void testIncrement() throws Exception { - String tableNameString = "testIncrement"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes(0l); - - - Table t = TEST_UTIL.createTable(tableName, cf); - Put p = new Put(row); - p.addColumn(cf, qualifier, val); - t.put(p); + Put p = new Put(row).addColumn(cf, qualifier, Bytes.toBytes(0L)); + table.put(p); - for(int count = 0; count< 13; count++) { + for(int count = 0; count < 13; count++) { Increment inc = new Increment(row); inc.addColumn(cf, qualifier, 100); - t.increment(inc); + table.increment(inc); } metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("incrementNumOps", 13, serverSource); - - t.close(); + assertCounter("incrementNumOps", 13); } @Test public void testAppend() throws Exception { - String tableNameString = "testAppend"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] 
cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("One"); - - - Table t = TEST_UTIL.createTable(tableName, cf); - Put p = new Put(row); - p.addColumn(cf, qualifier, val); - t.put(p); + doNPuts(1, false); for(int count = 0; count< 73; count++) { Append append = new Append(row); append.add(cf, qualifier, Bytes.toBytes(",Test")); - t.append(append); + table.append(append); } metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("appendNumOps", 73, serverSource); - - t.close(); + assertCounter("appendNumOps", 73); } @Test - public void testScanSize() throws IOException { - String tableNameString = "testScanSize"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("One"); - - List puts = new ArrayList<>(); - for (int insertCount =0; insertCount < 100; insertCount++) { - Put p = new Put(Bytes.toBytes("" + insertCount + "row")); - p.addColumn(cf, qualifier, val); - puts.add(p); - } - try (Table t = TEST_UTIL.createTable(tableName, cf)) { - t.put(puts); - - Scan s = new Scan(); - s.setBatch(1); - s.setCaching(1); - ResultScanner resultScanners = t.getScanner(s); - - for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { - Result result = resultScanners.next(); - assertNotNull(result); - assertEquals(1, result.size()); - } + public void testScanSize() throws Exception { + doNPuts(100, true); // batch put + Scan s = new Scan(); + s.setBatch(1); + s.setCaching(1); + ResultScanner resultScanners = table.getScanner(s); + + for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { + Result result = resultScanners.next(); + assertNotNull(result); + assertEquals(1, result.size()); } numScanNext += NUM_SCAN_NEXT; - try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - for ( HRegionLocation location: locator.getAllRegionLocations()) { - HRegionInfo i = location.getRegionInfo(); - MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName()) - .getMetrics() - .getSource() - .getAggregateSource(); - String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+ - "_table_"+tableNameString + - "_region_" + i.getEncodedName()+ - "_metric"; - metricsHelper.assertCounter(prefix + "_scanSizeNumOps", NUM_SCAN_NEXT, agg); - } - metricsHelper.assertCounter("ScanSize_num_ops", numScanNext, serverSource); - } - try (Admin admin = TEST_UTIL.getHBaseAdmin()) { - admin.disableTable(tableName); - admin.deleteTable(tableName); - } + assertRegionMetrics("scanSizeNumOps", NUM_SCAN_NEXT); + assertCounter("ScanSize_num_ops", numScanNext); } @Test - public void testScanTime() throws IOException { - String tableNameString = "testScanTime"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("One"); - - List puts = new ArrayList<>(); - for (int insertCount =0; insertCount < 100; insertCount++) { - Put p = new Put(Bytes.toBytes("" + insertCount + "row")); - p.addColumn(cf, qualifier, val); - puts.add(p); - } - try (Table t = TEST_UTIL.createTable(tableName, cf)) { - t.put(puts); - - Scan s = new Scan(); - s.setBatch(1); - s.setCaching(1); - ResultScanner resultScanners = t.getScanner(s); - - for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { - Result result = resultScanners.next(); - 
assertNotNull(result); - assertEquals(1, result.size()); - } + public void testScanTime() throws Exception { + doNPuts(100, true); + Scan s = new Scan(); + s.setBatch(1); + s.setCaching(1); + ResultScanner resultScanners = table.getScanner(s); + + for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { + Result result = resultScanners.next(); + assertNotNull(result); + assertEquals(1, result.size()); } numScanNext += NUM_SCAN_NEXT; - try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - for ( HRegionLocation location: locator.getAllRegionLocations()) { - HRegionInfo i = location.getRegionInfo(); - MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName()) - .getMetrics() - .getSource() - .getAggregateSource(); - String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+ - "_table_"+tableNameString + - "_region_" + i.getEncodedName()+ - "_metric"; - metricsHelper.assertCounter(prefix + "_scanTimeNumOps", NUM_SCAN_NEXT, agg); - } - metricsHelper.assertCounter("ScanTime_num_ops", numScanNext, serverSource); - } - try (Admin admin = TEST_UTIL.getHBaseAdmin()) { - admin.disableTable(tableName); - admin.deleteTable(tableName); - } + assertRegionMetrics("scanTimeNumOps", NUM_SCAN_NEXT); + assertCounter("ScanTime_num_ops", numScanNext); } @Test - public void testScanSizeForSmallScan() throws IOException { - String tableNameString = "testScanSizeSmall"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("One"); - - List puts = new ArrayList<>(); - for (int insertCount =0; insertCount < 100; insertCount++) { - Put p = new Put(Bytes.toBytes("" + insertCount + "row")); - p.addColumn(cf, qualifier, val); - puts.add(p); - } - try (Table t = TEST_UTIL.createTable(tableName, cf)) { - t.put(puts); - - Scan s = new Scan(); - s.setSmall(true); - s.setCaching(1); - ResultScanner resultScanners = t.getScanner(s); - - for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { - Result result = resultScanners.next(); - assertNotNull(result); - assertEquals(1, result.size()); - } + public void testScanSizeForSmallScan() throws Exception { + doNPuts(100, true); + Scan s = new Scan(); + s.setSmall(true); + s.setCaching(1); + ResultScanner resultScanners = table.getScanner(s); + + for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { + Result result = resultScanners.next(); + assertNotNull(result); + assertEquals(1, result.size()); } numScanNext += NUM_SCAN_NEXT; - try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { - for ( HRegionLocation location: locator.getAllRegionLocations()) { - HRegionInfo i = location.getRegionInfo(); - MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName()) - .getMetrics() - .getSource() - .getAggregateSource(); - String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+ - "_table_"+tableNameString + - "_region_" + i.getEncodedName()+ - "_metric"; - metricsHelper.assertCounter(prefix + "_scanSizeNumOps", NUM_SCAN_NEXT, agg); - } - metricsHelper.assertCounter("ScanSize_num_ops", numScanNext, serverSource); - } - try (Admin admin = TEST_UTIL.getHBaseAdmin()) { - admin.disableTable(tableName); - admin.deleteTable(tableName); - } + assertRegionMetrics("scanSizeNumOps", NUM_SCAN_NEXT); + assertCounter("ScanSize_num_ops", numScanNext); } @Test public void testMobMetrics() throws IOException, InterruptedException { - String 
tableNameString = "testMobMetrics"; - TableName tableName = TableName.valueOf(tableNameString); - byte[] cf = Bytes.toBytes("d"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("mobdata"); - int numHfiles = conf.getInt("hbase.hstore.compactionThreshold", 3) - 1; + TableName tableName = TableName.valueOf("testMobMetricsLocal"); + int numHfiles = 5; HTableDescriptor htd = new HTableDescriptor(tableName); HColumnDescriptor hcd = new HColumnDescriptor(cf); hcd.setMobEnabled(true); hcd.setMobThreshold(0); htd.addFamily(hcd); - Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin(); - HTable t = TEST_UTIL.createTable(htd, new byte[0][0], conf); - Region region = rs.getOnlineRegions(tableName).get(0); - t.setAutoFlush(true, true); - for (int insertCount = 0; insertCount < numHfiles; insertCount++) { - Put p = new Put(Bytes.toBytes(insertCount)); - p.addColumn(cf, qualifier, val); - t.put(p); - admin.flush(tableName); - } - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("mobFlushCount", numHfiles, serverSource); - Scan scan = new Scan(Bytes.toBytes(0), Bytes.toBytes(2)); - ResultScanner scanner = t.getScanner(scan); - scanner.next(100); - numScanNext++; // this is an ugly construct - scanner.close(); - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("mobScanCellsCount", 2, serverSource); - region.getTableDesc().getFamily(cf).setMobThreshold(100); - ((HRegion)region).initialize(); - region.compact(true); - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - metricsHelper.assertCounter("cellsCountCompactedFromMob", numHfiles, - serverSource); - metricsHelper.assertCounter("cellsCountCompactedToMob", 0, serverSource); - scanner = t.getScanner(scan); - scanner.next(100); - numScanNext++; // this is an ugly construct - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - // metrics are reset by the region initialization - metricsHelper.assertCounter("mobScanCellsCount", 0, serverSource); - for (int insertCount = numHfiles; - insertCount < 2 * numHfiles - 1; insertCount++) { - Put p = new Put(Bytes.toBytes(insertCount)); - p.addColumn(cf, qualifier, val); - t.put(p); - admin.flush(tableName); + byte[] val = Bytes.toBytes("mobdata"); + try { + Table table = TEST_UTIL.createTable(htd, new byte[0][0], conf); + Region region = rs.getOnlineRegions(tableName).get(0); + for (int insertCount = 0; insertCount < numHfiles; insertCount++) { + Put p = new Put(Bytes.toBytes(insertCount)); + p.addColumn(cf, qualifier, val); + table.put(p); + admin.flush(tableName); + } + metricsRegionServer.getRegionServerWrapper().forceRecompute(); + assertCounter("mobFlushCount", numHfiles); + + Scan scan = new Scan(Bytes.toBytes(0), Bytes.toBytes(numHfiles)); + ResultScanner scanner = table.getScanner(scan); + scanner.next(100); + numScanNext++; // this is an ugly construct + scanner.close(); + metricsRegionServer.getRegionServerWrapper().forceRecompute(); + assertCounter("mobScanCellsCount", numHfiles); + + region.getTableDesc().getFamily(cf).setMobThreshold(100); + // metrics are reset by the region initialization + ((HRegion) region).initialize(); + region.compact(true); + metricsRegionServer.getRegionServerWrapper().forceRecompute(); + assertCounter("cellsCountCompactedFromMob", numHfiles); + assertCounter("cellsCountCompactedToMob", 0); + + scanner = table.getScanner(scan); + scanner.next(100); + numScanNext++; // this is an ugly 
construct + metricsRegionServer.getRegionServerWrapper().forceRecompute(); + assertCounter("mobScanCellsCount", 0); + + for (int insertCount = numHfiles; insertCount < 2 * numHfiles; insertCount++) { + Put p = new Put(Bytes.toBytes(insertCount)); + p.addColumn(cf, qualifier, val); + table.put(p); + admin.flush(tableName); + } + region.getTableDesc().getFamily(cf).setMobThreshold(0); + + // closing the region forces the compaction.discharger to archive the compacted hfiles + ((HRegion) region).close(); + + // metrics are reset by the region initialization + ((HRegion) region).initialize(); + region.compact(true); + metricsRegionServer.getRegionServerWrapper().forceRecompute(); + // metrics are reset by the region initialization + assertCounter("cellsCountCompactedFromMob", 0); + assertCounter("cellsCountCompactedToMob", 2 * numHfiles); + } finally { + admin.disableTable(tableName); + admin.deleteTable(tableName); } - region.getTableDesc().getFamily(cf).setMobThreshold(0); - ((HRegion)region).initialize(); - region.compact(true); - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - // metrics are reset by the region initialization - metricsHelper.assertCounter("cellsCountCompactedFromMob", 0, serverSource); - metricsHelper.assertCounter("cellsCountCompactedToMob", 2 * numHfiles - 1, - serverSource); - t.close(); - admin.close(); - connection.close(); } @Test @Ignore public void testRangeCountMetrics() throws Exception { - String tableNameString = "testRangeCountMetrics"; final long[] timeranges = { 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 }; final String timeRangeType = "TimeRangeCount"; final String timeRangeMetricName = "Mutate"; boolean timeRangeCountUpdated = false; - TableName tName = TableName.valueOf(tableNameString); - byte[] cfName = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] initValue = Bytes.toBytes("Value"); - - TEST_UTIL.createTable(tName, cfName); - - Connection connection = TEST_UTIL.getConnection(); - connection.getTable(tName).close(); // wait for the table to come up. - // Do a first put to be sure that the connection is established, meta is there and so on. - Table table = connection.getTable(tName); Put p = new Put(row); - p.addColumn(cfName, qualifier, initValue); + p.addColumn(cf, qualifier, val); table.put(p); // do some puts and gets @@ -673,28 +519,15 @@ public class TestRegionServerMetrics { } } assertEquals(true, timeRangeCountUpdated); - - table.close(); } @Test public void testAverageRegionSize() throws Exception { - TableName tableName = TableName.valueOf("testAverageRegionSize"); - byte[] cf = Bytes.toBytes("d"); - byte[] row = Bytes.toBytes("rk"); - byte[] qualifier = Bytes.toBytes("qual"); - byte[] val = Bytes.toBytes("Value"); - //Force a hfile. 
- Table t = TEST_UTIL.createTable(tableName, cf); - Put p = new Put(row); - p.addColumn(cf, qualifier, val); - t.put(p); + doNPuts(1, false); TEST_UTIL.getHBaseAdmin().flush(tableName); metricsRegionServer.getRegionServerWrapper().forceRecompute(); assertTrue(metricsHelper.getGaugeDouble("averageRegionSize", serverSource) > 0.0); - - t.close(); } } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java new file mode 100644 index 0000000..6867b99 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java @@ -0,0 +1,387 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.RegionLoad; +import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.BinaryComparator; +import org.apache.hadoop.hbase.filter.CompareFilter; +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; + +@Category(MediumTests.class) +public class TestRegionServerReadRequestMetrics { + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final TableName TABLE_NAME = TableName.valueOf("test"); + private static final byte[] CF1 = "c1".getBytes(); + private static final byte[] CF2 = "c2".getBytes(); + + private static final byte[] ROW1 
= "a".getBytes(); + private static final byte[] ROW2 = "b".getBytes(); + private static final byte[] ROW3 = "c".getBytes(); + private static final byte[] COL1 = "q1".getBytes(); + private static final byte[] COL2 = "q2".getBytes(); + private static final byte[] COL3 = "q3".getBytes(); + private static final byte[] VAL1 = "v1".getBytes(); + private static final byte[] VAL2 = "v2".getBytes(); + private static final byte[] VAL3 = Bytes.toBytes(0L); + + private static final int MAX_TRY = 20; + private static final int SLEEP_MS = 100; + private static final int TTL = 1; + + private static Admin admin; + private static Collection serverNames; + private static Table table; + private static List tableRegions; + + private static Map requestsMap = new HashMap<>(); + private static Map requestsMapPrev = new HashMap<>(); + + @BeforeClass + public static void setUpOnce() throws Exception { + TEST_UTIL.startMiniCluster(); + admin = TEST_UTIL.getAdmin(); + serverNames = admin.getClusterStatus().getServers(); + table = createTable(); + putData(); + tableRegions = admin.getTableRegions(TABLE_NAME); + + for (Metric metric : Metric.values()) { + requestsMap.put(metric, 0L); + requestsMapPrev.put(metric, 0L); + } + } + + private static Table createTable() throws IOException { + HTableDescriptor td = new HTableDescriptor(TABLE_NAME); + HColumnDescriptor cd1 = new HColumnDescriptor(CF1); + td.addFamily(cd1); + HColumnDescriptor cd2 = new HColumnDescriptor(CF2); + cd2.setTimeToLive(TTL); + td.addFamily(cd2); + + admin.createTable(td); + return TEST_UTIL.getConnection().getTable(TABLE_NAME); + } + + private static void testReadRequests(long resultCount, + long expectedReadRequests, long expectedFilteredReadRequests) + throws IOException, InterruptedException { + updateMetricsMap(); + System.out.println("requestsMapPrev = " + requestsMapPrev); + System.out.println("requestsMap = " + requestsMap); + + assertEquals(expectedReadRequests, + requestsMap.get(Metric.REGION_READ) - requestsMapPrev.get(Metric.REGION_READ)); + assertEquals(expectedReadRequests, + requestsMap.get(Metric.SERVER_READ) - requestsMapPrev.get(Metric.SERVER_READ)); + assertEquals(expectedFilteredReadRequests, + requestsMap.get(Metric.FILTERED_REGION_READ) + - requestsMapPrev.get(Metric.FILTERED_REGION_READ)); + assertEquals(expectedFilteredReadRequests, + requestsMap.get(Metric.FILTERED_SERVER_READ) + - requestsMapPrev.get(Metric.FILTERED_SERVER_READ)); + assertEquals(expectedReadRequests, resultCount); + } + + private static void updateMetricsMap() throws IOException, InterruptedException { + for (Metric metric : Metric.values()) { + requestsMapPrev.put(metric, requestsMap.get(metric)); + } + + ServerLoad serverLoad = null; + RegionLoad regionLoadOuter = null; + boolean metricsUpdated = false; + for (int i = 0; i < MAX_TRY; i++) { + for (ServerName serverName : serverNames) { + serverLoad = admin.getClusterStatus().getLoad(serverName); + + Map regionsLoad = serverLoad.getRegionsLoad(); + for (HRegionInfo tableRegion : tableRegions) { + RegionLoad regionLoad = regionsLoad.get(tableRegion.getRegionName()); + if (regionLoad != null) { + regionLoadOuter = regionLoad; + for (Metric metric : Metric.values()) { + if (getReadRequest(serverLoad, regionLoad, metric) > requestsMapPrev.get(metric)) { + for (Metric metricInner : Metric.values()) { + requestsMap.put(metricInner, getReadRequest(serverLoad, regionLoad, metricInner)); + } + metricsUpdated = true; + break; + } + } + } + } + } + if (metricsUpdated) { + break; + } + Thread.sleep(SLEEP_MS); + } + if 
(!metricsUpdated) { + for (Metric metric : Metric.values()) { + requestsMap.put(metric, getReadRequest(serverLoad, regionLoadOuter, metric)); + } + } + } + + private static long getReadRequest(ServerLoad serverLoad, RegionLoad regionLoad, Metric metric) { + switch (metric) { + case REGION_READ: + return regionLoad.getReadRequestsCount(); + case SERVER_READ: + return serverLoad.getReadRequestsCount(); + case FILTERED_REGION_READ: + return regionLoad.getFilteredReadRequestsCount(); + case FILTERED_SERVER_READ: + return serverLoad.getFilteredReadRequestsCount(); + default: + throw new IllegalStateException(); + } + } + + private static void putData() throws IOException { + Put put; + + put = new Put(ROW1); + put.addColumn(CF1, COL1, VAL1); + put.addColumn(CF1, COL2, VAL2); + put.addColumn(CF1, COL3, VAL3); + table.put(put); + put = new Put(ROW2); + put.addColumn(CF1, COL1, VAL2); // put val2 instead of val1 + put.addColumn(CF1, COL2, VAL2); + table.put(put); + put = new Put(ROW3); + put.addColumn(CF1, COL1, VAL1); + put.addColumn(CF1, COL2, VAL2); + table.put(put); + } + + private static void putTTLExpiredData() throws IOException, InterruptedException { + Put put; + + put = new Put(ROW1); + put.addColumn(CF2, COL1, VAL1); + put.addColumn(CF2, COL2, VAL2); + table.put(put); + + Thread.sleep(TTL * 1000); + + put = new Put(ROW2); + put.addColumn(CF2, COL1, VAL1); + put.addColumn(CF2, COL2, VAL2); + table.put(put); + + put = new Put(ROW3); + put.addColumn(CF2, COL1, VAL1); + put.addColumn(CF2, COL2, VAL2); + table.put(put); + } + + @AfterClass + public static void tearDownOnce() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testReadRequestsCountNotFiltered() throws Exception { + int resultCount; + Scan scan; + Append append; + Put put; + Increment increment; + Get get; + + // test for scan + scan = new Scan(); + try (ResultScanner scanner = table.getScanner(scan)) { + resultCount = 0; + for (Result ignore : scanner) { + resultCount++; + } + testReadRequests(resultCount, 3, 0); + } + + // test for scan + scan = new Scan(ROW2, ROW3); + try (ResultScanner scanner = table.getScanner(scan)) { + resultCount = 0; + for (Result ignore : scanner) { + resultCount++; + } + testReadRequests(resultCount, 1, 0); + } + + // test for get + get = new Get(ROW2); + Result result = table.get(get); + resultCount = result.isEmpty() ? 0 : 1; + testReadRequests(resultCount, 1, 0); + + // test for increment + increment = new Increment(ROW1); + increment.addColumn(CF1, COL3, 1); + result = table.increment(increment); + resultCount = result.isEmpty() ? 0 : 1; + testReadRequests(resultCount, 1, 0); + + // test for checkAndPut + put = new Put(ROW1); + put.addColumn(CF1, COL2, VAL2); + boolean checkAndPut = + table.checkAndPut(ROW1, CF1, COL2, CompareFilter.CompareOp.EQUAL, VAL2, put); + resultCount = checkAndPut ? 1 : 0; + testReadRequests(resultCount, 1, 0); + + // test for append + append = new Append(ROW1); + append.add(CF1, COL2, VAL2); + result = table.append(append); + resultCount = result.isEmpty() ? 0 : 1; + testReadRequests(resultCount, 1, 0); + + // test for checkAndMutate + put = new Put(ROW1); + put.addColumn(CF1, COL1, VAL1); + RowMutations rm = new RowMutations(ROW1); + rm.add(put); + boolean checkAndMutate = + table.checkAndMutate(ROW1, CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1, rm); + resultCount = checkAndMutate ? 
1 : 0; + testReadRequests(resultCount, 1, 0); + } + + @Test + public void testReadRequestsCountWithFilter() throws Exception { + int resultCount; + Scan scan; + + // test for scan + scan = new Scan(); + scan.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1)); + try (ResultScanner scanner = table.getScanner(scan)) { + resultCount = 0; + for (Result ignore : scanner) { + resultCount++; + } + testReadRequests(resultCount, 2, 1); + } + + // test for scan + scan = new Scan(); + scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1))); + try (ResultScanner scanner = table.getScanner(scan)) { + resultCount = 0; + for (Result ignore : scanner) { + resultCount++; + } + testReadRequests(resultCount, 1, 2); + } + + // test for scan + scan = new Scan(ROW2, ROW3); + scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1))); + try (ResultScanner scanner = table.getScanner(scan)) { + resultCount = 0; + for (Result ignore : scanner) { + resultCount++; + } + testReadRequests(resultCount, 0, 1); + } + + // fixme filtered get should not increase readRequestsCount +// Get get = new Get(ROW2); +// get.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1)); +// Result result = table.get(get); +// resultCount = result.isEmpty() ? 0 : 1; +// testReadRequests(resultCount, 0, 1); + } + + @Test + public void testReadRequestsCountWithDeletedRow() throws Exception { + try { + Delete delete = new Delete(ROW3); + table.delete(delete); + + Scan scan = new Scan(); + try (ResultScanner scanner = table.getScanner(scan)) { + int resultCount = 0; + for (Result ignore : scanner) { + resultCount++; + } + testReadRequests(resultCount, 2, 1); + } + } finally { + Put put = new Put(ROW3); + put.addColumn(CF1, COL1, VAL1); + put.addColumn(CF1, COL2, VAL2); + table.put(put); + } + } + + @Test + public void testReadRequestsCountWithTTLExpiration() throws Exception { + putTTLExpiredData(); + + Scan scan = new Scan(); + scan.addFamily(CF2); + try (ResultScanner scanner = table.getScanner(scan)) { + int resultCount = 0; + for (Result ignore : scanner) { + resultCount++; + } + testReadRequests(resultCount, 2, 1); + } + } + + private enum Metric {REGION_READ, SERVER_READ, FILTERED_REGION_READ, FILTERED_SERVER_READ} +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java new file mode 100644 index 0000000..9366c54 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.assertTrue; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CategoryBasedTimeout; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.ipc.FifoRpcScheduler; +import org.apache.hadoop.hbase.ipc.RpcScheduler; +import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.junit.rules.TestRule; + +/** + * A silly test that does nothing but make sure an rpcscheduler factory makes what it says + * it is going to make. + */ +@Category(SmallTests.class) +public class TestRpcSchedulerFactory { + @Rule public TestName testName = new TestName(); + @ClassRule public static TestRule timeout = + CategoryBasedTimeout.forClass(TestRpcSchedulerFactory.class); + private Configuration conf; + + @Before + public void setUp() throws Exception { + this.conf = HBaseConfiguration.create(); + } + + @Test + public void testRWQ() { + // Set some configs just to see how it changes the scheduler. Can't assert the settings had + // an effect. Just eyeball the log. + this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_READ_SHARE_CONF_KEY, 0.5); + this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.5); + this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_SCAN_SHARE_CONF_KEY, 0.5); + RpcSchedulerFactory factory = new SimpleRpcSchedulerFactory(); + RpcScheduler rpcScheduler = factory.create(this.conf, null, null); + assertTrue(rpcScheduler.getClass().equals(SimpleRpcScheduler.class)); + } + + @Test + public void testFifo() { + RpcSchedulerFactory factory = new FifoRpcSchedulerFactory(); + RpcScheduler rpcScheduler = factory.create(this.conf, null, null); + assertTrue(rpcScheduler.getClass().equals(FifoRpcScheduler.class)); + } +} \ No newline at end of file diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index fe620e7..7fbcfea 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -31,6 +31,7 @@ import java.io.InterruptedIOException; import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; @@ -148,29 +149,12 @@ public class TestSplitTransactionOnCluster { this.admin.close(); } - private HRegionInfo getAndCheckSingleTableRegion(final List regions) { + private HRegionInfo getAndCheckSingleTableRegion(final List regions) + throws IOException, InterruptedException { assertEquals(1, regions.size()); HRegionInfo hri = regions.get(0).getRegionInfo(); - return waitOnRIT(hri); - } - - /** - * Often region has not yet fully opened. If we try to use it -- do a move for instance -- it - * will fail silently if the region is not yet opened. - * @param hri Region to check if in Regions In Transition... 
wait until out of transition before - * returning - * @return Passed in hri - */ - private HRegionInfo waitOnRIT(final HRegionInfo hri) { - // Close worked but we are going to open the region elsewhere. Before going on, make sure - // this completes. - while (TESTING_UTIL.getHBaseCluster().getMaster().getAssignmentManager(). - getRegionStates().isRegionInTransition(hri)) { - LOG.info("Waiting on region in transition: " + - TESTING_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates(). - getRegionTransitionState(hri)); - Threads.sleep(10); - } + TESTING_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() + .waitOnRegionToClearRegionsInTransition(hri, 600000); return hri; } @@ -209,14 +193,7 @@ public class TestSplitTransactionOnCluster { observer.latch.await(); LOG.info("Waiting for region to come out of RIT"); - TESTING_UTIL.waitFor(60000, 1000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - RegionStates regionStates = cluster.getMaster().getAssignmentManager().getRegionStates(); - Map rit = regionStates.getRegionsInTransition(); - return !rit.containsKey(hri.getEncodedName()); - } - }); + cluster.getMaster().getAssignmentManager().waitOnRegionToClearRegionsInTransition(hri, 60000); } finally { admin.setBalancerRunning(true, false); cluster.getMaster().setCatalogJanitorEnabled(true); @@ -657,7 +634,7 @@ public class TestSplitTransactionOnCluster { tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(), tableName); assertEquals("The specified table should present.", true, tableExists); - Map rit = cluster.getMaster().getAssignmentManager().getRegionStates() + Set rit = cluster.getMaster().getAssignmentManager().getRegionStates() .getRegionsInTransition(); assertTrue(rit.size() == 3); cluster.getMaster().getAssignmentManager().regionOffline(st.getFirstDaughter()); @@ -1311,7 +1288,7 @@ public class TestSplitTransactionOnCluster { if (enabled.get() && req.getTransition(0).getTransitionCode().equals( TransitionCode.READY_TO_SPLIT) && !resp.hasErrorMessage()) { RegionStates regionStates = myMaster.getAssignmentManager().getRegionStates(); - for (RegionState regionState: regionStates.getRegionsInTransition().values()) { + for (RegionState regionState: regionStates.getRegionsInTransition()) { // Find the merging_new region and remove it if (regionState.isSplittingNew()) { regionStates.deleteRegion(regionState.getRegion()); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java new file mode 100644 index 0000000..4173b2a --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -0,0 +1,565 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.regionserver.wal.FSHLog; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.JVMClusterUtil; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.wal.WAL; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +/** + * This test verifies the correctness of the Per Column Family flushing strategy + * when part of the memstores are compacted memstores + */ +@Category({ RegionServerTests.class, LargeTests.class }) +public class TestWalAndCompactingMemStoreFlush { + + private static final Log LOG = LogFactory.getLog(TestWalAndCompactingMemStoreFlush.class); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final Path DIR = TEST_UTIL.getDataTestDir("TestHRegion"); + public static final TableName TABLENAME = TableName.valueOf("TestWalAndCompactingMemStoreFlush", + "t1"); + + public static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2"), + Bytes.toBytes("f3"), Bytes.toBytes("f4"), Bytes.toBytes("f5") }; + + public static final byte[] FAMILY1 = FAMILIES[0]; + public static final byte[] FAMILY2 = FAMILIES[1]; + public static final byte[] FAMILY3 = FAMILIES[2]; + + + + private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException { + int i=0; + HTableDescriptor htd = new HTableDescriptor(TABLENAME); + for (byte[] family : FAMILIES) { + HColumnDescriptor hcd = new HColumnDescriptor(family); + // even column families are going to have compacted memstore + if(i%2 == 0) hcd.setInMemoryCompaction(true); + htd.addFamily(hcd); + i++; + } + + HRegionInfo info = new HRegionInfo(TABLENAME, null, null, false); + Path path = new Path(DIR, callingMethod); + return HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd); + } + + + + // A helper function to create puts. 
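Before the put/get helpers that follow: the piece of initHRegion() above that opts individual column families into in-memory compaction is the HColumnDescriptor call. A minimal standalone sketch, using only the descriptor API this patch itself exercises (it assumes the same imports as the test file above; the table and family names here are illustrative, not taken from the test):

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("sketch"));
    HColumnDescriptor compacting = new HColumnDescriptor(Bytes.toBytes("f1"));
    compacting.setInMemoryCompaction(true);   // this family gets a CompactingMemStore (a "sloppy" store)
    HColumnDescriptor plain = new HColumnDescriptor(Bytes.toBytes("f2"));  // default memstore, flushed straight to disk
    htd.addFamily(compacting);
    htd.addFamily(plain);
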
+ private Put createPut(int familyNum, int putNum) { + byte[] qf = Bytes.toBytes("q" + familyNum); + byte[] row = Bytes.toBytes("row" + familyNum + "-" + putNum); + byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum); + Put p = new Put(row); + p.addColumn(FAMILIES[familyNum - 1], qf, val); + return p; + } + + + // A helper function to create double puts, so something can be compacted later. + private Put createDoublePut(int familyNum, int putNum) { + byte[] qf = Bytes.toBytes("q" + familyNum); + byte[] row = Bytes.toBytes("row" + familyNum + "-" + putNum); + byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum); + Put p = new Put(row); + // add twice with different timestamps + p.addColumn(FAMILIES[familyNum - 1], qf, 10, val); + p.addColumn(FAMILIES[familyNum - 1], qf, 20, val); + return p; + } + + + // A helper function to create gets. + private Get createGet(int familyNum, int putNum) { + byte[] row = Bytes.toBytes("row" + familyNum + "-" + putNum); + return new Get(row); + } + + + + + // A helper function to verify edits. + void verifyEdit(int familyNum, int putNum, Table table) throws IOException { + Result r = table.get(createGet(familyNum, putNum)); + byte[] family = FAMILIES[familyNum - 1]; + byte[] qf = Bytes.toBytes("q" + familyNum); + byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum); + assertNotNull(("Missing Put#" + putNum + " for CF# " + familyNum), r.getFamilyMap(family)); + assertNotNull(("Missing Put#" + putNum + " for CF# " + familyNum), + r.getFamilyMap(family).get(qf)); + assertTrue(("Incorrect value for Put#" + putNum + " for CF# " + familyNum), + Arrays.equals(r.getFamilyMap(family).get(qf), val)); + } + + + + + + @Test(timeout = 180000) + public void testSelectiveFlushWhenEnabled() throws IOException { + + // Set up the configuration + Configuration conf = HBaseConfiguration.create(); + conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 600 * 1024); + conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushNonSloppyStoresFirstPolicy.class + .getName()); + conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 300 * + 1024); + conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); + + // Intialize the region + Region region = initHRegion("testSelectiveFlushWhenEnabled", conf); + + // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 + for (int i = 1; i <= 1200; i++) { + region.put(createPut(1, i)); // compacted memstore + + if (i <= 100) { + region.put(createPut(2, i)); + if (i <= 50) { + region.put(createDoublePut(3, i)); // subject for in-memory compaction + } + } + } + + // Now add more puts for CF2, so that we only flush CF2 to disk + for (int i = 100; i < 2000; i++) { + region.put(createPut(2, i)); + } + + long totalMemstoreSize = region.getMemstoreSize(); + + // Find the smallest LSNs for edits wrt to each CF. + long smallestSeqCF1PhaseI = region.getOldestSeqIdOfStore(FAMILY1); + long smallestSeqCF2PhaseI = region.getOldestSeqIdOfStore(FAMILY2); + long smallestSeqCF3PhaseI = region.getOldestSeqIdOfStore(FAMILY3); + + // Find the sizes of the memstores of each CF. + long cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize(); + long cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getMemStoreSize(); + long cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getMemStoreSize(); + + // Get the overall smallest LSN in the region's memstores. 
+ long smallestSeqInRegionCurrentMemstorePhaseI = getWAL(region) + .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + + String s = "\n\n----------------------------------\n" + + "Upon initial insert and before any flush, size of CF1 is:" + + cf1MemstoreSizePhaseI + ", is CF1 compacted memstore?:" + + region.getStore(FAMILY1).getMemStore().isSloppy() + ". Size of CF2 is:" + + cf2MemstoreSizePhaseI + ", is CF2 compacted memstore?:" + + region.getStore(FAMILY2).getMemStore().isSloppy() + ". Size of CF3 is:" + + cf3MemstoreSizePhaseI + ", is CF3 compacted memstore?:" + + region.getStore(FAMILY3).getMemStore().isSloppy() + "\n"; + + // The overall smallest LSN in the region's memstores should be the same as + // the LSN of the smallest edit in CF1 + assertEquals(smallestSeqCF1PhaseI, smallestSeqInRegionCurrentMemstorePhaseI); + + // Some other sanity checks. + assertTrue(smallestSeqCF1PhaseI < smallestSeqCF2PhaseI); + assertTrue(smallestSeqCF2PhaseI < smallestSeqCF3PhaseI); + assertTrue(cf1MemstoreSizePhaseI > 0); + assertTrue(cf2MemstoreSizePhaseI > 0); + assertTrue(cf3MemstoreSizePhaseI > 0); + + // The total memstore size should be the same as the sum of the sizes of + // memstores of CF1, CF2 and CF3. + String msg = "totalMemstoreSize="+totalMemstoreSize + + " DefaultMemStore.DEEP_OVERHEAD="+DefaultMemStore.DEEP_OVERHEAD + + " DEEP_OVERHEAD_PER_PIPELINE_ITEM="+CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM + + " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI + + " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI + + " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ; + assertEquals(msg,totalMemstoreSize + 3 * DefaultMemStore.DEEP_OVERHEAD, + cf1MemstoreSizePhaseI + cf2MemstoreSizePhaseI + cf3MemstoreSizePhaseI); + + // Flush!!!!!!!!!!!!!!!!!!!!!! + // We have big compacting memstore CF1 and two small memstores: + // CF2 (not compacted) and CF3 (compacting) + // All together they are above the flush size lower bound. + // Since CF1 and CF3 should be flushed to memory (not to disk), + // CF2 is going to be flushed to disk. + // CF1 - nothing to compact, CF3 - should be twice compacted + ((CompactingMemStore) region.getStore(FAMILY1).getMemStore()).flushInMemory(); + ((CompactingMemStore) region.getStore(FAMILY3).getMemStore()).flushInMemory(); + region.flush(false); + + // CF3 should be compacted so wait here to be sure the compaction is done + while (((CompactingMemStore) region.getStore(FAMILY3).getMemStore()) + .isMemStoreFlushingInMemory()) + Threads.sleep(10); + + // Recalculate everything + long cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getMemStoreSize(); + long cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize(); + long cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getMemStoreSize(); + + long smallestSeqInRegionCurrentMemstorePhaseII = getWAL(region) + .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + // Find the smallest LSNs for edits wrt to each CF. + long smallestSeqCF1PhaseII = region.getOldestSeqIdOfStore(FAMILY1); + long smallestSeqCF2PhaseII = region.getOldestSeqIdOfStore(FAMILY2); + long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3); + + s = s + "DefaultMemStore DEEP_OVERHEAD is:" + DefaultMemStore.DEEP_OVERHEAD + + ", CompactingMemStore DEEP_OVERHEAD is:" + CompactingMemStore.DEEP_OVERHEAD + + ", CompactingMemStore DEEP_OVERHEAD_PER_PIPELINE_ITEM is:" + CompactingMemStore + .DEEP_OVERHEAD_PER_PIPELINE_ITEM + + "\n----After first flush! 
CF1 should be flushed to memory, but not compacted.---\n" + + "Size of CF1 is:" + cf1MemstoreSizePhaseII + ", size of CF2 is:" + cf2MemstoreSizePhaseII + + ", size of CF3 is:" + cf3MemstoreSizePhaseII + "\n"; + + // CF1 was flushed to memory, but there is nothing to compact, should + // remain the same size plus renewed empty skip-list + assertEquals(s, cf1MemstoreSizePhaseII, + cf1MemstoreSizePhaseI + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM); + + // CF2 should become empty + assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf2MemstoreSizePhaseII); + + // verify that CF3 was flushed to memory and was compacted (this is approximation check) + assertTrue(cf3MemstoreSizePhaseI/2+DefaultMemStore.DEEP_OVERHEAD + + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM > + cf3MemstoreSizePhaseII); + assertTrue(cf3MemstoreSizePhaseI/2 < cf3MemstoreSizePhaseII); + + + // Now the smallest LSN in the region should be the same as the smallest + // LSN in the memstore of CF1. + assertEquals(smallestSeqInRegionCurrentMemstorePhaseII, smallestSeqCF1PhaseI); + + // Now add more puts for CF1, so that we also flush CF1 to disk instead of + // memory in next flush + for (int i = 1200; i < 2000; i++) { + region.put(createPut(1, i)); + } + + s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseII + + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseII + ", " + + "the smallest sequence in CF2:" + + smallestSeqCF2PhaseII +", the smallest sequence in CF3:" + smallestSeqCF3PhaseII + "\n"; + + // How much does the CF1 memstore occupy? Will be used later. + long cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getMemStoreSize(); + long smallestSeqCF1PhaseIII = region.getOldestSeqIdOfStore(FAMILY1); + + s = s + "----After more puts into CF1 its size is:" + cf1MemstoreSizePhaseIII + + ", and its sequence is:" + smallestSeqCF1PhaseIII + " ----\n" ; + + + // Flush!!!!!!!!!!!!!!!!!!!!!! 
+ // Flush again, CF1 is flushed to disk + // CF2 is flushed to disk, because it is not in-memory compacted memstore + // CF3 is flushed empty to memory (actually nothing happens to CF3) + region.flush(false); + + // Recalculate everything + long cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getMemStoreSize(); + long cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getMemStoreSize(); + long cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getMemStoreSize(); + + long smallestSeqInRegionCurrentMemstorePhaseIV = getWAL(region) + .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqCF1PhaseIV = region.getOldestSeqIdOfStore(FAMILY1); + long smallestSeqCF2PhaseIV = region.getOldestSeqIdOfStore(FAMILY2); + long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3); + + s = s + "----After SECOND FLUSH, CF1 size is:" + cf1MemstoreSizePhaseIV + ", CF2 size is:" + + cf2MemstoreSizePhaseIV + " and CF3 size is:" + cf3MemstoreSizePhaseIV + + "\n"; + + s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIV + + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIV + ", " + + "the smallest sequence in CF2:" + + smallestSeqCF2PhaseIV +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIV + + "\n"; + + // CF1's pipeline component (inserted before first flush) should be flushed to disk + // CF2 should be flushed to disk + assertEquals(cf1MemstoreSizePhaseIII - cf1MemstoreSizePhaseI + DefaultMemStore.DEEP_OVERHEAD, + cf1MemstoreSizePhaseIV); + assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf2MemstoreSizePhaseIV); + + // CF3 shouldn't have been touched. + assertEquals(cf3MemstoreSizePhaseIV, cf3MemstoreSizePhaseII); + + // the smallest LSN of CF3 shouldn't change + assertEquals(smallestSeqCF3PhaseII, smallestSeqCF3PhaseIV); + + // CF3 should be bottleneck for WAL + assertEquals(s, smallestSeqInRegionCurrentMemstorePhaseIV, smallestSeqCF3PhaseIV); + + // Flush!!!!!!!!!!!!!!!!!!!!!! + // Clearing the existing memstores, CF2 all flushed to disk. The single + // memstore segment in the compaction pipeline of CF1 and CF3 should be flushed to disk. + // Note that active sets of CF1 and CF3 are empty + region.flush(true); + + // Recalculate everything + long cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getMemStoreSize(); + long cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getMemStoreSize(); + long cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getMemStoreSize(); + long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region) + .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + + assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf1MemstoreSizePhaseV); + assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf2MemstoreSizePhaseV); + assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf3MemstoreSizePhaseV); + + // Because there is nothing in any memstore the WAL's LSN should be -1 + assertEquals(smallestSeqInRegionCurrentMemstorePhaseV, HConstants.NO_SEQNUM); + + // What happens when we hit the memstore limit, but we are not able to find + // any Column Family above the threshold? + // In that case, we should flush all the CFs. + + // The memstore limit is 200*1024 and the column family flush threshold is + // around 50*1024. We try to just hit the memstore limit with each CF's + // memstore being below the CF flush threshold. 
+ for (int i = 1; i <= 300; i++) { + region.put(createPut(1, i)); + region.put(createPut(2, i)); + region.put(createPut(3, i)); + region.put(createPut(4, i)); + region.put(createPut(5, i)); + } + + region.flush(false); + + s = s + "----AFTER THIRD AND FORTH FLUSH, The smallest sequence in region WAL is: " + + smallestSeqInRegionCurrentMemstorePhaseV + + ". After additional inserts and last flush, the entire region size is:" + region + .getMemstoreSize() + + "\n----------------------------------\n"; + + // Since we won't find any CF above the threshold, and hence no specific + // store to flush, we should flush all the memstores + // Also compacted memstores are flushed to disk. + assertEquals(0, region.getMemstoreSize()); + System.out.println(s); + HBaseTestingUtility.closeRegionAndWAL(region); + } + + + + + + + + + + @Test(timeout = 180000) + public void testSelectiveFlushWhenEnabledAndWALinCompaction() throws IOException { + // Set up the configuration + Configuration conf = HBaseConfiguration.create(); + conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 600 * 1024); + conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushNonSloppyStoresFirstPolicy.class + .getName()); + conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 300 * + 1024); + conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); + + // Intialize the HRegion + HRegion region = initHRegion("testSelectiveFlushWhenNotEnabled", conf); + // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 + for (int i = 1; i <= 1200; i++) { + region.put(createPut(1, i)); + if (i <= 100) { + region.put(createPut(2, i)); + if (i <= 50) { + region.put(createPut(3, i)); + } + } + } + // Now add more puts for CF2, so that we only flush CF2 to disk + for (int i = 100; i < 2000; i++) { + region.put(createPut(2, i)); + } + + long totalMemstoreSize = region.getMemstoreSize(); + + // Find the sizes of the memstores of each CF. + long cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize(); + long cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getMemStoreSize(); + long cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getMemStoreSize(); + + // Some other sanity checks. + assertTrue(cf1MemstoreSizePhaseI > 0); + assertTrue(cf2MemstoreSizePhaseI > 0); + assertTrue(cf3MemstoreSizePhaseI > 0); + + // The total memstore size should be the same as the sum of the sizes of + // memstores of CF1, CF2 and CF3. + String msg = "totalMemstoreSize="+totalMemstoreSize + + " DefaultMemStore.DEEP_OVERHEAD="+DefaultMemStore.DEEP_OVERHEAD + + " DEEP_OVERHEAD_PER_PIPELINE_ITEM="+CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM + + " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI + + " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI + + " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ; + assertEquals(msg, totalMemstoreSize + 3 * DefaultMemStore.DEEP_OVERHEAD, + cf1MemstoreSizePhaseI + cf2MemstoreSizePhaseI + cf3MemstoreSizePhaseI); + + // Flush! 
+ ((CompactingMemStore) region.getStore(FAMILY1).getMemStore()).flushInMemory(); + ((CompactingMemStore) region.getStore(FAMILY3).getMemStore()).flushInMemory(); + // CF1 and CF3 should be compacted so wait here to be sure the compaction is done + while (((CompactingMemStore) region.getStore(FAMILY1).getMemStore()) + .isMemStoreFlushingInMemory()) + Threads.sleep(10); + while (((CompactingMemStore) region.getStore(FAMILY3).getMemStore()) + .isMemStoreFlushingInMemory()) + Threads.sleep(10); + region.flush(false); + + long cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize(); + + long smallestSeqInRegionCurrentMemstorePhaseII = + region.getWAL().getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqCF1PhaseII = region.getOldestSeqIdOfStore(FAMILY1); + long smallestSeqCF2PhaseII = region.getOldestSeqIdOfStore(FAMILY2); + long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3); + + // CF2 should have been cleared + assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf2MemstoreSizePhaseII); + + String s = "\n\n----------------------------------\n" + + "Upon initial insert and flush, LSN of CF1 is:" + + smallestSeqCF1PhaseII + ". LSN of CF2 is:" + + smallestSeqCF2PhaseII + ". LSN of CF3 is:" + + smallestSeqCF3PhaseII + ", smallestSeqInRegionCurrentMemstore:" + + smallestSeqInRegionCurrentMemstorePhaseII + "\n"; + + // Add same entries to compact them later + for (int i = 1; i <= 1200; i++) { + region.put(createPut(1, i)); + if (i <= 100) { + region.put(createPut(2, i)); + if (i <= 50) { + region.put(createPut(3, i)); + } + } + } + // Now add more puts for CF2, so that we only flush CF2 to disk + for (int i = 100; i < 2000; i++) { + region.put(createPut(2, i)); + } + + long smallestSeqInRegionCurrentMemstorePhaseIII = + region.getWAL().getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqCF1PhaseIII = region.getOldestSeqIdOfStore(FAMILY1); + long smallestSeqCF2PhaseIII = region.getOldestSeqIdOfStore(FAMILY2); + long smallestSeqCF3PhaseIII = region.getOldestSeqIdOfStore(FAMILY3); + + s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIII + + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIII + ", " + + "the smallest sequence in CF2:" + + smallestSeqCF2PhaseIII +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIII + "\n"; + + // Flush! 
+ ((CompactingMemStore) region.getStore(FAMILY1).getMemStore()).flushInMemory(); + ((CompactingMemStore) region.getStore(FAMILY3).getMemStore()).flushInMemory(); + // CF1 and CF3 should be compacted so wait here to be sure the compaction is done + while (((CompactingMemStore) region.getStore(FAMILY1).getMemStore()) + .isMemStoreFlushingInMemory()) + Threads.sleep(10); + while (((CompactingMemStore) region.getStore(FAMILY3).getMemStore()) + .isMemStoreFlushingInMemory()) + Threads.sleep(10); + region.flush(false); + + long smallestSeqInRegionCurrentMemstorePhaseIV = + region.getWAL().getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqCF1PhaseIV = region.getOldestSeqIdOfStore(FAMILY1); + long smallestSeqCF2PhaseIV = region.getOldestSeqIdOfStore(FAMILY2); + long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3); + + s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIV + + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIV + ", " + + "the smallest sequence in CF2:" + + smallestSeqCF2PhaseIV +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIV + "\n"; + + // now check that the LSN of the entire WAL, of CF1 and of CF3 has progressed due to compaction + assertTrue(s, smallestSeqInRegionCurrentMemstorePhaseIV > + smallestSeqInRegionCurrentMemstorePhaseIII); + assertTrue(smallestSeqCF1PhaseIV > smallestSeqCF1PhaseIII); + assertTrue(smallestSeqCF3PhaseIV > smallestSeqCF3PhaseIII); + + HBaseTestingUtility.closeRegionAndWAL(region); + } + + + + + + // Find the (first) region which has the specified name. + private static Pair getRegionWithName(TableName tableName) { + MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster(); + List rsts = cluster.getRegionServerThreads(); + for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) { + HRegionServer hrs = rsts.get(i).getRegionServer(); + for (Region region : hrs.getOnlineRegions(tableName)) { + return Pair.newPair(region, hrs); + } + } + return null; + } + + + private WAL getWAL(Region region) { + return ((HRegion)region).getWAL(); + } + + private int getNumRolledLogFiles(Region region) { + return ((FSHLog)getWAL(region)).getNumRolledLogFiles(); + } + + +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java index 8908c71..ae6b036 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; @@ -76,7 +75,7 @@ public class TestFlushWithThroughputController { admin.disableTable(tableName); admin.deleteTable(tableName); } - HTable table = TEST_UTIL.createTable(tableName, family); + Table table = TEST_UTIL.createTable(tableName, family); Random rand = new Random(); for (int i = 0; i < 10; i++) { for (int j = 0; j < 10; j++) { diff --git 
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index da01fb9..bf46b03 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -53,7 +52,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -import org.apache.hadoop.hbase.wal.FSHLogProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -135,7 +133,6 @@ public class TestLogRolling extends AbstractTestLogRolling { admin.createTable(desc); Table table = TEST_UTIL.getConnection().getTable(desc.getTableName()); - assertTrue(((HTable) table).isAutoFlush()); server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName()); HRegionInfo region = server.getOnlineRegions(desc.getTableName()).get(0).getRegionInfo(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index 2a20a4f..8efa67e 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -31,6 +32,7 @@ import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ClusterStatus; @@ -72,6 +74,7 @@ import org.junit.experimental.categories.Category; public class TestReplicationSmallTests extends TestReplicationBase { private static final Log LOG = LogFactory.getLog(TestReplicationSmallTests.class); + private static final String PEER_ID = "2"; /** * @throws java.lang.Exception @@ -84,6 +87,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { utility1.getHBaseCluster().getRegionServerThreads()) { utility1.getHBaseAdmin().rollWALWriter(r.getRegionServer().getServerName()); } + int rowCount = utility1.countRows(tableName); utility1.deleteTableData(tableName); // truncating the table will send one Delete per row to the slave cluster // in an async fashion, which is why we cannot just call deleteTableData on @@ -97,7 +101,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { fail("Waited too much time for truncate"); } ResultScanner scanner = htable2.getScanner(scan); - Result[] res = scanner.next(NB_ROWS_IN_BIG_BATCH); + Result[] res = 
scanner.next(rowCount); scanner.close(); if (res.length != 0) { if (res.length < lastCount) { @@ -254,13 +258,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { public void testSmallBatch() throws Exception { LOG.info("testSmallBatch"); // normal Batch tests - List puts = new ArrayList<>(); - for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { - Put put = new Put(Bytes.toBytes(i)); - put.addColumn(famName, row, row); - puts.add(put); - } - htable1.put(puts); + loadData("", row); Scan scan = new Scan(); @@ -269,15 +267,20 @@ public class TestReplicationSmallTests extends TestReplicationBase { scanner1.close(); assertEquals(NB_ROWS_IN_BATCH, res1.length); - for (int i = 0; i < NB_RETRIES; i++) { + waitForReplication(NB_ROWS_IN_BATCH, NB_RETRIES); + } + + private void waitForReplication(int expectedRows, int retries) throws IOException, InterruptedException { + Scan scan; + for (int i = 0; i < retries; i++) { scan = new Scan(); - if (i==NB_RETRIES-1) { + if (i== retries -1) { fail("Waited too much time for normal batch replication"); } ResultScanner scanner = htable2.getScanner(scan); - Result[] res = scanner.next(NB_ROWS_IN_BATCH); + Result[] res = scanner.next(expectedRows); scanner.close(); - if (res.length != NB_ROWS_IN_BATCH) { + if (res.length != expectedRows) { LOG.info("Only got " + res.length + " rows"); Thread.sleep(SLEEP_TIME); } else { @@ -286,6 +289,16 @@ public class TestReplicationSmallTests extends TestReplicationBase { } } + private void loadData(String prefix, byte[] row) throws IOException { + List puts = new ArrayList<>(); + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + Put put = new Put(Bytes.toBytes(prefix + Integer.toString(i))); + put.addColumn(famName, row, row); + puts.add(put); + } + htable1.put(puts); + } + /** * Test disable/enable replication, trying to insert, make sure nothing's * replicated, enable it, the insert should be replicated @@ -296,7 +309,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { public void testDisableEnable() throws Exception { // Test disabling replication - admin.disablePeer("2"); + admin.disablePeer(PEER_ID); byte[] rowkey = Bytes.toBytes("disable enable"); Put put = new Put(rowkey); @@ -315,7 +328,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { } // Test enable replication - admin.enablePeer("2"); + admin.enablePeer(PEER_ID); for (int i = 0; i < NB_RETRIES; i++) { Result res = htable2.get(get); @@ -339,7 +352,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { @Test(timeout=300000) public void testAddAndRemoveClusters() throws Exception { LOG.info("testAddAndRemoveClusters"); - admin.removePeer("2"); + admin.removePeer(PEER_ID); Thread.sleep(SLEEP_TIME); byte[] rowKey = Bytes.toBytes("Won't be replicated"); Put put = new Put(rowKey); @@ -361,7 +374,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { } ReplicationPeerConfig rpc = new ReplicationPeerConfig(); rpc.setClusterKey(utility2.getClusterKey()); - admin.addPeer("2", rpc, null); + admin.addPeer(PEER_ID, rpc, null); Thread.sleep(SLEEP_TIME); rowKey = Bytes.toBytes("do rep"); put = new Put(rowKey); @@ -459,18 +472,8 @@ public class TestReplicationSmallTests extends TestReplicationBase { // identical since it does the check testSmallBatch(); - String[] args = new String[] {"2", tableName.getNameAsString()}; - Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args); - if (job == null) { - fail("Job wasn't created, see the log"); - } - if 
(!job.waitForCompletion(true)) { - fail("Job failed, see the log"); - } - assertEquals(NB_ROWS_IN_BATCH, job.getCounters(). - findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); - assertEquals(0, job.getCounters(). - findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); + String[] args = new String[] {PEER_ID, tableName.getNameAsString()}; + runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); Scan scan = new Scan(); ResultScanner rs = htable2.getScanner(scan); @@ -484,16 +487,21 @@ public class TestReplicationSmallTests extends TestReplicationBase { } Delete delete = new Delete(put.getRow()); htable2.delete(delete); - job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args); + runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); + } + + private void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows) + throws IOException, InterruptedException, ClassNotFoundException { + Job job = VerifyReplication.createSubmittableJob(new Configuration(CONF_WITH_LOCALFS), args); if (job == null) { fail("Job wasn't created, see the log"); } if (!job.waitForCompletion(true)) { fail("Job failed, see the log"); } - assertEquals(0, job.getCounters(). + assertEquals(expectedGoodRows, job.getCounters(). findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); - assertEquals(NB_ROWS_IN_BATCH, job.getCounters(). + assertEquals(expectedBadRows, job.getCounters(). findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); } @@ -556,18 +564,8 @@ public class TestReplicationSmallTests extends TestReplicationBase { assertEquals(1, res1.length); assertEquals(5, res1[0].getColumnCells(famName, qualifierName).size()); - String[] args = new String[] {"--versions=100", "2", tableName.getNameAsString()}; - Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args); - if (job == null) { - fail("Job wasn't created, see the log"); - } - if (!job.waitForCompletion(true)) { - fail("Job failed, see the log"); - } - assertEquals(0, job.getCounters(). - findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); - assertEquals(1, job.getCounters(). - findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); + String[] args = new String[] {"--versions=100", PEER_ID, tableName.getNameAsString()}; + runVerifyReplication(args, 0, 1); } @Test(timeout=300000) @@ -618,7 +616,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { try { // Disabling replication and modifying the particular version of the cell to validate the feature. - admin.disablePeer("2"); + admin.disablePeer(PEER_ID); Put put2 = new Put(Bytes.toBytes("r1")); put2.addColumn(famName, qualifierName, ts +2, Bytes.toBytes("v99")); htable2.put(put2); @@ -631,21 +629,11 @@ public class TestReplicationSmallTests extends TestReplicationBase { assertEquals(1, res1.length); assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size()); - String[] args = new String[] {"--versions=100", "2", tableName.getNameAsString()}; - Job job = VerifyReplication.createSubmittableJob(CONF_WITH_LOCALFS, args); - if (job == null) { - fail("Job wasn't created, see the log"); - } - if (!job.waitForCompletion(true)) { - fail("Job failed, see the log"); - } - assertEquals(0, job.getCounters(). - findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue()); - assertEquals(1, job.getCounters(). 
- findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue()); + String[] args = new String[] {"--versions=100", PEER_ID, tableName.getNameAsString()}; + runVerifyReplication(args, 0, 1); } finally { - admin.enablePeer("2"); + admin.enablePeer(PEER_ID); } } @@ -803,5 +791,18 @@ public class TestReplicationSmallTests extends TestReplicationBase { } } + @Test(timeout=300000) + public void testVerifyReplicationPrefixFiltering() throws Exception { + final byte[] prefixRow = Bytes.toBytes("prefixrow"); + final byte[] prefixRow2 = Bytes.toBytes("secondrow"); + loadData("prefixrow", prefixRow); + loadData("secondrow", prefixRow2); + loadData("aaa", row); + loadData("zzz", row); + waitForReplication(NB_ROWS_IN_BATCH * 4, NB_RETRIES * 4); + String[] args = new String[] {"--row-prefixes=prefixrow,secondrow", PEER_ID, + tableName.getNameAsString()}; + runVerifyReplication(args, NB_ROWS_IN_BATCH *2, 0); + } } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index 144046f4..de5cc31 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -121,7 +121,7 @@ public abstract class TestReplicationStateBasic { rq1.removeQueue("bogus"); rq1.removeLog("bogus", "bogus"); rq1.removeAllQueues(); - assertNull(rq1.getAllQueues()); + assertEquals(0, rq1.getAllQueues().size()); assertEquals(0, rq1.getLogPosition("bogus", "bogus")); assertNull(rq1.getLogsInQueue("bogus")); assertEquals(0, rq1.claimQueues(ServerName.valueOf("bogus", 1234, -1L).toString()).size()); @@ -204,6 +204,7 @@ public abstract class TestReplicationStateBasic { assertNull(rqc.getReplicableHFiles(ID_ONE)); assertEquals(0, rqc.getAllPeersFromHFileRefsQueue().size()); rp.addPeer(ID_ONE, new ReplicationPeerConfig().setClusterKey(KEY_ONE)); + rq1.addPeerToHFileRefs(ID_ONE); rq1.addHFileRefs(ID_ONE, files1); assertEquals(1, rqc.getAllPeersFromHFileRefsQueue().size()); assertEquals(3, rqc.getReplicableHFiles(ID_ONE).size()); @@ -225,7 +226,9 @@ public abstract class TestReplicationStateBasic { rp.init(); rp.addPeer(ID_ONE, new ReplicationPeerConfig().setClusterKey(KEY_ONE)); + rq1.addPeerToHFileRefs(ID_ONE); rp.addPeer(ID_TWO, new ReplicationPeerConfig().setClusterKey(KEY_TWO)); + rq1.addPeerToHFileRefs(ID_TWO); List files1 = new ArrayList(3); files1.add("file_1"); @@ -238,11 +241,13 @@ public abstract class TestReplicationStateBasic { assertEquals(3, rqc.getReplicableHFiles(ID_TWO).size()); rp.removePeer(ID_ONE); + rq1.removePeerFromHFileRefs(ID_ONE); assertEquals(1, rqc.getAllPeersFromHFileRefsQueue().size()); assertNull(rqc.getReplicableHFiles(ID_ONE)); assertEquals(3, rqc.getReplicableHFiles(ID_TWO).size()); rp.removePeer(ID_TWO); + rq1.removePeerFromHFileRefs(ID_TWO); assertEquals(0, rqc.getAllPeersFromHFileRefsQueue().size()); assertNull(rqc.getReplicableHFiles(ID_TWO)); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java new file mode 100644 index 0000000..8186213 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java @@ -0,0 +1,243 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.replication; + +import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ChoreService; +import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static junit.framework.TestCase.assertNull; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({ReplicationTests.class, MediumTests.class}) +public class TestReplicationStateHBaseImpl { + + private static Configuration conf; + private static HBaseTestingUtility utility; + private static Connection connection; + private static ReplicationQueues rqH; + + private final String server1 = ServerName.valueOf("hostname1.example.org", 1234, -1L).toString(); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + utility = new HBaseTestingUtility(); + utility.startMiniCluster(); + conf = utility.getConfiguration(); + conf.setClass("hbase.region.replica.replication.ReplicationQueuesType", + ReplicationQueuesHBaseImpl.class, ReplicationQueues.class); + connection = ConnectionFactory.createConnection(conf); + } + + @Test + public void checkNamingSchema() throws Exception { + rqH.init(server1); + assertTrue(rqH.isThisOurRegionServer(server1)); + assertTrue(!rqH.isThisOurRegionServer(server1 + "a")); + assertTrue(!rqH.isThisOurRegionServer(null)); + } + + @Test + public void testReplicationStateHBase() { + DummyServer ds = new DummyServer(server1); + try { + rqH = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds, null)); + rqH.init(server1); + // Check that the proper System Tables have been generated + Table replicationTable = connection.getTable( + ReplicationQueuesHBaseImpl.REPLICATION_TABLE_NAME); + assertTrue(replicationTable.getName().isSystemTable()); + + } catch (Exception e) { + e.printStackTrace(); + fail("testReplicationStateHBaseConstruction received an Exception"); + } + try { + // Test adding in WAL files + assertEquals(0, rqH.getAllQueues().size()); + rqH.addLog("Queue1", "WALLogFile1.1"); + assertEquals(1, rqH.getAllQueues().size()); + rqH.addLog("Queue1", "WALLogFile1.2"); + rqH.addLog("Queue1", "WALLogFile1.3"); + rqH.addLog("Queue1", "WALLogFile1.4"); + rqH.addLog("Queue2", "WALLogFile2.1"); + rqH.addLog("Queue3", "WALLogFile3.1"); + assertEquals(3, rqH.getAllQueues().size()); + assertEquals(4, rqH.getLogsInQueue("Queue1").size()); + 
assertEquals(1, rqH.getLogsInQueue("Queue2").size()); + assertEquals(1, rqH.getLogsInQueue("Queue3").size()); + // Make sure that abortCount is still 0 + assertEquals(0, ds.getAbortCount()); + // Make sure that getting a log from a non-existent queue triggers an abort + assertNull(rqH.getLogsInQueue("Queue4")); + assertEquals(1, ds.getAbortCount()); + } catch (ReplicationException e) { + e.printStackTrace(); + fail("testAddLog received a ReplicationException"); + } + try { + + // Test updating the log positions + assertEquals(0L, rqH.getLogPosition("Queue1", "WALLogFile1.1")); + rqH.setLogPosition("Queue1", "WALLogFile1.1", 123L); + assertEquals(123L, rqH.getLogPosition("Queue1", "WALLogFile1.1")); + rqH.setLogPosition("Queue1", "WALLogFile1.1", 123456789L); + assertEquals(123456789L, rqH.getLogPosition("Queue1", "WALLogFile1.1")); + rqH.setLogPosition("Queue2", "WALLogFile2.1", 242L); + assertEquals(242L, rqH.getLogPosition("Queue2", "WALLogFile2.1")); + rqH.setLogPosition("Queue3", "WALLogFile3.1", 243L); + assertEquals(243L, rqH.getLogPosition("Queue3", "WALLogFile3.1")); + + // Test that setting log positions in non-existing logs will cause an abort + assertEquals(1, ds.getAbortCount()); + rqH.setLogPosition("NotHereQueue", "WALLogFile3.1", 243L); + assertEquals(2, ds.getAbortCount()); + rqH.setLogPosition("NotHereQueue", "NotHereFile", 243L); + assertEquals(3, ds.getAbortCount()); + rqH.setLogPosition("Queue1", "NotHereFile", 243l); + assertEquals(4, ds.getAbortCount()); + + // Test reading log positions for non-existent queues and WAL's + try { + rqH.getLogPosition("Queue1", "NotHereWAL"); + fail("Replication queue should have thrown a ReplicationException for reading from a " + + "non-existent WAL"); + } catch (ReplicationException e) { + } + try { + rqH.getLogPosition("NotHereQueue", "NotHereWAL"); + fail("Replication queue should have thrown a ReplicationException for reading from a " + + "non-existent queue"); + } catch (ReplicationException e) { + } + // Test removing logs + rqH.removeLog("Queue1", "WALLogFile1.1"); + assertEquals(3, rqH.getLogsInQueue("Queue1").size()); + // Test removing queues + rqH.removeQueue("Queue2"); + assertEquals(2, rqH.getAllQueues().size()); + assertNull(rqH.getLogsInQueue("Queue2")); + // Test that getting logs from a non-existent queue aborts + assertEquals(5, ds.getAbortCount()); + // Test removing all queues for a Region Server + rqH.removeAllQueues(); + assertEquals(0, rqH.getAllQueues().size()); + assertNull(rqH.getLogsInQueue("Queue1")); + // Test that getting logs from a non-existent queue aborts + assertEquals(6, ds.getAbortCount()); + } catch (ReplicationException e) { + e.printStackTrace(); + fail("testAddLog received a ReplicationException"); + } + } + + static class DummyServer implements Server { + private String serverName; + private boolean isAborted = false; + private boolean isStopped = false; + private int abortCount = 0; + + public DummyServer(String serverName) { + this.serverName = serverName; + } + + @Override + public Configuration getConfiguration() { + return conf; + } + + @Override + public ZooKeeperWatcher getZooKeeper() { + return null; + } + + @Override + public CoordinatedStateManager getCoordinatedStateManager() { + return null; + } + + @Override + public ClusterConnection getConnection() { + return null; + } + + @Override + public MetaTableLocator getMetaTableLocator() { + return null; + } + + @Override + public ServerName getServerName() { + return ServerName.valueOf(this.serverName); + } + + @Override + public void 
abort(String why, Throwable e) { + abortCount++; + this.isAborted = true; + } + + @Override + public boolean isAborted() { + return this.isAborted; + } + + @Override + public void stop(String why) { + this.isStopped = true; + } + + @Override + public boolean isStopped() { + return this.isStopped; + } + + @Override + public ChoreService getChoreService() { + return null; + } + + @Override + public ClusterConnection getClusterConnection() { + return null; + } + + public int getAbortCount() { + return abortCount; + } + } +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java index 94dbb25..e731135 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.replication; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.IOException; @@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.replication.regionserver.Replication; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -91,9 +93,14 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { DummyServer ds1 = new DummyServer(server1); DummyServer ds2 = new DummyServer(server2); DummyServer ds3 = new DummyServer(server3); - rq1 = ReplicationFactory.getReplicationQueues(zkw, conf, ds1); - rq2 = ReplicationFactory.getReplicationQueues(zkw, conf, ds2); - rq3 = ReplicationFactory.getReplicationQueues(zkw, conf, ds3); + try { + rq1 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds1, zkw)); + rq2 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds2, zkw)); + rq3 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds3, zkw)); + } catch (Exception e) { + // This should not occur, because getReplicationQueues() only throws for ReplicationQueuesHBaseImpl + fail("ReplicationFactory.getReplicationQueues() threw an IO Exception"); + } rqc = ReplicationFactory.getReplicationQueuesClient(zkw, conf, ds1); rp = ReplicationFactory.getReplicationPeers(zkw, conf, zkw); OUR_KEY = ZKConfig.getZooKeeperClusterKey(conf); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java index 104753a..538b8ac 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java @@ -25,9 +25,9 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import 
org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; import org.apache.hadoop.hbase.replication.ReplicationPeers; @@ -51,7 +51,7 @@ public class TestReplicationSinkManager { public void setUp() { replicationPeers = mock(ReplicationPeers.class); replicationEndpoint = mock(HBaseReplicationEndpoint.class); - sinkManager = new ReplicationSinkManager(mock(HConnection.class), + sinkManager = new ReplicationSinkManager(mock(ClusterConnection.class), PEER_CLUSTER_ID, replicationEndpoint, new Configuration()); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 9e950d2..d1db068 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; import org.apache.hadoop.hbase.replication.ReplicationQueues; +import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments; import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.replication.ReplicationSourceDummy; import org.apache.hadoop.hbase.replication.ReplicationStateZKBase; @@ -284,9 +285,11 @@ public class TestReplicationSourceManager { LOG.debug("testNodeFailoverWorkerCopyQueuesFromRSUsingMulti"); conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true); final Server server = new DummyServer("hostname0.example.org"); + + ReplicationQueues rq = - ReplicationFactory.getReplicationQueues(server.getZooKeeper(), server.getConfiguration(), - server); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server, + server.getZooKeeper())); rq.init(server.getServerName().toString()); // populate some znodes in the peer znode files.add("log1"); @@ -326,8 +329,8 @@ public class TestReplicationSourceManager { public void testCleanupFailoverQueues() throws Exception { final Server server = new DummyServer("hostname1.example.org"); ReplicationQueues rq = - ReplicationFactory.getReplicationQueues(server.getZooKeeper(), server.getConfiguration(), - server); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server, + server.getZooKeeper())); rq.init(server.getServerName().toString()); // populate some znodes in the peer znode SortedSet files = new TreeSet(); @@ -341,7 +344,8 @@ public class TestReplicationSourceManager { } Server s1 = new DummyServer("dummyserver1.example.org"); ReplicationQueues rq1 = - ReplicationFactory.getReplicationQueues(s1.getZooKeeper(), s1.getConfiguration(), s1); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s1.getConfiguration(), s1, + s1.getZooKeeper())); rq1.init(s1.getServerName().toString()); ReplicationPeers rp1 = ReplicationFactory.getReplicationPeers(s1.getZooKeeper(), s1.getConfiguration(), s1); @@ -365,7 +369,8 @@ public class TestReplicationSourceManager { conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true); final Server server = new 
DummyServer("ec2-54-234-230-108.compute-1.amazonaws.com"); ReplicationQueues repQueues = - ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, server, + server.getZooKeeper())); repQueues.init(server.getServerName().toString()); // populate some znodes in the peer znode files.add("log1"); @@ -381,16 +386,19 @@ public class TestReplicationSourceManager { // simulate three servers fail sequentially ReplicationQueues rq1 = - ReplicationFactory.getReplicationQueues(s1.getZooKeeper(), s1.getConfiguration(), s1); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s1.getConfiguration(), s1, + s1.getZooKeeper())); rq1.init(s1.getServerName().toString()); SortedMap> testMap = rq1.claimQueues(server.getServerName().getServerName()); ReplicationQueues rq2 = - ReplicationFactory.getReplicationQueues(s2.getZooKeeper(), s2.getConfiguration(), s2); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s2.getConfiguration(), s2, + s2.getZooKeeper())); rq2.init(s2.getServerName().toString()); testMap = rq2.claimQueues(s1.getServerName().getServerName()); ReplicationQueues rq3 = - ReplicationFactory.getReplicationQueues(s3.getZooKeeper(), s3.getConfiguration(), s3); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s3.getConfiguration(), s3, + s3.getZooKeeper())); rq3.init(s3.getServerName().toString()); testMap = rq3.claimQueues(s2.getServerName().getServerName()); @@ -412,7 +420,8 @@ public class TestReplicationSourceManager { conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true); final Server s0 = new DummyServer("cversion-change0.example.org"); ReplicationQueues repQueues = - ReplicationFactory.getReplicationQueues(s0.getZooKeeper(), conf, s0); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, s0, + s0.getZooKeeper())); repQueues.init(s0.getServerName().toString()); // populate some znodes in the peer znode files.add("log1"); @@ -423,7 +432,8 @@ public class TestReplicationSourceManager { // simulate queue transfer Server s1 = new DummyServer("cversion-change1.example.org"); ReplicationQueues rq1 = - ReplicationFactory.getReplicationQueues(s1.getZooKeeper(), s1.getConfiguration(), s1); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s1.getConfiguration(), s1, + s1.getZooKeeper())); rq1.init(s1.getServerName().toString()); ReplicationQueuesClient client = @@ -522,8 +532,8 @@ public class TestReplicationSourceManager { this.deadRsZnode = znode; this.server = s; this.rq = - ReplicationFactory.getReplicationQueues(server.getZooKeeper(), server.getConfiguration(), - server); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server, + server.getZooKeeper())); this.rq.init(this.server.getServerName().toString()); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java index 7e99cc0..385b7b0 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/security/AbstractTestSecureIPC.java @@ -36,6 +36,7 @@ import java.util.concurrent.ThreadLocalRandom; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; +import org.apache.commons.lang.RandomStringUtils; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hbase.Cell; @@ -217,6 +218,12 @@ public abstract class AbstractTestSecureIPC { setRpcProtection("integrity,authentication", "privacy,authentication"); callRpcService(User.create(ugi)); + + setRpcProtection("integrity,authentication", "integrity,authentication"); + callRpcService(User.create(ugi)); + + setRpcProtection("privacy,authentication", "privacy,authentication"); + callRpcService(User.create(ugi)); } @Test @@ -302,18 +309,17 @@ public abstract class AbstractTestSecureIPC { @Override public void run() { - String result; try { - result = stub.echo(null, TestProtos.EchoRequestProto.newBuilder().setMessage(String.valueOf( - ThreadLocalRandom.current().nextInt())).build()).getMessage(); - } catch (ServiceException e) { - throw new RuntimeException(e); - } - if (results != null) { - synchronized (results) { - results.add(result); - } + int[] messageSize = new int[] {100, 1000, 10000}; + for (int i = 0; i < messageSize.length; i++) { + String input = RandomStringUtils.random(messageSize[i]); + String result = stub.echo(null, TestProtos.EchoRequestProto.newBuilder() + .setMessage(input).build()).getMessage(); + assertEquals(input, result); } + } catch (ServiceException e) { + throw new RuntimeException(e); + } } } } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 92d7806..f58e24e 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -2596,15 +2596,41 @@ public class TestAccessController extends SecureTestUtil { NamespaceDescriptor desc = NamespaceDescriptor.create(namespace).build(); createNamespace(TEST_UTIL, desc); grantOnNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ); + + // Test 1: A specific namespace + getNamespacePermissionsAndVerify(namespace, 1, namespace); + + // Test 2: '@.*' + getNamespacePermissionsAndVerify(".*", 1, namespace); + + // Test 3: A more complex regex + getNamespacePermissionsAndVerify("^test[a-zA-Z]*", 1, namespace); + + deleteNamespace(TEST_UTIL, namespace); + } + + /** + * List all user permissions match the given regular expression for namespace + * and verify each of them. 
+   * @param namespaceRegexWithoutPrefix the regular expression for namespace, without NAMESPACE_PREFIX
+   * @param expectedAmount the expected amount of user permissions returned
+   * @param expectedNamespace the expected namespace of each user permission returned
+   * @throws HBaseException in the case of any HBase exception when accessing hbase:acl table
+   */
+  private void getNamespacePermissionsAndVerify(String namespaceRegexWithoutPrefix,
+      int expectedAmount, String expectedNamespace) throws HBaseException {
     try {
       List<UserPermission> namespacePermissions = AccessControlClient.getUserPermissions(
-          systemUserConnection, AccessControlLists.toNamespaceEntry(namespace));
+          systemUserConnection, AccessControlLists.toNamespaceEntry(namespaceRegexWithoutPrefix));
       assertTrue(namespacePermissions != null);
-      assertTrue(namespacePermissions.size() == 1);
+      assertEquals(expectedAmount, namespacePermissions.size());
+      for (UserPermission namespacePermission : namespacePermissions) {
+        assertFalse(namespacePermission.isGlobal()); // Verify it is not a global user permission
+        assertEquals(expectedNamespace, namespacePermission.getNamespace()); // Verify namespace is set
+      }
     } catch (Throwable thw) {
       throw new HBaseException(thw);
     }
-    deleteNamespace(TEST_UTIL, namespace);
   }
   @Test (timeout=180000)
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index faac8eb..9382bd4 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
@@ -235,11 +235,11 @@ public class TestTokenAuthentication {
     public Configuration getConfiguration() { return conf; }
     @Override
-    public HTableInterface getTable(TableName tableName) throws IOException
+    public Table getTable(TableName tableName) throws IOException
     { return null; }
     @Override
-    public HTableInterface getTable(TableName tableName, ExecutorService service)
+    public Table getTable(TableName tableName, ExecutorService service)
     throws IOException { return null; }
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
index 18a1088..9483ac9 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
@@ -80,7 +80,6 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
     // setup configuration
     conf = HBaseConfiguration.create();
     conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false);
-    conf.setBoolean("hbase.online.schema.update.enable", true);
conf.setInt("hfile.format.version", 3); conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1"); conf.setInt("replication.source.size.capacity", 10240); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java index c9d9530..4ed47b0 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java @@ -130,7 +130,6 @@ public class TestVisibilityLabelsReplication { // setup configuration conf = HBaseConfiguration.create(); conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false); - conf.setBoolean("hbase.online.schema.update.enable", true); conf.setInt("hfile.format.version", 3); conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1"); conf.setInt("replication.source.size.capacity", 10240); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java index a229bdb..63c08a2 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java @@ -67,7 +67,6 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili // setup configuration conf = TEST_UTIL.getConfiguration(); conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false); - conf.setBoolean("hbase.online.schema.update.enable", true); VisibilityTestUtil.enableVisiblityLabels(conf); conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, ScanLabelGenerator.class); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java index 0a933c6..4c2d69a 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java @@ -42,7 +42,9 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TestMobSnapshotCloneIndependence; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.procedure.TestMasterFailoverWithProcedures; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; @@ -55,6 +57,7 @@ import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -71,8 +74,9 @@ import org.junit.rules.TestRule; @Category({RegionServerTests.class, LargeTests.class}) public class TestFlushSnapshotFromClient { private static final Log LOG = LogFactory.getLog(TestFlushSnapshotFromClient.class); - @Rule public final TestRule 
timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). - withLookingForStuckThread(true).build(); + @ClassRule + public static final TestRule timeout = + CategoryBasedTimeout.forClass(TestFlushSnapshotFromClient.class); protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); protected static final int NUM_RS = 2; diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java index 6e612ba..1fa681a 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java @@ -26,10 +26,12 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.procedure.TestMasterFailoverWithProcedures; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.experimental.categories.Category; import org.junit.rules.TestRule; @@ -45,8 +47,10 @@ import org.junit.rules.TestRule; @Category({ClientTests.class, LargeTests.class}) public class TestMobFlushSnapshotFromClient extends TestFlushSnapshotFromClient { private static final Log LOG = LogFactory.getLog(TestFlushSnapshotFromClient.class); - @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). - withLookingForStuckThread(true).build(); + + @ClassRule + public static final TestRule timeout = + CategoryBasedTimeout.forClass(TestMobFlushSnapshotFromClient.class); @BeforeClass public static void setupCluster() throws Exception { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java index 04fce5c..bf26c69 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java @@ -70,7 +70,6 @@ public class TestRestoreFlushSnapshotFromClient { } protected static void setupConf(Configuration conf) { - UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true); UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); UTIL.getConfiguration().setInt("hbase.client.pause", 250); UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java new file mode 100644 index 0000000..fd67186 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java @@ -0,0 +1,153 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.tool; + +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.util.ToolRunner; +import org.apache.log4j.Appender; +import org.apache.log4j.LogManager; +import org.apache.log4j.spi.LoggingEvent; +import com.google.common.collect.Iterables; +import org.apache.hadoop.hbase.HConstants; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.mockito.ArgumentMatcher; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; + +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.eq; +import static org.mockito.Matchers.isA; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Matchers.argThat; +import static org.mockito.Mockito.never; + +@RunWith(MockitoJUnitRunner.class) +@Category({MediumTests.class}) +public class TestCanaryTool { + + private HBaseTestingUtility testingUtility; + private static final byte[] FAMILY = Bytes.toBytes("f"); + private static final byte[] COLUMN = Bytes.toBytes("col"); + + @Before + public void setUp() throws Exception { + testingUtility = new HBaseTestingUtility(); + testingUtility.startMiniCluster(); + LogManager.getRootLogger().addAppender(mockAppender); + } + + @After + public void tearDown() throws Exception { + testingUtility.shutdownMiniCluster(); + LogManager.getRootLogger().removeAppender(mockAppender); + } + + @Mock + Appender mockAppender; + + @Test + public void testBasicZookeeperCanaryWorks() throws Exception { + Integer port = + Iterables.getOnlyElement(testingUtility.getZkCluster().getClientPortList(), null); + testingUtility.getConfiguration().set(HConstants.ZOOKEEPER_QUORUM, + "localhost:" + port + "/hbase"); + ExecutorService executor = new ScheduledThreadPoolExecutor(2); + Canary.ZookeeperStdOutSink sink = spy(new Canary.ZookeeperStdOutSink()); + Canary canary = new Canary(executor, sink); + String[] args = { "-t", "10000", "-zookeeper" }; + ToolRunner.run(testingUtility.getConfiguration(), canary, args); + + String baseZnode = testingUtility.getConfiguration() + .get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + verify(sink, atLeastOnce()) + .publishReadTiming(eq(baseZnode), eq("localhost:" + port), anyLong()); + } + + @Test + public void testBasicCanaryWorks() throws Exception { 
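+    // Create "testTable", load a batch of rows, then run the Canary against that table and
+    // verify that the spied RegionServerStdOutSink published a read timing for at least one
+    // region and column family.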
+ TableName tableName = TableName.valueOf("testTable"); + Table table = testingUtility.createTable(tableName, new byte[][] { FAMILY }); + // insert some test rows + for (int i=0; i<1000; i++) { + byte[] iBytes = Bytes.toBytes(i); + Put p = new Put(iBytes); + p.addColumn(FAMILY, COLUMN, iBytes); + table.put(p); + } + ExecutorService executor = new ScheduledThreadPoolExecutor(1); + Canary.RegionServerStdOutSink sink = spy(new Canary.RegionServerStdOutSink()); + Canary canary = new Canary(executor, sink); + String[] args = { "-t", "10000", "testTable" }; + ToolRunner.run(testingUtility.getConfiguration(), canary, args); + verify(sink, atLeastOnce()) + .publishReadTiming(isA(HRegionInfo.class), isA(HColumnDescriptor.class), anyLong()); + } + + //no table created, so there should be no regions + @Test + public void testRegionserverNoRegions() throws Exception { + runRegionserverCanary(); + verify(mockAppender).doAppend(argThat(new ArgumentMatcher() { + @Override + public boolean matches(Object argument) { + return ((LoggingEvent) argument).getRenderedMessage().contains("Regionserver not serving any regions"); + } + })); + } + + //by creating a table, there shouldn't be any region servers not serving any regions + @Test + public void testRegionserverWithRegions() throws Exception { + TableName tableName = TableName.valueOf("testTable"); + testingUtility.createTable(tableName, new byte[][] { FAMILY }); + runRegionserverCanary(); + verify(mockAppender, never()).doAppend(argThat(new ArgumentMatcher() { + @Override + public boolean matches(Object argument) { + return ((LoggingEvent) argument).getRenderedMessage().contains("Regionserver not serving any regions"); + } + })); + } + + private void runRegionserverCanary() throws Exception { + ExecutorService executor = new ScheduledThreadPoolExecutor(1); + Canary canary = new Canary(executor, new Canary.RegionServerStdOutSink()); + String[] args = { "-t", "10000", "-regionserver"}; + ToolRunner.run(testingUtility.getConfiguration(), canary, args); + } + +} + diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java index 1dfb526..da51516 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java @@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.ResultScanner; @@ -138,7 +137,7 @@ public class BaseTestHBaseFsck { protected void undeployRegion(Connection conn, ServerName sn, HRegionInfo hri) throws IOException, InterruptedException { try { - HBaseFsckRepair.closeRegionSilentlyAndWait((HConnection) conn, sn, hri); + HBaseFsckRepair.closeRegionSilentlyAndWait(conn, sn, hri); if (!hri.isMetaTable()) { admin.offline(hri.getRegionName()); } @@ -344,11 +343,11 @@ public class BaseTestHBaseFsck { Map> mm = new HashMap>(); for (ServerName hsi : regionServers) { - AdminProtos.AdminService.BlockingInterface server = ((HConnection) connection).getAdmin(hsi); + AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi); // list all online regions from this region server 
List regions = ProtobufUtil.getOnlineRegions(server); - List regionNames = new ArrayList(); + List regionNames = new ArrayList<>(); for (HRegionInfo hri : regions) { regionNames.add(hri.getRegionNameAsString()); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index 69b696e..9d62693 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -24,6 +24,7 @@ import java.security.SecureRandom; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Properties; import java.util.Random; import java.util.concurrent.atomic.AtomicReference; @@ -124,7 +125,7 @@ public class LoadTestTool extends AbstractHBaseTool { public static final String OPT_DEFERRED_LOG_FLUSH_USAGE = "Enable deferred log flush."; public static final String OPT_DATA_BLOCK_ENCODING = - HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase(); + HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase(Locale.ROOT); public static final String OPT_INMEMORY = "in_memory"; public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " + diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java index aaa92ff..04c22b7 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java @@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.util; import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.INCREMENT; import static org.apache.hadoop.hbase.util.test.LoadTestDataGenerator.MUTATE_INFO; +import com.google.common.base.Preconditions; + import java.io.IOException; import java.util.Arrays; import java.util.Collection; @@ -39,15 +41,12 @@ import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; import org.apache.hadoop.hbase.util.test.LoadTestKVGenerator; import org.apache.hadoop.util.StringUtils; -import com.google.common.base.Preconditions; - /** * Common base class for reader and writer parts of multi-thread HBase load * test ({@link LoadTestTool}). 
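For context, the hunks below replace the deprecated HConnection and HTableInterface types in the multi-threaded load-test classes with ClusterConnection and Table. The following is a minimal editorial sketch of the Connection-based client pattern these tests now rely on; it is not part of the patch, and the table name and values are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // One shared Connection per process; lightweight Table handles are obtained from it
    // instead of constructing HTable/HTableInterface instances directly.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("loadtest_table"))) {
      table.put(new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("col"), Bytes.toBytes("v")));
    }
  }
}

ClusterConnection is the internal sub-interface of Connection that exposes region locations, which is why MultiThreadedAction keeps the cast in its constructor and MultiThreadedReader can still call getRegionLocation, as the hunks below show.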
@@ -57,7 +56,7 @@ public abstract class MultiThreadedAction { protected final TableName tableName; protected final Configuration conf; - protected final HConnection connection; // all reader / writer threads will share this connection + protected final ClusterConnection connection; // all reader / writer threads will share this connection protected int numThreads = 1; @@ -152,7 +151,7 @@ public abstract class MultiThreadedAction { this.dataGenerator = dataGen; this.tableName = tableName; this.actionLetter = actionLetter; - this.connection = (HConnection) ConnectionFactory.createConnection(conf); + this.connection = (ClusterConnection) ConnectionFactory.createConnection(conf); } public void start(long startKey, long endKey, int numThreads) throws IOException { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java index ca06e97..77443e1 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java @@ -28,10 +28,10 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Consistency; -import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; @@ -158,7 +158,7 @@ public class MultiThreadedReader extends MultiThreadedAction setName(getClass().getSimpleName() + "_" + readerId); } - protected HTableInterface createTable() throws IOException { + protected Table createTable() throws IOException { return connection.getTable(tableName); } @@ -379,7 +379,7 @@ public class MultiThreadedReader extends MultiThreadedAction numKeysVerified.incrementAndGet(); } } else { - HRegionLocation hloc = connection.getRegionLocation(tableName, + HRegionLocation hloc = ((ClusterConnection) connection).getRegionLocation(tableName, get.getRow(), false); String rowKey = Bytes.toString(get.getRow()); LOG.info("Key = " + rowKey + ", Region location: " + hloc); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java index 84cc47d..cdf814c 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java @@ -26,7 +26,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.security.User; @@ -69,7 +68,7 @@ public class MultiThreadedReaderWithACL extends MultiThreadedReader { } @Override - protected HTableInterface createTable() throws IOException { + protected Table createTable() throws IOException { return null; } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java 
hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java index e28acc6..6c816cf 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -130,7 +129,7 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase { table = createTable(); } - protected HTableInterface createTable() throws IOException { + protected Table createTable() throws IOException { return connection.getTable(tableName); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java index 756f612..bf27dde 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; @@ -84,7 +83,7 @@ public class MultiThreadedUpdaterWithACL extends MultiThreadedUpdater { } @Override - protected HTableInterface createTable() throws IOException { + protected Table createTable() throws IOException { return null; } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java index 83e207a..d53ab25 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java @@ -33,7 +33,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; import org.apache.hadoop.hbase.client.Table; @@ -87,7 +86,7 @@ public class MultiThreadedWriter extends MultiThreadedWriterBase { table = createTable(); } - protected HTableInterface createTable() throws IOException { + protected Table createTable() throws IOException { return connection.getTable(tableName); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterWithACL.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterWithACL.java index d3cba2b..4806288 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterWithACL.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterWithACL.java @@ -26,7 +26,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; import org.apache.hadoop.hbase.client.Table; @@ -71,7 +70,7 @@ public class MultiThreadedWriterWithACL extends MultiThreadedWriter { } @Override - protected HTableInterface createTable() throws IOException { + protected Table createTable() throws IOException { return null; } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java index 43c7cfc..398f3f0 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java @@ -75,12 +75,14 @@ public class TestCompressionTest { nativeCodecTest("LZO", "lzo2", "com.hadoop.compression.lzo.LzoCodec"); nativeCodecTest("LZ4", null, "org.apache.hadoop.io.compress.Lz4Codec"); nativeCodecTest("SNAPPY", "snappy", "org.apache.hadoop.io.compress.SnappyCodec"); + nativeCodecTest("BZIP2", "bzip2", "org.apache.hadoop.io.compress.BZip2Codec"); } else { // Hadoop nativelib is not available LOG.debug("Native code not loaded"); assertFalse(CompressionTest.testCompression("LZO")); assertFalse(CompressionTest.testCompression("LZ4")); assertFalse(CompressionTest.testCompression("SNAPPY")); + assertFalse(CompressionTest.testCompression("BZIP2")); } } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java index 99b82f2..88df7f4 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestConnectionCache.java @@ -31,7 +31,7 @@ public class TestConnectionCache extends TestCase { private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); /** - * test for ConnectionCache cleaning expired HConnection + * test for ConnectionCache cleaning expired Connection */ @Test public void testConnectionChore() throws Exception { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java index a7c0c55..84ef6da 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.regionserver.TestEndToEndSplitTransaction; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationQueues; +import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; @@ -80,6 +81,7 @@ import java.util.LinkedList; import java.util.List; import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; @@ -309,9 +311,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { HRegionInfo hriOverlap = 
createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B")); - TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() - .waitForAssignment(hriOverlap); + TEST_UTIL.assignRegion(hriOverlap); + ServerName server = regionStates.getRegionServerOfRegion(hriOverlap); TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT); @@ -349,9 +350,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { // Mess it up by creating an overlap in the metadata HRegionInfo hriOverlap = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B2")); - TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() - .waitForAssignment(hriOverlap); + TEST_UTIL.assignRegion(hriOverlap); + ServerName server = regionStates.getRegionServerOfRegion(hriOverlap); TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT); @@ -609,7 +609,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { hbck.close(); } } - + @Test (timeout=180000) public void testHbckAfterRegionMerge() throws Exception { TableName table = TableName.valueOf("testMergeRegionFilesInHdfs"); @@ -1224,9 +1224,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { HRegionInfo hriOverlap = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B")); - TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() - .waitForAssignment(hriOverlap); + TEST_UTIL.assignRegion(hriOverlap); + ServerName server = regionStates.getRegionServerOfRegion(hriOverlap); TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT); @@ -1350,9 +1349,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { // Now let's mess it up, by adding a region with a duplicate startkey HRegionInfo hriDupe = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("B")); - TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() - .waitForAssignment(hriDupe); + TEST_UTIL.assignRegion(hriDupe); + ServerName server = regionStates.getRegionServerOfRegion(hriDupe); TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT); @@ -1471,7 +1469,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { TableLockManager.DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS)); // let table lock expire hbck = doFsck(conf, false); - assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK}); + assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { + HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK}); final CountDownLatch latch = new CountDownLatch(1); new Thread() { @@ -1495,24 +1494,27 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { Threads.sleep(300); // wait some more to ensure writeLock.acquire() is called hbck = doFsck(conf, false); + // still one expired, one not-expired assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { - HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK}); // still one expired, one not-expired + HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK}); edge.incrementTime(conf.getLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT, TableLockManager.DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS)); // let table lock expire hbck = doFsck(conf, false); - 
assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK, + assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { + HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK, HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK}); // both are expired - conf.setLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT, 1); + Configuration localConf = new Configuration(conf); // reaping from ZKInterProcessWriteLock uses znode cTime, // which is not injectable through EnvironmentEdge + localConf.setLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT, 1); Threads.sleep(10); - hbck = doFsck(conf, true); // now fix both cases + hbck = doFsck(localConf, true); // now fix both cases - hbck = doFsck(conf, false); + hbck = doFsck(localConf, false); assertNoErrors(hbck); // ensure that locks are deleted @@ -1527,7 +1529,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { // check no errors HBaseFsck hbck = doFsck(conf, false); assertNoErrors(hbck); - + // create peer ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf); Assert.assertEquals(0, replicationAdmin.getPeersCount()); @@ -1538,11 +1540,12 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { replicationAdmin.addPeer("1", rpc, null); replicationAdmin.getPeersCount(); Assert.assertEquals(1, replicationAdmin.getPeersCount()); - + // create replicator ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test Hbase Fsck", connection); ReplicationQueues repQueues = - ReplicationFactory.getReplicationQueues(zkw, conf, connection); + ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, connection, + zkw)); repQueues.init("server1"); // queues for current peer, no errors repQueues.addLog("1", "file1"); @@ -1550,7 +1553,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { Assert.assertEquals(2, repQueues.getAllQueues().size()); hbck = doFsck(conf, false); assertNoErrors(hbck); - + // queues for removed peer repQueues.addLog("2", "file1"); repQueues.addLog("2-server2", "file1"); @@ -1559,7 +1562,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE }); - + // fix the case hbck = doFsck(conf, true); hbck = doFsck(conf, false); @@ -1568,7 +1571,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { Assert.assertEquals(2, repQueues.getAllQueues().size()); Assert.assertNull(repQueues.getLogsInQueue("2")); Assert.assertNull(repQueues.getLogsInQueue("2-sever2")); - + replicationAdmin.removePeer("1"); repQueues.removeAllQueues(); zkw.close(); @@ -1678,8 +1681,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { st.prepare(); st.stepsBeforePONR(regionServer, regionServer, false); AssignmentManager am = cluster.getMaster().getAssignmentManager(); - Map regionsInTransition = am.getRegionStates().getRegionsInTransition(); - for (RegionState state : regionsInTransition.values()) { + for (RegionState state : am.getRegionStates().getRegionsInTransition()) { am.regionOffline(state.getRegion()); } Map regionsMap = new HashMap(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java index 6d0e48c..17ac778 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java +++ 
hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java @@ -19,8 +19,13 @@ package org.apache.hadoop.hbase.util; - import com.google.common.collect.Multimap; + +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -33,7 +38,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -52,11 +56,6 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.*; import static org.junit.Assert.*; @@ -137,9 +136,8 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { // Now let's mess it up, by adding a region with a duplicate startkey HRegionInfo hriDupe = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("A2")); - TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() - .waitForAssignment(hriDupe); + TEST_UTIL.assignRegion(hriDupe); + ServerName server = regionStates.getRegionServerOfRegion(hriDupe); TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT); @@ -177,10 +175,8 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { // Now let's mess it up, by adding a region with a duplicate startkey HRegionInfo hriDupe = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B")); + TEST_UTIL.assignRegion(hriDupe); - TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe); - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() - .waitForAssignment(hriDupe); ServerName server = regionStates.getRegionServerOfRegion(hriDupe); TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT); @@ -229,9 +225,8 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { // Mess it up by creating an overlap in the metadata HRegionInfo hriOverlap = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B")); - TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap); - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager() - .waitForAssignment(hriOverlap); + TEST_UTIL.assignRegion(hriOverlap); + ServerName server = regionStates.getRegionServerOfRegion(hriOverlap); TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT); @@ -318,12 +313,11 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { HMaster master = cluster.getMaster(); HRegionInfo hriOverlap1 = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("AB")); - master.assignRegion(hriOverlap1); - master.getAssignmentManager().waitForAssignment(hriOverlap1); + TEST_UTIL.assignRegion(hriOverlap1); + HRegionInfo hriOverlap2 = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("AB"), 
Bytes.toBytes("B")); - master.assignRegion(hriOverlap2); - master.getAssignmentManager().waitForAssignment(hriOverlap2); + TEST_UTIL.assignRegion(hriOverlap2); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {HBaseFsck.ErrorReporter.ERROR_CODE.DUPE_STARTKEYS, @@ -350,7 +344,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { } } - HBaseFsckRepair.closeRegionSilentlyAndWait((HConnection) connection, + HBaseFsckRepair.closeRegionSilentlyAndWait(connection, cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI()); admin.offline(regionName); break; @@ -459,7 +453,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { scanner.close(); meta.close(); } - + /** * This creates and fixes a bad table with a missing region -- hole in meta and data present but * .regioninfo missing (an orphan hdfs region)in the fs. At last we check every row was present diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java index 615487d..b8565e3 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java @@ -75,15 +75,6 @@ public class TestOfflineMetaRebuildHole extends OfflineMetaRebuildTestCore { LOG.info("Waiting for no more RIT"); TEST_UTIL.waitUntilNoRegionsInTransition(60000); - LOG.info("No more RIT in ZK, now doing final test verification"); - int tries = 60; - while(TEST_UTIL.getHBaseCluster() - .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition().size() > 0 && - tries-- > 0) { - LOG.info("Waiting for RIT: "+TEST_UTIL.getHBaseCluster() - .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition()); - Thread.sleep(1000); - } // Meta still messed up. assertEquals(1, scanMeta()); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java index 40ba86a..ae72935 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java @@ -84,14 +84,6 @@ public class TestOfflineMetaRebuildOverlap extends OfflineMetaRebuildTestCore { LOG.info("Waiting for no more RIT"); TEST_UTIL.waitUntilNoRegionsInTransition(60000); LOG.info("No more RIT in ZK, now doing final test verification"); - int tries = 60; - while(TEST_UTIL.getHBaseCluster() - .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition().size() > 0 && - tries-- > 0) { - LOG.info("Waiting for RIT: "+TEST_UTIL.getHBaseCluster() - .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition()); - Thread.sleep(1000); - } // Meta still messed up. assertEquals(1, scanMeta()); diff --git hbase-shell/src/main/ruby/hbase.rb hbase-shell/src/main/ruby/hbase.rb deleted file mode 100644 index 21f88f9..0000000 --- hbase-shell/src/main/ruby/hbase.rb +++ /dev/null @@ -1,107 +0,0 @@ -# -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# HBase ruby classes. -# Has wrapper classes for org.apache.hadoop.hbase.client.Admin -# and for org.apache.hadoop.hbase.client.Table. Classes take -# Formatters on construction and outputs any results using -# Formatter methods. These classes are only really for use by -# the hirb.rb HBase Shell script; they don't make much sense elsewhere. -# For example, the exists method on Admin class prints to the formatter -# whether the table exists and returns nil regardless. -include Java - -include_class('java.lang.Integer') {|package,name| "J#{name}" } -include_class('java.lang.Long') {|package,name| "J#{name}" } -include_class('java.lang.Boolean') {|package,name| "J#{name}" } - -module HBaseConstants - COLUMN = "COLUMN" - COLUMNS = "COLUMNS" - TIMESTAMP = "TIMESTAMP" - TIMERANGE = "TIMERANGE" - NAME = org.apache.hadoop.hbase.HConstants::NAME - VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS - IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY - METADATA = org.apache.hadoop.hbase.HConstants::METADATA - STOPROW = "STOPROW" - STARTROW = "STARTROW" - ROWPREFIXFILTER = "ROWPREFIXFILTER" - ENDROW = STOPROW - RAW = "RAW" - LIMIT = "LIMIT" - METHOD = "METHOD" - MAXLENGTH = "MAXLENGTH" - CACHE_BLOCKS = "CACHE_BLOCKS" - ALL_METRICS = "ALL_METRICS" - METRICS = "METRICS" - REVERSED = "REVERSED" - REPLICATION_SCOPE = "REPLICATION_SCOPE" - INTERVAL = 'INTERVAL' - CACHE = 'CACHE' - FILTER = 'FILTER' - SPLITS = 'SPLITS' - SPLITS_FILE = 'SPLITS_FILE' - SPLITALGO = 'SPLITALGO' - NUMREGIONS = 'NUMREGIONS' - REGION_REPLICATION = 'REGION_REPLICATION' - REGION_REPLICA_ID = 'REGION_REPLICA_ID' - CONFIGURATION = org.apache.hadoop.hbase.HConstants::CONFIGURATION - ATTRIBUTES="ATTRIBUTES" - VISIBILITY="VISIBILITY" - AUTHORIZATIONS = "AUTHORIZATIONS" - SKIP_FLUSH = 'SKIP_FLUSH' - CONSISTENCY = "CONSISTENCY" - USER = 'USER' - TABLE = 'TABLE' - NAMESPACE = 'NAMESPACE' - TYPE = 'TYPE' - NONE = 'NONE' - VALUE = 'VALUE' - ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME' - CLUSTER_KEY = 'CLUSTER_KEY' - TABLE_CFS = 'TABLE_CFS' - CONFIG = 'CONFIG' - DATA = 'DATA' - - # Load constants from hbase java API - def self.promote_constants(constants) - # The constants to import are all in uppercase - constants.each do |c| - next if c =~ /DEFAULT_.*/ || c != c.upcase - next if eval("defined?(#{c})") - eval("#{c} = '#{c}'") - end - end - - promote_constants(org.apache.hadoop.hbase.HColumnDescriptor.constants) - promote_constants(org.apache.hadoop.hbase.HTableDescriptor.constants) -end - -# Include classes definition -require 'hbase/hbase' -require 'hbase/admin' -require 'hbase/table' -require 'hbase/quotas' -require 'hbase/replication_admin' -require 'hbase/security' -require 'hbase/visibility_labels' -require 'hbase/rsgroup_admin' - -include HBaseQuotasConstants diff --git hbase-shell/src/main/ruby/hbase/admin.rb hbase-shell/src/main/ruby/hbase/admin.rb index 88a6598..d66c1d6 100644 --- hbase-shell/src/main/ruby/hbase/admin.rb +++ 
hbase-shell/src/main/ruby/hbase/admin.rb @@ -31,10 +31,10 @@ module Hbase class Admin include HBaseConstants - def initialize(admin, formatter) - @admin = admin - @connection = @admin.getConnection() - @formatter = formatter + def initialize(connection) + @connection = connection + # Java Admin instance + @admin = @connection.getAdmin end def close @@ -309,12 +309,6 @@ module Hbase end #---------------------------------------------------------------------------------------------- - # Parse arguments and update HTableDescriptor accordingly - def parse_htd_args(htd, arg) - htd.setNormalizationEnabled(JBoolean.valueOf(arg.delete(NORMALIZATION_ENABLED))) if arg[NORMALIZATION_ENABLED] - end - - #---------------------------------------------------------------------------------------------- # Creates a table def create(table_name, *args) # Fail if table name is not a string @@ -392,24 +386,7 @@ module Hbase end # Done with splits; apply formerly-table_att parameters. - htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER] - htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE] - htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY] - htd.setCompactionEnabled(JBoolean.valueOf(arg[COMPACTION_ENABLED])) if arg[COMPACTION_ENABLED] - htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE] - # DEFERRED_LOG_FLUSH is deprecated and was replaced by DURABILITY. To keep backward compatible, it still exists. - # However, it has to be set before DURABILITY so that DURABILITY could overwrite if both args are set - if arg.include?(DEFERRED_LOG_FLUSH) - if arg.delete(DEFERRED_LOG_FLUSH).to_s.upcase == "TRUE" - htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("ASYNC_WAL")) - else - htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("SYNC_WAL")) - end - end - htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY] - parse_htd_args(htd, arg) - set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA] - set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION] + update_htd_from_arg(htd, arg) arg.each_key do |ignored_key| puts("An argument ignored (unknown or overridden): %s" % [ ignored_key ]) @@ -481,16 +458,17 @@ module Hbase #---------------------------------------------------------------------------------------------- # Truncates table (deletes all records by recreating the table) - def truncate(table_name_str, conf = @conf) + def truncate(table_name_str) + puts "Truncating '#{table_name_str}' table (it may take a while):" table_name = TableName.valueOf(table_name_str) table_description = @admin.getTableDescriptor(table_name) raise ArgumentError, "Table #{table_name_str} is not enabled. Enable it first." unless enabled?(table_name_str) - yield 'Disabling table...' if block_given? + puts 'Disabling table...' @admin.disableTable(table_name) begin - yield 'Truncating table...' if block_given? + puts 'Truncating table...' @admin.truncateTable(table_name, false) rescue => e # Handle the compatibility case, where the truncate method doesn't exists on the Master @@ -498,10 +476,10 @@ module Hbase rootCause = e.cause if rootCause.kind_of?(org.apache.hadoop.hbase.DoNotRetryIOException) then # Handle the compatibility case, where the truncate method doesn't exists on the Master - yield 'Dropping table...' if block_given? + puts 'Dropping table...' @admin.deleteTable(table_name) - yield 'Creating table...' if block_given? 
+          puts 'Creating table...'
           @admin.createTable(table_description)
         else
           raise e
@@ -511,9 +489,9 @@ module Hbase
     #----------------------------------------------------------------------------------------------
     # Truncates table while maintaing region boundaries (deletes all records by recreating the table)
-    def truncate_preserve(table_name_str, conf = @conf)
+    def truncate_preserve(table_name_str)
+      puts "Truncating '#{table_name_str}' table (it may take a while):"
       table_name = TableName.valueOf(table_name_str)
-      h_table = @connection.getTable(table_name)
       locator = @connection.getRegionLocator(table_name)
       begin
         splits = locator.getAllRegionLocations().
@@ -524,11 +502,11 @@
       end
       table_description = @admin.getTableDescriptor(table_name)
-      yield 'Disabling table...' if block_given?
+      puts 'Disabling table...'
       disable(table_name_str)
       begin
-        yield 'Truncating table...' if block_given?
+        puts 'Truncating table...'
         @admin.truncateTable(table_name, true)
       rescue => e
         # Handle the compatibility case, where the truncate method doesn't exists on the Master
@@ -536,10 +514,10 @@
         rootCause = e.cause
         if rootCause.kind_of?(org.apache.hadoop.hbase.DoNotRetryIOException) then
           # Handle the compatibility case, where the truncate method doesn't exists on the Master
-          yield 'Dropping table...' if block_given?
+          puts 'Dropping table...'
           @admin.deleteTable(table_name)
-          yield 'Creating table with region boundaries...' if block_given?
+          puts 'Creating table with region boundaries...'
           @admin.createTable(table_description, splits)
         else
           raise e
@@ -653,26 +631,7 @@
       end
       # 3) Some args for the table, optionally with METHOD => table_att (deprecated)
-      raise(ArgumentError, "NAME argument in an unexpected place") if name
-      htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER]
-      htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE]
-      htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY]
-      htd.setCompactionEnabled(JBoolean.valueOf(arg[COMPACTION_ENABLED])) if arg[COMPACTION_ENABLED]
-      parse_htd_args(htd, arg)
-      htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE]
-      # DEFERRED_LOG_FLUSH is deprecated and was replaced by DURABILITY. To keep backward compatible, it still exists.
- # However, it has to be set before DURABILITY so that DURABILITY could overwrite if both args are set - if arg.include?(DEFERRED_LOG_FLUSH) - if arg.delete(DEFERRED_LOG_FLUSH).to_s.upcase == "TRUE" - htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("ASYNC_WAL")) - else - htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("SYNC_WAL")) - end - end - htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY] - htd.setRegionReplication(JInteger.valueOf(arg.delete(REGION_REPLICATION))) if arg[REGION_REPLICATION] - set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA] - set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION] + update_htd_from_arg(htd, arg) # set a coprocessor attribute valid_coproc_keys = [] @@ -720,7 +679,7 @@ module Hbase puts("version %s" % [ status.getHBaseVersion() ]) # Put regions in transition first because usually empty puts("%d regionsInTransition" % status.getRegionsInTransition().size()) - for k, v in status.getRegionsInTransition() + for v in status.getRegionsInTransition() puts(" %s" % [v]) end master = status.getMaster() @@ -764,7 +723,7 @@ module Hbase rLoadSink = sl.getReplicationLoadSink() rSinkString << " AgeOfLastAppliedOp=" + rLoadSink.getAgeOfLastAppliedOp().to_s rSinkString << ", TimeStampsOfLastAppliedOp=" + - (java.util.Date.new(rLoadSink.getTimeStampsOfLastAppliedOp())).toString() + (java.util.Date.new(rLoadSink.getTimeStampsOfLastAppliedOp())).toString() rLoadSourceList = sl.getReplicationLoadSourceList() index = 0 while index < rLoadSourceList.size() @@ -773,7 +732,7 @@ module Hbase rSourceString << ", AgeOfLastShippedOp=" + rLoadSource.getAgeOfLastShippedOp().to_s rSourceString << ", SizeOfLogQueue=" + rLoadSource.getSizeOfLogQueue().to_s rSourceString << ", TimeStampsOfLastShippedOp=" + - (java.util.Date.new(rLoadSource.getTimeStampOfLastShippedOp())).toString() + (java.util.Date.new(rLoadSource.getTimeStampOfLastShippedOp())).toString() rSourceString << ", Replication Lag=" + rLoadSource.getReplicationLag().to_s index = index + 1 end @@ -855,6 +814,7 @@ module Hbase family.setScope(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE) family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE) family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY) + family.setCompacted(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION) family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL) family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING) family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE) @@ -1006,6 +966,47 @@ module Hbase 
@admin.listTableSnapshots(tableNameRegex, snapshotNameRegex).to_a end + #---------------------------------------------------------------------------------------------- + # Returns a list of regionservers + def getRegionServers() + return @admin.getClusterStatus.getServers.map { |serverName| serverName } + end + + #---------------------------------------------------------------------------------------------- + # Returns a list of servernames + def getServerNames(servers) + regionservers = getRegionServers() + servernames = [] + + if servers.length == 0 + # if no servers were specified as arguments, get a list of all servers + servernames = regionservers + else + # Strings replace with ServerName objects in servers array + i = 0 + while (i < servers.length) + server = servers[i] + + if ServerName.isFullServerName(server) + servernames.push(ServerName.valueOf(server)) + else + name_list = server.split(",") + j = 0 + while (j < regionservers.length) + sn = regionservers[j] + if name_list[0] == sn.hostname and (name_list[1] == nil ? true : (name_list[1] == sn.port.to_s) ) + servernames.push(sn) + end + j += 1 + end + end + i += 1 + end + end + + return servernames + end + # Apply config specific to a table/column to its descriptor def set_descriptor_config(descriptor, config) raise(ArgumentError, "#{CONFIGURATION} must be a Hash type") unless config.kind_of?(Hash) @@ -1144,5 +1145,29 @@ module Hbase def list_procedures() @admin.listProcedures() end + + # Parse arguments and update HTableDescriptor accordingly + def update_htd_from_arg(htd, arg) + htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER] + htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE] + htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY] + htd.setCompactionEnabled(JBoolean.valueOf(arg[COMPACTION_ENABLED])) if arg[COMPACTION_ENABLED] + htd.setNormalizationEnabled( + JBoolean.valueOf(arg[NORMALIZATION_ENABLED])) if arg[NORMALIZATION_ENABLED] + htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE] + # DEFERRED_LOG_FLUSH is deprecated and was replaced by DURABILITY. To keep backward compatible, it still exists. 
+ # However, it has to be set before DURABILITY so that DURABILITY could overwrite if both args are set + if arg.include?(DEFERRED_LOG_FLUSH) + if arg.delete(DEFERRED_LOG_FLUSH).to_s.upcase == "TRUE" + htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("ASYNC_WAL")) + else + htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("SYNC_WAL")) + end + end + htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY] + htd.setRegionReplication(JInteger.valueOf(arg.delete(REGION_REPLICATION))) if arg[REGION_REPLICATION] + set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA] + set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION] + end end end diff --git hbase-shell/src/main/ruby/hbase/hbase.rb hbase-shell/src/main/ruby/hbase/hbase.rb index 785f8bc..bc5a31d 100644 --- hbase-shell/src/main/ruby/hbase/hbase.rb +++ hbase-shell/src/main/ruby/hbase/hbase.rb @@ -18,9 +18,12 @@ # include Java +java_import org.apache.hadoop.hbase.client.ConnectionFactory +java_import org.apache.hadoop.hbase.HBaseConfiguration require 'hbase/admin' require 'hbase/table' +require 'hbase/taskmonitor' require 'hbase/quotas' require 'hbase/security' require 'hbase/visibility_labels' @@ -34,44 +37,48 @@ module Hbase if config self.configuration = config else - self.configuration = org.apache.hadoop.hbase.HBaseConfiguration.create + self.configuration = HBaseConfiguration.create # Turn off retries in hbase and ipc. Human doesn't want to wait on N retries. configuration.setInt("hbase.client.retries.number", 7) configuration.setInt("hbase.ipc.client.connect.max.retries", 3) end - @connection = org.apache.hadoop.hbase.client.ConnectionFactory.createConnection( - self.configuration) + @connection = ConnectionFactory.createConnection(self.configuration) end - def admin(formatter) - ::Hbase::Admin.new(@connection.getAdmin, formatter) + # Returns ruby's Admin class from admin.rb + def admin() + ::Hbase::Admin.new(@connection) end - def rsgroup_admin(formatter) - ::Hbase::RSGroupAdmin.new(@connection, formatter) + def rsgroup_admin() + ::Hbase::RSGroupAdmin.new(@connection) + end + + def taskmonitor() + ::Hbase::TaskMonitor.new(configuration) end # Create new one each time def table(table, shell) - ::Hbase::Table.new(@connection.getTable(table), shell) + ::Hbase::Table.new(@connection.getTable(TableName.valueOf(table)), shell) end - def replication_admin(formatter) - ::Hbase::RepAdmin.new(configuration, formatter) + def replication_admin() + ::Hbase::RepAdmin.new(configuration) end - def security_admin(formatter) - ::Hbase::SecurityAdmin.new(@connection.getAdmin, formatter) + def security_admin() + ::Hbase::SecurityAdmin.new(@connection.getAdmin) end - def visibility_labels_admin(formatter) - ::Hbase::VisibilityLabelsAdmin.new(@connection.getAdmin, formatter) + def visibility_labels_admin() + ::Hbase::VisibilityLabelsAdmin.new(@connection.getAdmin) end - def quotas_admin(formatter) - ::Hbase::QuotasAdmin.new(@connection.getAdmin, formatter) + def quotas_admin() + ::Hbase::QuotasAdmin.new(@connection.getAdmin) end - + def shutdown @connection.close end diff --git hbase-shell/src/main/ruby/hbase/quotas.rb hbase-shell/src/main/ruby/hbase/quotas.rb index 0be428d..bf2dc63 100644 --- hbase-shell/src/main/ruby/hbase/quotas.rb +++ hbase-shell/src/main/ruby/hbase/quotas.rb @@ -36,9 +36,8 @@ end module Hbase class QuotasAdmin - def initialize(admin, formatter) + def initialize(admin) @admin = admin - @formatter = formatter end def 
close diff --git hbase-shell/src/main/ruby/hbase/replication_admin.rb hbase-shell/src/main/ruby/hbase/replication_admin.rb index e91a4f7..7eae7af 100644 --- hbase-shell/src/main/ruby/hbase/replication_admin.rb +++ hbase-shell/src/main/ruby/hbase/replication_admin.rb @@ -31,10 +31,9 @@ module Hbase class RepAdmin include HBaseConstants - def initialize(configuration, formatter) + def initialize(configuration) @replication_admin = ReplicationAdmin.new(configuration) @configuration = configuration - @formatter = formatter end #---------------------------------------------------------------------------------------------- diff --git hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb index 51a4efb..c654f23 100644 --- hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb +++ hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb @@ -28,9 +28,8 @@ module Hbase class RSGroupAdmin include HBaseConstants - def initialize(connection, formatter) + def initialize(connection) @admin = org.apache.hadoop.hbase.rsgroup.RSGroupAdmin.newClient(connection) - @formatter = formatter end def close diff --git hbase-shell/src/main/ruby/hbase/security.rb hbase-shell/src/main/ruby/hbase/security.rb index 20f3298..55519ed 100644 --- hbase-shell/src/main/ruby/hbase/security.rb +++ hbase-shell/src/main/ruby/hbase/security.rb @@ -24,10 +24,9 @@ module Hbase class SecurityAdmin include HBaseConstants - def initialize(admin, formatter) + def initialize(admin) @admin = admin @connection = @admin.getConnection() - @formatter = formatter end def close @@ -137,9 +136,9 @@ module Hbase all_perms.each do |value| user_name = String.from_java_bytes(value.getUser) if (table_regex != nil && isNamespace?(table_regex)) - namespace = table_regex[1...table_regex.length] + namespace = value.getNamespace() else - namespace = (value.getTableName != nil) ? value.getTableName.getNamespaceAsString() : '' + namespace = (value.getTableName != nil) ? value.getTableName.getNamespaceAsString() : value.getNamespace() end table = (value.getTableName != nil) ? value.getTableName.getNameAsString() : '' family = (value.getFamily != nil) ? diff --git hbase-shell/src/main/ruby/hbase/table.rb hbase-shell/src/main/ruby/hbase/table.rb index b5769ca..a90760e 100644 --- hbase-shell/src/main/ruby/hbase/table.rb +++ hbase-shell/src/main/ruby/hbase/table.rb @@ -282,6 +282,7 @@ EOF def _get_internal(row, *args) get = org.apache.hadoop.hbase.client.Get.new(row.to_s.to_java_bytes) maxlength = -1 + count = 0 @converters.clear() # Normalize args @@ -295,7 +296,7 @@ EOF # Parse arguments # unless args.kind_of?(Hash) - raise ArgumentError, "Failed parse of of #{args.inspect}, #{args.class}" + raise ArgumentError, "Failed parse of #{args.inspect}, #{args.class}" end # Get maxlength parameter if passed @@ -370,6 +371,10 @@ EOF result = @table.get(get) return nil if result.isEmpty + # Get stale info from results + is_stale = result.isStale + count += 1 + # Print out results. Result can be Cell or RowResult. res = {} result.listCells.each do |c| @@ -389,7 +394,7 @@ EOF end # If block given, we've yielded all the results, otherwise just return them - return ((block_given?) ? nil : res) + return ((block_given?) ? 
[count, is_stale]: res) end #---------------------------------------------------------------------------------------------- @@ -509,6 +514,7 @@ EOF while iter.hasNext row = iter.next key = org.apache.hadoop.hbase.util.Bytes::toStringBinary(row.getRow) + is_stale |= row.isStale row.listCells.each do |c| family = org.apache.hadoop.hbase.util.Bytes::toStringBinary(c.getFamilyArray, @@ -536,7 +542,7 @@ EOF end scanner.close() - return ((block_given?) ? count : res) + return ((block_given?) ? [count, is_stale] : res) end # Apply OperationAttributes to puts/scans/gets @@ -713,6 +719,7 @@ EOF map{|i| Bytes.toStringBinary(i.getRegionInfo().getStartKey)}.delete_if{|k| k == ""} locator.close() puts("Total number of splits = %s" % [splits.size + 1]) + puts splits return splits end end diff --git hbase-shell/src/main/ruby/hbase/taskmonitor.rb hbase-shell/src/main/ruby/hbase/taskmonitor.rb new file mode 100644 index 0000000..d312558 --- /dev/null +++ hbase-shell/src/main/ruby/hbase/taskmonitor.rb @@ -0,0 +1,204 @@ +# +# Copyright 2010 The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +include Java + +# Add the $HBASE_HOME/lib directory to the ruby load_path to load jackson +if File.exists?(File.join(File.dirname(__FILE__), "..", "lib")) + $LOAD_PATH.unshift File.join(File.dirname(__FILE__), "..", "lib") +end + +module Hbase + class TaskMonitor + include HBaseConstants + + #--------------------------------------------------------------------------------------------- + # Represents information reported by a server on a single MonitoredTask + class Task + + def initialize(taskMap,host) + + taskMap.each_pair do |k,v| + case k + when "statustimems" + @statustime = Time.at(v/1000) + when "status" + @status = v + when "starttimems" + @starttime = Time.at(v/1000) + when "description" + @description = v + when "state" + @state = v + end + end + + @host = host + + end + + def statustime + # waiting IPC handlers often have statustime = -1, in this case return starttime + if @statustime > Time.at(-1) + return @statustime + end + return @starttime + end + + attr_reader :host + attr_reader :status + attr_reader :starttime + attr_reader :description + attr_reader :state + + end + + + def initialize(configuration) + @conf = configuration + @conn = org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(@conf) + @admin = @conn.getAdmin() + end + + #--------------------------------------------------------------------------------------------------- + # Returns a filtered list of tasks on the given host + def tasksOnHost(filter,host) + + java_import 'java.net.URL' + java_import 'org.codehaus.jackson.map.ObjectMapper' + + infoport = @admin.getClusterStatus().getLoad(host).getInfoServerPort().to_s + + # Note: This condition use constants from hbase-server + #if (!@admin.getConfiguration().getBoolean(org.apache.hadoop.hbase.http.ServerConfigurationKeys::HBASE_SSL_ENABLED_KEY, + # org.apache.hadoop.hbase.http.ServerConfigurationKeys::HBASE_SSL_ENABLED_DEFAULT)) + # schema = "http://" + #else + # schema = "https://" + #end + schema = "http://" + url = schema + host.hostname + ":" + infoport + "/rs-status?format=json&filter=" + filter + + json = URL.new(url) + mapper = ObjectMapper.new + + # read and parse JSON + tasksArrayList = mapper.readValue(json,java.lang.Object.java_class) + + # convert to an array of TaskMonitor::Task instances + tasks = Array.new + tasksArrayList.each do |t| + tasks.unshift Task.new(t,host) + end + + return tasks + + end + + #--------------------------------------------------------------------------------------------------- + # Prints a table of filtered tasks on requested hosts + def tasks(filter,hosts) + + # put all tasks on all requested hosts in the same list + tasks = [] + hosts.each do |host| + tasks.concat(tasksOnHost(filter,host)) + end + + puts("%d tasks as of: %s" % [tasks.size,Time.now.strftime("%Y-%m-%d %H:%M:%S")]) + + if tasks.size() == 0 + puts("No " + filter + " tasks currently running.") + else + + # determine table width + longestStatusWidth = 0 + longestDescriptionWidth = 0 + tasks.each do |t| + longestStatusWidth = [longestStatusWidth,t.status.length].max + longestDescriptionWidth = [longestDescriptionWidth,t.description.length].max + end + + # set the maximum character width of each column, without padding + hostWidth = 15 + startTimeWidth = 19 + stateWidth = 8 + descriptionWidth = [32,longestDescriptionWidth].min + statusWidth = [36,longestStatusWidth + 27].min + + rowSeparator = "+" + "-" * (hostWidth + 2) + + "+" + "-" * (startTimeWidth + 2) + + "+" + "-" * (stateWidth + 2) + + "+" + "-" * (descriptionWidth + 2) + + "+" + "-" * 
(statusWidth + 2) + "+" + + # print table header + cells = [setCellWidth("Host",hostWidth), + setCellWidth("Start Time",startTimeWidth), + setCellWidth("State",stateWidth), + setCellWidth("Description",descriptionWidth), + setCellWidth("Status",statusWidth)] + + line = "| %s | %s | %s | %s | %s |" % cells + + puts(rowSeparator) + puts(line) + + # print table content + tasks.each do |t| + + cells = [setCellWidth(t.host.hostname,hostWidth), + setCellWidth(t.starttime.strftime("%Y-%m-%d %H:%M:%S"),startTimeWidth), + setCellWidth(t.state,stateWidth), + setCellWidth(t.description,descriptionWidth), + setCellWidth("%s (since %d seconds ago)" % + [t.status,Time.now - t.statustime], statusWidth)] + + line = "| %s | %s | %s | %s | %s |" % cells + + puts(rowSeparator) + puts(line) + + end + puts(rowSeparator) + + end + + end + + #--------------------------------------------------------------------------------------------------- + # + # Helper methods + # + + # right-pad with spaces or truncate with ellipses to match passed width + def setCellWidth(cellContent,width) + numCharsTooShort = width-cellContent.length + if numCharsTooShort < 0 + # cellContent is too long, so truncate + return cellContent[0,[width-3,0].max] + "." * [3,width].min + else + # cellContent is requested width or too short, so right-pad with zero or more spaces + return cellContent + " " * numCharsTooShort + end + end + + end +end diff --git hbase-shell/src/main/ruby/hbase/visibility_labels.rb hbase-shell/src/main/ruby/hbase/visibility_labels.rb index 98bfb2c..8e6c93c 100644 --- hbase-shell/src/main/ruby/hbase/visibility_labels.rb +++ hbase-shell/src/main/ruby/hbase/visibility_labels.rb @@ -24,9 +24,8 @@ java_import org.apache.hadoop.hbase.util.Bytes module Hbase class VisibilityLabelsAdmin - def initialize(admin, formatter) + def initialize(admin) @admin = admin - @formatter = formatter @connection = @admin.getConnection() end @@ -41,7 +40,7 @@ module Hbase labels = [ args ].flatten.compact end if labels.size() == 0 - raise(ArgumentError, "Arguments cannot be null") + raise(ArgumentError, "Arguments cannot be null") end begin diff --git hbase-shell/src/main/ruby/hbase_constants.rb hbase-shell/src/main/ruby/hbase_constants.rb new file mode 100644 index 0000000..bc6f37c --- /dev/null +++ hbase-shell/src/main/ruby/hbase_constants.rb @@ -0,0 +1,109 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# HBase ruby classes. +# Has wrapper classes for org.apache.hadoop.hbase.client.Admin +# and for org.apache.hadoop.hbase.client.Table. Classes take +# Formatters on construction and outputs any results using +# Formatter methods. These classes are only really for use by +# the hirb.rb HBase Shell script; they don't make much sense elsewhere. 
+# For example, the exists method on Admin class prints to the formatter +# whether the table exists and returns nil regardless. +include Java + +include_class('java.lang.Integer') {|package,name| "J#{name}" } +include_class('java.lang.Long') {|package,name| "J#{name}" } +include_class('java.lang.Boolean') {|package,name| "J#{name}" } + +module HBaseConstants + COLUMN = "COLUMN" + COLUMNS = "COLUMNS" + TIMESTAMP = "TIMESTAMP" + TIMERANGE = "TIMERANGE" + NAME = org.apache.hadoop.hbase.HConstants::NAME + VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS + IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY + IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION + METADATA = org.apache.hadoop.hbase.HConstants::METADATA + STOPROW = "STOPROW" + STARTROW = "STARTROW" + ROWPREFIXFILTER = "ROWPREFIXFILTER" + ENDROW = STOPROW + RAW = "RAW" + LIMIT = "LIMIT" + METHOD = "METHOD" + MAXLENGTH = "MAXLENGTH" + CACHE_BLOCKS = "CACHE_BLOCKS" + ALL_METRICS = "ALL_METRICS" + METRICS = "METRICS" + REVERSED = "REVERSED" + REPLICATION_SCOPE = "REPLICATION_SCOPE" + INTERVAL = 'INTERVAL' + CACHE = 'CACHE' + FILTER = 'FILTER' + SPLITS = 'SPLITS' + SPLITS_FILE = 'SPLITS_FILE' + SPLITALGO = 'SPLITALGO' + NUMREGIONS = 'NUMREGIONS' + REGION_REPLICATION = 'REGION_REPLICATION' + REGION_REPLICA_ID = 'REGION_REPLICA_ID' + CONFIGURATION = org.apache.hadoop.hbase.HConstants::CONFIGURATION + ATTRIBUTES="ATTRIBUTES" + VISIBILITY="VISIBILITY" + AUTHORIZATIONS = "AUTHORIZATIONS" + SKIP_FLUSH = 'SKIP_FLUSH' + CONSISTENCY = "CONSISTENCY" + USER = 'USER' + TABLE = 'TABLE' + NAMESPACE = 'NAMESPACE' + TYPE = 'TYPE' + NONE = 'NONE' + VALUE = 'VALUE' + ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME' + CLUSTER_KEY = 'CLUSTER_KEY' + TABLE_CFS = 'TABLE_CFS' + CONFIG = 'CONFIG' + DATA = 'DATA' + + # Load constants from hbase java API + def self.promote_constants(constants) + # The constants to import are all in uppercase + constants.each do |c| + next if c =~ /DEFAULT_.*/ || c != c.upcase + next if eval("defined?(#{c})") + eval("#{c} = '#{c}'") + end + end + + promote_constants(org.apache.hadoop.hbase.HColumnDescriptor.constants) + promote_constants(org.apache.hadoop.hbase.HTableDescriptor.constants) +end + +# Include classes definition +require 'hbase/hbase' +require 'hbase/admin' +require 'hbase/taskmonitor' +require 'hbase/table' +require 'hbase/quotas' +require 'hbase/replication_admin' +require 'hbase/security' +require 'hbase/visibility_labels' +require 'hbase/rsgroup_admin' + +include HBaseQuotasConstants diff --git hbase-shell/src/main/ruby/shell.rb hbase-shell/src/main/ruby/shell.rb index adcd8f2..d0cb577 100644 --- hbase-shell/src/main/ruby/shell.rb +++ hbase-shell/src/main/ruby/shell.rb @@ -70,21 +70,24 @@ module Shell #---------------------------------------------------------------------- class Shell attr_accessor :hbase - attr_accessor :formatter attr_accessor :interactive alias interactive? 
interactive
    @debug = false
    attr_accessor :debug

-    def initialize(hbase, formatter, interactive=true)
+    def initialize(hbase, interactive=true)
       self.hbase = hbase
-      self.formatter = formatter
       self.interactive = interactive
     end

-    def hbase_admin
-      @hbase_admin ||= hbase.admin(formatter)
+    # Returns Admin class from admin.rb
+    def admin
+      @admin ||= hbase.admin()
+    end
+
+    def hbase_taskmonitor
+      @hbase_taskmonitor ||= hbase.taskmonitor()
     end

     def hbase_table(name)
@@ -92,23 +95,23 @@
     end

     def hbase_replication_admin
-      @hbase_replication_admin ||= hbase.replication_admin(formatter)
+      @hbase_replication_admin ||= hbase.replication_admin()
     end

     def hbase_security_admin
-      @hbase_security_admin ||= hbase.security_admin(formatter)
+      @hbase_security_admin ||= hbase.security_admin()
     end

     def hbase_visibility_labels_admin
-      @hbase_visibility_labels_admin ||= hbase.visibility_labels_admin(formatter)
+      @hbase_visibility_labels_admin ||= hbase.visibility_labels_admin()
     end

     def hbase_quotas_admin
-      @hbase_quotas_admin ||= hbase.quotas_admin(formatter)
+      @hbase_quotas_admin ||= hbase.quotas_admin()
     end

     def hbase_rsgroup_admin
-      @rsgroup_admin ||= hbase.rsgroup_admin(formatter)
+      @rsgroup_admin ||= hbase.rsgroup_admin()
     end

     def export_commands(where)
@@ -131,22 +134,31 @@
       ::Shell.commands[command.to_s].new(self)
     end

-    #call the method 'command' on the specified command
+    # call the method 'command' on the specified command
+    # If interactive is enabled, then we suppress the return value. The command should have
+    # printed relevant output.
+    # Return value is only useful in non-interactive mode, for e.g. tests.
     def command(command, *args)
-      internal_command(command, :command, *args)
+      ret = internal_command(command, :command, *args)
+      if self.interactive
+        return nil
+      else
+        return ret
+      end
     end

-    #call a specific internal method in the command instance
+    # call a specific internal method in the command instance
     # command - name of the command to call
     # method_name - name of the method on the command to call. Defaults to just 'command'
     # args - to be passed to the named method
     def internal_command(command, method_name= :command, *args)
-      command_instance(command).command_safe(self.debug,method_name, *args)
+      command_instance(command).command_safe(self.debug, method_name, *args)
     end

     def print_banner
-      puts "HBase Shell; enter 'help' for list of supported commands."
-      puts 'Type "exit" to leave the HBase Shell'
+      puts 'HBase Shell'
+      puts 'Use "help" to get list of supported commands.'
+      puts 'Use "exit" to quit this interactive shell.'
       print 'Version '
       command('version')
       puts
@@ -247,6 +259,7 @@ Shell.load_command_group(
     version
     table_help
     whoami
+    processlist
   ]
 )
diff --git hbase-shell/src/main/ruby/shell/commands.rb hbase-shell/src/main/ruby/shell/commands.rb
index 0c37c4f..98fcf60 100644
--- hbase-shell/src/main/ruby/shell/commands.rb
+++ hbase-shell/src/main/ruby/shell/commands.rb
@@ -17,6 +17,8 @@
 # limitations under the License.
 #
+require 'shell/formatter'
+
 module Shell
   module Commands
     class Command
@@ -26,12 +28,15 @@
      end

      #wrap an execution of cmd to catch hbase exceptions
-      # cmd - command name to execture
+      # cmd - command name to execute
      # args - arguments to pass to the command
      def command_safe(debug, cmd = :command, *args)
+        # Commands can overwrite start_time to skip time used in some kind of setup.
+        # See count.rb for example.
+ @start_time = Time.now # send is internal ruby method to call 'cmd' with *args #(everything is a message, so this is just the formal semantics to support that idiom) - translate_hbase_exceptions(*args) { send(cmd,*args) } + translate_hbase_exceptions(*args) { send(cmd, *args) } rescue => e rootCause = e while rootCause != nil && rootCause.respond_to?(:cause) && rootCause.cause != nil @@ -48,10 +53,20 @@ module Shell else raise rootCause end + ensure + # If end_time is not already set by the command, use current time. + @end_time ||= Time.now + formatter.output_str("Took %.4f seconds" % [@end_time - @start_time]) end + # Convenience functions to get different admins + # Returns HBase::Admin ruby class. def admin - @shell.hbase_admin + @shell.admin + end + + def taskmonitor + @shell.hbase_taskmonitor end def table(name) @@ -79,24 +94,9 @@ module Shell end #---------------------------------------------------------------------- - + # Creates formatter instance first time and then reuses it. def formatter - @shell.formatter - end - - def format_simple_command - now = Time.now - yield - formatter.header - formatter.footer(now) - end - - def format_and_return_simple_command - now = Time.now - ret = yield - formatter.header - formatter.footer(now) - return ret + @formatter ||= ::Shell::Formatter::Console.new end def translate_hbase_exceptions(*args) diff --git hbase-shell/src/main/ruby/shell/commands/abort_procedure.rb hbase-shell/src/main/ruby/shell/commands/abort_procedure.rb index 6f77ab7..e69e133 100644 --- hbase-shell/src/main/ruby/shell/commands/abort_procedure.rb +++ hbase-shell/src/main/ruby/shell/commands/abort_procedure.rb @@ -40,11 +40,7 @@ EOF end def command(proc_id, may_interrupt_if_running=nil) - format_simple_command do - formatter.row([ - admin.abort_procedure?(proc_id, may_interrupt_if_running).to_s - ]) - end + formatter.row([admin.abort_procedure?(proc_id, may_interrupt_if_running).to_s]) end end end diff --git hbase-shell/src/main/ruby/shell/commands/add_labels.rb hbase-shell/src/main/ruby/shell/commands/add_labels.rb index 65a1140..7bde5fb 100644 --- hbase-shell/src/main/ruby/shell/commands/add_labels.rb +++ hbase-shell/src/main/ruby/shell/commands/add_labels.rb @@ -31,9 +31,7 @@ EOF end def command(*args) - format_simple_command do - visibility_labels_admin.add_labels(args) - end + visibility_labels_admin.add_labels(args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/add_peer.rb hbase-shell/src/main/ruby/shell/commands/add_peer.rb index 498f79f..558e86d 100644 --- hbase-shell/src/main/ruby/shell/commands/add_peer.rb +++ hbase-shell/src/main/ruby/shell/commands/add_peer.rb @@ -58,9 +58,7 @@ EOF end def command(id, args = {}, peer_tableCFs = nil) - format_simple_command do - replication_admin.add_peer(id, args, peer_tableCFs) - end + replication_admin.add_peer(id, args, peer_tableCFs) end end end diff --git hbase-shell/src/main/ruby/shell/commands/alter.rb hbase-shell/src/main/ruby/shell/commands/alter.rb index 2c3aa6f..8d6b6ca 100644 --- hbase-shell/src/main/ruby/shell/commands/alter.rb +++ hbase-shell/src/main/ruby/shell/commands/alter.rb @@ -22,19 +22,17 @@ module Shell class Alter < Command def help return <<-EOF -Alter a table. If the "hbase.online.schema.update.enable" property is set to -false, then the table must be disabled (see help 'disable'). If the -"hbase.online.schema.update.enable" property is set to true, tables can be -altered without disabling them first. 
Altering enabled tables has caused problems -in the past, so use caution and test it before using in production. +Alter a table. Tables can be altered without disabling them first. +Altering enabled tables has caused problems +in the past, so use caution and test it before using in production. -You can use the alter command to add, +You can use the alter command to add, modify or delete column families or change table configuration options. Column families work in a similar way as the 'create' command. The column family specification can either be a name string, or a dictionary with the NAME attribute. Dictionaries are described in the output of the 'help' command, with no arguments. -For example, to change or add the 'f1' column family in table 't1' from +For example, to change or add the 'f1' column family in table 't1' from current value to keep a maximum of 5 cell VERSIONS, do: hbase> alter 't1', NAME => 'f1', VERSIONS => 5 @@ -48,7 +46,7 @@ To delete the 'f1' column family in table 'ns1:t1', use one of: hbase> alter 'ns1:t1', NAME => 'f1', METHOD => 'delete' hbase> alter 'ns1:t1', 'delete' => 'f1' -You can also change table-scope attributes like MAX_FILESIZE, READONLY, +You can also change table-scope attributes like MAX_FILESIZE, READONLY, MEMSTORE_FLUSHSIZE, DURABILITY, etc. These can be put at the end; for example, to change the max size of a region to 128MB, do: @@ -85,16 +83,14 @@ You can also set REGION_REPLICATION: There could be more than one alteration in one command: - hbase> alter 't1', { NAME => 'f1', VERSIONS => 3 }, + hbase> alter 't1', { NAME => 'f1', VERSIONS => 3 }, { MAX_FILESIZE => '134217728' }, { METHOD => 'delete', NAME => 'f2' }, OWNER => 'johndoe', METADATA => { 'mykey' => 'myvalue' } EOF end def command(table, *args) - format_simple_command do - admin.alter(table, true, *args) - end + admin.alter(table, true, *args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/alter_async.rb hbase-shell/src/main/ruby/shell/commands/alter_async.rb index bddff01..e0f6deb 100644 --- hbase-shell/src/main/ruby/shell/commands/alter_async.rb +++ hbase-shell/src/main/ruby/shell/commands/alter_async.rb @@ -56,9 +56,7 @@ EOF end def command(table, *args) - format_simple_command do - admin.alter(table, false, *args) - end + admin.alter(table, false, *args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/alter_namespace.rb hbase-shell/src/main/ruby/shell/commands/alter_namespace.rb index a16e10d..0051c7f 100644 --- hbase-shell/src/main/ruby/shell/commands/alter_namespace.rb +++ hbase-shell/src/main/ruby/shell/commands/alter_namespace.rb @@ -35,9 +35,7 @@ EOF end def command(namespace, *args) - format_simple_command do - admin.alter_namespace(namespace, *args) - end + admin.alter_namespace(namespace, *args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/append.rb hbase-shell/src/main/ruby/shell/commands/append.rb index a0ef36d..93a4317 100644 --- hbase-shell/src/main/ruby/shell/commands/append.rb +++ hbase-shell/src/main/ruby/shell/commands/append.rb @@ -35,14 +35,14 @@ t to table 't1', the corresponding command would be: EOF end - def command(table, row, column, value, args={}) - append(table(table), row, column, value, args) + def command(table_name, row, column, value, args={}) + table = table(table_name) + @start_time = Time.now + append(table, row, column, value, args) end def append(table, row, column, value, args={}) - format_simple_command do - table._append_internal(row, column, value, args) - end + table._append_internal(row, 
column, value, args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/append_peer_tableCFs.rb hbase-shell/src/main/ruby/shell/commands/append_peer_tableCFs.rb index 753067a..fe34c5c 100644 --- hbase-shell/src/main/ruby/shell/commands/append_peer_tableCFs.rb +++ hbase-shell/src/main/ruby/shell/commands/append_peer_tableCFs.rb @@ -32,9 +32,7 @@ EOF end def command(id, table_cfs) - format_simple_command do - replication_admin.append_peer_tableCFs(id, table_cfs) - end + replication_admin.append_peer_tableCFs(id, table_cfs) end end end diff --git hbase-shell/src/main/ruby/shell/commands/assign.rb hbase-shell/src/main/ruby/shell/commands/assign.rb index 448a546..1220bf1 100644 --- hbase-shell/src/main/ruby/shell/commands/assign.rb +++ hbase-shell/src/main/ruby/shell/commands/assign.rb @@ -32,9 +32,7 @@ EOF end def command(region_name) - format_simple_command do - admin.assign(region_name) - end + admin.assign(region_name) end end end diff --git hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb index bee139f..c925f28 100644 --- hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb +++ hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb @@ -30,7 +30,14 @@ EOF end def command(group_name) - rsgroup_admin.balance_rs_group(group_name) + # Returns true if balancer was run, otherwise false. + ret = rsgroup_admin.balance_rs_group(group_name) + if ret + puts "Ran the balancer." + else + puts "Couldn't run the balancer." + end + ret end end end diff --git hbase-shell/src/main/ruby/shell/commands/balance_switch.rb hbase-shell/src/main/ruby/shell/commands/balance_switch.rb index 4d7778d..107d267 100644 --- hbase-shell/src/main/ruby/shell/commands/balance_switch.rb +++ hbase-shell/src/main/ruby/shell/commands/balance_switch.rb @@ -31,11 +31,7 @@ EOF end def command(enableDisable) - format_simple_command do - formatter.row([ - admin.balance_switch(enableDisable)? "true" : "false" - ]) - end + formatter.row([admin.balance_switch(enableDisable)? "true" : "false"]) end end end diff --git hbase-shell/src/main/ruby/shell/commands/balancer.rb hbase-shell/src/main/ruby/shell/commands/balancer.rb index ee53ca0..a7490a5 100644 --- hbase-shell/src/main/ruby/shell/commands/balancer.rb +++ hbase-shell/src/main/ruby/shell/commands/balancer.rb @@ -38,15 +38,13 @@ EOF end def command(force=nil) - format_simple_command do - formatter.row([ - if force.nil? - admin.balancer("false")? "true": "false" - elsif force == "force" - admin.balancer("true")? "true": "false" - end - ]) + force_balancer = 'false' + if force == 'force' + force_balancer = 'true' + elsif !force.nil? + raise ArgumentError, "Invalid argument #{force}." end + formatter.row([admin.balancer(force_balancer)? 
"true": "false"]) end end end diff --git hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb index 3b2f5c6..6a75ab5 100644 --- hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb +++ hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb @@ -30,11 +30,7 @@ EOF end def command() - format_simple_command do - formatter.row([ - admin.balancer_enabled?.to_s - ]) - end + formatter.row([admin.balancer_enabled?.to_s]) end end end diff --git hbase-shell/src/main/ruby/shell/commands/catalogjanitor_enabled.rb hbase-shell/src/main/ruby/shell/commands/catalogjanitor_enabled.rb index b310c3a..fdeb67e 100644 --- hbase-shell/src/main/ruby/shell/commands/catalogjanitor_enabled.rb +++ hbase-shell/src/main/ruby/shell/commands/catalogjanitor_enabled.rb @@ -29,11 +29,7 @@ EOF end def command() - format_simple_command do - formatter.row([ - admin.catalogjanitor_enabled()? "true" : "false" - ]) - end + formatter.row([admin.catalogjanitor_enabled()? "true" : "false"]) end end end diff --git hbase-shell/src/main/ruby/shell/commands/catalogjanitor_run.rb hbase-shell/src/main/ruby/shell/commands/catalogjanitor_run.rb index 03426cb..638a18f 100644 --- hbase-shell/src/main/ruby/shell/commands/catalogjanitor_run.rb +++ hbase-shell/src/main/ruby/shell/commands/catalogjanitor_run.rb @@ -28,9 +28,7 @@ Catalog janitor command to run the (garbage collection) scan from command line. EOF end def command() - format_simple_command do - admin.catalogjanitor_run() - end + admin.catalogjanitor_run() end end end diff --git hbase-shell/src/main/ruby/shell/commands/catalogjanitor_switch.rb hbase-shell/src/main/ruby/shell/commands/catalogjanitor_switch.rb index fce1925..d2d8e58 100644 --- hbase-shell/src/main/ruby/shell/commands/catalogjanitor_switch.rb +++ hbase-shell/src/main/ruby/shell/commands/catalogjanitor_switch.rb @@ -30,11 +30,7 @@ EOF end def command(enableDisable) - format_simple_command do - formatter.row([ - admin.catalogjanitor_switch(enableDisable)? "true" : "false" - ]) - end + formatter.row([admin.catalogjanitor_switch(enableDisable)? 
"true" : "false"]) end end end diff --git hbase-shell/src/main/ruby/shell/commands/clear_auths.rb hbase-shell/src/main/ruby/shell/commands/clear_auths.rb index 8553fa6..be56d5d 100644 --- hbase-shell/src/main/ruby/shell/commands/clear_auths.rb +++ hbase-shell/src/main/ruby/shell/commands/clear_auths.rb @@ -31,9 +31,7 @@ EOF end def command(user, *args) - format_simple_command do - visibility_labels_admin.clear_auths(user, args) - end + visibility_labels_admin.clear_auths(user, args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb index 0498c8e..c57e87f 100644 --- hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb +++ hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb @@ -32,9 +32,7 @@ EOF end def command(snapshot_name, table) - format_simple_command do - admin.clone_snapshot(snapshot_name, table) - end + admin.clone_snapshot(snapshot_name, table) end def handle_exceptions(cause, *args) diff --git hbase-shell/src/main/ruby/shell/commands/close_region.rb hbase-shell/src/main/ruby/shell/commands/close_region.rb index ef1c99c..3d01911 100644 --- hbase-shell/src/main/ruby/shell/commands/close_region.rb +++ hbase-shell/src/main/ruby/shell/commands/close_region.rb @@ -51,9 +51,7 @@ EOF end def command(region_name, server = nil) - format_simple_command do - admin.close_region(region_name, server) - end + admin.close_region(region_name, server) end end end diff --git hbase-shell/src/main/ruby/shell/commands/compact.rb hbase-shell/src/main/ruby/shell/commands/compact.rb index e267821..1607536 100644 --- hbase-shell/src/main/ruby/shell/commands/compact.rb +++ hbase-shell/src/main/ruby/shell/commands/compact.rb @@ -44,9 +44,7 @@ module Shell end def command(table_or_region_name, family = nil, type = "NORMAL") - format_simple_command do - admin.compact(table_or_region_name, family, type) - end + admin.compact(table_or_region_name, family, type) end end end diff --git hbase-shell/src/main/ruby/shell/commands/compact_rs.rb hbase-shell/src/main/ruby/shell/commands/compact_rs.rb index 0ecdd21..588b6fe 100644 --- hbase-shell/src/main/ruby/shell/commands/compact_rs.rb +++ hbase-shell/src/main/ruby/shell/commands/compact_rs.rb @@ -34,9 +34,7 @@ module Shell end def command(regionserver, major = false) - format_simple_command do - admin.compact_regionserver(regionserver, major) - end + admin.compactRegionserver(regionserver, major) end end end diff --git hbase-shell/src/main/ruby/shell/commands/count.rb hbase-shell/src/main/ruby/shell/commands/count.rb index 225005e..36250a6 100644 --- hbase-shell/src/main/ruby/shell/commands/count.rb +++ hbase-shell/src/main/ruby/shell/commands/count.rb @@ -61,12 +61,12 @@ EOF }.merge(params) # Call the counter method - now = Time.now + @start_time = Time.now formatter.header count = table._count_internal(params['INTERVAL'].to_i, params['CACHE'].to_i) do |cnt, row| formatter.row([ "Current count: #{cnt}, row: #{row}" ]) end - formatter.footer(now, count) + formatter.footer(count) return count end end diff --git hbase-shell/src/main/ruby/shell/commands/create.rb hbase-shell/src/main/ruby/shell/commands/create.rb index ab149bf..ee14455 100644 --- hbase-shell/src/main/ruby/shell/commands/create.rb +++ hbase-shell/src/main/ruby/shell/commands/create.rb @@ -62,10 +62,11 @@ EOF end def command(table, *args) - format_simple_command do - ret = admin.create(table, *args) - end - #and then return the table you just created + admin.create(table, *args) + @end_time = Time.now + 
puts "Created table " + table.to_s + + #and then return the table just created table(table) end end diff --git hbase-shell/src/main/ruby/shell/commands/create_namespace.rb hbase-shell/src/main/ruby/shell/commands/create_namespace.rb index adb6897..d478fc1 100644 --- hbase-shell/src/main/ruby/shell/commands/create_namespace.rb +++ hbase-shell/src/main/ruby/shell/commands/create_namespace.rb @@ -32,9 +32,7 @@ EOF end def command(namespace, *args) - format_simple_command do - admin.create_namespace(namespace, *args) - end + admin.create_namespace(namespace, *args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/delete.rb hbase-shell/src/main/ruby/shell/commands/delete.rb index dcb8341..bce6625 100644 --- hbase-shell/src/main/ruby/shell/commands/delete.rb +++ hbase-shell/src/main/ruby/shell/commands/delete.rb @@ -40,16 +40,15 @@ t to table 't1', the corresponding command would be: EOF end - def command(table, row, column, - timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {}) + def command(table, row, column, + timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {}) delete(table(table), row, column, timestamp, args) end - def delete(table, row, column, - timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {}) - format_simple_command do - table._delete_internal(row, column, timestamp, args) - end + def delete(table, row, column, + timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {}) + @start_time = Time.now + table._delete_internal(row, column, timestamp, args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/delete_all_snapshot.rb hbase-shell/src/main/ruby/shell/commands/delete_all_snapshot.rb index bc07259..5a77f73 100644 --- hbase-shell/src/main/ruby/shell/commands/delete_all_snapshot.rb +++ hbase-shell/src/main/ruby/shell/commands/delete_all_snapshot.rb @@ -41,9 +41,9 @@ EOF answer = gets.chomp unless count == 0 puts "No snapshots matched the regex #{regex.to_s}" if count == 0 return unless answer =~ /y.*/i - format_simple_command do - admin.delete_all_snapshot(regex) - end + @start_time = Time.now + admin.delete_all_snapshot(regex) + @end_time = Time.now list = admin.list_snapshot(regex) leftOverSnapshotCount = list.size successfullyDeleted = count - leftOverSnapshotCount diff --git hbase-shell/src/main/ruby/shell/commands/delete_snapshot.rb hbase-shell/src/main/ruby/shell/commands/delete_snapshot.rb index b8c3791..48059b0 100644 --- hbase-shell/src/main/ruby/shell/commands/delete_snapshot.rb +++ hbase-shell/src/main/ruby/shell/commands/delete_snapshot.rb @@ -28,9 +28,7 @@ EOF end def command(snapshot_name) - format_simple_command do - admin.delete_snapshot(snapshot_name) - end + admin.delete_snapshot(snapshot_name) end end end diff --git hbase-shell/src/main/ruby/shell/commands/delete_table_snapshots.rb hbase-shell/src/main/ruby/shell/commands/delete_table_snapshots.rb index dc4e0a2..89936f1 100644 --- hbase-shell/src/main/ruby/shell/commands/delete_table_snapshots.rb +++ hbase-shell/src/main/ruby/shell/commands/delete_table_snapshots.rb @@ -51,16 +51,15 @@ EOF puts "No snapshots matched the table name regular expression #{tableNameregex.to_s} and the snapshot name regular expression #{snapshotNameRegex.to_s}" if count == 0 return unless answer =~ /y.*/i - format_simple_command do - list.each do |deleteSnapshot| - begin - admin.delete_snapshot(deleteSnapshot.getName) - puts "Successfully deleted snapshot: #{deleteSnapshot.getName}" - puts "\n" - rescue RuntimeError - 
puts "Failed to delete snapshot: #{deleteSnapshot.getName}, due to below exception,\n" + $! - puts "\n" - end + @start_time = Time.now + list.each do |deleteSnapshot| + begin + admin.delete_snapshot(deleteSnapshot.getName) + puts "Successfully deleted snapshot: #{deleteSnapshot.getName}" + puts "\n" + rescue RuntimeError + puts "Failed to delete snapshot: #{deleteSnapshot.getName}, due to below exception,\n" + $! + puts "\n" end end end diff --git hbase-shell/src/main/ruby/shell/commands/deleteall.rb hbase-shell/src/main/ruby/shell/commands/deleteall.rb index e6118c9..2965403 100644 --- hbase-shell/src/main/ruby/shell/commands/deleteall.rb +++ hbase-shell/src/main/ruby/shell/commands/deleteall.rb @@ -48,9 +48,8 @@ EOF def deleteall(table, row, column = nil, timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {}) - format_simple_command do - table._deleteall_internal(row, column, timestamp, args) - end + @start_time = Time.now + table._deleteall_internal(row, column, timestamp, args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/describe.rb hbase-shell/src/main/ruby/shell/commands/describe.rb index bfa16cd..37938a7 100644 --- hbase-shell/src/main/ruby/shell/commands/describe.rb +++ hbase-shell/src/main/ruby/shell/commands/describe.rb @@ -33,8 +33,6 @@ EOF end def command(table) - now = Time.now - column_families = admin.get_column_families(table) formatter.header(["Table " + table.to_s + " is " + if admin.enabled?(table) then "ENABLED" else "DISABLED" end]) @@ -43,7 +41,7 @@ EOF column_families.each do |column_family| formatter.row([ column_family.to_s ], true) end - formatter.footer(now) + formatter.footer() end end end diff --git hbase-shell/src/main/ruby/shell/commands/describe_namespace.rb hbase-shell/src/main/ruby/shell/commands/describe_namespace.rb index cf135da..ebd9bd2 100644 --- hbase-shell/src/main/ruby/shell/commands/describe_namespace.rb +++ hbase-shell/src/main/ruby/shell/commands/describe_namespace.rb @@ -28,13 +28,10 @@ EOF end def command(namespace) - now = Time.now - desc = admin.describe_namespace(namespace) formatter.header([ "DESCRIPTION" ], [ 64 ]) formatter.row([ desc ], true, [ 64 ]) - formatter.footer(now) end end end diff --git hbase-shell/src/main/ruby/shell/commands/disable.rb hbase-shell/src/main/ruby/shell/commands/disable.rb index 79bcd86..6695002 100644 --- hbase-shell/src/main/ruby/shell/commands/disable.rb +++ hbase-shell/src/main/ruby/shell/commands/disable.rb @@ -29,9 +29,7 @@ EOF end def command(table) - format_simple_command do - admin.disable(table) - end + admin.disable(table) end end end diff --git hbase-shell/src/main/ruby/shell/commands/disable_peer.rb hbase-shell/src/main/ruby/shell/commands/disable_peer.rb index 416545b..c193f13 100644 --- hbase-shell/src/main/ruby/shell/commands/disable_peer.rb +++ hbase-shell/src/main/ruby/shell/commands/disable_peer.rb @@ -32,9 +32,7 @@ EOF end def command(id) - format_simple_command do - replication_admin.disable_peer(id) - end + replication_admin.disable_peer(id) end end end diff --git hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb index 5bf9667..a020d81 100644 --- hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb +++ hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb @@ -30,10 +30,8 @@ EOF end def command(table_name) - format_simple_command do - replication_admin.disable_tablerep(table_name) - end - puts "The replication swith of table 
'#{table_name}' successfully disabled" + replication_admin.disable_tablerep(table_name) + puts "Replication of table '#{table_name}' successfully disabled." end end end diff --git hbase-shell/src/main/ruby/shell/commands/drop.rb hbase-shell/src/main/ruby/shell/commands/drop.rb index fc7b134..3f7332c 100644 --- hbase-shell/src/main/ruby/shell/commands/drop.rb +++ hbase-shell/src/main/ruby/shell/commands/drop.rb @@ -29,9 +29,7 @@ EOF end def command(table) - format_simple_command do - admin.drop(table) - end + admin.drop(table) end end end diff --git hbase-shell/src/main/ruby/shell/commands/drop_namespace.rb hbase-shell/src/main/ruby/shell/commands/drop_namespace.rb index b030d27..392f247 100644 --- hbase-shell/src/main/ruby/shell/commands/drop_namespace.rb +++ hbase-shell/src/main/ruby/shell/commands/drop_namespace.rb @@ -27,9 +27,7 @@ EOF end def command(namespace) - format_simple_command do - admin.drop_namespace(namespace) - end + admin.drop_namespace(namespace) end end end diff --git hbase-shell/src/main/ruby/shell/commands/enable.rb hbase-shell/src/main/ruby/shell/commands/enable.rb index deeb70c..5d21219 100644 --- hbase-shell/src/main/ruby/shell/commands/enable.rb +++ hbase-shell/src/main/ruby/shell/commands/enable.rb @@ -29,9 +29,7 @@ EOF end def command(table) - format_simple_command do - admin.enable(table) - end + admin.enable(table) end end end diff --git hbase-shell/src/main/ruby/shell/commands/enable_peer.rb hbase-shell/src/main/ruby/shell/commands/enable_peer.rb index 55136ff..5f1a727 100644 --- hbase-shell/src/main/ruby/shell/commands/enable_peer.rb +++ hbase-shell/src/main/ruby/shell/commands/enable_peer.rb @@ -32,9 +32,7 @@ EOF end def command(id) - format_simple_command do - replication_admin.enable_peer(id) - end + replication_admin.enable_peer(id) end end end diff --git hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb index 15e3133..e4e2fc1 100644 --- hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb +++ hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb @@ -30,10 +30,8 @@ EOF end def command(table_name) - format_simple_command do - replication_admin.enable_tablerep(table_name) - end - puts "The replication swith of table '#{table_name}' successfully enabled" + replication_admin.enable_tablerep(table_name) + puts "Replication of table '#{table_name}' successfully enabled." end end end diff --git hbase-shell/src/main/ruby/shell/commands/exists.rb hbase-shell/src/main/ruby/shell/commands/exists.rb index bacf6c9..4eb13a4 100644 --- hbase-shell/src/main/ruby/shell/commands/exists.rb +++ hbase-shell/src/main/ruby/shell/commands/exists.rb @@ -29,11 +29,11 @@ EOF end def command(table) - format_simple_command do - formatter.row([ - "Table #{table} " + (admin.exists?(table.to_s) ? "does exist" : "does not exist") + exists = admin.exists?(table.to_s) + formatter.row([ + "Table #{table} " + (exists ? 
"does exist" : "does not exist") ]) - end + exists end end end diff --git hbase-shell/src/main/ruby/shell/commands/flush.rb hbase-shell/src/main/ruby/shell/commands/flush.rb index 2aefec5..13963e1 100644 --- hbase-shell/src/main/ruby/shell/commands/flush.rb +++ hbase-shell/src/main/ruby/shell/commands/flush.rb @@ -32,9 +32,7 @@ EOF end def command(table_or_region_name) - format_simple_command do - admin.flush(table_or_region_name) - end + admin.flush(table_or_region_name) end end end diff --git hbase-shell/src/main/ruby/shell/commands/get.rb hbase-shell/src/main/ruby/shell/commands/get.rb index 1ab13cb..8191c22 100644 --- hbase-shell/src/main/ruby/shell/commands/get.rb +++ hbase-shell/src/main/ruby/shell/commands/get.rb @@ -81,14 +81,14 @@ EOF end def get(table, row, *args) - now = Time.now + @start_time = Time.now formatter.header(["COLUMN", "CELL"]) - table._get_internal(row, *args) do |column, value| + count, is_stale = table._get_internal(row, *args) do |column, value| formatter.row([ column, value ]) end - formatter.footer(now) + formatter.footer(count, is_stale) end end end diff --git hbase-shell/src/main/ruby/shell/commands/get_auths.rb hbase-shell/src/main/ruby/shell/commands/get_auths.rb index 1b758ef..4ea1b2e 100644 --- hbase-shell/src/main/ruby/shell/commands/get_auths.rb +++ hbase-shell/src/main/ruby/shell/commands/get_auths.rb @@ -31,12 +31,11 @@ EOF end def command(user) - format_simple_command do - list = visibility_labels_admin.get_auths(user) - list.each do |auths| - formatter.row([org.apache.hadoop.hbase.util.Bytes::toStringBinary(auths.toByteArray)]) - end + list = visibility_labels_admin.get_auths(user) + list.each do |auths| + formatter.row([org.apache.hadoop.hbase.util.Bytes::toStringBinary(auths.toByteArray)]) end + list end end end diff --git hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb index ee02229..6417980 100644 --- hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb +++ hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb @@ -25,10 +25,10 @@ module Shell end def command(id) - peer_config = replication_admin.get_peer_config(id) - format_simple_command do - format_peer_config(peer_config) - end + peer_config = replication_admin.get_peer_config(id) + @start_time = Time.now + format_peer_config(peer_config) + peer_config end def format_peer_config(peer_config) diff --git hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb index 6772aa1..ce4be71 100644 --- hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb +++ hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb @@ -34,7 +34,7 @@ EOF def command(group_name) now = Time.now formatter.header(['GROUP INFORMATION']) - group_admin.get_rsgroup(group_name) do |s| + rsgroup_admin.get_rsgroup(group_name) do |s| formatter.row([s]) end formatter.footer(now) diff --git hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb index 322f6bb..a689a7c 100644 --- hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb +++ hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb @@ -31,7 +31,7 @@ EOF def command(server) now = Time.now - group_name = rsgroup_admin.getGroupOfServer(server).getName + group_name = rsgroup_admin.getRSGroupOfServer(server).getName formatter.row([group_name]) formatter.footer(now, 1) end diff --git hbase-shell/src/main/ruby/shell/commands/get_table.rb 
hbase-shell/src/main/ruby/shell/commands/get_table.rb index 43e7c1a..2270f43 100644 --- hbase-shell/src/main/ruby/shell/commands/get_table.rb +++ hbase-shell/src/main/ruby/shell/commands/get_table.rb @@ -38,9 +38,7 @@ EOF end def command(table, *args) - format_and_return_simple_command do - table(table) - end + table(table) end end end diff --git hbase-shell/src/main/ruby/shell/commands/grant.rb hbase-shell/src/main/ruby/shell/commands/grant.rb index a4e4547..d6f848b 100644 --- hbase-shell/src/main/ruby/shell/commands/grant.rb +++ hbase-shell/src/main/ruby/shell/commands/grant.rb @@ -74,9 +74,8 @@ EOF end end end - format_simple_command do - security_admin.grant(user, permissions, table_name, family, qualifier) - end + @start_time = Time.now + security_admin.grant(user, permissions, table_name, family, qualifier) elsif args[1].kind_of?(Hash) @@ -92,7 +91,7 @@ EOF raise(ArgumentError, "Scanner specification is not a Hash") unless scan.kind_of?(Hash) t = table(table_name) - now = Time.now + @start_time = Time.now scanner = t._get_scanner(scan) count = 0 iter = scanner.iterator @@ -106,7 +105,7 @@ EOF end count += 1 end - formatter.footer(now, count) + formatter.footer(count) else raise(ArgumentError, "Second argument should be a String or Hash") diff --git hbase-shell/src/main/ruby/shell/commands/incr.rb hbase-shell/src/main/ruby/shell/commands/incr.rb index d223a45..318fac3 100644 --- hbase-shell/src/main/ruby/shell/commands/incr.rb +++ hbase-shell/src/main/ruby/shell/commands/incr.rb @@ -49,13 +49,11 @@ EOF end def incr(table, row, column, value = nil, args={}) - format_simple_command do - if cnt = table._incr_internal(row, column, value, args) - puts "COUNTER VALUE = #{cnt}" - else - puts "No counter found at specified coordinates" - end - end + if cnt = table._incr_internal(row, column, value, args) + puts "COUNTER VALUE = #{cnt}" + else + puts "No counter found at specified coordinates" + end end end end diff --git hbase-shell/src/main/ruby/shell/commands/is_disabled.rb hbase-shell/src/main/ruby/shell/commands/is_disabled.rb index 6da7046..6a914e3 100644 --- hbase-shell/src/main/ruby/shell/commands/is_disabled.rb +++ hbase-shell/src/main/ruby/shell/commands/is_disabled.rb @@ -29,12 +29,8 @@ EOF end def command(table) - format_simple_command do - formatter.row([ - admin.disabled?(table)? "true" : "false" - ]) - end - end + formatter.row([admin.disabled?(table)? "true" : "false"]) + end end end end diff --git hbase-shell/src/main/ruby/shell/commands/is_enabled.rb hbase-shell/src/main/ruby/shell/commands/is_enabled.rb index 960ade7..da9c566 100644 --- hbase-shell/src/main/ruby/shell/commands/is_enabled.rb +++ hbase-shell/src/main/ruby/shell/commands/is_enabled.rb @@ -29,11 +29,9 @@ EOF end def command(table) - format_simple_command do - formatter.row([ - admin.enabled?(table)? "true" : "false" - ]) - end + enabled = admin.enabled?(table) + formatter.row([enabled ? 
"true" : "false"]) + enabled end end end diff --git hbase-shell/src/main/ruby/shell/commands/list.rb hbase-shell/src/main/ruby/shell/commands/list.rb index dce0ae2..f7a0987 100644 --- hbase-shell/src/main/ruby/shell/commands/list.rb +++ hbase-shell/src/main/ruby/shell/commands/list.rb @@ -33,7 +33,6 @@ EOF end def command(regex = ".*") - now = Time.now formatter.header([ "TABLE" ]) list = admin.list(regex) @@ -41,7 +40,7 @@ EOF formatter.row([ table ]) end - formatter.footer(now, list.size) + formatter.footer(list.size) return list end end diff --git hbase-shell/src/main/ruby/shell/commands/list_labels.rb hbase-shell/src/main/ruby/shell/commands/list_labels.rb index 6c7f991..6b730b2 100644 --- hbase-shell/src/main/ruby/shell/commands/list_labels.rb +++ hbase-shell/src/main/ruby/shell/commands/list_labels.rb @@ -32,11 +32,9 @@ EOF end def command(regex = ".*") - format_simple_command do - list = visibility_labels_admin.list_labels(regex) - list.each do |label| - formatter.row([org.apache.hadoop.hbase.util.Bytes::toStringBinary(label.toByteArray)]) - end + list = visibility_labels_admin.list_labels(regex) + list.each do |label| + formatter.row([org.apache.hadoop.hbase.util.Bytes::toStringBinary(label.toByteArray)]) end end end diff --git hbase-shell/src/main/ruby/shell/commands/list_namespace.rb hbase-shell/src/main/ruby/shell/commands/list_namespace.rb index 5d25604..63aeac1 100644 --- hbase-shell/src/main/ruby/shell/commands/list_namespace.rb +++ hbase-shell/src/main/ruby/shell/commands/list_namespace.rb @@ -31,7 +31,6 @@ EOF end def command(regex = ".*") - now = Time.now formatter.header([ "NAMESPACE" ]) list = admin.list_namespace(regex) @@ -39,7 +38,7 @@ EOF formatter.row([ table ]) end - formatter.footer(now, list.size) + formatter.footer(list.size) end end end diff --git hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb index 29e1812..30d4db0 100644 --- hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb +++ hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb @@ -30,7 +30,6 @@ EOF end def command(namespace) - now = Time.now formatter.header([ "TABLE" ]) list = admin.list_namespace_tables(namespace) @@ -38,7 +37,8 @@ EOF formatter.row([ table ]) end - formatter.footer(now, list.size) + formatter.footer(list.size) + list end end end diff --git hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb index fc6e4a7..8946e39 100644 --- hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb +++ hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb @@ -25,18 +25,17 @@ module Shell end def command - format_simple_command do - peer_configs = replication_admin.list_peer_configs - unless peer_configs.nil? - peer_configs.each do |peer_config_entry| - peer_id = peer_config_entry[0] - peer_config = peer_config_entry[1] - formatter.row(["PeerId", peer_id]) - GetPeerConfig.new(@shell).format_peer_config(peer_config) - formatter.row([" "]) - end + peer_configs = replication_admin.list_peer_configs + unless peer_configs.nil? 
+ peer_configs.each do |peer_config_entry| + peer_id = peer_config_entry[0] + peer_config = peer_config_entry[1] + formatter.row(["PeerId", peer_id]) + GetPeerConfig.new(@shell).format_peer_config(peer_config) + formatter.row([" "]) end end + peer_configs end end end diff --git hbase-shell/src/main/ruby/shell/commands/list_peers.rb hbase-shell/src/main/ruby/shell/commands/list_peers.rb index cc1be04..72a0704 100644 --- hbase-shell/src/main/ruby/shell/commands/list_peers.rb +++ hbase-shell/src/main/ruby/shell/commands/list_peers.rb @@ -30,7 +30,6 @@ EOF end def command() - now = Time.now peers = replication_admin.list_peers formatter.header(["PEER_ID", "CLUSTER_KEY", "STATE", "TABLE_CFS"]) @@ -41,7 +40,8 @@ EOF formatter.row([ e.key, e.value, state, tableCFs ]) end - formatter.footer(now) + formatter.footer() + peers end end end diff --git hbase-shell/src/main/ruby/shell/commands/list_procedures.rb hbase-shell/src/main/ruby/shell/commands/list_procedures.rb index f407547..83e08c1 100644 --- hbase-shell/src/main/ruby/shell/commands/list_procedures.rb +++ hbase-shell/src/main/ruby/shell/commands/list_procedures.rb @@ -29,7 +29,6 @@ EOF end def command() - now = Time.now formatter.header([ "Id", "Name", "State", "Start_Time", "Last_Update" ]) list = admin.list_procedures() @@ -39,7 +38,7 @@ EOF formatter.row([ proc.getProcId, proc.getProcName, proc.getProcState, start_time, last_update ]) end - formatter.footer(now, list.size) + formatter.footer(list.size) end end end diff --git hbase-shell/src/main/ruby/shell/commands/list_quotas.rb hbase-shell/src/main/ruby/shell/commands/list_quotas.rb index 682bb71..604d833 100644 --- hbase-shell/src/main/ruby/shell/commands/list_quotas.rb +++ hbase-shell/src/main/ruby/shell/commands/list_quotas.rb @@ -37,7 +37,6 @@ EOF end def command(args = {}) - now = Time.now formatter.header(["OWNER", "QUOTAS"]) #actually do the scanning @@ -45,7 +44,7 @@ EOF formatter.row([ row, cells ]) end - formatter.footer(now, count) + formatter.footer(count) end end end diff --git hbase-shell/src/main/ruby/shell/commands/list_replicated_tables.rb hbase-shell/src/main/ruby/shell/commands/list_replicated_tables.rb index 0db1d83..142adfc 100644 --- hbase-shell/src/main/ruby/shell/commands/list_replicated_tables.rb +++ hbase-shell/src/main/ruby/shell/commands/list_replicated_tables.rb @@ -31,8 +31,6 @@ EOF end def command(regex = ".*") - now = Time.now - formatter.header([ "TABLE:COLUMNFAMILY", "ReplicationType" ], [ 32 ]) list = replication_admin.list_replicated_tables(regex) list.each do |e| @@ -43,7 +41,7 @@ EOF end formatter.row([e.get(org.apache.hadoop.hbase.client.replication.ReplicationAdmin::TNAME) + ":" + e.get(org.apache.hadoop.hbase.client.replication.ReplicationAdmin::CFNAME), replicateType], true, [32]) end - formatter.footer(now) + formatter.footer() end end end diff --git hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb index 4e68802..bc91737 100644 --- hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb +++ hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb @@ -34,7 +34,6 @@ EOF end def command(regex = ".*") - now = Time.now formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME"]) list = admin.list_snapshot(regex) @@ -43,7 +42,7 @@ EOF formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")" ]) end - formatter.footer(now, list.size) + formatter.footer(list.size) return list.map { |s| s.getName() } end end diff --git 
hbase-shell/src/main/ruby/shell/commands/list_table_snapshots.rb hbase-shell/src/main/ruby/shell/commands/list_table_snapshots.rb index 3a32e9e..1efcc17 100644 --- hbase-shell/src/main/ruby/shell/commands/list_table_snapshots.rb +++ hbase-shell/src/main/ruby/shell/commands/list_table_snapshots.rb @@ -39,7 +39,6 @@ EOF end def command(tableNameRegex, snapshotNameRegex = ".*") - now = Time.now formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME"]) list = admin.list_table_snapshots(tableNameRegex, snapshotNameRegex) @@ -48,7 +47,7 @@ EOF formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")" ]) end - formatter.footer(now, list.size) + formatter.footer(list.size) return list.map { |s| s.getName() } end end diff --git hbase-shell/src/main/ruby/shell/commands/locate_region.rb hbase-shell/src/main/ruby/shell/commands/locate_region.rb index b1e8c7b..e2487c1 100644 --- hbase-shell/src/main/ruby/shell/commands/locate_region.rb +++ hbase-shell/src/main/ruby/shell/commands/locate_region.rb @@ -30,14 +30,13 @@ EOF end def command(table, row_key) - now = Time.now - region_location = admin.locate_region(table, row_key) hri = region_location.getRegionInfo() formatter.header([ "HOST", "REGION" ]) formatter.row([region_location.getHostnamePort(), hri.toString()]) - formatter.footer(now, 1) + formatter.footer(1) + region_location end end end diff --git hbase-shell/src/main/ruby/shell/commands/major_compact.rb hbase-shell/src/main/ruby/shell/commands/major_compact.rb index 5efd895..9b0573c 100644 --- hbase-shell/src/main/ruby/shell/commands/major_compact.rb +++ hbase-shell/src/main/ruby/shell/commands/major_compact.rb @@ -44,9 +44,7 @@ module Shell end def command(table_or_region_name, family = nil, type = "NORMAL") - format_simple_command do - admin.major_compact(table_or_region_name, family, type) - end + admin.major_compact(table_or_region_name, family, type) end end end diff --git hbase-shell/src/main/ruby/shell/commands/merge_region.rb hbase-shell/src/main/ruby/shell/commands/merge_region.rb index 6afa2e5..63f7159 100644 --- hbase-shell/src/main/ruby/shell/commands/merge_region.rb +++ hbase-shell/src/main/ruby/shell/commands/merge_region.rb @@ -40,9 +40,7 @@ EOF end def command(encoded_region_a_name, encoded_region_b_name, force = 'false') - format_simple_command do - admin.merge_region(encoded_region_a_name, encoded_region_b_name, force) - end + admin.merge_region(encoded_region_a_name, encoded_region_b_name, force) end end end diff --git hbase-shell/src/main/ruby/shell/commands/move.rb hbase-shell/src/main/ruby/shell/commands/move.rb index e6b2828..24816f3 100644 --- hbase-shell/src/main/ruby/shell/commands/move.rb +++ hbase-shell/src/main/ruby/shell/commands/move.rb @@ -38,9 +38,7 @@ EOF end def command(encoded_region_name, server_name = nil) - format_simple_command do - admin.move(encoded_region_name, server_name) - end + admin.move(encoded_region_name, server_name) end end end diff --git hbase-shell/src/main/ruby/shell/commands/normalize.rb hbase-shell/src/main/ruby/shell/commands/normalize.rb index 7e6302c..0a61227 100644 --- hbase-shell/src/main/ruby/shell/commands/normalize.rb +++ hbase-shell/src/main/ruby/shell/commands/normalize.rb @@ -33,11 +33,7 @@ EOF end def command() - format_simple_command do - formatter.row([ - admin.normalize()? "true": "false" - ]) - end + formatter.row([admin.normalize()? 
"true": "false"]) end end end diff --git hbase-shell/src/main/ruby/shell/commands/normalizer_enabled.rb hbase-shell/src/main/ruby/shell/commands/normalizer_enabled.rb index 1121b25..d39b777 100644 --- hbase-shell/src/main/ruby/shell/commands/normalizer_enabled.rb +++ hbase-shell/src/main/ruby/shell/commands/normalizer_enabled.rb @@ -30,11 +30,7 @@ EOF end def command() - format_simple_command do - formatter.row([ - admin.normalizer_enabled?.to_s - ]) - end + formatter.row([admin.normalizer_enabled?.to_s]) end end end diff --git hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb index 6d959c4..7a12b71 100644 --- hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb +++ hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb @@ -32,11 +32,7 @@ EOF end def command(enableDisable) - format_simple_command do - formatter.row([ - admin.normalizer_switch(enableDisable)? "true" : "false" - ]) - end + formatter.row([admin.normalizer_switch(enableDisable)? "true" : "false"]) end end end diff --git hbase-shell/src/main/ruby/shell/commands/processlist.rb hbase-shell/src/main/ruby/shell/commands/processlist.rb new file mode 100644 index 0000000..5715f4b --- /dev/null +++ hbase-shell/src/main/ruby/shell/commands/processlist.rb @@ -0,0 +1,65 @@ +# +# Copyright 2010 The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class Processlist < Command + def help + return <<-EOF +Show regionserver task list. + + hbase> processlist + hbase> processlist 'all' + hbase> processlist 'general' + hbase> processlist 'handler' + hbase> processlist 'rpc' + hbase> processlist 'operation' + hbase> processlist 'all','host187.example.com' + hbase> processlist 'all','host187.example.com,16020' + hbase> processlist 'all','host187.example.com,16020,1289493121758' + +EOF + end + + def command(*args) + + if ['all','general','handler','rpc','operation'].include? args[0] + # if the first argument is a valid filter specifier, use it as such + filter = args[0] + hosts = args[1,args.length] + else + # otherwise, treat all arguments as host addresses by default + filter = 'general' + hosts = args + end + + hosts = admin.getServerNames(hosts) + + if hosts == nil + puts "No regionservers available." 
+ else + taskmonitor.tasks(filter,hosts) + end + + end + + end + end +end diff --git hbase-shell/src/main/ruby/shell/commands/put.rb hbase-shell/src/main/ruby/shell/commands/put.rb index 2b47a4d..39f9fea 100644 --- hbase-shell/src/main/ruby/shell/commands/put.rb +++ hbase-shell/src/main/ruby/shell/commands/put.rb @@ -45,9 +45,8 @@ EOF end def put(table, row, column, value, timestamp = nil, args = {}) - format_simple_command do - table._put_internal(row, column, value, timestamp, args) - end + @start_time = Time.now + table._put_internal(row, column, value, timestamp, args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/remove_peer.rb hbase-shell/src/main/ruby/shell/commands/remove_peer.rb index 5ae5786..bc9d6ab 100644 --- hbase-shell/src/main/ruby/shell/commands/remove_peer.rb +++ hbase-shell/src/main/ruby/shell/commands/remove_peer.rb @@ -30,9 +30,7 @@ EOF end def command(id) - format_simple_command do - replication_admin.remove_peer(id) - end + replication_admin.remove_peer(id) end end end diff --git hbase-shell/src/main/ruby/shell/commands/remove_peer_tableCFs.rb hbase-shell/src/main/ruby/shell/commands/remove_peer_tableCFs.rb index 70bc9b5..adfb85d 100644 --- hbase-shell/src/main/ruby/shell/commands/remove_peer_tableCFs.rb +++ hbase-shell/src/main/ruby/shell/commands/remove_peer_tableCFs.rb @@ -33,9 +33,7 @@ EOF end def command(id, table_cfs) - format_simple_command do - replication_admin.remove_peer_tableCFs(id, table_cfs) - end + replication_admin.remove_peer_tableCFs(id, table_cfs) end end end diff --git hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb index 4d53171..2471e1b 100644 --- hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb +++ hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb @@ -32,9 +32,7 @@ EOF end def command(snapshot_name) - format_simple_command do - admin.restore_snapshot(snapshot_name) - end + admin.restore_snapshot(snapshot_name) end end end diff --git hbase-shell/src/main/ruby/shell/commands/revoke.rb hbase-shell/src/main/ruby/shell/commands/revoke.rb index 4a0d5ff..bcf60e9 100644 --- hbase-shell/src/main/ruby/shell/commands/revoke.rb +++ hbase-shell/src/main/ruby/shell/commands/revoke.rb @@ -39,9 +39,7 @@ EOF end def command(user, table_name=nil, family=nil, qualifier=nil) - format_simple_command do - security_admin.revoke(user, table_name, family, qualifier) - end + security_admin.revoke(user, table_name, family, qualifier) end end end diff --git hbase-shell/src/main/ruby/shell/commands/scan.rb hbase-shell/src/main/ruby/shell/commands/scan.rb index 6f49f80..b3cc5c8 100644 --- hbase-shell/src/main/ruby/shell/commands/scan.rb +++ hbase-shell/src/main/ruby/shell/commands/scan.rb @@ -104,17 +104,17 @@ EOF #internal command that actually does the scanning def scan(table, args = {}) - now = Time.now formatter.header(["ROW", "COLUMN+CELL"]) scan = table._hash_to_scan(args) #actually do the scanning - count = table._scan_internal(args, scan) do |row, cells| + @start_time = Time.now + count, is_stale = table._scan_internal(args, scan) do |row, cells| formatter.row([ row, cells ]) end + @end_time = Time.now - formatter.footer(now, count) - + formatter.footer(count, is_stale) # if scan metrics were enabled, print them after the results if (scan != nil && scan.isScanMetricsEnabled()) formatter.scan_metrics(scan.getScanMetrics(), args["METRICS"]) diff --git hbase-shell/src/main/ruby/shell/commands/set_auths.rb 
hbase-shell/src/main/ruby/shell/commands/set_auths.rb index 4a52eb0..5663ec3 100644 --- hbase-shell/src/main/ruby/shell/commands/set_auths.rb +++ hbase-shell/src/main/ruby/shell/commands/set_auths.rb @@ -31,9 +31,7 @@ EOF end def command(user, *args) - format_simple_command do - visibility_labels_admin.set_auths(user, args) - end + visibility_labels_admin.set_auths(user, args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb index fb7fae5..b2e823c 100644 --- hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb +++ hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb @@ -41,9 +41,7 @@ module Shell end def command(id, peer_table_cfs = nil) - format_simple_command do - replication_admin.set_peer_tableCFs(id, peer_table_cfs) - end + replication_admin.set_peer_tableCFs(id, peer_table_cfs) end end end diff --git hbase-shell/src/main/ruby/shell/commands/set_visibility.rb hbase-shell/src/main/ruby/shell/commands/set_visibility.rb index 2c77d8b..058ccf2 100644 --- hbase-shell/src/main/ruby/shell/commands/set_visibility.rb +++ hbase-shell/src/main/ruby/shell/commands/set_visibility.rb @@ -51,7 +51,7 @@ EOF def command(table, visibility, scan) t = table(table) - now = Time.now + @start_time = Time.now scanner = t._get_scanner(scan) count = 0 iter = scanner.iterator @@ -65,7 +65,7 @@ EOF end count += 1 end - formatter.footer(now, count) + formatter.footer(count) end end diff --git hbase-shell/src/main/ruby/shell/commands/show_filters.rb hbase-shell/src/main/ruby/shell/commands/show_filters.rb index cdbd9ed..5ff0be4 100644 --- hbase-shell/src/main/ruby/shell/commands/show_filters.rb +++ hbase-shell/src/main/ruby/shell/commands/show_filters.rb @@ -36,7 +36,6 @@ EOF end def command( ) - now = Time.now parseFilter = ParseFilter.new supportedFilters = parseFilter.getSupportedFilters diff --git hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb index 3ce3d06..b6b6956 100644 --- hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb +++ hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb @@ -30,7 +30,9 @@ module Shell end def command(id) - puts replication_admin.show_peer_tableCFs(id) + peer_table_cfs = replication_admin.show_peer_tableCFs(id) + puts peer_table_cfs + peer_table_cfs end end end diff --git hbase-shell/src/main/ruby/shell/commands/snapshot.rb hbase-shell/src/main/ruby/shell/commands/snapshot.rb index 15bf298..fd37d07 100644 --- hbase-shell/src/main/ruby/shell/commands/snapshot.rb +++ hbase-shell/src/main/ruby/shell/commands/snapshot.rb @@ -29,9 +29,7 @@ EOF end def command(table, snapshot_name, *args) - format_simple_command do - admin.snapshot(table, snapshot_name, *args) - end + admin.snapshot(table, snapshot_name, *args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/split.rb hbase-shell/src/main/ruby/shell/commands/split.rb index 9dc424f..9e6ec6a 100644 --- hbase-shell/src/main/ruby/shell/commands/split.rb +++ hbase-shell/src/main/ruby/shell/commands/split.rb @@ -34,9 +34,7 @@ EOF end def command(table_or_region_name, split_point = nil) - format_simple_command do - admin.split(table_or_region_name, split_point) - end + admin.split(table_or_region_name, split_point) end end end diff --git hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb index 7da7564..5a13871 100644 --- 
hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb +++ hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb @@ -30,11 +30,9 @@ EOF end def command(switch_type) - format_simple_command do - formatter.row( - [admin.splitormerge_enabled(switch_type) ? 'true' : 'false'] - ) - end + formatter.row( + [admin.splitormerge_enabled(switch_type) ? 'true' : 'false'] + ) end end end diff --git hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb index f4c2858..73dc82d 100644 --- hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb +++ hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb @@ -32,11 +32,9 @@ EOF end def command(switch_type, enabled) - format_simple_command do - formatter.row( - [admin.splitormerge_switch(switch_type, enabled) ? 'true' : 'false'] - ) - end + formatter.row( + [admin.splitormerge_switch(switch_type, enabled) ? 'true' : 'false'] + ) end end end diff --git hbase-shell/src/main/ruby/shell/commands/trace.rb hbase-shell/src/main/ruby/shell/commands/trace.rb index 5e00930..d838979 100644 --- hbase-shell/src/main/ruby/shell/commands/trace.rb +++ hbase-shell/src/main/ruby/shell/commands/trace.rb @@ -48,9 +48,7 @@ EOF end def command(startstop="status", spanname="HBaseShell") - format_and_return_simple_command do - trace(startstop, spanname) - end + trace(startstop, spanname) end def trace(startstop, spanname) diff --git hbase-shell/src/main/ruby/shell/commands/truncate.rb hbase-shell/src/main/ruby/shell/commands/truncate.rb index b7812fb..aff51ac 100644 --- hbase-shell/src/main/ruby/shell/commands/truncate.rb +++ hbase-shell/src/main/ruby/shell/commands/truncate.rb @@ -27,10 +27,7 @@ EOF end def command(table) - format_simple_command do - puts "Truncating '#{table}' table (it may take a while):" - admin.truncate(table) { |log| puts " - #{log}" } - end + admin.truncate(table) end end diff --git hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb index 918b232..8bb3131 100644 --- hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb +++ hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb @@ -27,10 +27,7 @@ EOF end def command(table) - format_simple_command do - puts "Truncating '#{table}' table (it may take a while):" - admin.truncate_preserve(table) { |log| puts " - #{log}" } - end + admin.truncate_preserve(table) end end diff --git hbase-shell/src/main/ruby/shell/commands/unassign.rb hbase-shell/src/main/ruby/shell/commands/unassign.rb index 5eea71f..b69971f 100644 --- hbase-shell/src/main/ruby/shell/commands/unassign.rb +++ hbase-shell/src/main/ruby/shell/commands/unassign.rb @@ -36,9 +36,7 @@ EOF end def command(region_name, force = 'false') - format_simple_command do - admin.unassign(region_name, force) - end + admin.unassign(region_name, force) end end end diff --git hbase-shell/src/main/ruby/shell/commands/update_all_config.rb hbase-shell/src/main/ruby/shell/commands/update_all_config.rb index 05295b7..cb6852f 100644 --- hbase-shell/src/main/ruby/shell/commands/update_all_config.rb +++ hbase-shell/src/main/ruby/shell/commands/update_all_config.rb @@ -30,9 +30,7 @@ EOF end def command() - format_simple_command do - admin.update_all_config() - end + admin.update_all_config() end end end diff --git hbase-shell/src/main/ruby/shell/commands/update_config.rb hbase-shell/src/main/ruby/shell/commands/update_config.rb index 9f65fdd..3617bb3 100644 --- 
hbase-shell/src/main/ruby/shell/commands/update_config.rb +++ hbase-shell/src/main/ruby/shell/commands/update_config.rb @@ -31,9 +31,7 @@ EOF end def command(serverName) - format_simple_command do - admin.update_config(serverName) - end + admin.update_config(serverName) end end end diff --git hbase-shell/src/main/ruby/shell/commands/update_peer_config.rb hbase-shell/src/main/ruby/shell/commands/update_peer_config.rb index 5d721fd..c09acc2 100644 --- hbase-shell/src/main/ruby/shell/commands/update_peer_config.rb +++ hbase-shell/src/main/ruby/shell/commands/update_peer_config.rb @@ -40,9 +40,7 @@ To update TABLE_CFs, see the append_peer_tableCFs and remove_peer_tableCFs comma end def command(id, args = {}) - format_simple_command do - replication_admin.update_peer_config(id, args) - end + replication_admin.update_peer_config(id, args) end end end diff --git hbase-shell/src/main/ruby/shell/commands/user_permission.rb hbase-shell/src/main/ruby/shell/commands/user_permission.rb index e4673fc..4b5d3ff 100644 --- hbase-shell/src/main/ruby/shell/commands/user_permission.rb +++ hbase-shell/src/main/ruby/shell/commands/user_permission.rb @@ -30,6 +30,8 @@ For example: hbase> user_permission hbase> user_permission '@ns1' + hbase> user_permission '@.*' + hbase> user_permission '@^[a-c].*' hbase> user_permission 'table1' hbase> user_permission 'namespace1:table1' hbase> user_permission '.*' @@ -38,16 +40,14 @@ EOF end def command(table_regex=nil) - #format_simple_command do #admin.user_permission(table_regex) - now = Time.now formatter.header(["User", "Namespace,Table,Family,Qualifier:Permission"]) count = security_admin.user_permission(table_regex) do |user, permission| formatter.row([ user, permission]) end - formatter.footer(now, count) + formatter.footer(count) end end end diff --git hbase-shell/src/main/ruby/shell/commands/wal_roll.rb hbase-shell/src/main/ruby/shell/commands/wal_roll.rb index 0fe1870..a94e9e1 100644 --- hbase-shell/src/main/ruby/shell/commands/wal_roll.rb +++ hbase-shell/src/main/ruby/shell/commands/wal_roll.rb @@ -30,11 +30,10 @@ EOF end def command(server_name) - format_simple_command do - admin.wal_roll(server_name) - end + admin.wal_roll(server_name) end end + #TODO remove old HLog version class HlogRoll < WalRoll end diff --git hbase-shell/src/main/ruby/shell/formatter.rb hbase-shell/src/main/ruby/shell/formatter.rb index 47c9c8d..2f800f6 100644 --- hbase-shell/src/main/ruby/shell/formatter.rb +++ hbase-shell/src/main/ruby/shell/formatter.rb @@ -64,7 +64,7 @@ module Shell # Print a string if args.is_a?(String) - output(args) + output_str(args) @out.puts return end @@ -162,7 +162,7 @@ module Shell return str end - def output(str) + def output_str(str) output(@max_width, str) end @@ -177,11 +177,13 @@ module Shell end end - def footer(start_time = nil, row_count = nil) - return unless start_time + def footer(row_count = nil, is_stale = false) row_count ||= @row_count # Only output elapsed time and row count if startTime passed - @out.puts("%d row(s) in %.4f seconds" % [row_count, Time.now - start_time]) + @out.puts("%d row(s)" % [row_count]) + if is_stale == true + @out.puts(" (possible stale results) ") + end end end diff --git hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java index 24d07ed..074b9f7 100644 --- hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java +++ 
hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java @@ -37,14 +37,16 @@ public abstract class AbstractTestShell { @BeforeClass public static void setUpBeforeClass() throws Exception { // Start mini cluster - TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true); TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6); TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false); TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); - TEST_UTIL.getConfiguration().setInt(HConstants.MASTER_INFO_PORT, -1); - TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_INFO_PORT, -1); + + // Below settings are necessary for task monitor test. + TEST_UTIL.getConfiguration().setInt(HConstants.MASTER_INFO_PORT, 0); + TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_INFO_PORT, 0); + TEST_UTIL.getConfiguration().setBoolean(HConstants.REGIONSERVER_INFO_PORT_AUTO, true); // Security setup configuration SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration()); VisibilityTestUtil.enableVisiblityLabels(TEST_UTIL.getConfiguration()); diff --git hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java index 3f4af05..04fbc7a 100644 --- hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java +++ hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java @@ -28,7 +28,7 @@ import org.junit.experimental.categories.Category; @Category({ ClientTests.class, LargeTests.class }) public class TestReplicationShell extends AbstractTestShell { - @Ignore ("Disabled because hangs on occasion.. 
about 10% of the time") @Test + @Test public void testRunShellTests() throws IOException { System.setProperty("shell.test.include", "replication_admin_test.rb"); // Start all ruby tests diff --git hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java index be23a59..5f3720e 100644 --- hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java +++ hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java @@ -54,14 +54,11 @@ public class TestShellRSGroups { basePath = System.getProperty("basedir"); // Start mini cluster - TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true); TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6); TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false); TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); - TEST_UTIL.getConfiguration().setInt(HConstants.MASTER_INFO_PORT, -1); - TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_INFO_PORT, -1); // Security setup configuration SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration()); VisibilityTestUtil.enableVisiblityLabels(TEST_UTIL.getConfiguration()); diff --git hbase-shell/src/test/ruby/hbase/admin_test.rb hbase-shell/src/test/ruby/hbase/admin_test.rb index 50a65d0..cf9cf64 100644 --- hbase-shell/src/test/ruby/hbase/admin_test.rb +++ hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -18,9 +18,8 @@ # require 'shell' -require 'shell/formatter' require 'stringio' -require 'hbase' +require 'hbase_constants' require 'hbase/hbase' require 'hbase/table' @@ -42,21 +41,21 @@ module Hbase end define_test "exists? should return true when a table exists" do - assert(admin.exists?('hbase:meta')) + assert(command(:exists, 'hbase:meta')) end define_test "exists? should return false when a table exists" do - assert(!admin.exists?('NOT.EXISTS')) + assert(!command(:exists, 'NOT.EXISTS')) end define_test "enabled? should return true for enabled tables" do - admin.enable(@test_name) - assert(admin.enabled?(@test_name)) + command(:enable, @test_name) + assert(command(:is_enabled, @test_name)) end define_test "enabled? 
should return false for disabled tables" do - admin.disable(@test_name) - assert(!admin.enabled?(@test_name)) + command(:disable, @test_name) + assert(!command(:is_enabled, @test_name)) end end @@ -79,63 +78,67 @@ module Hbase end define_test "list should return a list of tables" do - assert(admin.list.member?(@test_name)) + list = command(:list) + assert(list.member?(@test_name)) end define_test "list should not return meta tables" do - assert(!admin.list.member?('hbase:meta')) + list = command(:list) + assert(!list.member?('hbase:meta')) end define_test "list_namespace_tables for the system namespace should return a list of tables" do - assert(admin.list_namespace_tables('hbase').count > 0) + list = command(:list_namespace_tables, 'hbase') + assert(list.count > 0) end define_test "list_namespace_tables for the default namespace should return a list of tables" do - assert(admin.list_namespace_tables('default').count > 0) + list = command(:list_namespace_tables, 'default') + assert(list.count > 0) end #------------------------------------------------------------------------------- define_test "flush should work" do - admin.flush('hbase:meta') + command(:flush, 'hbase:meta') end #------------------------------------------------------------------------------- define_test "compact should work" do - admin.compact('hbase:meta') + command(:compact, 'hbase:meta') end #------------------------------------------------------------------------------- define_test "major_compact should work" do - admin.major_compact('hbase:meta') + command(:major_compact, 'hbase:meta') end #------------------------------------------------------------------------------- define_test "split should work" do - admin.split('hbase:meta', nil) + command(:split, 'hbase:meta', nil) end #------------------------------------------------------------------------------- define_test "drop should fail on non-existent tables" do assert_raise(ArgumentError) do - admin.drop('NOT.EXISTS') + command(:drop, 'NOT.EXISTS') end end define_test "drop should fail on enabled tables" do assert_raise(ArgumentError) do - admin.drop(@test_name) + command(:drop, @test_name) end end define_test "drop should drop tables" do - admin.disable(@test_name) - admin.drop(@test_name) - assert(!admin.exists?(@test_name)) + command(:disable, @test_name) + command(:drop, @test_name) + assert(!command(:exists, @test_name)) end #------------------------------------------------------------------------------- @@ -148,45 +151,46 @@ module Hbase define_test "create should fail with non-string table names" do assert_raise(ArgumentError) do - admin.create(123, 'xxx') + command(:create, 123, 'xxx') end end define_test "create should fail with non-string/non-hash column args" do assert_raise(ArgumentError) do - admin.create(@create_test_name, 123) + command(:create, @create_test_name, 123) end end define_test "create should fail without columns" do drop_test_table(@create_test_name) assert_raise(ArgumentError) do - admin.create(@create_test_name) + command(:create, @create_test_name) end end define_test "create should fail without columns when called with options" do drop_test_table(@create_test_name) assert_raise(ArgumentError) do - admin.create(@create_test_name, { OWNER => 'a' }) + command(:create, @create_test_name, { OWNER => 'a' }) end end define_test "create should work with string column args" do drop_test_table(@create_test_name) - admin.create(@create_test_name, 'a', 'b') + command(:create, @create_test_name, 'a', 'b') assert_equal(['a:', 'b:'], 
table(@create_test_name).get_all_columns.sort) end define_test "create should work with hash column args" do drop_test_table(@create_test_name) - admin.create(@create_test_name, { NAME => 'a'}, { NAME => 'b'}) + command(:create, @create_test_name, { NAME => 'a'}, { NAME => 'b'}) assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) end define_test "create should be able to set table options" do drop_test_table(@create_test_name) - admin.create(@create_test_name, 'a', 'b', 'MAX_FILESIZE' => 12345678, OWNER => '987654321') + command(:create, @create_test_name, 'a', 'b', 'MAX_FILESIZE' => 12345678, + OWNER => '987654321') assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) assert_match(/12345678/, admin.describe(@create_test_name)) assert_match(/987654321/, admin.describe(@create_test_name)) @@ -194,14 +198,15 @@ module Hbase define_test "create should ignore table_att" do drop_test_table(@create_test_name) - admin.create(@create_test_name, 'a', 'b', METHOD => 'table_att', OWNER => '987654321') + command(:create, @create_test_name, 'a', 'b', METHOD => 'table_att', OWNER => '987654321') assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) assert_match(/987654321/, admin.describe(@create_test_name)) end define_test "create should work with SPLITALGO" do drop_test_table(@create_test_name) - admin.create(@create_test_name, 'a', 'b', {NUMREGIONS => 10, SPLITALGO => 'HexStringSplit'}) + command(:create, @create_test_name, 'a', 'b', + {NUMREGIONS => 10, SPLITALGO => 'HexStringSplit'}) assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort) end @@ -224,17 +229,13 @@ module Hbase table(@test_name).put(2, "x:a", 2) assert_equal(2, table(@test_name)._count_internal) # This is hacky. Need to get the configuration into admin instance - admin.truncate(@test_name, $TEST_CLUSTER.getConfiguration) + command(:truncate, @test_name) assert_equal(0, table(@test_name)._count_internal) end define_test "truncate should yield log records" do - logs = [] - admin.truncate(@test_name, $TEST_CLUSTER.getConfiguration) do |log| - assert_kind_of(String, log) - logs << log - end - assert(!logs.empty?) + output = capture_stdout { command(:truncate, @test_name) } + assert(!output.empty?) end end @@ -254,16 +255,16 @@ module Hbase end define_test "close_region should allow encoded & non-encoded region names" do - region = admin.locate_region(@test_name, '') + region = command(:locate_region, @test_name, '') serverName = region.getServerName().getServerName() regionName = region.getRegionInfo().getRegionNameAsString() encodedRegionName = region.getRegionInfo().getEncodedName() # Close region with just region name. - admin.close_region(regionName, nil) + command(:close_region, regionName, nil) # Close region with region name and server. 
- admin.close_region(regionName, serverName) - admin.close_region(encodedRegionName, serverName) + command(:close_region, regionName, serverName) + command(:close_region, encodedRegionName, serverName) end end @@ -287,77 +288,68 @@ module Hbase define_test "alter should fail with non-string table names" do assert_raise(ArgumentError) do - admin.alter(123, true, METHOD => 'delete', NAME => 'y') + command(:alter, 123, METHOD => 'delete', NAME => 'y') end end define_test "alter should fail with non-existing tables" do assert_raise(ArgumentError) do - admin.alter('NOT.EXISTS', true, METHOD => 'delete', NAME => 'y') + command(:alter, 'NOT.EXISTS', METHOD => 'delete', NAME => 'y') end end define_test "alter should not fail with enabled tables" do - admin.enable(@test_name) - admin.alter(@test_name, true, METHOD => 'delete', NAME => 'y') + command(:enable, @test_name) + command(:alter, @test_name, METHOD => 'delete', NAME => 'y') end define_test "alter should be able to delete column families" do assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort) - admin.alter(@test_name, true, METHOD => 'delete', NAME => 'y') - admin.enable(@test_name) + command(:alter, @test_name, METHOD => 'delete', NAME => 'y') + command(:enable, @test_name) assert_equal(['x:'], table(@test_name).get_all_columns.sort) end define_test "alter should be able to add column families" do assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort) - admin.alter(@test_name, true, NAME => 'z') - admin.enable(@test_name) + command(:alter, @test_name, NAME => 'z') + command(:enable, @test_name) assert_equal(['x:', 'y:', 'z:'], table(@test_name).get_all_columns.sort) end define_test "alter should be able to add column families (name-only alter spec)" do assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort) - admin.alter(@test_name, true, 'z') - admin.enable(@test_name) + command(:alter, @test_name, 'z') + command(:enable, @test_name) assert_equal(['x:', 'y:', 'z:'], table(@test_name).get_all_columns.sort) end define_test "alter should support more than one alteration in one call" do assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort) - alterOutput = capture_stdout { admin.alter(@test_name, true, { NAME => 'z' }, - { METHOD => 'delete', NAME => 'y' }, 'MAX_FILESIZE' => 12345678) } - admin.enable(@test_name) + alterOutput = capture_stdout { + command(:alter, @test_name, { NAME => 'z' }, { METHOD => 'delete', NAME => 'y' }, + 'MAX_FILESIZE' => 12345678) } + command(:enable, @test_name) assert_equal(1, /Updating all regions/.match(alterOutput).size, "HBASE-15641 - Should only perform one table modification per alter.") assert_equal(['x:', 'z:'], table(@test_name).get_all_columns.sort) assert_match(/12345678/, admin.describe(@test_name)) end - def capture_stdout - begin - old_stdout = $stdout - $stdout = StringIO.new('','w') - yield - $stdout.string - ensure - $stdout = old_stdout - end - end define_test 'alter should support shortcut DELETE alter specs' do assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort) - admin.alter(@test_name, true, 'delete' => 'y') + command(:alter, @test_name, 'delete' => 'y') assert_equal(['x:'], table(@test_name).get_all_columns.sort) end define_test "alter should be able to change table options" do - admin.alter(@test_name, true, METHOD => 'table_att', 'MAX_FILESIZE' => 12345678) + command(:alter, @test_name, METHOD => 'table_att', 'MAX_FILESIZE' => 12345678) assert_match(/12345678/, admin.describe(@test_name)) end define_test "alter should be able 
to change table options w/o table_att" do - admin.alter(@test_name, true, 'MAX_FILESIZE' => 12345678) + command(:alter, @test_name, 'MAX_FILESIZE' => 12345678) assert_match(/12345678/, admin.describe(@test_name)) end @@ -373,7 +365,7 @@ module Hbase # eval() is used to convert a string to regex assert_no_match(eval("/" + class_name + "/"), admin.describe(@test_name)) assert_no_match(eval("/" + cp_key + "/"), admin.describe(@test_name)) - admin.alter(@test_name, true, 'METHOD' => 'table_att', cp_key => cp_value) + command(:alter, @test_name, 'METHOD' => 'table_att', cp_key => cp_value) assert_match(eval("/" + class_name + "/"), admin.describe(@test_name)) assert_match(eval("/" + cp_key + "\\$(\\d+)/"), admin.describe(@test_name)) end @@ -383,12 +375,12 @@ module Hbase create_test_table(@test_name) key = "MAX_FILESIZE" - admin.alter(@test_name, true, 'METHOD' => 'table_att', key => 12345678) + command(:alter, @test_name, 'METHOD' => 'table_att', key => 12345678) # eval() is used to convert a string to regex assert_match(eval("/" + key + "/"), admin.describe(@test_name)) - admin.alter(@test_name, true, 'METHOD' => 'table_att_unset', 'NAME' => key) + command(:alter, @test_name, 'METHOD' => 'table_att_unset', 'NAME' => key) assert_no_match(eval("/" + key + "/"), admin.describe(@test_name)) end @@ -397,13 +389,13 @@ module Hbase key_1 = "TestAttr1" key_2 = "TestAttr2" - admin.create(@test_name, { NAME => 'i'}, METADATA => { key_1 => 1, key_2 => 2 }) + command(:create, @test_name, { NAME => 'i'}, METADATA => { key_1 => 1, key_2 => 2 }) # eval() is used to convert a string to regex assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) - admin.alter(@test_name, true, 'METHOD' => 'table_att_unset', 'NAME' => [ key_1, key_2 ]) + command(:alter, @test_name, 'METHOD' => 'table_att_unset', 'NAME' => [ key_1, key_2 ]) assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name)) assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name)) end @@ -451,66 +443,66 @@ module Hbase #------------------------------------------------------------------------------- define_test "Snapshot should fail with non-string table name" do assert_raise(ArgumentError) do - admin.snapshot(123, 'xxx') + command(:snapshot, 123, 'xxx') end end define_test "Snapshot should fail with non-string snapshot name" do assert_raise(ArgumentError) do - admin.snapshot(@test_name, 123) + command(:snapshot, @test_name, 123) end end define_test "Snapshot should fail without snapshot name" do assert_raise(ArgumentError) do - admin.snapshot(@test_name) + command(:snapshot, @test_name) end end define_test "Snapshot should work with string args" do drop_test_snapshot() - admin.snapshot(@test_name, @create_test_snapshot) - list = admin.list_snapshot(@create_test_snapshot) + command(:snapshot, @test_name, @create_test_snapshot) + list = command(:list_snapshots, @create_test_snapshot) assert_equal(1, list.size) end define_test "Snapshot should work when SKIP_FLUSH args" do drop_test_snapshot() - admin.snapshot(@test_name, @create_test_snapshot, {SKIP_FLUSH => true}) - list = admin.list_snapshot(@create_test_snapshot) + command(:snapshot, @test_name, @create_test_snapshot, {SKIP_FLUSH => true}) + list = command(:list_snapshots, @create_test_snapshot) assert_equal(1, list.size) end define_test "List snapshot without any args" do drop_test_snapshot() - admin.snapshot(@test_name, @create_test_snapshot) - list = admin.list_snapshot() + command(:snapshot, 
@test_name, @create_test_snapshot) + list = command(:list_snapshots) assert_equal(1, list.size) end define_test "List snapshot for a non-existing snapshot" do - list = admin.list_snapshot("xyz") + list = command(:list_snapshots, "xyz") assert_equal(0, list.size) end define_test "Restore snapshot without any args" do assert_raise(ArgumentError) do - admin.restore_snapshot() + command(:restore_snapshot) end end define_test "Restore snapshot should work" do drop_test_snapshot() restore_table = "test_restore_snapshot_table" - admin.create(restore_table, 'f1', 'f2') + command(:create, restore_table, 'f1', 'f2') assert_match(eval("/" + "f1" + "/"), admin.describe(restore_table)) assert_match(eval("/" + "f2" + "/"), admin.describe(restore_table)) - admin.snapshot(restore_table, @create_test_snapshot) - admin.alter(restore_table, true, METHOD => 'delete', NAME => 'f1') + command(:snapshot, restore_table, @create_test_snapshot) + command(:alter, restore_table, METHOD => 'delete', NAME => 'f1') assert_no_match(eval("/" + "f1" + "/"), admin.describe(restore_table)) assert_match(eval("/" + "f2" + "/"), admin.describe(restore_table)) drop_test_table(restore_table) - admin.restore_snapshot(@create_test_snapshot) + command(:restore_snapshot, @create_test_snapshot) assert_match(eval("/" + "f1" + "/"), admin.describe(restore_table)) assert_match(eval("/" + "f2" + "/"), admin.describe(restore_table)) drop_test_table(restore_table) @@ -518,13 +510,13 @@ module Hbase define_test "Clone snapshot without any args" do assert_raise(ArgumentError) do - admin.restore_snapshot() + command(:restore_snapshot) end end define_test "Clone snapshot without table name args" do assert_raise(ArgumentError) do - admin.clone_snapshot(@create_test_snapshot) + command(:clone_snapshot, @create_test_snapshot) end end @@ -533,8 +525,8 @@ module Hbase clone_table = "test_clone_snapshot_table" assert_match(eval("/" + "x" + "/"), admin.describe(@test_name)) assert_match(eval("/" + "y" + "/"), admin.describe(@test_name)) - admin.snapshot(@test_name, @create_test_snapshot) - admin.clone_snapshot(@create_test_snapshot, clone_table) + command(:snapshot, @test_name, @create_test_snapshot) + command(:clone_snapshot, @create_test_snapshot, clone_table) assert_match(eval("/" + "x" + "/"), admin.describe(clone_table)) assert_match(eval("/" + "y" + "/"), admin.describe(clone_table)) drop_test_table(clone_table) @@ -548,11 +540,11 @@ module Hbase define_test "Delete snapshot should work" do drop_test_snapshot() - admin.snapshot(@test_name, @create_test_snapshot) - list = admin.list_snapshot() + command(:snapshot, @test_name, @create_test_snapshot) + list = command(:list_snapshots) assert_equal(1, list.size) admin.delete_snapshot(@create_test_snapshot) - list = admin.list_snapshot() + list = command(:list_snapshots) assert_equal(0, list.size) end @@ -564,17 +556,17 @@ module Hbase define_test "Delete all snapshots should work" do drop_test_snapshot() - admin.snapshot(@test_name, "delete_all_snapshot1") - admin.snapshot(@test_name, "delete_all_snapshot2") - admin.snapshot(@test_name, "snapshot_delete_all_1") - admin.snapshot(@test_name, "snapshot_delete_all_2") - list = admin.list_snapshot() + command(:snapshot, @test_name, "delete_all_snapshot1") + command(:snapshot, @test_name, "delete_all_snapshot2") + command(:snapshot, @test_name, "snapshot_delete_all_1") + command(:snapshot, @test_name, "snapshot_delete_all_2") + list = command(:list_snapshots) assert_equal(4, list.size) admin.delete_all_snapshot("d.*") - list = admin.list_snapshot() + list 
= command(:list_snapshots) assert_equal(2, list.size) admin.delete_all_snapshot(".*") - list = admin.list_snapshot() + list = command(:list_snapshots) assert_equal(0, list.size) end @@ -586,48 +578,48 @@ module Hbase define_test "Delete table snapshots should work" do drop_test_snapshot() - admin.snapshot(@test_name, "delete_table_snapshot1") - admin.snapshot(@test_name, "delete_table_snapshot2") - admin.snapshot(@test_name, "snapshot_delete_table1") + command(:snapshot, @test_name, "delete_table_snapshot1") + command(:snapshot, @test_name, "delete_table_snapshot2") + command(:snapshot, @test_name, "snapshot_delete_table1") new_table = "test_delete_table_snapshots_table" - admin.create(new_table, 'f1') - admin.snapshot(new_table, "delete_table_snapshot3") - list = admin.list_snapshot() + command(:create, new_table, 'f1') + command(:snapshot, new_table, "delete_table_snapshot3") + list = command(:list_snapshots) assert_equal(4, list.size) admin.delete_table_snapshots(@test_name, "d.*") - list = admin.list_snapshot() + list = command(:list_snapshots) assert_equal(2, list.size) admin.delete_table_snapshots(@test_name) - list = admin.list_snapshot() + list = command(:list_snapshots) assert_equal(1, list.size) admin.delete_table_snapshots(".*", "d.*") - list = admin.list_snapshot() + list = command(:list_snapshots) assert_equal(0, list.size) drop_test_table(new_table) end define_test "List table snapshots without any args" do assert_raise(ArgumentError) do - admin.list_table_snapshots() + command(:list_table_snapshots) end end define_test "List table snapshots should work" do drop_test_snapshot() - admin.snapshot(@test_name, "delete_table_snapshot1") - admin.snapshot(@test_name, "delete_table_snapshot2") - admin.snapshot(@test_name, "snapshot_delete_table1") + command(:snapshot, @test_name, "delete_table_snapshot1") + command(:snapshot, @test_name, "delete_table_snapshot2") + command(:snapshot, @test_name, "snapshot_delete_table1") new_table = "test_list_table_snapshots_table" - admin.create(new_table, 'f1') - admin.snapshot(new_table, "delete_table_snapshot3") - list = admin.list_table_snapshots(".*") + command(:create, new_table, 'f1') + command(:snapshot, new_table, "delete_table_snapshot3") + list = command(:list_table_snapshots, ".*") assert_equal(4, list.size) - list = admin.list_table_snapshots(@test_name, "d.*") + list = command(:list_table_snapshots, @test_name, "d.*") assert_equal(2, list.size) - list = admin.list_table_snapshots(@test_name) + list = command(:list_table_snapshots, @test_name) assert_equal(3, list.size) admin.delete_table_snapshots(".*") - list = admin.list_table_snapshots(".*", ".*") + list = command(:list_table_snapshots, ".*", ".*") assert_equal(0, list.size) drop_test_table(new_table) end diff --git hbase-shell/src/test/ruby/hbase/hbase_test.rb hbase-shell/src/test/ruby/hbase/hbase_test.rb index 185ec3e..0f19234 100644 --- hbase-shell/src/test/ruby/hbase/hbase_test.rb +++ hbase-shell/src/test/ruby/hbase/hbase_test.rb @@ -17,12 +17,11 @@ # limitations under the License. 
# -require 'hbase' +require 'hbase_constants' module Hbase class HbaseTest < Test::Unit::TestCase def setup - @formatter = Shell::Formatter::Console.new() @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration) end @@ -31,19 +30,19 @@ module Hbase end define_test "Hbase::Hbase#admin should create a new admin object when called the first time" do - assert_kind_of(::Hbase::Admin, @hbase.admin(@formatter)) + assert_kind_of(::Hbase::Admin, @hbase.admin()) end define_test "Hbase::Hbase#admin should create a new admin object every call" do - assert_not_same(@hbase.admin(@formatter), @hbase.admin(@formatter)) + assert_not_same(@hbase.admin(), @hbase.admin()) end define_test "Hbase::Hbase#table should create a new table object when called the first time" do - assert_kind_of(::Hbase::Table, @hbase.table('hbase:meta', @formatter)) + assert_kind_of(::Hbase::Table, @hbase.table('hbase:meta', @shell)) end define_test "Hbase::Hbase#table should create a new table object every call" do - assert_not_same(@hbase.table('hbase:meta', @formatter), @hbase.table('hbase:meta', @formatter)) + assert_not_same(@hbase.table('hbase:meta', @shell), @hbase.table('hbase:meta', @shell)) end end end diff --git hbase-shell/src/test/ruby/hbase/replication_admin_test.rb hbase-shell/src/test/ruby/hbase/replication_admin_test.rb index 0c026d6..cf6eac2 100644 --- hbase-shell/src/test/ruby/hbase/replication_admin_test.rb +++ hbase-shell/src/test/ruby/hbase/replication_admin_test.rb @@ -18,8 +18,7 @@ # require 'shell' -require 'shell/formatter' -require 'hbase' +require 'hbase_constants' require 'hbase/hbase' require 'hbase/table' @@ -34,25 +33,25 @@ module Hbase setup_hbase - assert_equal(0, replication_admin.list_peers.length) + assert_equal(0, command(:list_peers).length) end def teardown - assert_equal(0, replication_admin.list_peers.length) + assert_equal(0, command(:list_peers).length) shutdown end define_test "add_peer: should fail when args isn't specified" do assert_raise(ArgumentError) do - replication_admin.add_peer(@peer_id, nil) + command(:add_peer, @peer_id, nil) end end define_test "add_peer: fail when neither CLUSTER_KEY nor ENDPOINT_CLASSNAME are specified" do assert_raise(ArgumentError) do args = {} - replication_admin.add_peer(@peer_id, args) + command(:add_peer, @peer_id, args) end end @@ -60,74 +59,74 @@ module Hbase assert_raise(ArgumentError) do args = { CLUSTER_KEY => 'zk1,zk2,zk3:2182:/hbase-prod', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.MyReplicationEndpoint' } - replication_admin.add_peer(@peer_id, args) + command(:add_peer, @peer_id, args) end end define_test "add_peer: args must be a hash" do assert_raise(ArgumentError) do - replication_admin.add_peer(@peer_id, 1) + command(:add_peer, @peer_id, 1) end assert_raise(ArgumentError) do - replication_admin.add_peer(@peer_id, ['test']) + command(:add_peer, @peer_id, ['test']) end assert_raise(ArgumentError) do - replication_admin.add_peer(@peer_id, 'test') + command(:add_peer, @peer_id, 'test') end end define_test "add_peer: single zk cluster key" do cluster_key = "server1.cie.com:2181:/hbase" - replication_admin.add_peer(@peer_id, {CLUSTER_KEY => cluster_key}) + command(:add_peer, @peer_id, {CLUSTER_KEY => cluster_key}) - assert_equal(1, replication_admin.list_peers.length) - assert(replication_admin.list_peers.key?(@peer_id)) - assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id).get_cluster_key) + assert_equal(1, command(:list_peers).length) + assert(command(:list_peers).key?(@peer_id)) + assert_equal(cluster_key, 
command(:list_peers).fetch(@peer_id).get_cluster_key) # cleanup for future tests - replication_admin.remove_peer(@peer_id) + command(:remove_peer, @peer_id) end define_test "add_peer: multiple zk cluster key" do cluster_key = "zk1,zk2,zk3:2182:/hbase-prod" - replication_admin.add_peer(@peer_id, {CLUSTER_KEY => cluster_key}) + command(:add_peer, @peer_id, {CLUSTER_KEY => cluster_key}) - assert_equal(1, replication_admin.list_peers.length) - assert(replication_admin.list_peers.key?(@peer_id)) - assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id).get_cluster_key) + assert_equal(1, command(:list_peers).length) + assert(command(:list_peers).key?(@peer_id)) + assert_equal(cluster_key, command(:list_peers).fetch(@peer_id).get_cluster_key) # cleanup for future tests - replication_admin.remove_peer(@peer_id) + command(:remove_peer, @peer_id) end define_test "add_peer: single zk cluster key - peer config" do cluster_key = "server1.cie.com:2181:/hbase" args = { CLUSTER_KEY => cluster_key } - replication_admin.add_peer(@peer_id, args) + command(:add_peer, @peer_id, args) - assert_equal(1, replication_admin.list_peers.length) - assert(replication_admin.list_peers.key?(@peer_id)) - assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id).get_cluster_key) + assert_equal(1, command(:list_peers).length) + assert(command(:list_peers).key?(@peer_id)) + assert_equal(cluster_key, command(:list_peers).fetch(@peer_id).get_cluster_key) # cleanup for future tests - replication_admin.remove_peer(@peer_id) + command(:remove_peer, @peer_id) end define_test "add_peer: multiple zk cluster key - peer config" do cluster_key = "zk1,zk2,zk3:2182:/hbase-prod" args = { CLUSTER_KEY => cluster_key } - replication_admin.add_peer(@peer_id, args) + command(:add_peer, @peer_id, args) - assert_equal(1, replication_admin.list_peers.length) - assert(replication_admin.list_peers.key?(@peer_id)) - assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id).get_cluster_key) + assert_equal(1, command(:list_peers).length) + assert(command(:list_peers).key?(@peer_id)) + assert_equal(cluster_key, command(:list_peers).fetch(@peer_id).get_cluster_key) # cleanup for future tests - replication_admin.remove_peer(@peer_id) + command(:remove_peer, @peer_id) end define_test "add_peer: multiple zk cluster key and table_cfs - peer config" do @@ -136,15 +135,15 @@ module Hbase table_cfs_str = "default.table1;default.table3:cf1,cf2;default.table2:cf1" args = { CLUSTER_KEY => cluster_key, TABLE_CFS => table_cfs } - replication_admin.add_peer(@peer_id, args) + command(:add_peer, @peer_id, args) - assert_equal(1, replication_admin.list_peers.length) - assert(replication_admin.list_peers.key?(@peer_id)) - assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id).get_cluster_key) - assert_equal(table_cfs_str, replication_admin.show_peer_tableCFs(@peer_id)) + assert_equal(1, command(:list_peers).length) + assert(command(:list_peers).key?(@peer_id)) + assert_equal(cluster_key, command(:list_peers).fetch(@peer_id).get_cluster_key) + assert_equal(table_cfs_str, command(:show_peer_tableCFs, @peer_id)) # cleanup for future tests - replication_admin.remove_peer(@peer_id) + command(:remove_peer, @peer_id) end define_test "add_peer: should fail when args is a hash and peer_tableCFs provided" do @@ -153,51 +152,51 @@ module Hbase assert_raise(ArgumentError) do args = { CLUSTER_KEY => cluster_key } - replication_admin.add_peer(@peer_id, args, table_cfs_str) + command(:add_peer, @peer_id, args, table_cfs_str) end 
end define_test "get_peer_config: works with simple clusterKey peer" do cluster_key = "localhost:2181:/hbase-test" args = { CLUSTER_KEY => cluster_key } - replication_admin.add_peer(@peer_id, args) - peer_config = replication_admin.get_peer_config(@peer_id) + command(:add_peer, @peer_id, args) + peer_config = command(:get_peer_config, @peer_id) assert_equal(cluster_key, peer_config.get_cluster_key) #cleanup - replication_admin.remove_peer(@peer_id) + command(:remove_peer, @peer_id) end define_test "get_peer_config: works with replicationendpointimpl peer and config params" do repl_impl = "org.apache.hadoop.hbase.replication.ReplicationEndpointForTest" config_params = { "config1" => "value1", "config2" => "value2" } args = { ENDPOINT_CLASSNAME => repl_impl, CONFIG => config_params} - replication_admin.add_peer(@peer_id, args) - peer_config = replication_admin.get_peer_config(@peer_id) + command(:add_peer, @peer_id, args) + peer_config = command(:get_peer_config, @peer_id) assert_equal(repl_impl, peer_config.get_replication_endpoint_impl) assert_equal(2, peer_config.get_configuration.size) assert_equal("value1", peer_config.get_configuration.get("config1")) #cleanup - replication_admin.remove_peer(@peer_id) + command(:remove_peer, @peer_id) end define_test "list_peer_configs: returns all peers' ReplicationPeerConfig objects" do cluster_key = "localhost:2181:/hbase-test" args = { CLUSTER_KEY => cluster_key } peer_id_second = '2' - replication_admin.add_peer(@peer_id, args) + command(:add_peer, @peer_id, args) repl_impl = "org.apache.hadoop.hbase.replication.ReplicationEndpointForTest" config_params = { "config1" => "value1", "config2" => "value2" } args2 = { ENDPOINT_CLASSNAME => repl_impl, CONFIG => config_params} - replication_admin.add_peer(peer_id_second, args2) + command(:add_peer, peer_id_second, args2) - peer_configs = replication_admin.list_peer_configs + peer_configs = command(:list_peer_configs) assert_equal(2, peer_configs.size) assert_equal(cluster_key, peer_configs.get(@peer_id).get_cluster_key) assert_equal(repl_impl, peer_configs.get(peer_id_second).get_replication_endpoint_impl) #cleanup - replication_admin.remove_peer(@peer_id) - replication_admin.remove_peer(peer_id_second) + command(:remove_peer, @peer_id) + command(:remove_peer, peer_id_second) end define_test "update_peer_config: can update peer config and data" do @@ -205,7 +204,7 @@ module Hbase config_params = { "config1" => "value1", "config2" => "value2" } data_params = {"data1" => "value1", "data2" => "value2"} args = { ENDPOINT_CLASSNAME => repl_impl, CONFIG => config_params, DATA => data_params} - replication_admin.add_peer(@peer_id, args) + command(:add_peer, @peer_id, args) #Normally the ReplicationSourceManager will call ReplicationPeer#peer_added, but here we have to do it ourselves replication_admin.peer_added(@peer_id) @@ -213,12 +212,12 @@ module Hbase new_config_params = { "config1" => "new_value1" } new_data_params = {"data1" => "new_value1"} new_args = {CONFIG => new_config_params, DATA => new_data_params} - replication_admin.update_peer_config(@peer_id, new_args) + command(:update_peer_config, @peer_id, new_args) #Make sure the updated key/value pairs in config and data were successfully updated, and that those we didn't #update are still there and unchanged - peer_config = replication_admin.get_peer_config(@peer_id) - replication_admin.remove_peer(@peer_id) + peer_config = command(:get_peer_config, @peer_id) + command(:remove_peer, @peer_id) assert_equal("new_value1", 
peer_config.get_configuration.get("config1")) assert_equal("value2", peer_config.get_configuration.get("config2")) assert_equal("new_value1", Bytes.to_string(peer_config.get_peer_data.get(Bytes.toBytes("data1")))) @@ -228,17 +227,17 @@ module Hbase # assert_raise fails on native exceptions - https://jira.codehaus.org/browse/JRUBY-5279 # Can't catch native Java exception with assert_raise in JRuby 1.6.8 as in the test below. # define_test "add_peer: adding a second peer with same id should error" do - # replication_admin.add_peer(@peer_id, '') - # assert_equal(1, replication_admin.list_peers.length) + # command(:add_peer, @peer_id, '') + # assert_equal(1, command(:list_peers).length) # # assert_raise(java.lang.IllegalArgumentException) do - # replication_admin.add_peer(@peer_id, '') + # command(:add_peer, @peer_id, '') # end # - # assert_equal(1, replication_admin.list_peers.length, 1) + # assert_equal(1, command(:list_peers).length, 1) # # # cleanup for future tests - # replication_admin.remove_peer(@peer_id) + # command(:remove_peer, @peer_id) # end end end diff --git hbase-shell/src/test/ruby/hbase/security_admin_test.rb hbase-shell/src/test/ruby/hbase/security_admin_test.rb index 6ecfb98..be5bbae 100644 --- hbase-shell/src/test/ruby/hbase/security_admin_test.rb +++ hbase-shell/src/test/ruby/hbase/security_admin_test.rb @@ -18,8 +18,7 @@ # require 'shell' -require 'shell/formatter' -require 'hbase' +require 'hbase_constants' require 'hbase/hbase' require 'hbase/table' diff --git hbase-shell/src/test/ruby/hbase/table_test.rb hbase-shell/src/test/ruby/hbase/table_test.rb index d4547b7..faf9827 100644 --- hbase-shell/src/test/ruby/hbase/table_test.rb +++ hbase-shell/src/test/ruby/hbase/table_test.rb @@ -17,7 +17,7 @@ # limitations under the License. # -require 'hbase' +require 'hbase_constants' include HBaseConstants @@ -561,7 +561,7 @@ module Hbase define_test "scan with a block should yield rows and return rows counter" do rows = {} res = @test_table._scan_internal { |row, cells| rows[row] = cells } - assert_equal(rows.keys.size, res) + assert_equal([rows.keys.size,false], res) end define_test "scan should support COLUMNS with value CONVERTER information" do diff --git hbase-shell/src/test/ruby/hbase/taskmonitor_test.rb hbase-shell/src/test/ruby/hbase/taskmonitor_test.rb new file mode 100644 index 0000000..cdb91c7 --- /dev/null +++ hbase-shell/src/test/ruby/hbase/taskmonitor_test.rb @@ -0,0 +1,38 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +require 'hbase_constants' + +module Hbase + class TaskMonitorTest < Test::Unit::TestCase + include TestHelpers + def setup + setup_hbase + end + + define_test "tasksOnHost should return tasks list" do + filter = 'all' + hosts = admin.getRegionServers() + hosts.each do |host| + tasks = taskmonitor.tasksOnHost(filter,host) + assert(tasks.length > 0) + end + end + end +end diff --git hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb index 47ac292..b42290f 100644 --- hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb +++ hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb @@ -18,8 +18,7 @@ # require 'shell' -require 'shell/formatter' -require 'hbase' +require 'hbase_constants' require 'hbase/hbase' require 'hbase/table' @@ -46,37 +45,37 @@ module Hbase define_test "Labels should be created as specified" do label = 'TEST_LABELS' count = table('hbase:labels')._count_internal - visibility_admin.add_labels('test_label') + command(:add_labels, 'test_label') assert_equal(count + 1, table('hbase:labels')._count_internal) end define_test "The set/clear methods should work with authorizations" do label = 'TEST_AUTHS' user = org.apache.hadoop.hbase.security.User.getCurrent().getName(); - visibility_admin.add_labels(label) + command(:add_labels, label) $TEST_CLUSTER.waitLabelAvailable(10000, label) - count = visibility_admin.get_auths(user).length + count = command(:get_auths, user).length # verifying the set functionality - visibility_admin.set_auths(user, label) - assert_equal(count + 1, visibility_admin.get_auths(user).length) + command(:set_auths, user, label) + assert_equal(count + 1, command(:get_auths, user).length) assert_block do - visibility_admin.get_auths(user).any? { + command(:get_auths, user).any? { |auth| org.apache.hadoop.hbase.util.Bytes::toStringBinary(auth.toByteArray) == label } end # verifying the clear functionality - visibility_admin.clear_auths(user, label) - assert_equal(count, visibility_admin.get_auths(user).length) + command(:clear_auths, user, label) + assert_equal(count, command(:get_auths, user).length) end define_test "The get/put methods should work for data written with Visibility" do label = 'TEST_VISIBILITY' user = org.apache.hadoop.hbase.security.User.getCurrent().getName(); - visibility_admin.add_labels(label) + command(:add_labels, label) $TEST_CLUSTER.waitLabelAvailable(10000, label) - visibility_admin.set_auths(user, label) + command(:set_auths, user, label) # verifying put functionality @test_table.put(1, "x:a", 31, {VISIBILITY=>label}) diff --git hbase-shell/src/test/ruby/shell/commands_test.rb hbase-shell/src/test/ruby/shell/commands_test.rb index 3f6a802..9fa291a 100644 --- hbase-shell/src/test/ruby/shell/commands_test.rb +++ hbase-shell/src/test/ruby/shell/commands_test.rb @@ -17,7 +17,7 @@ # limitations under the License. 
# -require 'hbase' +require 'hbase_constants' require 'hbase/table' require 'shell' diff --git hbase-shell/src/test/ruby/shell/formatter_test.rb hbase-shell/src/test/ruby/shell/formatter_test.rb index 7010479..540dd09 100644 --- hbase-shell/src/test/ruby/shell/formatter_test.rb +++ hbase-shell/src/test/ruby/shell/formatter_test.rb @@ -63,6 +63,6 @@ class ShellFormatterTest < Test::Unit::TestCase end define_test "Froematter#footer should work" do - formatter.footer(Time.now - 5) + formatter.footer() end end diff --git hbase-shell/src/test/ruby/shell/noninteractive_test.rb hbase-shell/src/test/ruby/shell/noninteractive_test.rb index 14bdbc7..0fae4cb 100644 --- hbase-shell/src/test/ruby/shell/noninteractive_test.rb +++ hbase-shell/src/test/ruby/shell/noninteractive_test.rb @@ -14,15 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # -require 'hbase' +require 'hbase_constants' require 'shell' -require 'shell/formatter' class NonInteractiveTest < Test::Unit::TestCase def setup - @formatter = ::Shell::Formatter::Console.new() @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration) - @shell = Shell::Shell.new(@hbase, @formatter, false) + @shell = Shell::Shell.new(@hbase, false) end define_test "Shell::Shell noninteractive mode should throw" do diff --git hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb index d892775..cb76c1f 100644 --- hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb +++ hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb @@ -17,16 +17,14 @@ # limitations under the License. # -require 'hbase' +require 'hbase_constants' require 'shell' -require 'shell/formatter' module Hbase class RSGroupShellTest < Test::Unit::TestCase def setup - @formatter = ::Shell::Formatter::Console.new @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration) - @shell = Shell::Shell.new(@hbase, @formatter) + @shell = Shell::Shell.new(@hbase) connection = $TEST_CLUSTER.getConnection @rsgroup_admin = org.apache.hadoop.hbase.rsgroup.RSGroupAdmin.newClient(connection) @@ -49,12 +47,15 @@ module Hbase assert_not_nil(group) assert_equal(0, group.getServers.count) - hostport = - @rsgroup_admin.getRSGroupInfo('default').getServers.iterator.next.toString + hostport = @rsgroup_admin.getRSGroupInfo('default').getServers.iterator.next + @shell.command('get_rsgroup', 'default') + hostPortStr = hostport.toString + @shell.command('get_server_rsgroup', [hostPortStr]) @shell.command('move_rsgroup_servers', group_name, - [hostport]) + [hostPortStr]) assert_equal(1, @rsgroup_admin.getRSGroupInfo(group_name).getServers.count) + assert_equal(group_name, @rsgroup_admin.getRSGroupOfServer(hostport).getName) @shell.command('move_rsgroup_tables', group_name, @@ -62,10 +63,10 @@ module Hbase assert_equal(1, @rsgroup_admin.getRSGroupInfo(group_name).getTables.count) count = 0 - @hbase.rsgroup_admin(@formatter).get_rsgroup(group_name) do |line| + @hbase.rsgroup_admin().get_rsgroup(group_name) do |line| case count when 1 - assert_equal(hostport, line) + assert_equal(hostPortStr, line) when 3 assert_equal(table_name, line) end @@ -74,22 +75,22 @@ module Hbase assert_equal(4, count) assert_equal(2, - @hbase.rsgroup_admin(@formatter).list_rs_groups.count) + @hbase.rsgroup_admin().list_rs_groups.count) # just run it to verify jruby->java api binding - @hbase.rsgroup_admin(@formatter).balance_rs_group(group_name) + @hbase.rsgroup_admin().balance_rs_group(group_name) end # we test exceptions that could be 
thrown by the ruby wrappers define_test 'Test bogus arguments' do assert_raise(ArgumentError) do - @hbase.rsgroup_admin(@formatter).get_rsgroup('foobar') + @hbase.rsgroup_admin().get_rsgroup('foobar') end assert_raise(ArgumentError) do - @hbase.rsgroup_admin(@formatter).get_rsgroup_of_server('foobar:123') + @hbase.rsgroup_admin().get_rsgroup_of_server('foobar:123') end assert_raise(ArgumentError) do - @hbase.rsgroup_admin(@formatter).get_rsgroup_of_table('foobar') + @hbase.rsgroup_admin().get_rsgroup_of_table('foobar') end end end diff --git hbase-shell/src/test/ruby/shell/shell_test.rb hbase-shell/src/test/ruby/shell/shell_test.rb index 56b7dc8..ab150a5 100644 --- hbase-shell/src/test/ruby/shell/shell_test.rb +++ hbase-shell/src/test/ruby/shell/shell_test.rb @@ -17,23 +17,21 @@ # limitations under the License. # -require 'hbase' +require 'hbase_constants' require 'shell' -require 'shell/formatter' class ShellTest < Test::Unit::TestCase def setup - @formatter = ::Shell::Formatter::Console.new() @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration) - @shell = Shell::Shell.new(@hbase, @formatter) + @shell = Shell::Shell.new(@hbase) end - define_test "Shell::Shell#hbase_admin should return an admin instance" do - assert_kind_of(Hbase::Admin, @shell.hbase_admin) + define_test "Shell::Shell#admin should return an admin instance" do + assert_kind_of(Hbase::Admin, @shell.admin) end - define_test "Shell::Shell#hbase_admin should cache admin instances" do - assert_same(@shell.hbase_admin, @shell.hbase_admin) + define_test "Shell::Shell#admin should cache admin instances" do + assert_same(@shell.admin, @shell.admin) end #------------------------------------------------------------------------------- @@ -46,6 +44,10 @@ class ShellTest < Test::Unit::TestCase assert_not_same(@shell.hbase_table('hbase:meta'), @shell.hbase_table('hbase:meta')) end + define_test "Shell::Shell#hbase attribute is a HBase instance" do + assert_kind_of(Hbase::Hbase, @shell.hbase) + end + #------------------------------------------------------------------------------- define_test "Shell::Shell#export_commands should export command methods to specified object" do diff --git hbase-shell/src/test/ruby/test_helper.rb hbase-shell/src/test/ruby/test_helper.rb index e75cd36..ec6bb6a 100644 --- hbase-shell/src/test/ruby/test_helper.rb +++ hbase-shell/src/test/ruby/test_helper.rb @@ -37,27 +37,34 @@ end module Hbase module TestHelpers - require 'hbase' + require 'hbase_constants' require 'hbase/hbase' require 'shell' - require 'shell/formatter' def setup_hbase - formatter = ::Shell::Formatter::Console.new hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration) - @shell = ::Shell::Shell.new(hbase, formatter) + @shell = ::Shell::Shell.new(hbase, interactive = false) end def shutdown @shell.hbase.shutdown end + # This function triggers exactly same path as the users. 
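+    # It delegates to Shell#command, so argument handling and console output follow
+    # the same code path the interactive shell uses. A minimal usage sketch with a
+    # hypothetical table name:
+    #
+    #   command(:create, 't_example', {'NAME' => 'f1'})
+    #   command(:alter, 't_example', 'MAX_FILESIZE' => 12345678)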
+ def command(command, *args) + @shell.command(command, *args) + end + def table(table) @shell.hbase_table(table) end def admin - @shell.hbase_admin + @shell.admin + end + + def taskmonitor + @shell.hbase_taskmonitor end def security_admin @@ -68,6 +75,10 @@ module Hbase @shell.hbase_visibility_labels_admin end + def quotas_admin + @shell.hbase_quotas_admin + end + def replication_admin @shell.hbase_replication_admin end @@ -79,7 +90,7 @@ module Hbase def create_test_table(name) # Create the table if needed unless admin.exists?(name) - admin.create name, [{'NAME' => 'x', 'VERSIONS' => 5}, 'y'] + command(:create, name, {'NAME' => 'x', 'VERSIONS' => 5}, 'y') return end @@ -92,7 +103,7 @@ module Hbase def create_test_table_with_splits(name, splits) # Create the table if needed unless admin.exists?(name) - admin.create name, 'f1', splits + command(:create, name, 'f1', splits) end # Enable the table if needed @@ -126,6 +137,18 @@ module Hbase puts "IGNORING DELETE ALL SNAPSHOT ERROR: #{e}" end end + + + def capture_stdout + begin + old_stdout = $stdout + $stdout = StringIO.new('','w') + yield + $stdout.string + ensure + $stdout = old_stdout + end + end end end diff --git hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/ColumnSpec.java hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/ColumnSpec.java new file mode 100644 index 0000000..28a2c1c --- /dev/null +++ hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/ColumnSpec.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.spark; + +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Used by SparkSQLPushDownFilter.currentCellToColumnIndexMap to maintain + * the information for a column with start and length in an HBase cell, used by + * composite key. Because to support composite key, one cell may consists of multiple columns + * of Spark table. + */ +public class ColumnSpec { + public int start; + public int length; + // the column name of the spark table. + public String name; + + public ColumnSpec(int s, int l, String n) { + start = s; + length = l; + name = n; + } + + /** + * Used in Spark Driver to serialize to byte array to be shipped to region server. + * @return bytes[] + */ + public byte[] toBytes() { + byte[] nb = Bytes.toBytes(name); + byte[] b = new byte[Bytes.SIZEOF_INT * 2 + nb.length]; + System.arraycopy(Bytes.toBytes(start), 0, b, 0, Bytes.SIZEOF_INT); + System.arraycopy(Bytes.toBytes(length), 0, b, Bytes.SIZEOF_INT, Bytes.SIZEOF_INT); + System.arraycopy(nb, 0, b, Bytes.SIZEOF_INT * 2, nb.length); + return b; + } + + /** + * Used inside of region server to deserialize the ColumnSpec from byte array. 
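+   * The byte layout mirrors {@link #toBytes()}: a 4-byte start offset, a 4-byte
+   * length, then the remaining bytes as the UTF-8 column name.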
+ * @return deserialized ColumnSpec + */ + public static ColumnSpec fromBytes(byte[] b) { + int s = Bytes.toInt(b, 0); + int l = Bytes.toInt(b, Bytes.SIZEOF_INT); + String n = Bytes.toString(b, Bytes.SIZEOF_INT * 2); + ColumnSpec cs = new ColumnSpec(s, l, n); + return cs; + } +} diff --git hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java index 071c1ca..ab85129 100644 --- hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java +++ hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java @@ -37,6 +37,8 @@ import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; /** * This filter will push down all qualifier logic given to us @@ -49,21 +51,22 @@ public class SparkSQLPushDownFilter extends FilterBase{ //The following values are populated with protobuffer DynamicLogicExpression dynamicLogicExpression; byte[][] valueFromQueryArray; - HashMap> - currentCellToColumnIndexMap; + // This is the map for Map[Columnfamilty, Map[ColumnQualifier, ColumnSpec]] + // ColumnSpec is the format of (String, start, offset) to support composite key + HashMap> + currentCellToColumnIndexMap; //The following values are transient HashMap columnToCurrentRowValueMap = null; static final byte[] rowKeyFamily = new byte[0]; - static final byte[] rowKeyQualifier = Bytes.toBytes("key"); String encoderClassName; public SparkSQLPushDownFilter(DynamicLogicExpression dynamicLogicExpression, byte[][] valueFromQueryArray, HashMap> + HashMap> currentCellToColumnIndexMap, String encoderClassName) { this.dynamicLogicExpression = dynamicLogicExpression; this.valueFromQueryArray = valueFromQueryArray; @@ -89,7 +92,7 @@ public class SparkSQLPushDownFilter extends FilterBase{ ByteArrayComparable familyByteComparable = new ByteArrayComparable(cfBytes, 0, cfBytes.length); - HashMap qualifierIndexMap = + HashMap qualifierIndexMap = currentCellToColumnIndexMap.get(familyByteComparable); if (qualifierIndexMap == null) { @@ -100,7 +103,7 @@ public class SparkSQLPushDownFilter extends FilterBase{ ByteArrayComparable qualifierByteComparable = new ByteArrayComparable(qBytes, 0, qBytes.length); - qualifierIndexMap.put(qualifierByteComparable, field.colName()); + qualifierIndexMap.put(qualifierByteComparable, field.columnSpec().toBytes()); } } @@ -111,20 +114,24 @@ public class SparkSQLPushDownFilter extends FilterBase{ // the row key if (columnToCurrentRowValueMap == null) { columnToCurrentRowValueMap = new HashMap<>(); - HashMap qualifierColumnMap = + HashMap qualifierColumnMap = currentCellToColumnIndexMap.get( new ByteArrayComparable(rowKeyFamily, 0, rowKeyFamily.length)); if (qualifierColumnMap != null) { - String rowKeyColumnName = - qualifierColumnMap.get( - new ByteArrayComparable(rowKeyQualifier, 0, - rowKeyQualifier.length)); - //Make sure that the rowKey is part of the where clause - if (rowKeyColumnName != null) { - columnToCurrentRowValueMap.put(rowKeyColumnName, - new ByteArrayComparable(c.getRowArray(), - c.getRowOffset(), c.getRowLength())); + Set> entries = qualifierColumnMap.entrySet(); + // We should get a sequence of rowKeyColumnName (name, start, length) + // and fill all the columns belong to row key. 
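+          // Illustrative example (hypothetical columns): a composite key mapped as
+          // ("id", start 0, length 4) and ("ts", start 4, length -1) produces two
+          // entries covering bytes [0, 4) and [4, rowLength) of the row key.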
+ for (Entry entry: entries) { + ColumnSpec cs = ColumnSpec.fromBytes(entry.getValue()); + int length = cs.length; + // If we do not know the length, assume to extend to the end. + if (length == -1) { + length = c.getRowLength() - cs.start; + } + columnToCurrentRowValueMap.put(cs.name, + new ByteArrayComparable(c.getRowArray(), + c.getRowOffset() + cs.start, length)); } } } @@ -135,17 +142,18 @@ public class SparkSQLPushDownFilter extends FilterBase{ c.getFamilyOffset(), c.getFamilyLength()); - HashMap qualifierColumnMap = + HashMap qualifierColumnMap = currentCellToColumnIndexMap.get( currentFamilyByteComparable); if (qualifierColumnMap != null) { - String columnName = + byte[] columnSpec = qualifierColumnMap.get( new ByteArrayComparable(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength())); + String columnName = ColumnSpec.fromBytes(columnSpec).name; if (columnName != null) { columnToCurrentRowValueMap.put(columnName, @@ -205,7 +213,7 @@ public class SparkSQLPushDownFilter extends FilterBase{ } //Load mapping from HBase family/qualifier to Spark SQL columnName - HashMap> + HashMap> currentCellToColumnIndexMap = new HashMap<>(); for (FilterProtos.SQLPredicatePushDownCellToColumnMapping @@ -216,7 +224,7 @@ public class SparkSQLPushDownFilter extends FilterBase{ sqlPredicatePushDownCellToColumnMapping.getColumnFamily().toByteArray(); ByteArrayComparable familyByteComparable = new ByteArrayComparable(familyArray, 0, familyArray.length); - HashMap qualifierMap = + HashMap qualifierMap = currentCellToColumnIndexMap.get(familyByteComparable); if (qualifierMap == null) { @@ -230,7 +238,7 @@ public class SparkSQLPushDownFilter extends FilterBase{ new ByteArrayComparable(qualifierArray, 0 ,qualifierArray.length); qualifierMap.put(qualifierByteComparable, - sqlPredicatePushDownCellToColumnMapping.getColumnName()); + sqlPredicatePushDownCellToColumnMapping.getColumnSpec().toByteArray()); } return new SparkSQLPushDownFilter(dynamicLogicExpression, @@ -253,15 +261,15 @@ public class SparkSQLPushDownFilter extends FilterBase{ builder.addValueFromQueryArray(ByteStringer.wrap(valueFromQuery)); } - for (Map.Entry> + for (Entry> familyEntry : currentCellToColumnIndexMap.entrySet()) { - for (Map.Entry qualifierEntry : + for (Entry qualifierEntry : familyEntry.getValue().entrySet()) { columnMappingBuilder.setColumnFamily( ByteStringer.wrap(familyEntry.getKey().bytes())); columnMappingBuilder.setQualifier( ByteStringer.wrap(qualifierEntry.getKey().bytes())); - columnMappingBuilder.setColumnName(qualifierEntry.getValue()); + columnMappingBuilder.setColumnSpec(ByteStringer.wrap(qualifierEntry.getValue())); builder.addCellToColumnMapping(columnMappingBuilder.build()); } } diff --git hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/protobuf/generated/FilterProtos.java hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/protobuf/generated/FilterProtos.java index cbef134..6947c62 100644 --- hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/protobuf/generated/FilterProtos.java +++ hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/protobuf/generated/FilterProtos.java @@ -31,20 +31,15 @@ public final class FilterProtos { */ com.google.protobuf.ByteString getQualifier(); - // required string column_name = 3; + // required bytes column_spec = 3; /** - * required string column_name = 3; + * required bytes column_spec = 3; */ - boolean hasColumnName(); + boolean hasColumnSpec(); /** - * required string column_name = 3; + * required bytes column_spec = 3; */ - java.lang.String 
getColumnName(); - /** - * required string column_name = 3; - */ - com.google.protobuf.ByteString - getColumnNameBytes(); + com.google.protobuf.ByteString getColumnSpec(); } /** * Protobuf type {@code hbase.pb.SQLPredicatePushDownCellToColumnMapping} @@ -109,7 +104,7 @@ public final class FilterProtos { } case 26: { bitField0_ |= 0x00000004; - columnName_ = input.readBytes(); + columnSpec_ = input.readBytes(); break; } } @@ -184,53 +179,26 @@ public final class FilterProtos { return qualifier_; } - // required string column_name = 3; - public static final int COLUMN_NAME_FIELD_NUMBER = 3; - private java.lang.Object columnName_; + // required bytes column_spec = 3; + public static final int COLUMN_SPEC_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString columnSpec_; /** - * required string column_name = 3; + * required bytes column_spec = 3; */ - public boolean hasColumnName() { + public boolean hasColumnSpec() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * required string column_name = 3; + * required bytes column_spec = 3; */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - columnName_ = s; - } - return s; - } - } - /** - * required string column_name = 3; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public com.google.protobuf.ByteString getColumnSpec() { + return columnSpec_; } private void initFields() { columnFamily_ = com.google.protobuf.ByteString.EMPTY; qualifier_ = com.google.protobuf.ByteString.EMPTY; - columnName_ = ""; + columnSpec_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -245,7 +213,7 @@ public final class FilterProtos { memoizedIsInitialized = 0; return false; } - if (!hasColumnName()) { + if (!hasColumnSpec()) { memoizedIsInitialized = 0; return false; } @@ -263,7 +231,7 @@ public final class FilterProtos { output.writeBytes(2, qualifier_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getColumnNameBytes()); + output.writeBytes(3, columnSpec_); } getUnknownFields().writeTo(output); } @@ -284,7 +252,7 @@ public final class FilterProtos { } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getColumnNameBytes()); + .computeBytesSize(3, columnSpec_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -319,10 +287,10 @@ public final class FilterProtos { result = result && getQualifier() .equals(other.getQualifier()); } - result = result && (hasColumnName() == other.hasColumnName()); - if (hasColumnName()) { - result = result && getColumnName() - .equals(other.getColumnName()); + result = result && (hasColumnSpec() == other.hasColumnSpec()); + if (hasColumnSpec()) { + result = result && getColumnSpec() + .equals(other.getColumnSpec()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -345,9 +313,9 @@ public final class FilterProtos { hash = (37 * hash) + 
QUALIFIER_FIELD_NUMBER; hash = (53 * hash) + getQualifier().hashCode(); } - if (hasColumnName()) { - hash = (37 * hash) + COLUMN_NAME_FIELD_NUMBER; - hash = (53 * hash) + getColumnName().hashCode(); + if (hasColumnSpec()) { + hash = (37 * hash) + COLUMN_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getColumnSpec().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; @@ -462,7 +430,7 @@ public final class FilterProtos { bitField0_ = (bitField0_ & ~0x00000001); qualifier_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); - columnName_ = ""; + columnSpec_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -503,7 +471,7 @@ public final class FilterProtos { if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - result.columnName_ = columnName_; + result.columnSpec_ = columnSpec_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -526,10 +494,8 @@ public final class FilterProtos { if (other.hasQualifier()) { setQualifier(other.getQualifier()); } - if (other.hasColumnName()) { - bitField0_ |= 0x00000004; - columnName_ = other.columnName_; - onChanged(); + if (other.hasColumnSpec()) { + setColumnSpec(other.getColumnSpec()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -544,7 +510,7 @@ public final class FilterProtos { return false; } - if (!hasColumnName()) { + if (!hasColumnSpec()) { return false; } @@ -642,76 +608,38 @@ public final class FilterProtos { return this; } - // required string column_name = 3; - private java.lang.Object columnName_ = ""; + // required bytes column_spec = 3; + private com.google.protobuf.ByteString columnSpec_ = com.google.protobuf.ByteString.EMPTY; /** - * required string column_name = 3; + * required bytes column_spec = 3; */ - public boolean hasColumnName() { + public boolean hasColumnSpec() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * required string column_name = 3; + * required bytes column_spec = 3; */ - public java.lang.String getColumnName() { - java.lang.Object ref = columnName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - columnName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string column_name = 3; - */ - public com.google.protobuf.ByteString - getColumnNameBytes() { - java.lang.Object ref = columnName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - columnName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public com.google.protobuf.ByteString getColumnSpec() { + return columnSpec_; } /** - * required string column_name = 3; + * required bytes column_spec = 3; */ - public Builder setColumnName( - java.lang.String value) { + public Builder setColumnSpec(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; - columnName_ = value; + columnSpec_ = value; onChanged(); return this; } /** - * required string column_name = 3; + * required bytes column_spec = 3; */ - public Builder clearColumnName() { + public Builder clearColumnSpec() { bitField0_ = (bitField0_ & ~0x00000004); - columnName_ = getDefaultInstance().getColumnName(); - onChanged(); - return this; - } - /** - * required string column_name = 3; - */ - public Builder 
setColumnNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - columnName_ = value; + columnSpec_ = getDefaultInstance().getColumnSpec(); onChanged(); return this; } @@ -1967,7 +1895,7 @@ public final class FilterProtos { "\n\014Filter.proto\022\010hbase.pb\"h\n\'SQLPredicate" + "PushDownCellToColumnMapping\022\025\n\rcolumn_fa" + "mily\030\001 \002(\014\022\021\n\tqualifier\030\002 \002(\014\022\023\n\013column_" + - "name\030\003 \002(\t\"\313\001\n\032SQLPredicatePushDownFilte" + + "spec\030\003 \002(\014\"\313\001\n\032SQLPredicatePushDownFilte" + "r\022 \n\030dynamic_logic_expression\030\001 \002(\t\022\036\n\026v" + "alue_from_query_array\030\002 \003(\014\022Q\n\026cell_to_c" + "olumn_mapping\030\003 \003(\01321.hbase.pb.SQLPredic" + @@ -1986,7 +1914,7 @@ public final class FilterProtos { internal_static_hbase_pb_SQLPredicatePushDownCellToColumnMapping_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SQLPredicatePushDownCellToColumnMapping_descriptor, - new java.lang.String[] { "ColumnFamily", "Qualifier", "ColumnName", }); + new java.lang.String[] { "ColumnFamily", "Qualifier", "ColumnSpec", }); internal_static_hbase_pb_SQLPredicatePushDownFilter_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_hbase_pb_SQLPredicatePushDownFilter_fieldAccessorTable = new diff --git hbase-spark/src/main/protobuf/Filter.proto hbase-spark/src/main/protobuf/Filter.proto index d17b48c..d9d96b6 100644 --- hbase-spark/src/main/protobuf/Filter.proto +++ hbase-spark/src/main/protobuf/Filter.proto @@ -28,7 +28,7 @@ option optimize_for = SPEED; message SQLPredicatePushDownCellToColumnMapping { required bytes column_family = 1; required bytes qualifier = 2; - required string column_name = 3; + required bytes column_spec = 3; } message SQLPredicatePushDownFilter { diff --git hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ByteArrayComparable.scala hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ByteArrayComparable.scala index fce92fb..7887fac 100644 --- hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ByteArrayComparable.scala +++ hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/ByteArrayComparable.scala @@ -25,7 +25,7 @@ class ByteArrayComparable(val bytes:Array[Byte], val offset:Int = 0, var length: extends Comparable[ByteArrayComparable] { if (length == -1) { - length = bytes.length + length = bytes.length - offset } override def compareTo(o: ByteArrayComparable): Int = { diff --git hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala index 0c29f50..e3cbe23 100644 --- hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala +++ hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala @@ -138,7 +138,23 @@ case class HBaseRelation ( */ override val schema: StructType = userSpecifiedSchema.getOrElse(catalog.toDataType) + def rows = catalog.row + def getField(name: String): Field = { + catalog.getField(name) + } + // check whether the column is the first key in the rowkey + def isPrimaryKey(c: String): Boolean = { + val f1 = catalog.getRowKey(0) + val f2 = getField(c) + f1 == f2 + } + def isComposite(): Boolean = { + catalog.getRowKey.size > 1 + } + def isColumn(c: String): Boolean = { + !catalog.getRowKey.map(_.colName).contains(c) + } def createTable() { val numReg = 
parameters.get(HBaseTableCatalog.newTable).map(x => x.toInt).getOrElse(0) @@ -244,20 +260,9 @@ case class HBaseRelation ( // Return the new index and appended value (idx + field.length, parsed ++ Seq((field, value))) } else { - field.dt match { - case StringType => - val pos = row.indexOf(HBaseTableCatalog.delimiter, idx) - if (pos == -1 || pos > row.length) { - // this is at the last dimension - val value = Utils.hbaseFieldToScalaType(field, row, idx, row.length) - (row.length + 1, parsed ++ Seq((field, value))) - } else { - val value = Utils.hbaseFieldToScalaType(field, row, idx, pos - idx) - (pos, parsed ++ Seq((field, value))) - } - // We don't know the length, assume it extends to the end of the rowkey. - case _ => (row.length + 1, parsed ++ Seq((field, Utils.hbaseFieldToScalaType(field, row, idx, row.length)))) - } + // This is the last dimension. + val value = Utils.hbaseFieldToScalaType(field, row, idx, row.length - idx) + (row.length + 1, parsed ++ Seq((field, value))) } })._2.toMap } @@ -395,7 +400,6 @@ case class HBaseRelation ( }) val queryValueArray = queryValueList.toArray - if (superRowKeyFilter == null) { superRowKeyFilter = new RowKeyFilter } @@ -403,6 +407,23 @@ case class HBaseRelation ( (superRowKeyFilter, superDynamicLogicExpression, queryValueArray) } + // convert the point to the range if it is composite key + private def padding(value: Array[Byte], length: Int): Array[Byte] = { + if (!isComposite || value == null || value.length == length ) { + value + } else { + val end = Array.fill(length)(-1: Byte) + System.arraycopy(value, 0, end, 0, value.length) + end + } + } + // convert the upper bound to extend to the whole rowkey length + private def convertUpperBound(value: Array[Byte], length: Int): Array[Byte] = { + val upper = Array.fill(length)(-1: Byte) + System.arraycopy(value, 0, upper, 0, value.length) + upper + } + /** * For some codec, the order may be inconsistent between java primitive * type and its byte array. 
We may have to split the predicates on some @@ -421,15 +442,19 @@ case class HBaseRelation ( filter match { case EqualTo(attr, value) => val field = catalog.getField(attr) + val bytes = Utils.toBytes(value, field) if (field != null) { - if (field.isRowKey) { - parentRowKeyFilter.mergeIntersect(new RowKeyFilter( - DefaultSourceStaticUtils.getByteValue(field, - value.toString), null)) + if (isPrimaryKey(attr)) { + if (!isComposite()) { + parentRowKeyFilter.mergeIntersect(new RowKeyFilter( + bytes, null)) + } else { + // we have to convert the point to range in case of composite key + parentRowKeyFilter.mergeIntersect(new RowKeyFilter(null, + new ScanRange(padding(bytes, rows.length), true, bytes, true))) + } } - val byteValue = - DefaultSourceStaticUtils.getByteValue(field, value.toString) - valueArray += byteValue + valueArray += bytes } new EqualLogicExpression(attr, valueArray.length - 1, false) @@ -448,12 +473,16 @@ case class HBaseRelation ( case LessThan(attr, value) => val field = catalog.getField(attr) if (field != null) { - if (field.isRowKey) { + if (isPrimaryKey(attr)) { val b = encoder.ranges(value) var inc = false b.map(_.less.map { x => val r = new RowKeyFilter(null, - new ScanRange(x.upper, inc, x.low, true) + new ScanRange(if (inc) { + padding(x.upper, rows.length) + } else { + x.upper + }, inc, x.low, true) ) inc = true r @@ -470,12 +499,12 @@ case class HBaseRelation ( case GreaterThan(attr, value) => val field = catalog.getField(attr) if (field != null) { - if (field.isRowKey) { + if (isPrimaryKey(attr)) { val b = encoder.ranges(value) var inc = false - b.map(_.greater.map{x => + b.map(_.greater.map { x => val r = new RowKeyFilter(null, - new ScanRange(x.upper, true, x.low, inc)) + new ScanRange(padding(x.upper, rows.length), true, x.low, inc)) inc = true r }).map { x => @@ -491,11 +520,11 @@ case class HBaseRelation ( case LessThanOrEqual(attr, value) => val field = catalog.getField(attr) if (field != null) { - if (field.isRowKey) { + if (isPrimaryKey(attr)) { val b = encoder.ranges(value) b.map(_.less.map(x => new RowKeyFilter(null, - new ScanRange(x.upper, true, x.low, true)))) + new ScanRange(padding(x.upper, rows.length), true, x.low, true)))) .map { x => x.reduce{ (i, j) => i.mergeUnion(j) @@ -509,11 +538,11 @@ case class HBaseRelation ( case GreaterThanOrEqual(attr, value) => val field = catalog.getField(attr) if (field != null) { - if (field.isRowKey) { + if (isPrimaryKey(attr)) { val b = encoder.ranges(value) b.map(_.greater.map(x => new RowKeyFilter(null, - new ScanRange(x.upper, true, x.low, true)))) + new ScanRange(padding(x.upper, rows.length), true, x.low, true)))) .map { x => x.reduce { (i, j) => i.mergeUnion(j) @@ -590,7 +619,7 @@ class ScanRange(var upperBound:Array[Byte], var isUpperBoundEqualTo:Boolean, * * @param other Other scan object */ - def mergeUnion(other:ScanRange): Unit = { + def mergeUnion(other:ScanRange): ScanRange = { val upperBoundCompare = compareRange(upperBound, other.upperBound) val lowerBoundCompare = compareRange(lowerBound, other.lowerBound) @@ -605,6 +634,7 @@ class ScanRange(var upperBound:Array[Byte], var isUpperBoundEqualTo:Boolean, isUpperBoundEqualTo = if (upperBoundCompare == 0) isUpperBoundEqualTo || other.isUpperBoundEqualTo else if (upperBoundCompare < 0) other.isUpperBoundEqualTo else isUpperBoundEqualTo + this } /** @@ -1112,13 +1142,24 @@ class RowKeyFilter (currentPoint:Array[Byte] = null, other.points.foreach( p => points += p) other.ranges.foreach( otherR => { + // we may change it in the mergeUnion below. 
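+          // Illustrative example: existing ranges [a, c] and [e, g] unioned with an
+          // overlapping otherR = [b, f] collapse into the single range [a, g] once
+          // the loop below has folded each overlapping range into newR.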
+ var newR = otherR var doesOverLap = false + // This is the ranges after otherR union with existing ranges + // It is used to keep the changed ranges after each iteration. + var newRanges = mutable.MutableList[ScanRange]() ranges.foreach{ r => - if (r.getOverLapScanRange(otherR) != null) { - r.mergeUnion(otherR) - doesOverLap = true - }} - if (!doesOverLap) ranges.+=(otherR) + // If there is overlap, we update newR to be compared + // and do not add them to new ranges. We will not lose it + // since newR has it covered. Otherwise, we may have duplicate range + if (r.getOverLapScanRange(newR) != null) { + newR = r.mergeUnion(newR) + } else { + newRanges :+= r + } + } + newRanges :+= newR + ranges = newRanges }) this } @@ -1192,7 +1233,7 @@ class RowKeyFilter (currentPoint:Array[Byte] = null, ranges.foreach( r => { if (isFirst) isFirst = false else strBuilder.append(",") - strBuilder.append(r) + strBuilder.append(r.toString) }) strBuilder.append("))") strBuilder.toString() diff --git hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DynamicLogicExpression.scala hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DynamicLogicExpression.scala index 1a1d478..0a95a05 100644 --- hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DynamicLogicExpression.scala +++ hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DynamicLogicExpression.scala @@ -113,7 +113,6 @@ class EqualLogicExpression (val columnName:String, valueFromQueryValueArray:Array[Array[Byte]]): Boolean = { val currentRowValue = columnToCurrentRowValueMap.get(columnName) val valueFromQuery = valueFromQueryValueArray(valueFromQueryIndex) - currentRowValue != null && Bytes.equals(valueFromQuery, 0, valueFromQuery.length, currentRowValue.bytes, diff --git hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala index 20866e2..a9b38ba 100644 --- hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala +++ hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala @@ -56,7 +56,7 @@ import scala.collection.mutable * * HBaseContext will take the responsibilities * of disseminating the configuration information - * to the working and managing the life cycle of HConnections. + * to the working and managing the life cycle of Connections. */ class HBaseContext(@transient sc: SparkContext, @transient val config: Configuration, @@ -88,14 +88,14 @@ class HBaseContext(@transient sc: SparkContext, /** * A simple enrichment of the traditional Spark RDD foreachPartition. * This function differs from the original in that it offers the - * developer access to a already connected HConnection object + * developer access to a already connected Connection object * - * Note: Do not close the HConnection object. All HConnection + * Note: Do not close the Connection object. 
All Connection * management is handled outside this method * * @param rdd Original RDD with data to iterate over * @param f Function to be given a iterator to iterate through - * the RDD values and a HConnection object to interact + * the RDD values and a Connection object to interact * with HBase */ def foreachPartition[T](rdd: RDD[T], @@ -107,14 +107,14 @@ class HBaseContext(@transient sc: SparkContext, /** * A simple enrichment of the traditional Spark Streaming dStream foreach * This function differs from the original in that it offers the - * developer access to a already connected HConnection object + * developer access to a already connected Connection object * - * Note: Do not close the HConnection object. All HConnection + * Note: Do not close the Connection object. All Connection * management is handled outside this method * * @param dstream Original DStream with data to iterate over * @param f Function to be given a iterator to iterate through - * the DStream values and a HConnection object to + * the DStream values and a Connection object to * interact with HBase */ def foreachPartition[T](dstream: DStream[T], @@ -127,14 +127,14 @@ class HBaseContext(@transient sc: SparkContext, /** * A simple enrichment of the traditional Spark RDD mapPartition. * This function differs from the original in that it offers the - * developer access to a already connected HConnection object + * developer access to a already connected Connection object * - * Note: Do not close the HConnection object. All HConnection + * Note: Do not close the Connection object. All Connection * management is handled outside this method * * @param rdd Original RDD with data to iterate over * @param mp Function to be given a iterator to iterate through - * the RDD values and a HConnection object to interact + * the RDD values and a Connection object to interact * with HBase * @return Returns a new RDD generated by the user definition * function just like normal mapPartition @@ -153,9 +153,9 @@ class HBaseContext(@transient sc: SparkContext, * foreachPartition. * * This function differs from the original in that it offers the - * developer access to a already connected HConnection object + * developer access to a already connected Connection object * - * Note: Do not close the HConnection object. All HConnection + * Note: Do not close the Connection object. All Connection * management is handled outside this method * * Note: Make sure to partition correctly to avoid memory issue when @@ -163,7 +163,7 @@ class HBaseContext(@transient sc: SparkContext, * * @param dstream Original DStream with data to iterate over * @param f Function to be given a iterator to iterate through - * the DStream values and a HConnection object to + * the DStream values and a Connection object to * interact with HBase * @return Returns a new DStream generated by the user * definition function just like normal mapPartition @@ -179,9 +179,9 @@ class HBaseContext(@transient sc: SparkContext, * mapPartition. * * This function differs from the original in that it offers the - * developer access to a already connected HConnection object + * developer access to a already connected Connection object * - * Note: Do not close the HConnection object. All HConnection + * Note: Do not close the Connection object. 
All Connection * management is handled outside this method * * Note: Make sure to partition correctly to avoid memory issue when @@ -189,7 +189,7 @@ class HBaseContext(@transient sc: SparkContext, * * @param dstream Original DStream with data to iterate over * @param f Function to be given a iterator to iterate through - * the DStream values and a HConnection object to + * the DStream values and a Connection object to * interact with HBase * @return Returns a new DStream generated by the user * definition function just like normal mapPartition @@ -208,7 +208,7 @@ class HBaseContext(@transient sc: SparkContext, * * It allow addition support for a user to take RDD * and generate puts and send them to HBase. - * The complexity of managing the HConnection is + * The complexity of managing the Connection is * removed from the developer * * @param rdd Original RDD with data to iterate over @@ -253,7 +253,7 @@ class HBaseContext(@transient sc: SparkContext, * It allow addition support for a user to take a DStream and * generate puts and send them to HBase. * - * The complexity of managing the HConnection is + * The complexity of managing the Connection is * removed from the developer * * @param dstream Original DStream with data to iterate over @@ -274,7 +274,7 @@ class HBaseContext(@transient sc: SparkContext, * A simple abstraction over the HBaseContext.foreachPartition method. * * It allow addition support for a user to take a RDD and generate delete - * and send them to HBase. The complexity of managing the HConnection is + * and send them to HBase. The complexity of managing the Connection is * removed from the developer * * @param rdd Original RDD with data to iterate over @@ -294,7 +294,7 @@ class HBaseContext(@transient sc: SparkContext, * It allow addition support for a user to take a DStream and * generate Delete and send them to HBase. * - * The complexity of managing the HConnection is + * The complexity of managing the Connection is * removed from the developer * * @param dstream Original DStream with data to iterate over diff --git hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala index d8fdb23..7deb5b8 100644 --- hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala +++ hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/JavaHBaseContext.scala @@ -43,14 +43,14 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext, /** * A simple enrichment of the traditional Spark javaRdd foreachPartition. * This function differs from the original in that it offers the - * developer access to a already connected HConnection object + * developer access to a already connected Connection object * - * Note: Do not close the HConnection object. All HConnection + * Note: Do not close the Connection object. 
All Connection * management is handled outside this method * * @param javaRdd Original javaRdd with data to iterate over * @param f Function to be given a iterator to iterate through - * the RDD values and a HConnection object to interact + * the RDD values and a Connection object to interact * with HBase */ def foreachPartition[T](javaRdd: JavaRDD[T], @@ -65,14 +65,14 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext, /** * A simple enrichment of the traditional Spark Streaming dStream foreach * This function differs from the original in that it offers the - * developer access to a already connected HConnection object + * developer access to a already connected Connection object * - * Note: Do not close the HConnection object. All HConnection + * Note: Do not close the Connection object. All Connection * management is handled outside this method * * @param javaDstream Original DStream with data to iterate over * @param f Function to be given a iterator to iterate through - * the JavaDStream values and a HConnection object to + * the JavaDStream values and a Connection object to * interact with HBase */ def foreachPartition[T](javaDstream: JavaDStream[T], @@ -84,9 +84,9 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext, /** * A simple enrichment of the traditional Spark JavaRDD mapPartition. * This function differs from the original in that it offers the - * developer access to a already connected HConnection object + * developer access to a already connected Connection object * - * Note: Do not close the HConnection object. All HConnection + * Note: Do not close the Connection object. All Connection * management is handled outside this method * * Note: Make sure to partition correctly to avoid memory issue when @@ -94,7 +94,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext, * * @param javaRdd Original JavaRdd with data to iterate over * @param f Function to be given a iterator to iterate through - * the RDD values and a HConnection object to interact + * the RDD values and a Connection object to interact * with HBase * @return Returns a new RDD generated by the user definition * function just like normal mapPartition @@ -118,9 +118,9 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext, * mapPartition. * * This function differs from the original in that it offers the - * developer access to a already connected HConnection object + * developer access to a already connected Connection object * - * Note: Do not close the HConnection object. All HConnection + * Note: Do not close the Connection object. All Connection * management is handled outside this method * * Note: Make sure to partition correctly to avoid memory issue when @@ -128,7 +128,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext, * * @param javaDstream Original JavaDStream with data to iterate over * @param mp Function to be given a iterator to iterate through - * the JavaDStream values and a HConnection object to + * the JavaDStream values and a Connection object to * interact with HBase * @return Returns a new JavaDStream generated by the user * definition function just like normal mapPartition @@ -146,7 +146,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext, * * It allow addition support for a user to take JavaRDD * and generate puts and send them to HBase. 
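The scaladoc edits above track the client API rename: HBaseContext and its JavaHBaseContext wrapper now hand each partition an already-open Connection whose lifecycle they manage, and callers must not close it. A minimal sketch of that contract, assuming the (Iterator[T], Connection) => Unit shape described in the scaladoc; the table and column names are made up:

    import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
    import org.apache.hadoop.hbase.client.{Connection, Put}
    import org.apache.hadoop.hbase.spark.HBaseContext
    import org.apache.hadoop.hbase.util.Bytes
    import org.apache.spark.{SparkConf, SparkContext}

    val sc = new SparkContext("local", "connection-example", new SparkConf())
    val hbaseContext = new HBaseContext(sc, HBaseConfiguration.create())
    val rdd = sc.parallelize(Seq(("row001", "v1"), ("row002", "v2")))

    hbaseContext.foreachPartition(rdd, (it: Iterator[(String, String)], conn: Connection) => {
      // The Connection is owned by HBaseContext; do not close it here.
      val table = conn.getTable(TableName.valueOf("table1"))
      it.foreach { case (row, value) =>
        table.put(new Put(Bytes.toBytes(row))
          .addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("col1"), Bytes.toBytes(value)))
      }
      table.close()
    })

Only the Table obtained from the shared Connection is closed; the Connection itself is left to HBaseContext.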
- * The complexity of managing the HConnection is + * The complexity of managing the Connection is * removed from the developer * * @param javaRdd Original JavaRDD with data to iterate over @@ -167,7 +167,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext, * It allow addition support for a user to take a JavaDStream and * generate puts and send them to HBase. * - * The complexity of managing the HConnection is + * The complexity of managing the Connection is * removed from the developer * * @param javaDstream Original DStream with data to iterate over @@ -189,7 +189,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext, * It allow addition support for a user to take a JavaRDD and * generate delete and send them to HBase. * - * The complexity of managing the HConnection is + * The complexity of managing the Connection is * removed from the developer * * @param javaRdd Original JavaRDD with data to iterate over @@ -209,7 +209,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext, * It allow addition support for a user to take a JavaDStream and * generate Delete and send them to HBase. * - * The complexity of managing the HConnection is + * The complexity of managing the Connection is * removed from the developer * * @param javaDStream Original DStream with data to iterate over diff --git hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala index ce7b55a..0a672f0 100644 --- hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala +++ hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala @@ -27,6 +27,7 @@ package object hbase { def bytesMax = null val ByteMax = -1.asInstanceOf[Byte] val ByteMin = 0.asInstanceOf[Byte] + val MaxLength = 256 val ord: Ordering[HBaseType] = new Ordering[HBaseType] { def compare(x: Array[Byte], y: Array[Byte]): Int = { return Bytes.compareTo(x, y) diff --git hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/HBaseTableCatalog.scala hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/HBaseTableCatalog.scala index c2d611f..716d9b3 100644 --- hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/HBaseTableCatalog.scala +++ hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/HBaseTableCatalog.scala @@ -18,7 +18,7 @@ package org.apache.spark.sql.datasources.hbase import org.apache.avro.Schema -import org.apache.hadoop.hbase.spark.SchemaConverters +import org.apache.hadoop.hbase.spark.{ColumnSpec, SchemaConverters} import org.apache.hadoop.hbase.spark.datasources._ import org.apache.hadoop.hbase.spark.hbase._ import org.apache.hadoop.hbase.util.Bytes @@ -39,7 +39,7 @@ case class Field( sType: Option[String] = None, avroSchema: Option[String] = None, serdes: Option[SerDes]= None, - len: Int = -1) extends Logging { + private val len: Int = -1) extends Logging { override def toString = s"$colName $cf $col" val isRowKey = cf == HBaseTableCatalog.rowKey var start: Int = _ @@ -69,11 +69,7 @@ case class Field( } } def colBytes: Array[Byte] = { - if (isRowKey) { - Bytes.toBytes("key") - } else { - Bytes.toBytes(col) - } + Bytes.toBytes(col) } val dt = { @@ -108,6 +104,8 @@ case class Field( colName == that.colName && cf == that.cf && col == that.col case _ => false } + + def columnSpec = new ColumnSpec(start, length, colName) } // The row key definition, with each key refer to the col defined in Field, e.g., @@ -117,12 +115,13 @@ case 
class RowKey(k: String) { var fields: Seq[Field] = _ var varLength = false def length = { - if (varLength) { - -1 - } else { - fields.foldLeft(0){case (x, y) => - x + y.length + fields.foldLeft(0) { case (x, y) => + val yLen = if (y.length == -1) { + MaxLength + } else { + y.length } + x + yLen } } } @@ -155,49 +154,20 @@ case class HBaseTableCatalog( def get(key: String) = params.get(key) - // Setup the start and length for each dimension of row key at runtime. - def dynSetupRowKey(rowKey: Array[Byte]) { - logDebug(s"length: ${rowKey.length}") - if(row.varLength) { - var start = 0 - row.fields.foreach { f => - logDebug(s"start: $start") - f.start = start - f.length = { - // If the length is not defined - if (f.length == -1) { - f.dt match { - case StringType => - var pos = rowKey.indexOf(HBaseTableCatalog.delimiter, start) - if (pos == -1 || pos > rowKey.length) { - // this is at the last dimension - pos = rowKey.length - } - pos - start - // We don't know the length, assume it extend to the end of the rowkey. - case _ => rowKey.length - start - } - } else { - f.length - } - } - start += f.length - } - } - } - + // Setup the start and length for each dimension of row key def initRowKey = { val fields = sMap.fields.filter(_.cf == HBaseTableCatalog.rowKey) row.fields = row.keys.flatMap(n => fields.find(_.col == n)) - // The length is determined at run time if it is string or binary and the length is undefined. - if (row.fields.filter(_.length == -1).isEmpty) { + // We only allowed there is one key at the end that is determined at runtime. + if (row.fields.reverse.tail.filter(_.length == -1).isEmpty) { var start = 0 row.fields.foreach { f => f.start = start start += f.length } } else { - row.varLength = true + throw new Exception("Only the last dimension of " + + "RowKey is allowed to have varied length") } } initRowKey @@ -232,11 +202,28 @@ object HBaseTableCatalog { val length = "length" /** - * User provide table schema definition - * {"tablename":"name", "rowkey":"key1:key2", - * "columns":{"col1":{"cf":"cf1", "col":"col1", "type":"type1"}, - * "col2":{"cf":"cf2", "col":"col2", "type":"type2"}}} - * Note that any col in the rowKey, there has to be one corresponding col defined in columns + * + * params is a mapping with key as "catalog" and value as json defined the schema mapping between + * hbase and spark. Following is an exmaple. + * Note that + * 1. any col in the rowKey, there has to be one corresponding col defined in columns with cf as "rowkey" + * 2. for composite rowkey, different dimensions are separate by ":" + * 3. in the column definition mapping, the key is the name of the spark table column name, with column family + * and column qualifier defined. 
+ * s"""{ + * |"table":{"namespace":"default", "name":"htable"}, + * |"rowkey":"key1:key2", + * |"columns":{ + * |"col1":{"cf":"rowkey", "col":"key1", "type":"string", "length":"6"}, + * |"col2":{"cf":"rowkey", "col":"key2", "type":"double"}, + * |"col3":{"cf":"cf1", "col":"col2", "type":"binary"}, + * |"col4":{"cf":"cf1", "col":"col3", "type":"timestamp"}, + * |"col5":{"cf":"cf1", "col":"col4", "type":"double"}, + * |"col6":{"cf":"cf1", "col":"col5", "type":"$map"}, + * |"col7":{"cf":"cf1", "col":"col6", "type":"$array"}, + * |"col8":{"cf":"cf1", "col":"col7", "type":"$arrayMap"} + * |} + * */ def apply(params: Map[String, String]): HBaseTableCatalog = { val parameters = convert(params) diff --git hbase-spark/src/test/resources/log4j.properties hbase-spark/src/test/resources/log4j.properties index 4eeeb2c..6d9415b 100644 --- hbase-spark/src/test/resources/log4j.properties +++ hbase-spark/src/test/resources/log4j.properties @@ -15,7 +15,7 @@ # limitations under the License. # Define some default values that can be overridden by system properties -hbase.root.logger=INFO,console +hbase.root.logger=INFO,FA hbase.log.dir=. hbase.log.file=hbase.log @@ -50,6 +50,14 @@ log4j.appender.console.target=System.err log4j.appender.console.layout=org.apache.log4j.PatternLayout log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n +#File Appender +log4j.appender.FA=org.apache.log4j.FileAppender +log4j.appender.FA.append=false +log4j.appender.FA.file=target/log-output.txt +log4j.appender.FA.layout=org.apache.log4j.PatternLayout +log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n +log4j.appender.FA.Threshold = INFO + # Custom Logging levels #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG diff --git hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/CompositeKeySuite.scala hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/CompositeKeySuite.scala new file mode 100644 index 0000000..c495136 --- /dev/null +++ hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/CompositeKeySuite.scala @@ -0,0 +1,494 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.spark + +import org.apache.hadoop.hbase.spark.datasources.HBaseSparkConf +import org.apache.hadoop.hbase.{TableName, HBaseTestingUtility} +import org.apache.spark.sql.datasources.hbase.HBaseTableCatalog +import org.apache.spark.sql.{DataFrame, SQLContext} +import org.apache.spark.{SparkConf, SparkContext, Logging} +import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite} + +case class HBaseCompositeRecord( + stringCol00: String, + intCol01: Int, + booleanCol1: Boolean, + doubleCol2: Double, + floatCol3: Float, + intCol4: Int, + longCol5: Long, + shortCol6: Short, + stringCol7: String, + byteCol8: Byte) + +object HBaseCompositeRecord { + def apply(i: Int): HBaseCompositeRecord = { + HBaseCompositeRecord(s"row${"%03d".format(i)}", + if (i % 2 == 0) { + i + } else { + -i + }, + i % 2 == 0, + i.toDouble, + i.toFloat, + i, + i.toLong, + i.toShort, + s"String$i extra", + i.toByte) + } +} + +case class HBaseCompositeRecordInt( + intCol00: Int, + stringCol01: String, + booleanCol1: Boolean, + doubleCol2: Double, + floatCol3: Float, + intCol4: Int, + longCol5: Long, + shortCol6: Short, + stringCol7: String, + byteCol8: Byte) + +object HBaseCompositeRecordInt { + def apply(i: Int): HBaseCompositeRecordInt = { + HBaseCompositeRecordInt( + if (i % 2 == 0) { + i + } else { + -i + }, + s"row${"%03d".format(i)}", + i % 2 == 0, + i.toDouble, + i.toFloat, + i, + i.toLong, + i.toShort, + s"String$i extra", + i.toByte) + } +} + +case class HBaseCompositeRecordBool( + booleanCol00: Boolean, + stringCol01: String, + doubleCol2: Double, + floatCol3: Float, + intCol4: Int, + longCol5: Long, + shortCol6: Short, + stringCol7: String, + byteCol8: Byte) + +object HBaseCompositeRecordBool { + def apply(i: Int): HBaseCompositeRecordBool = { + HBaseCompositeRecordBool( + i % 2 == 0, + s"row${"%03d".format(i)}", + i.toDouble, + i.toFloat, + i, + i.toLong, + i.toShort, + s"String$i extra", + i.toByte) + } +} + +class CompositeKeySuite extends FunSuite with + BeforeAndAfterEach with BeforeAndAfterAll with Logging { + @transient var sc: SparkContext = null + var TEST_UTIL: HBaseTestingUtility = new HBaseTestingUtility + + var sqlContext: SQLContext = null + var df: DataFrame = null + // The original raw data used for construct result set without going through + // data frame logic. It is used to verify the result set retrieved from data frame logic. 
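Earlier in this patch, HBaseRelation turns an EqualTo on the leading dimension of a composite row key into a ScanRange, padding the encoded prefix out to the full key length ("we have to convert the point to range in case of composite key"). ScanRange and the padding helper are internal to hbase-spark, so the following is only a byte-level sketch with a hypothetical pad function: every composite key that starts with a given prefix sorts between the prefix itself and the prefix padded with 0xFF bytes.

    import org.apache.hadoop.hbase.util.Bytes

    // Hypothetical stand-in for the patch's padding(bytes, length): extend a key prefix
    // with 0xFF so it upper-bounds every row key sharing that prefix.
    def pad(prefix: Array[Byte], totalLen: Int): Array[Byte] =
      prefix ++ Array.fill[Byte](totalLen - prefix.length)(0xFF.toByte)

    val prefix  = Bytes.toBytes("row010")               // 6-byte leading string dimension
    val fullKey = Bytes.add(prefix, Bytes.toBytes(42))  // plus a 4-byte int dimension
    val upper   = pad(prefix, fullKey.length)

    // EqualTo("stringCol00", "row010") is answered by the inclusive range [prefix, upper].
    assert(Bytes.compareTo(prefix, fullKey) <= 0 && Bytes.compareTo(fullKey, upper) <= 0)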
+ val rawResult = (0 to 255).map { i => + HBaseCompositeRecord(i) + } + val rawResultInt = (0 to 255).map { i => + HBaseCompositeRecordInt(i) + } + + val rawResultBool = (0 to 255).map { i => + HBaseCompositeRecordBool(i) + } + + def collectToSet(df: DataFrame): Set[(String, Int, Boolean)] = { + df.collect() + .map(x => (x.getAs[String](0), x.getAs[Int](1), x.getAs[Boolean](2))).toSet + } + + + def withCatalog(cat: String): DataFrame = { + sqlContext + .read + .options(Map(HBaseTableCatalog.tableCatalog -> cat)) + .format("org.apache.hadoop.hbase.spark") + .load() + } + + override def beforeAll() { + + TEST_UTIL.startMiniCluster + val sparkConf = new SparkConf + sparkConf.set(HBaseSparkConf.BLOCK_CACHE_ENABLE, "true") + sparkConf.set(HBaseSparkConf.BATCH_NUM, "100") + sparkConf.set(HBaseSparkConf.CACHE_SIZE, "100") + + sc = new SparkContext("local", "test", sparkConf) + new HBaseContext(sc, TEST_UTIL.getConfiguration) + sqlContext = new SQLContext(sc) + } + + override def afterAll() { + logInfo("shuting down minicluster") + TEST_UTIL.shutdownMiniCluster() + + sc.stop() + } + + override def beforeEach(): Unit = { + DefaultSourceStaticUtils.lastFiveExecutionRules.clear() + } + + def catalog = s"""{ + |"table":{"namespace":"default", "name":"table1"}, + |"rowkey":"key1:key2", + |"columns":{ + |"stringCol00":{"cf":"rowkey", "col":"key1", "type":"string", "length":"6"}, + |"intCol01":{"cf":"rowkey", "col":"key2", "type":"int"}, + |"booleanCol1":{"cf":"cf1", "col":"booleanCol1", "type":"boolean"}, + |"doubleCol2":{"cf":"cf2", "col":"doubleCol2", "type":"double"}, + |"floatCol3":{"cf":"cf3", "col":"floatCol3", "type":"float"}, + |"intCol4":{"cf":"cf4", "col":"intCol4", "type":"int"}, + |"longCol5":{"cf":"cf5", "col":"longCol5", "type":"bigint"}, + |"shortCol6":{"cf":"cf6", "col":"shortCol6", "type":"smallint"}, + |"stringCol7":{"cf":"cf7", "col":"stringCol7", "type":"string"}, + |"byteCol8":{"cf":"cf8", "col":"byteCol8", "type":"tinyint"} + |} + |}""".stripMargin + + def catalogInt = s"""{ + |"table":{"namespace":"default", "name":"intTable1"}, + |"rowkey":"key1:key2", + |"columns":{ + |"intCol00":{"cf":"rowkey", "col":"key1", "type":"int"}, + |"stringCol01":{"cf":"rowkey", "col":"key2", "type":"string"}, + |"booleanCol1":{"cf":"cf1", "col":"booleanCol1", "type":"boolean"}, + |"doubleCol2":{"cf":"cf2", "col":"doubleCol2", "type":"double"}, + |"floatCol3":{"cf":"cf3", "col":"floatCol3", "type":"float"}, + |"intCol4":{"cf":"cf4", "col":"intCol4", "type":"int"}, + |"longCol5":{"cf":"cf5", "col":"longCol5", "type":"bigint"}, + |"shortCol6":{"cf":"cf6", "col":"shortCol6", "type":"smallint"}, + |"stringCol7":{"cf":"cf7", "col":"stringCol7", "type":"string"}, + |"byteCol8":{"cf":"cf8", "col":"byteCol8", "type":"tinyint"} + |} + |}""".stripMargin + + def catalogBool = s"""{ + |"table":{"namespace":"default", "name":"boolTable1"}, + |"rowkey":"key1:key2", + |"columns":{ + |"booleanCol00":{"cf":"rowkey", "col":"key1", "type":"boolean"}, + |"stringCol01":{"cf":"rowkey", "col":"key2", "type":"string", "length":"6"}, + |"doubleCol2":{"cf":"cf2", "col":"doubleCol2", "type":"double"}, + |"floatCol3":{"cf":"cf3", "col":"floatCol3", "type":"float"}, + |"intCol4":{"cf":"cf4", "col":"intCol4", "type":"int"}, + |"longCol5":{"cf":"cf5", "col":"longCol5", "type":"bigint"}, + |"shortCol6":{"cf":"cf6", "col":"shortCol6", "type":"smallint"}, + |"stringCol7":{"cf":"cf7", "col":"stringCol7", "type":"string"}, + |"byteCol8":{"cf":"cf8", "col":"byteCol8", "type":"tinyint"} + |} + |}""".stripMargin + + + test("populate table 
with composite key") { + val sql = sqlContext + import sql.implicits._ + + sc.parallelize(rawResult).toDF.write.options( + Map(HBaseTableCatalog.tableCatalog -> catalog, HBaseTableCatalog.newTable -> "5")) + .format("org.apache.hadoop.hbase.spark") + .save() + + sc.parallelize(rawResultInt).toDF.write.options( + Map(HBaseTableCatalog.tableCatalog -> catalogInt, HBaseTableCatalog.newTable -> "5")) + .format("org.apache.hadoop.hbase.spark") + .save() + + sc.parallelize(rawResultBool).toDF.write.options( + Map(HBaseTableCatalog.tableCatalog -> catalogBool, HBaseTableCatalog.newTable -> "5")) + .format("org.apache.hadoop.hbase.spark") + .save() + } + + test("full query") { + val df = withCatalog(catalog) + df.show + assert(df.count() == 256) + } + + test("filtered query1") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalog) + val s = df.filter($"stringCol00" <= "row050" && $"intCol01" > 40) + .select("stringCol00", "intCol01","booleanCol1") + s.show + + val expected = rawResult.filter { x => + x.stringCol00 <= "row050" && x.intCol01 > 40 + }.map(x => (x.stringCol00, x.intCol01, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + test("int filtered query1") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalogInt) + val s = df.filter($"stringCol01" <= "row050" && $"intCol00" > 40) + .select("stringCol01", "intCol00","booleanCol1") + s.show + + val expected = rawResultInt.filter { x => + x.stringCol01 <= "row050" && x.intCol00 > 40 + }.map(x => (x.stringCol01, x.intCol00, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + + + test("bool filtered query1") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalogBool) + val s = df.filter($"stringCol01" <= "row050" && $"booleanCol00" === true) + .select("stringCol01", "intCol4","booleanCol00") + s.show + + val expected = rawResultBool.filter { x => + x.stringCol01 <= "row050" && x.booleanCol00 == true + }.map(x => (x.stringCol01, x.intCol4, x.booleanCol00)).toSet + val result = collectToSet(s) + assert(expected === result) + } + + test("filtered query11") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalog) + val s = df.filter($"stringCol00" <= "row050" && $"intCol01" < -40) + .select("stringCol00", "intCol01","booleanCol1") + s.show + + val expected = rawResult.filter { x => + x.stringCol00 <= "row050" && x.intCol01 < -40 + }.map(x => (x.stringCol00, x.intCol01, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + + test("int filtered query11") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalogInt) + val s = df.filter($"stringCol01" <= "row050" && $"intCol00" < -40) + .select("stringCol01", "intCol00","booleanCol1") + s.show + + val expected = rawResultInt.filter { x => + x.stringCol01 <= "row050" && x.intCol00 < -40 + }.map(x => (x.stringCol01, x.intCol00, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + + + test("filtered query12") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalog) + val s = df.filter($"stringCol00" <= "row050" && $"intCol01" > -40) + .select("stringCol00", "intCol01","booleanCol1") + s.show + + val expected = rawResult.filter { x => + x.stringCol00 <= "row050" && x.intCol01 > -40 + }.map(x => (x.stringCol00, x.intCol01, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + + 
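Several of the tests below OR multiple key predicates together (query6 and query7 further down), which is exactly the case the reworked RowKeyFilter.mergeUnion earlier in this patch has to handle: an incoming range absorbs every existing range it overlaps, and only disjoint ranges are kept, so no duplicates survive. The sketch below shows the same interval-union idea on plain byte-array bounds; the names are hypothetical and this is not the hbase-spark implementation.

    import org.apache.hadoop.hbase.util.Bytes

    case class Range(low: Array[Byte], high: Array[Byte])

    def overlaps(a: Range, b: Range): Boolean =
      Bytes.compareTo(a.low, b.high) <= 0 && Bytes.compareTo(b.low, a.high) <= 0

    def union(a: Range, b: Range): Range = Range(
      if (Bytes.compareTo(a.low, b.low) <= 0) a.low else b.low,
      if (Bytes.compareTo(a.high, b.high) >= 0) a.high else b.high)

    // Fold an incoming range into the list: overlapping ranges are merged into it,
    // disjoint ranges are kept as-is, so the result never holds duplicates.
    def mergeUnion(ranges: List[Range], incoming: Range): List[Range] = {
      val (hit, miss) = ranges.partition(overlaps(_, incoming))
      hit.foldLeft(incoming)(union) :: miss
    }

    val merged = mergeUnion(
      List(Range(Bytes.toBytes("row040"), Bytes.toBytes("row050"))),
      Range(Bytes.toBytes("row045"), Bytes.toBytes("row060")))
    // merged is a single range covering row040 .. row060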
test("int filtered query12") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalogInt) + val s = df.filter($"stringCol01" <= "row050" && $"intCol00" > -40) + .select("stringCol01", "intCol00","booleanCol1") + s.show + + val expected = rawResultInt.filter { x => + x.stringCol01 <= "row050" && x.intCol00 > -40 + }.map(x => (x.stringCol01, x.intCol00, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + + test("filtered query2") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalog) + val s = df.filter($"stringCol00" <= "row050" && $"intCol01" >= 40) + .select("stringCol00", "intCol01","booleanCol1") + s.show + + val expected = rawResult.filter { x => + x.stringCol00 <= "row050" && x.intCol01 >= 40 + }.map(x => (x.stringCol00, x.intCol01, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + + test("filtered query3") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalog) + val s = df.filter($"stringCol00" >= "row250" && $"intCol01" < 50) + .select("stringCol00", "intCol01","booleanCol1") + s.show + + val expected = rawResult.filter { x => + x.stringCol00 >= "row250" && x.intCol01 < 50 + }.map(x => (x.stringCol00, x.intCol01, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + + test("filtered query4") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalog) + val s = df.filter($"stringCol00" <= "row010") // row005 not included + .select("stringCol00", "intCol01","booleanCol1") + s.show + + val expected = rawResult.filter { x => + x.stringCol00 <= "row010" + }.map(x => (x.stringCol00, x.intCol01, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + + test("filtered query5") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalog) + val s = df.filter($"stringCol00" === "row010") // row005 not included + .select("stringCol00", "intCol01","booleanCol1") + s.show + + val expected = rawResult.filter { x => + x.stringCol00 === "row010" + }.map(x => (x.stringCol00, x.intCol01, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + test("filtered query51") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalog) + val s = df.filter($"stringCol00" === "row011") // row005 not included + .select("stringCol00", "intCol01","booleanCol1") + s.show + + val expected = rawResult.filter { x => + x.stringCol00 === "row011" + }.map(x => (x.stringCol00, x.intCol01, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + + test("filtered query52") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalog) + val s = df.filter($"stringCol00" === "row005") // row005 not included + .select("stringCol00", "intCol01","booleanCol1") + s.show + + val expected = rawResult.filter { x => + x.stringCol00 === "row005" + }.map(x => (x.stringCol00, x.intCol01, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + + test("filtered query6") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalog) + val s = df.filter(($"stringCol00" <= "row050" && $"stringCol00" > "row040") || + $"stringCol00" === "row010" || // no included, since it is composite key + $"stringCol00" === "row020" || // not inlcuded + $"stringCol00" === "r20" || // not included + $"stringCol00" <= "row010") // row005 not 
included + .select("stringCol00", "intCol01","booleanCol1") + s.show(40) + + val expected = rawResult.filter { x => + (x.stringCol00 <= "row050" && x.stringCol00 > "row040") || + x.stringCol00 === "row010" || // no included, since it is composite key + x.stringCol00 === "row020" || // not inlcuded + x.stringCol00 === "r20" || // not included + x.stringCol00 <= "row010" + }.map(x => (x.stringCol00, x.intCol01, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + + } + + + test("filtered query7") { + val sql = sqlContext + import sql.implicits._ + val df = withCatalog(catalog) + val s = df.filter(($"stringCol00" <= "row050" && $"stringCol00" > "row040") || + $"stringCol00" === "row005" || // no included, since it is composite key + $"stringCol00" === "row020" || // not inlcuded + $"stringCol00" === "r20" || // not included + $"stringCol00" <= "row005") // row005 not included + .select("stringCol00", "intCol01","booleanCol1") + s.show(40) + + val expected = rawResult.filter { x => + (x.stringCol00 <= "row050" && x.stringCol00 > "row040") || + x.stringCol00 === "row005" || // no included, since it is composite key + x.stringCol00 === "row020" || // not inlcuded + x.stringCol00 === "r20" || // not included + x.stringCol00 <= "row005" // row005 not included + }.map(x => (x.stringCol00, x.intCol01, x.booleanCol1)).toSet + val result = collectToSet(s) + assert(expected === result) + } + +} diff --git hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseCatalogSuite.scala hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseCatalogSuite.scala index 49e2f6c..5a4b230 100644 --- hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseCatalogSuite.scala +++ hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseCatalogSuite.scala @@ -33,7 +33,7 @@ class HBaseCatalogSuite extends FunSuite with BeforeAndAfterEach with BeforeAndA |"table":{"namespace":"default", "name":"htable"}, |"rowkey":"key1:key2", |"columns":{ - |"col1":{"cf":"rowkey", "col":"key1", "type":"string"}, + |"col1":{"cf":"rowkey", "col":"key1", "type":"string", "length":"5"}, |"col2":{"cf":"rowkey", "col":"key2", "type":"double"}, |"col3":{"cf":"cf1", "col":"col2", "type":"binary"}, |"col4":{"cf":"cf1", "col":"col3", "type":"timestamp"}, diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java index 5fbde7a..da33cc0 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.thrift; +import java.util.Locale; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -79,7 +80,7 @@ public class HThreadedSelectorServerArgs extends TThreadedSelectorServer.Args { int acceptQueueSizePerThread = conf.getInt( ACCEPT_QUEUE_SIZE_PER_THREAD_CONF_KEY, getAcceptQueueSizePerThread()); AcceptPolicy acceptPolicy = AcceptPolicy.valueOf(conf.get( - ACCEPT_POLICY_CONF_KEY, getAcceptPolicy().toString()).toUpperCase()); + ACCEPT_POLICY_CONF_KEY, getAcceptPolicy().toString()).toUpperCase(Locale.ROOT)); super.selectorThreads(selectorThreads) .workerThreads(workerThreads) diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java 
hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java index 8767a3c..b82fa37 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java @@ -1497,6 +1497,9 @@ public class ThriftServerRunner implements Runnable { if (tScan.isSetReversed()) { scan.setReversed(tScan.isReversed()); } + if (tScan.isSetCacheBlocks()) { + scan.setCacheBlocks(tScan.isCacheBlocks()); + } return addScanner(table.getScanner(scan), tScan.sortColumns); } catch (IOException e) { LOG.warn(e.getMessage(), e); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java index 29db5be..dd0c52c 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.hbase.util.Bytes.getBytes; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.TreeMap; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -59,7 +60,7 @@ public class ThriftUtilities { static public HColumnDescriptor colDescFromThrift(ColumnDescriptor in) throws IllegalArgument { Compression.Algorithm comp = - Compression.getCompressionAlgorithmByName(in.compression.toLowerCase()); + Compression.getCompressionAlgorithmByName(in.compression.toLowerCase(Locale.ROOT)); BloomType bt = BloomType.valueOf(in.bloomFilterType); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java index 6db142e..68361c1 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; * An AlreadyExists exceptions signals that a table with the specified * name already exists */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class AlreadyExists extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlreadyExists"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java index de5dbbc..a2920ea 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * A BatchMutation object is used to apply a number of Mutations to a single row. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class BatchMutation implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BatchMutation"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java index b8c7b63..3252377 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java @@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory; * such as the number of versions, compression settings, etc. It is * used as input when creating a table or adding a column. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class ColumnDescriptor implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnDescriptor"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java index 99623af..f77ce14 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class Hbase { public interface Iface { diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java index 1cf37bb..558f3e1 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java @@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory; * to the Hbase master or an Hbase region server. Also used to return * more general Hbase error conditions. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class IOError extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IOError"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java index 8dd58c0..bd296cc 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/IllegalArgument.java @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; * An IllegalArgument exception indicates an illegal or invalid * argument was passed into a procedure. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class IllegalArgument extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IllegalArgument"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java index 27f2560..216df2d 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Mutation.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * A Mutation object is used to either update or delete a column-value. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class Mutation implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Mutation"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java index fc95104..439d71e 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TAppend.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * An Append object is used to specify the parameters for performing the append operation. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TAppend implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAppend"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java index eb1448b..37021c0 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TCell.java @@ -40,7 +40,7 @@ import org.slf4j.LoggerFactory; * the timestamp of a cell to a first-class value, making it easy to take * note of temporal data. Cell is used all the way from HStore up to HTable. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TCell implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCell"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java index 2d43593..77e875d 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TColumn.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * Holds column name and the cell. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumn"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java index 10c143a..22b5f79 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TIncrement.java @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; * For increments that are not incrementColumnValue * equivalents. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TIncrement implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIncrement"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java index 6f2d048..a1dab6b 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * A TRegionInfo contains information about an HTable region. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TRegionInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRegionInfo"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java index 9d876a7..418f503 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TRowResult.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * Holds row name and then a map of columns to cells. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TRowResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRowResult"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java index 4e13005..3faadd9 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * A Scan object is used to specify scanner parameters when opening a scanner. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TScan implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TScan"); @@ -50,6 +50,7 @@ public class TScan implements org.apache.thrift.TBase, jav private static final org.apache.thrift.protocol.TField BATCH_SIZE_FIELD_DESC = new org.apache.thrift.protocol.TField("batchSize", org.apache.thrift.protocol.TType.I32, (short)7); private static final org.apache.thrift.protocol.TField SORT_COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("sortColumns", org.apache.thrift.protocol.TType.BOOL, (short)8); private static final org.apache.thrift.protocol.TField REVERSED_FIELD_DESC = new org.apache.thrift.protocol.TField("reversed", org.apache.thrift.protocol.TType.BOOL, (short)9); + private static final org.apache.thrift.protocol.TField CACHE_BLOCKS_FIELD_DESC = new org.apache.thrift.protocol.TField("cacheBlocks", org.apache.thrift.protocol.TType.BOOL, (short)10); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -66,6 +67,7 @@ public class TScan implements org.apache.thrift.TBase, jav public int batchSize; // optional public boolean sortColumns; // optional public boolean reversed; // optional + public boolean cacheBlocks; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -77,7 +79,8 @@ public class TScan implements org.apache.thrift.TBase, jav FILTER_STRING((short)6, "filterString"), BATCH_SIZE((short)7, "batchSize"), SORT_COLUMNS((short)8, "sortColumns"), - REVERSED((short)9, "reversed"); + REVERSED((short)9, "reversed"), + CACHE_BLOCKS((short)10, "cacheBlocks"); private static final Map byName = new HashMap(); @@ -110,6 +113,8 @@ public class TScan implements org.apache.thrift.TBase, jav return SORT_COLUMNS; case 9: // REVERSED return REVERSED; + case 10: // CACHE_BLOCKS + return CACHE_BLOCKS; default: return null; } @@ -155,8 +160,9 @@ public class TScan implements org.apache.thrift.TBase, jav private static final int __BATCHSIZE_ISSET_ID = 2; private static final int __SORTCOLUMNS_ISSET_ID = 3; private static final int __REVERSED_ISSET_ID = 4; + private static final int __CACHEBLOCKS_ISSET_ID = 5; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.TIMESTAMP,_Fields.COLUMNS,_Fields.CACHING,_Fields.FILTER_STRING,_Fields.BATCH_SIZE,_Fields.SORT_COLUMNS,_Fields.REVERSED}; + private static final _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.TIMESTAMP,_Fields.COLUMNS,_Fields.CACHING,_Fields.FILTER_STRING,_Fields.BATCH_SIZE,_Fields.SORT_COLUMNS,_Fields.REVERSED,_Fields.CACHE_BLOCKS}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -179,6 +185,8 @@ public class TScan implements org.apache.thrift.TBase, jav new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.REVERSED, new org.apache.thrift.meta_data.FieldMetaData("reversed", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CACHE_BLOCKS, new org.apache.thrift.meta_data.FieldMetaData("cacheBlocks", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TScan.class, metaDataMap); } @@ -212,6 +220,7 @@ public class TScan implements org.apache.thrift.TBase, jav this.batchSize = other.batchSize; this.sortColumns = other.sortColumns; this.reversed = other.reversed; + this.cacheBlocks = other.cacheBlocks; } public TScan deepCopy() { @@ -234,6 +243,8 @@ public class TScan implements org.apache.thrift.TBase, jav this.sortColumns = false; setReversedIsSet(false); this.reversed = false; + setCacheBlocksIsSet(false); + this.cacheBlocks = false; } public byte[] getStartRow() { @@ -492,6 +503,29 @@ public class TScan implements org.apache.thrift.TBase, jav __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REVERSED_ISSET_ID, value); } + public boolean isCacheBlocks() { + return this.cacheBlocks; + } + + public TScan setCacheBlocks(boolean cacheBlocks) { + this.cacheBlocks = cacheBlocks; + setCacheBlocksIsSet(true); + return this; + } + + public void unsetCacheBlocks() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID); + } + + /** Returns true if field cacheBlocks is set (has been assigned a value) and false otherwise */ + public boolean isSetCacheBlocks() { + return EncodingUtils.testBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID); + } + + public void setCacheBlocksIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case START_ROW: @@ -566,6 +600,14 @@ public class TScan implements org.apache.thrift.TBase, jav } break; + case CACHE_BLOCKS: + if (value == null) { + unsetCacheBlocks(); + } else { + setCacheBlocks((Boolean)value); + } + break; + } } @@ -598,6 +640,9 @@ public class TScan implements org.apache.thrift.TBase, jav case REVERSED: return isReversed(); + case CACHE_BLOCKS: + return isCacheBlocks(); + } throw new IllegalStateException(); } @@ -627,6 +672,8 @@ public class TScan implements org.apache.thrift.TBase, jav return isSetSortColumns(); case REVERSED: return isSetReversed(); + case CACHE_BLOCKS: + return isSetCacheBlocks(); } throw new IllegalStateException(); } @@ -725,6 +772,15 @@ public class TScan implements org.apache.thrift.TBase, jav return false; } + boolean this_present_cacheBlocks = true && this.isSetCacheBlocks(); + boolean that_present_cacheBlocks = true && that.isSetCacheBlocks(); + if (this_present_cacheBlocks || that_present_cacheBlocks) { + if (!(this_present_cacheBlocks && that_present_cacheBlocks)) + return false; + if (this.cacheBlocks != that.cacheBlocks) + return false; + } + return true; } @@ -777,6 +833,11 @@ public class TScan implements org.apache.thrift.TBase, jav if (present_reversed) list.add(reversed); + boolean present_cacheBlocks = true && (isSetCacheBlocks()); + list.add(present_cacheBlocks); + if (present_cacheBlocks) + list.add(cacheBlocks); + return list.hashCode(); } @@ -878,6 +939,16 @@ public class TScan implements org.apache.thrift.TBase, jav return lastComparison; } } + lastComparison = 
Boolean.valueOf(isSetCacheBlocks()).compareTo(other.isSetCacheBlocks()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCacheBlocks()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cacheBlocks, other.cacheBlocks); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -967,6 +1038,12 @@ public class TScan implements org.apache.thrift.TBase, jav sb.append(this.reversed); first = false; } + if (isSetCacheBlocks()) { + if (!first) sb.append(", "); + sb.append("cacheBlocks:"); + sb.append(this.cacheBlocks); + first = false; + } sb.append(")"); return sb.toString(); } @@ -1094,6 +1171,14 @@ public class TScan implements org.apache.thrift.TBase, jav org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 10: // CACHE_BLOCKS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.cacheBlocks = iprot.readBool(); + struct.setCacheBlocksIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1169,6 +1254,11 @@ public class TScan implements org.apache.thrift.TBase, jav oprot.writeBool(struct.reversed); oprot.writeFieldEnd(); } + if (struct.isSetCacheBlocks()) { + oprot.writeFieldBegin(CACHE_BLOCKS_FIELD_DESC); + oprot.writeBool(struct.cacheBlocks); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1214,7 +1304,10 @@ public class TScan implements org.apache.thrift.TBase, jav if (struct.isSetReversed()) { optionals.set(8); } - oprot.writeBitSet(optionals, 9); + if (struct.isSetCacheBlocks()) { + optionals.set(9); + } + oprot.writeBitSet(optionals, 10); if (struct.isSetStartRow()) { oprot.writeBinary(struct.startRow); } @@ -1248,12 +1341,15 @@ public class TScan implements org.apache.thrift.TBase, jav if (struct.isSetReversed()) { oprot.writeBool(struct.reversed); } + if (struct.isSetCacheBlocks()) { + oprot.writeBool(struct.cacheBlocks); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(9); + BitSet incoming = iprot.readBitSet(10); if (incoming.get(0)) { struct.startRow = iprot.readBinary(); struct.setStartRowIsSet(true); @@ -1299,6 +1395,10 @@ public class TScan implements org.apache.thrift.TBase, jav struct.reversed = iprot.readBool(); struct.setReversedIsSet(true); } + if (incoming.get(9)) { + struct.cacheBlocks = iprot.readBool(); + struct.setCacheBlocksIsSet(true); + } } } diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java index 9f60d61..9dea9a5 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java @@ -74,7 +74,7 @@ import org.apache.thrift.TException; /** * This class is a glue object that connects Thrift RPC calls to the HBase client API primarily - * defined in the HTableInterface. + * defined in the Table interface. 
*/ @InterfaceAudience.Private @SuppressWarnings("deprecation") diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java index d5cf287..94bdbb5 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java @@ -430,6 +430,10 @@ public class ThriftUtilities { out.setReversed(in.isReversed()); } + if (in.isSetCacheBlocks()) { + out.setCacheBlocks(in.isCacheBlocks()); + } + return out; } diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java index 17ad5d5..e0e3074 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAppend.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TAppend implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAppend"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java index a315ab5..6c355a6 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TAuthorization.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TAuthorization implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TAuthorization"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java index 60362bb..7da4dda 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TCellVisibility implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCellVisibility"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java 
hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java index f1d95e5..d0d336c 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java @@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory; * in a HBase table by column family and optionally * a column qualifier and timestamp */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumn"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java index 8ce3af6..2fb3f76 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * Represents a single cell and the amount to increment it by */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TColumnIncrement implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnIncrement"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java index 3ac829d..3ceb4c0 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * Represents a single cell and its value. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TColumnValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnValue"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java index a0efa8e..8f7f119 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDelete.java @@ -60,7 +60,7 @@ import org.slf4j.LoggerFactory; * by changing the durability. If you don't provide durability, it defaults to * column family's default setting for durability. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TDelete implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TDelete"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java index 95c1bce..a493bd9 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java @@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory; * If you specify a time range and a timestamp the range is ignored. * Timestamps on TColumns are ignored. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TGet implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGet"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java index 0e54f13..c010806 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class THBaseService { public interface Iface { diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java index 043548c..129ab2e 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class THRegionInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THRegionInfo"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java index 84f1e84..94b25ff 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", 
"unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class THRegionLocation implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THRegionLocation"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java index baf0b3d..2e50d3d 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java @@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory; * to the HBase master or a HBase region server. Also used to return * more general HBase error conditions. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TIOError extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIOError"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java index c2d67bb..9387429 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; * A TIllegalArgument exception indicates an illegal or invalid * argument was passed into a procedure. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TIllegalArgument extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIllegalArgument"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java index d21b451..8d62eb5 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java @@ -41,7 +41,7 @@ import org.slf4j.LoggerFactory; * by changing the durability. If you don't provide durability, it defaults to * column family's default setting for durability. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TIncrement implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIncrement"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java index 5a55e03..552e3d4 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TPut.java @@ -46,7 +46,7 @@ import org.slf4j.LoggerFactory; * by changing the durability. If you don't provide durability, it defaults to * column family's default setting for durability. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TPut implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPut"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java index 20c8a90..8c16012 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TResult.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * if no Result is found, row and columnValues will not be set. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TResult"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java index 33c46bd..dfa06ff 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TRowMutations.java @@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory; /** * A TRowMutations object is used to apply a number of Mutations to a single row. 
*/ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TRowMutations implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRowMutations"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java index 4ae759c..e0bcb55 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TScan.java @@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory; * Any timestamps in the columns are ignored, use timeRange to select by timestamp. * Max versions defaults to 1. */ -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TScan implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TScan"); @@ -53,6 +53,7 @@ public class TScan implements org.apache.thrift.TBase, jav private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC = new org.apache.thrift.protocol.TField("attributes", org.apache.thrift.protocol.TType.MAP, (short)9); private static final org.apache.thrift.protocol.TField AUTHORIZATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("authorizations", org.apache.thrift.protocol.TType.STRUCT, (short)10); private static final org.apache.thrift.protocol.TField REVERSED_FIELD_DESC = new org.apache.thrift.protocol.TField("reversed", org.apache.thrift.protocol.TType.BOOL, (short)11); + private static final org.apache.thrift.protocol.TField CACHE_BLOCKS_FIELD_DESC = new org.apache.thrift.protocol.TField("cacheBlocks", org.apache.thrift.protocol.TType.BOOL, (short)12); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -71,6 +72,7 @@ public class TScan implements org.apache.thrift.TBase, jav public Map attributes; // optional public TAuthorization authorizations; // optional public boolean reversed; // optional + public boolean cacheBlocks; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -84,7 +86,8 @@ public class TScan implements org.apache.thrift.TBase, jav BATCH_SIZE((short)8, "batchSize"), ATTRIBUTES((short)9, "attributes"), AUTHORIZATIONS((short)10, "authorizations"), - REVERSED((short)11, "reversed"); + REVERSED((short)11, "reversed"), + CACHE_BLOCKS((short)12, "cacheBlocks"); private static final Map byName = new HashMap(); @@ -121,6 +124,8 @@ public class TScan implements org.apache.thrift.TBase, jav return AUTHORIZATIONS; case 11: // REVERSED return REVERSED; + case 12: // CACHE_BLOCKS + return CACHE_BLOCKS; default: return null; } @@ -165,8 +170,9 @@ public class TScan implements org.apache.thrift.TBase, jav private static final int __MAXVERSIONS_ISSET_ID = 1; private static final int __BATCHSIZE_ISSET_ID = 2; private static final int __REVERSED_ISSET_ID = 3; + private static final int __CACHEBLOCKS_ISSET_ID = 4; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.COLUMNS,_Fields.CACHING,_Fields.MAX_VERSIONS,_Fields.TIME_RANGE,_Fields.FILTER_STRING,_Fields.BATCH_SIZE,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.REVERSED}; + private static final _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.COLUMNS,_Fields.CACHING,_Fields.MAX_VERSIONS,_Fields.TIME_RANGE,_Fields.FILTER_STRING,_Fields.BATCH_SIZE,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.REVERSED,_Fields.CACHE_BLOCKS}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -195,6 +201,8 @@ public class TScan implements org.apache.thrift.TBase, jav new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TAuthorization.class))); tmpMap.put(_Fields.REVERSED, new org.apache.thrift.meta_data.FieldMetaData("reversed", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CACHE_BLOCKS, new org.apache.thrift.meta_data.FieldMetaData("cacheBlocks", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TScan.class, metaDataMap); } @@ -239,6 +247,7 @@ public class TScan implements org.apache.thrift.TBase, jav this.authorizations = new TAuthorization(other.authorizations); } this.reversed = other.reversed; + this.cacheBlocks = other.cacheBlocks; } public TScan deepCopy() { @@ -262,6 +271,8 @@ public class TScan implements org.apache.thrift.TBase, jav this.authorizations = null; setReversedIsSet(false); this.reversed = false; + setCacheBlocksIsSet(false); + this.cacheBlocks = false; } public byte[] getStartRow() { @@ -580,6 +591,29 @@ public class TScan implements org.apache.thrift.TBase, jav __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REVERSED_ISSET_ID, value); } + public boolean isCacheBlocks() { + return this.cacheBlocks; + } + + public TScan setCacheBlocks(boolean cacheBlocks) { + this.cacheBlocks = cacheBlocks; + setCacheBlocksIsSet(true); + return this; + } + + public void unsetCacheBlocks() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID); + } + + /** Returns true if 
field cacheBlocks is set (has been assigned a value) and false otherwise */ + public boolean isSetCacheBlocks() { + return EncodingUtils.testBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID); + } + + public void setCacheBlocksIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CACHEBLOCKS_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case START_ROW: @@ -670,6 +704,14 @@ public class TScan implements org.apache.thrift.TBase, jav } break; + case CACHE_BLOCKS: + if (value == null) { + unsetCacheBlocks(); + } else { + setCacheBlocks((Boolean)value); + } + break; + } } @@ -708,6 +750,9 @@ public class TScan implements org.apache.thrift.TBase, jav case REVERSED: return isReversed(); + case CACHE_BLOCKS: + return isCacheBlocks(); + } throw new IllegalStateException(); } @@ -741,6 +786,8 @@ public class TScan implements org.apache.thrift.TBase, jav return isSetAuthorizations(); case REVERSED: return isSetReversed(); + case CACHE_BLOCKS: + return isSetCacheBlocks(); } throw new IllegalStateException(); } @@ -857,6 +904,15 @@ public class TScan implements org.apache.thrift.TBase, jav return false; } + boolean this_present_cacheBlocks = true && this.isSetCacheBlocks(); + boolean that_present_cacheBlocks = true && that.isSetCacheBlocks(); + if (this_present_cacheBlocks || that_present_cacheBlocks) { + if (!(this_present_cacheBlocks && that_present_cacheBlocks)) + return false; + if (this.cacheBlocks != that.cacheBlocks) + return false; + } + return true; } @@ -919,6 +975,11 @@ public class TScan implements org.apache.thrift.TBase, jav if (present_reversed) list.add(reversed); + boolean present_cacheBlocks = true && (isSetCacheBlocks()); + list.add(present_cacheBlocks); + if (present_cacheBlocks) + list.add(cacheBlocks); + return list.hashCode(); } @@ -1040,6 +1101,16 @@ public class TScan implements org.apache.thrift.TBase, jav return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCacheBlocks()).compareTo(other.isSetCacheBlocks()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCacheBlocks()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cacheBlocks, other.cacheBlocks); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1153,6 +1224,12 @@ public class TScan implements org.apache.thrift.TBase, jav sb.append(this.reversed); first = false; } + if (isSetCacheBlocks()) { + if (!first) sb.append(", "); + sb.append("cacheBlocks:"); + sb.append(this.cacheBlocks); + first = false; + } sb.append(")"); return sb.toString(); } @@ -1317,6 +1394,14 @@ public class TScan implements org.apache.thrift.TBase, jav org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 12: // CACHE_BLOCKS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.cacheBlocks = iprot.readBool(); + struct.setCacheBlocksIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1416,6 +1501,11 @@ public class TScan implements org.apache.thrift.TBase, jav oprot.writeBool(struct.reversed); oprot.writeFieldEnd(); } + if (struct.isSetCacheBlocks()) { + oprot.writeFieldBegin(CACHE_BLOCKS_FIELD_DESC); + oprot.writeBool(struct.cacheBlocks); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1467,7 +1557,10 @@ public class TScan implements org.apache.thrift.TBase, jav if 
(struct.isSetReversed()) { optionals.set(10); } - oprot.writeBitSet(optionals, 11); + if (struct.isSetCacheBlocks()) { + optionals.set(11); + } + oprot.writeBitSet(optionals, 12); if (struct.isSetStartRow()) { oprot.writeBinary(struct.startRow); } @@ -1514,12 +1607,15 @@ public class TScan implements org.apache.thrift.TBase, jav if (struct.isSetReversed()) { oprot.writeBool(struct.reversed); } + if (struct.isSetCacheBlocks()) { + oprot.writeBool(struct.cacheBlocks); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(11); + BitSet incoming = iprot.readBitSet(12); if (incoming.get(0)) { struct.startRow = iprot.readBinary(); struct.setStartRowIsSet(true); @@ -1587,6 +1683,10 @@ public class TScan implements org.apache.thrift.TBase, jav struct.reversed = iprot.readBool(); struct.setReversedIsSet(true); } + if (incoming.get(11)) { + struct.cacheBlocks = iprot.readBool(); + struct.setCacheBlocksIsSet(true); + } } } diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java index a25f2c5..27188e9 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TServerName.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TServerName implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TServerName"); diff --git hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java index 24e5d58..74c7255 100644 --- hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java +++ hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2015-12-13") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = "2016-05-25") public class TTimeRange implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTimeRange"); diff --git hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift index 9cab7ff..13faa8e 100644 --- hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift +++ hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift @@ -150,7 +150,8 @@ struct TScan { 6:optional Text filterString, 7:optional i32 batchSize, 8:optional bool sortColumns, - 9:optional bool reversed + 9:optional bool reversed, + 10:optional bool cacheBlocks } /** diff --git 
hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift index 8afeef1..00f0203 100644 --- hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift +++ hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift @@ -224,6 +224,7 @@ struct TScan { 9: optional map attributes 10: optional TAuthorization authorizations 11: optional bool reversed + 12: optional bool cacheBlocks } /** diff --git src/main/asciidoc/_chapters/architecture.adoc src/main/asciidoc/_chapters/architecture.adoc index faa1230..9f59cd5 100644 --- src/main/asciidoc/_chapters/architecture.adoc +++ src/main/asciidoc/_chapters/architecture.adoc @@ -2119,7 +2119,7 @@ This is not necessary on new tables. [[ops.date.tiered.config]] ====== Configuring Date Tiered Compaction -Each of the settings for date tiered compaction should be configured at the table or column family, after disabling the table. +Each of the settings for date tiered compaction should be configured at the table or column family level. If you use HBase shell, the general command pattern is as follows: [source,sql] @@ -2199,7 +2199,6 @@ You can enable stripe compaction for a table or a column family, by setting its You also need to set the `hbase.hstore.blockingStoreFiles` to a high number, such as 100 (rather than the default value of 10). .Procedure: Enable Stripe Compaction -. If the table already exists, disable the table. . Run one of following commands in the HBase shell. Replace the table name `orders_table` with the name of your table. + @@ -2215,7 +2214,6 @@ create 'orders_table', 'blobs_cf', CONFIGURATION => {'hbase.hstore.engine.class' . Enable the table. .Procedure: Disable Stripe Compaction -. Disable the table. . Set the `hbase.hstore.engine.class` option to either nil or `org.apache.hadoop.hbase.regionserver.DefaultStoreEngine`. Either option has the same effect. + @@ -2232,7 +2230,7 @@ This is not necessary on new tables. [[ops.stripe.config]] ====== Configuring Stripe Compaction -Each of the settings for stripe compaction should be configured at the table or column family, after disabling the table. +Each of the settings for stripe compaction should be configured at the table or column family level. If you use HBase shell, the general command pattern is as follows: [source,sql] diff --git src/main/asciidoc/_chapters/configuration.adoc src/main/asciidoc/_chapters/configuration.adoc index d705db9..4702bcb 100644 --- src/main/asciidoc/_chapters/configuration.adoc +++ src/main/asciidoc/_chapters/configuration.adoc @@ -1111,6 +1111,37 @@ Only a subset of all configurations can currently be changed in the running serv Here is an incomplete list: `hbase.regionserver.thread.compaction.large`, `hbase.regionserver.thread.compaction.small`, `hbase.regionserver.thread.split`, `hbase.regionserver.thread.merge`, as well as compaction policy and configurations and adjustment to offpeak hours. For the full list consult the patch attached to link:https://issues.apache.org/jira/browse/HBASE-12147[HBASE-12147 Porting Online Config Change from 89-fb]. +[[amazon_s3_configuration]] +== Using Amazon S3 Storage + +HBase is designed to be tightly coupled with HDFS, and testing of other filesystems +has not been thorough. 
+ +The following limitations have been reported: + +- RegionServers should be deployed in Amazon EC2 to mitigate latency and bandwidth +limitations when accessing the filesystem, and RegionServers must remain available +to preserve data locality. +- S3 writes each inbound and outbound file to disk, which adds overhead to each operation. +- The best performance is achieved when all clients and servers are in the Amazon +cloud, rather than a heterogenous architecture. +- You must be aware of the location of `hadoop.tmp.dir` so that the local `/tmp/` +directory is not filled to capacity. +- HBase has a different file usage pattern than MapReduce jobs and has been optimized for +HDFS, rather than distant networked storage. +- The `s3a://` protocol is strongly recommended. The `s3n://` and `s3://` protocols have serious +limitations and do not use the Amazon AWS SDK. The `s3a://` protocol is supported +for use with HBase if you use Hadoop 2.6.1 or higher with HBase 1.2 or higher. Hadoop +2.6.0 is not supported with HBase at all. + +Configuration details for Amazon S3 and associated Amazon services such as EMR are +out of the scope of the HBase documentation. See the +link:https://wiki.apache.org/hadoop/AmazonS3[Hadoop Wiki entry on Amazon S3 Storage] +and +link:http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-hbase.html[Amazon's documentation for deploying HBase in EMR]. + +One use case that is well-suited for Amazon S3 is storing snapshots. See <>. + ifdef::backend-docbook[] [index] == Index diff --git src/main/asciidoc/_chapters/developer.adoc src/main/asciidoc/_chapters/developer.adoc index 4091833..74ce3df 100644 --- src/main/asciidoc/_chapters/developer.adoc +++ src/main/asciidoc/_chapters/developer.adoc @@ -864,7 +864,8 @@ Also, keep in mind that if you are running tests in the `hbase-server` module yo [[hbase.unittests]] === Unit Tests -Apache HBase unit tests are subdivided into four categories: small, medium, large, and integration with corresponding JUnit link:http://www.junit.org/node/581[categories]: `SmallTests`, `MediumTests`, `LargeTests`, `IntegrationTests`. +Apache HBase test cases are subdivided into four categories: small, medium, large, and +integration with corresponding JUnit link:http://www.junit.org/node/581[categories]: `SmallTests`, `MediumTests`, `LargeTests`, `IntegrationTests`. JUnit categories are denoted using java annotations and look like this in your unit test code. [source,java] @@ -879,10 +880,11 @@ public class TestHRegionInfo { } ---- -The above example shows how to mark a unit test as belonging to the `small` category. -All unit tests in HBase have a categorization. +The above example shows how to mark a test case as belonging to the `small` category. +All test cases in HBase should have a categorization. -The first three categories, `small`, `medium`, and `large`, are for tests run when you type `$ mvn test`. +The first three categories, `small`, `medium`, and `large`, are for test cases which run when you +type `$ mvn test`. In other words, these three categorizations are for HBase unit tests. The `integration` category is not for unit tests, but for integration tests. These are run when you invoke `$ mvn verify`. @@ -890,22 +892,23 @@ Integration tests are described in <>. HBase uses a patched maven surefire plugin and maven profiles to implement its unit test characterizations. -Keep reading to figure which annotation of the set small, medium, and large to put on your new HBase unit test. 
+Keep reading to figure which annotation of the set small, medium, and large to put on your new +HBase test case. .Categorizing Tests Small Tests (((SmallTests))):: - _Small_ tests are executed in a shared JVM. - We put in this category all the tests that can be executed quickly in a shared JVM. - The maximum execution time for a small test is 15 seconds, and small tests should not use a (mini)cluster. + _Small_ test cases are executed in a shared JVM and individual test cases should run in 15 seconds + or less; i.e. a link:https://en.wikipedia.org/wiki/JUnit[junit test fixture], a java object made + up of test methods, should finish in under 15 seconds. These test cases can not use mini cluster. + These are run as part of patch pre-commit. Medium Tests (((MediumTests))):: - _Medium_ tests represent tests that must be executed before proposing a patch. - They are designed to run in less than 30 minutes altogether, and are quite stable in their results. - They are designed to last less than 50 seconds individually. - They can use a cluster, and each of them is executed in a separate JVM. + _Medium_ test cases are executed in separate JVM and individual test case should run in 50 seconds + or less. Together, they should take less than 30 minutes, and are quite stable in their results. + These test cases can use a mini cluster. These are run as part of patch pre-commit. Large Tests (((LargeTests))):: - _Large_ tests are everything else. + _Large_ test cases are everything else. They are typically large-scale tests, regression tests for specific bugs, timeout tests, performance tests. They are executed before a commit on the pre-integration machines. They can be run on the developer machine as well. @@ -1049,9 +1052,7 @@ ConnectionCount=1 (was 1) * All tests must be categorized, if not they could be skipped. * All tests should be written to be as fast as possible. -* Small category tests should last less than 15 seconds, and must not have any side effect. -* Medium category tests should last less than 50 seconds. -* Large category tests should last less than 3 minutes. +* See < for test case categories and corresponding timeouts. This should ensure a good parallelization for people using it, and ease the analysis when the test fails. [[hbase.tests.sleeps]] @@ -1080,56 +1081,28 @@ This will allow to share the cluster later. [[hbase.tests.example.code]] ==== Tests Skeleton Code -Here is a test skeleton code with Categorization and a Category-based timeout Rule to copy and paste and use as basis for test contribution. +Here is a test skeleton code with Categorization and a Category-based timeout rule to copy and paste and use as basis for test contribution. [source,java] ---- /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Describe what this testcase tests. Talk about resources initialized in @BeforeClass (before + * any test is run) and before each test is run, etc. */ -package org.apache.hadoop.hbase; - -import static org.junit.Assert.*; - -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; -import org.junit.rules.TestRule; - -/** - * Skeleton HBase test - */ -// NOTICE: See how we've 'categorized' this test. All hbase unit tests need to be categorized as -// either 'small', 'medium', or 'large'. See http://hbase.apache.org/book.html#hbase.tests -// for more on these categories. +// Specify the category as explained in <>. @Category(SmallTests.class) public class TestExample { - // Handy test rule that allows you subsequently get at the name of the current method. See - // down in 'test()' where we use it in the 'fail' message. + // Replace the TestExample.class in the below with the name of your test fixture class. + private static final Log LOG = LogFactory.getLog(TestExample.class); + + // Handy test rule that allows you subsequently get the name of the current method. See + // down in 'testExampleFoo()' where we use it to log current test's name. @Rule public TestName testName = new TestName(); - // Rather than put a @Test (timeout=.... on each test so for sure the test times out, instead - // just the CategoryBasedTimeout... It will apply to each test in this test set, the timeout - // that goes w/ the particular test categorization. - @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). - withLookingForStuckThread(true).build(); + // CategoryBasedTimeout.forClass() decides the timeout based on the category + // (small/medium/large) of the testcase. @ClassRule requires that the full testcase runs within + // this timeout irrespective of individual test methods' times. + @ClassRule + public static TestRule timeout = CategoryBasedTimeout.forClass(TestExample.class); @Before public void setUp() throws Exception { @@ -1140,8 +1113,8 @@ public class TestExample { } @Test - public void test() { - fail(testName.getMethodName() + " is not yet implemented"); + public void testExampleFoo() { + LOG.info("Running test " + testName.getMethodName()); } } ---- @@ -1780,21 +1753,29 @@ It provides a nice overview that applies equally to the Apache HBase Project. [[submitting.patches.create]] ==== Create Patch -The script _dev-support/make_patch.sh_ has been provided to help you adhere to patch-creation guidelines. -The script has the following syntax: +Use _dev-support/submit-patch.py_ to create patches and optionally, upload to jira and update +reviews on Review Board. Patch name is formatted as (JIRA).(branch name).(patch number).patch to +follow Yetus' naming rules. Use `-h` flag to know detailed usage information. Most useful options +are: ----- -$ make_patch.sh [-a] [-p ] ----- +. `-b BRANCH, --branch BRANCH` : Specify base branch for generating the diff. If not specified, tracking branch is used. If there is no tracking branch, error will be thrown. +. `-jid JIRA_ID, --jira-id JIRA_ID` : Jira id of the issue. If set, we deduce next patch version from attachments in the jira and also upload the new patch. Script will ask for jira username/password for authentication. If not set, patch is named .patch. 
+ +The script builds a new patch, and uses REST API to upload it to the jira (if --jira-id is +specified) and update the review on ReviewBoard (if --skip-review-board not specified). +Remote links in the jira are used to figure out if a review request already exists. If no review +request is present, then creates a new one and populates all required fields using jira summary, +patch description, etc. Also adds this review's link to the jira. -. If you do not pass a `patch_dir`, the script defaults to _~/patches/_. - If the `patch_dir` does not exist, it is created. -. By default, if an existing patch exists with the JIRA ID, the version of the new patch is incremented (_HBASE-XXXX-v3.patch_). If the `-a` option is passed, the version is not incremented, but the suffix `-addendum` is added (_HBASE-XXXX-v2-addendum.patch_). A second addendum to a given version is not supported. -. Detects whether you have more than one local commit on your branch. - If you do, the script offers you the chance to run +git rebase - -i+ to squash the changes into a single commit so that it can use +git format-patch+. - If you decline, the script uses +git diff+ instead. - The patch is saved in a configurable directory and is ready to be attached to your JIRA. +Authentication:: +Since attaching patches on JIRA and creating/changing review request on ReviewBoard requires a +logged in user, the script will prompt you for username and password. To avoid the hassle every +time, set up `~/.apache-creds` with login details and encrypt it by following the steps in footer +of script's help message. + +Python dependencies:: +To install required python dependencies, execute +`pip install -r dev-support/python-requirements.txt` from the master branch. .Patching Workflow @@ -1803,21 +1784,12 @@ $ make_patch.sh [-a] [-p ] * Submit one single patch for a fix. If necessary, squash local commits to merge local commits into a single one first. See this link:http://stackoverflow.com/questions/5308816/how-to-use-git-merge-squash[Stack Overflow question] for more information about squashing commits. -* The patch should have the JIRA ID in the name. - If you are generating from a branch, include the target branch in the filename. - A common naming scheme for patches is: -+ ----- -HBASE-XXXX.patch ----- -+ ----- -HBASE-XXXX-0.90.patch # to denote that the patch is against branch 0.90 ----- +* Patch name should be as follows to adhere to Yetus' naming convention. + ---- -HBASE-XXXX-v3.patch # to denote that this is the third version of the patch +(JIRA).(branch name).(patch number).patch ---- +For eg. HBASE-11625.master.001.patch, HBASE-XXXXX.branch-1.2.0005.patch, etc. * To submit a patch, first create it using one of the methods in <>. Next, attach the patch to the JIRA (one patch for the whole fix), using the dialog. @@ -1831,8 +1803,7 @@ Please understand that not every patch may get committed, and that feedback will * If you need to revise your patch, leave the previous patch file(s) attached to the JIRA, and upload the new one, following the naming conventions in <>. Cancel the Patch Available flag and then re-trigger it, by toggling the btn:[Patch Available] button in JIRA. JIRA sorts attached files by the time they were attached, and has no problem with multiple attachments with the same name. - However, at times it is easier to refer to different version of a patch if you add `-vX`, where the [replaceable]_X_ is the version (starting with 2). 
-* If you need to submit your patch against multiple branches, rather than just master, name each version of the patch with the branch it is for, following the naming conventions in <>. + However, at times it is easier to increment patch number in the patch name. [[patching.methods]] .Methods to Create Patches diff --git src/main/asciidoc/_chapters/hbase-default.adoc src/main/asciidoc/_chapters/hbase-default.adoc index df750e0..7a65446 100644 --- src/main/asciidoc/_chapters/hbase-default.adoc +++ src/main/asciidoc/_chapters/hbase-default.adoc @@ -1585,16 +1585,6 @@ Set to true to cause the hosting server (master or regionserver) `true` -[[hbase.online.schema.update.enable]] -*`hbase.online.schema.update.enable`*:: -+ -.Description -Set true to enable online schema changes. -+ -.Default -`true` - - [[hbase.table.lock.enable]] *`hbase.table.lock.enable`*:: + diff --git src/main/asciidoc/_chapters/ops_mgt.adoc src/main/asciidoc/_chapters/ops_mgt.adoc index 583a872..bc75951 100644 --- src/main/asciidoc/_chapters/ops_mgt.adoc +++ src/main/asciidoc/_chapters/ops_mgt.adoc @@ -2050,6 +2050,74 @@ The following example limits the above example to 200 MB/sec. $ bin/hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs://srv2:8082/hbase -mappers 16 -bandwidth 200 ---- +[[snapshots_s3]] +=== Storing Snapshots in an Amazon S3 Bucket + +For general information and limitations of using Amazon S3 storage with HBase, see +<>. You can also store and retrieve snapshots from Amazon +S3, using the following procedure. + +NOTE: You can also store snapshots in Microsoft Azure Blob Storage. See <>. + +.Prerequisites +- You must be using HBase 1.0 or higher and Hadoop 2.6.1 or higher, which is the first +configuration that uses the Amazon AWS SDK. +- You must use the `s3a://` protocol to connect to Amazon S3. The older `s3n://` +and `s3://` protocols have various limitations and do not use the Amazon AWS SDK. +- The `s3a://` URI must be configured and available on the server where you run +the commands to export and restore the snapshot. + +After you have fulfilled the prerequisites, take the snapshot like you normally would. +Afterward, you can export it using the `org.apache.hadoop.hbase.snapshot.ExportSnapshot` +command like the one below, substituting your own `s3a://` path in the `copy-from` +or `copy-to` directive and substituting or modifying other options as required: + +---- +$ hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \ + -snapshot MySnapshot \ + -copy-from hdfs://srv2:8082/hbase \ + -copy-to s3a:////hbase \ + -chuser MyUser \ + -chgroup MyGroup \ + -chmod 700 \ + -mappers 16 +---- + +---- +$ hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \ + -snapshot MySnapshot \ + -copy-from s3a:////hbase \ + -copy-to hdfs://srv2:8082/hbase \ + -chuser MyUser \ + -chgroup MyGroup \ + -chmod 700 \ + -mappers 16 +---- + +You can also use the `org.apache.hadoop.hbase.snapshot.SnapshotInfo` utility with the `s3a://` path by including the +`-remote-dir` option. + +---- +$ hbase org.apache.hadoop.hbase.snapshot.SnapshotInfo \ + -remote-dir s3a:////hbase \ + -list-snapshots +---- + +[[snapshots_azure]] +=== Storing Snapshots in Microsoft Azure Blob Storage + +You can store snapshots in Microsoft Azure Blob Storage using the same techniques +as in <>. + +.Prerequisites +- You must be using HBase 1.2 or higher with Hadoop 2.7.1 or + higher. No version of HBase supports Hadoop 2.7.0. +- Your hosts must be configured to be aware of the Azure blob storage filesystem.
See http://hadoop.apache.org/docs/r2.7.1/hadoop-azure/index.html. + +After you meet the prerequisites, follow the instructions +in <>, replacing the protocol specifier with `wasb://` or `wasbs://`. + [[ops.capacity]] == Capacity Planning and Region Sizing diff --git src/main/asciidoc/_chapters/performance.adoc src/main/asciidoc/_chapters/performance.adoc index a0c00ae..5f27640 100644 --- src/main/asciidoc/_chapters/performance.adoc +++ src/main/asciidoc/_chapters/performance.adoc @@ -499,7 +499,7 @@ For bulk imports, this means that all clients will write to the same region unti A useful pattern to speed up the bulk import process is to pre-create empty regions. Be somewhat conservative in this, because too-many regions can actually degrade performance. -There are two different approaches to pre-creating splits. +There are two different approaches to pre-creating splits using the HBase API. The first approach is to rely on the default `Admin` strategy (which is implemented in `Bytes.split`)... [source,java] @@ -511,7 +511,7 @@ int numberOfRegions = ...; // # of regions to create admin.createTable(table, startKey, endKey, numberOfRegions); ---- -And the other approach is to define the splits yourself... +And the other approach, using the HBase API, is to define the splits yourself... [source,java] ---- @@ -519,8 +519,23 @@ byte[][] splits = ...; // create your own splits admin.createTable(table, splits); ---- +You can achieve a similar effect using the HBase Shell to create tables by specifying split options. + +[source] +---- +# create table with specific split points +hbase>create 't1','f1',SPLITS => ['\x10\x00', '\x20\x00', '\x30\x00', '\x40\x00'] + +# create table with four regions based on random bytes keys +hbase>create 't2','f1', { NUMREGIONS => 4 , SPLITALGO => 'UniformSplit' } + +# create table with five regions based on hex keys +hbase>create 't3','f1', { NUMREGIONS => 5, SPLITALGO => 'HexStringSplit' } +---- + See <> for issues related to understanding your keyspace and pre-creating regions. See <> for discussion on manually pre-splitting regions. +See <> for more details of using the HBase Shell to pre-split tables. [[def.log.flush]] === Table Creation: Deferred Log Flush diff --git src/main/asciidoc/_chapters/shell.adoc src/main/asciidoc/_chapters/shell.adoc index a4237fd..8f1f59b 100644 --- src/main/asciidoc/_chapters/shell.adoc +++ src/main/asciidoc/_chapters/shell.adoc @@ -352,6 +352,68 @@ hbase(main):022:0> Date.new(1218920189000).toString() => "Sat Aug 16 20:56:29 UT To output in a format that is exactly like that of the HBase log format will take a little messing with link:http://download.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html[SimpleDateFormat]. +[[tricks.pre-split]] +=== Pre-splitting tables with the HBase Shell +You can use a variety of options to pre-split tables when creating them via the HBase Shell `create` command. + +The simplest approach is to specify an array of split points when creating the table. Note that when specifying string literals as split points, these will create split points based on the underlying byte representation of the string. So when specifying a split point of '10', we are actually specifying the byte split point '\x31\x30'. + +The split points will define `n+1` regions where `n` is the number of split points. The lowest region will contain all keys from the lowest possible key up to but not including the first split point key.
+The next region will contain keys from the first split point up to, but not including, the next split point key. +This will continue for all split points up to the last. The last region will be defined from the last split point up to the maximum possible key. + +[source] +---- +hbase>create 't1','f',SPLITS => ['10','20','30'] +---- + +In the above example, the table 't1' will be created with column family 'f', pre-split to four regions. Note that the first region will contain all keys from '\x00' up to, but not including, '\x31\x30' (as '\x31' is the ASCII code for '1' and '\x30' is the ASCII code for '0'). + +You can pass the split points in a file using the following variation. In this example, the splits are read from a file at the specified path on the local filesystem. Each line in the file specifies a split point key. + +[source] +---- +hbase>create 't14','f',SPLITS_FILE=>'splits.txt' +---- + +The other options are to automatically compute splits based on a desired number of regions and a splitting algorithm. +HBase supplies algorithms for splitting the key range based on uniform splits or based on hexadecimal keys, but you can provide your own splitting algorithm to subdivide the key range. + +[source] +---- +# create table with four regions based on random bytes keys +hbase>create 't2','f1', { NUMREGIONS => 4 , SPLITALGO => 'UniformSplit' } + +# create table with five regions based on hex keys +hbase>create 't3','f1', { NUMREGIONS => 5, SPLITALGO => 'HexStringSplit' } +---- + +As the HBase Shell is effectively a Ruby environment, you can use simple Ruby scripts to compute splits algorithmically. + +[source] +---- +# generate splits for long (Ruby fixnum) key range from start to end key +hbase(main):070:0> def gen_splits(start_key,end_key,num_regions) +hbase(main):071:1> results=[] +hbase(main):072:1> range=end_key-start_key +hbase(main):073:1> incr=(range/num_regions).floor +hbase(main):074:1> for i in 1 .. num_regions-1 +hbase(main):075:2> results.push([i*incr+start_key].pack("N")) +hbase(main):076:2> end +hbase(main):077:1> return results +hbase(main):078:1> end +hbase(main):079:0> +hbase(main):080:0> splits=gen_splits(1,2000000,10) +=> ["\000\003\r@", "\000\006\032\177", "\000\t'\276", "\000\f4\375", "\000\017B<", "\000\022O{", "\000\025\\\272", "\000\030i\371", "\000\ew8"] +hbase(main):081:0> create 'test_splits','f',SPLITS=>splits +0 row(s) in 0.2670 seconds + +=> Hbase::Table - test_splits +---- + +Note that the HBase Shell command `truncate` effectively drops and recreates the table with default options, which will discard any pre-splitting. +If you need to truncate a pre-split table, you must drop and recreate the table explicitly to re-specify custom split options. + === Debug ==== Shell debug switch diff --git src/main/site/site.xml src/main/site/site.xml index 6d4de53..fb237bb 100644 --- src/main/site/site.xml +++ src/main/site/site.xml @@ -43,13 +43,8 @@ - hbasecon2016 - - images/hbasecon2016-stacked.png - http://hbasecon.com/ Apache HBase
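The shell-based pre-splitting above can also be expressed against the Java client API that the performance.adoc hunk references. The following sketch is illustrative only and is not part of the patch: it assumes the HBase 1.x client API (`ConnectionFactory`, `Admin`, `HTableDescriptor`) and uses `RegionSplitter.HexStringSplit` to mirror the shell's `SPLITALGO => 'HexStringSplit'` example; the table name `t3` and column family `f1` are placeholders.

[source,java]
----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.RegionSplitter;

public class PreSplitExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Compute split keys for 10 regions over the hexadecimal key space,
      // equivalent in spirit to SPLITALGO => 'HexStringSplit' in the shell.
      byte[][] splits = new RegionSplitter.HexStringSplit().split(10);

      // Placeholder table name and column family.
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("t3"));
      desc.addFamily(new HColumnDescriptor("f1"));

      // Create the table pre-split into 10 regions (9 split keys).
      admin.createTable(desc, splits);
    }
  }
}
----

As with the shell examples, choose the algorithm (or supply your own split keys) based on how your row keys are actually distributed.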
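The functional core of this patch is the new optional `cacheBlocks` field on `TScan` in both Thrift IDLs, which `ThriftUtilities.scanFromThrift` now copies onto the server-side `Scan`. The sketch below is a minimal, hypothetical thrift2 client showing the field in use; it is not part of the patch. It assumes a thrift2 ThriftServer on `localhost:9090` speaking the unframed binary protocol, and it uses `getScannerResults` as declared in the thrift2 `hbase.thrift`; adjust host, port, transport, and the placeholder table name `t1` to your deployment.

[source,java]
----
import java.nio.ByteBuffer;
import java.util.List;

import org.apache.hadoop.hbase.thrift2.generated.TColumnValue;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.hadoop.hbase.thrift2.generated.TResult;
import org.apache.hadoop.hbase.thrift2.generated.TScan;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class CacheBlocksScanExample {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint: a thrift2 ThriftServer with the default unframed
    // binary protocol. Use a framed transport or compact protocol instead
    // if your server was started with -framed or -compact.
    TTransport transport = new TSocket("localhost", 9090);
    transport.open();
    THBaseService.Client client = new THBaseService.Client(new TBinaryProtocol(transport));

    TScan scan = new TScan();
    scan.setCaching(100);
    // New optional field added by this patch: ask the server-side Scan not to
    // cache the HFile blocks it reads, so a one-off full scan does not churn
    // the RegionServer block cache.
    scan.setCacheBlocks(false);

    ByteBuffer table = ByteBuffer.wrap("t1".getBytes("UTF-8"));
    List<TResult> results = client.getScannerResults(table, scan, 25);
    for (TResult result : results) {
      for (TColumnValue cv : result.getColumnValues()) {
        System.out.println(new String(cv.getValue(), "UTF-8"));
      }
    }
    transport.close();
  }
}
----

Because the field is optional and guarded by `isSetCacheBlocks()` on both the read and write paths, clients that never set it keep the server's default behavior, so the change stays wire-compatible with older clients.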