From 4dd0b9e4077b970e8381c8753b24e4aa9964a33b Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Fri, 2 Feb 2018 00:23:37 -0800 Subject: [PATCH] HBASE-19919 Tidying up logging --- .../hadoop/hbase/zookeeper/ReadOnlyZKClient.java | 4 +- .../java/org/apache/hadoop/hbase/net/Address.java | 19 +++++++ .../org/apache/hadoop/hbase/net/TestAddress.java | 50 +++++++++++++++++++ .../ZKSplitLogManagerCoordination.java | 58 +++++++++++----------- .../hadoop/hbase/coprocessor/CoprocessorHost.java | 3 +- .../org/apache/hadoop/hbase/ipc/RpcExecutor.java | 4 +- .../hadoop/hbase/master/ActiveMasterManager.java | 4 +- .../hadoop/hbase/master/MasterCoprocessorHost.java | 2 +- .../hbase/master/assignment/AssignmentManager.java | 2 +- .../hadoop/hbase/regionserver/ChunkCreator.java | 14 +++--- .../hadoop/hbase/regionserver/RSRpcServices.java | 7 ++- .../hbase/security/access/AccessController.java | 2 +- .../hbase/security/access/ZKPermissionWatcher.java | 2 +- .../hadoop/hbase/util/FSTableDescriptors.java | 12 ++--- .../hbase/zookeeper/RecoverableZooKeeper.java | 2 +- 15 files changed, 129 insertions(+), 56 deletions(-) create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/net/TestAddress.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java index 275fafbeba..c8f7792f33 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java @@ -131,8 +131,8 @@ public final class ReadOnlyZKClient implements Closeable { conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS); LOG.info( - "Start read only zookeeper connection {} to {}, " + "session timeout {} ms, retries {}, " + - "retry interval {} ms, keep alive {} ms", + "Start connection {} to {}, session timeout={} ms, retries {}, " + + "retry interval {}ms, keepAlive={}ms", getId(), connectString, sessionTimeoutMs, maxRetries, retryIntervalMs, keepAliveTimeMs); Threads.setDaemonThreadRunning(new Thread(this::run), "ReadOnlyZKClient-" + connectString + "@" + getId()); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java index 1b12a66e2e..9d7f65c7b3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.net; +import org.apache.commons.lang.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; @@ -58,6 +59,24 @@ public class Address implements Comparable
{ return this.hostAndPort.toString(); } + /** + * If hostname is a.b.c and the port is 123, return a:123 instead of a.b.c:123. + * @return if host looks like it is resolved -- not an IP -- then strip the domain portion + * otherwise returns same as {@link #toString()}} + */ + public String toStringWithoutDomain() { + String hostname = getHostname(); + String [] parts = hostname.split("\\."); + if (parts.length > 1) { + for (String part: parts) { + if (!StringUtils.isNumeric(part)) { + return Address.fromParts(parts[0], getPort()).toString(); + } + } + } + return toString(); + } + @Override // Don't use HostAndPort equals... It is wonky including // ipv6 brackets diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/net/TestAddress.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/net/TestAddress.java new file mode 100644 index 0000000000..22c7940351 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/net/TestAddress.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hbase.net;
+
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+
+@Category({ MiscTests.class, SmallTests.class })
+public class TestAddress {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestAddress.class);
+
+  @Test
+  public void testGetHostWithoutDomain() {
+    assertEquals("a:123",
+        Address.fromParts("a.b.c", 123).toStringWithoutDomain());
+    assertEquals("1:123",
+        Address.fromParts("1.b.c", 123).toStringWithoutDomain());
+    assertEquals("123.456.789.1:123",
+        Address.fromParts("123.456.789.1", 123).toStringWithoutDomain());
+    assertEquals("[2001:db8::1]:80",
+        Address.fromParts("[2001:db8::1]", 80).toStringWithoutDomain());
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index 4e0c973d8c..d2777a0d76 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -151,7 +151,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements
     }
     Task task = findOrCreateOrphanTask(path);
     if (task.isOrphan() && (task.incarnation.get() == 0)) {
-      LOG.info("resubmitting unassigned orphan task " + path);
+      LOG.info("Resubmitting unassigned orphan task " + path);
       // ignore failure to resubmit.
The timeout-monitor will handle it later // albeit in a more crude fashion resubmitTask(path, task, FORCE); @@ -202,7 +202,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements SplitLogCounters.tot_mgr_resubmit_force.increment(); version = -1; } - LOG.info("resubmitting task " + path); + LOG.info("Resubmitting task " + path); task.incarnation.incrementAndGet(); boolean result = resubmit(path, version); if (!result) { @@ -280,7 +280,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements SplitLogCounters.tot_mgr_rescan_deleted.increment(); } SplitLogCounters.tot_mgr_missing_state_in_delete.increment(); - LOG.debug("deleted task without in memory state " + path); + LOG.debug("Deleted task without in memory state " + path); return; } synchronized (task) { @@ -328,13 +328,13 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements } private void createNodeSuccess(String path) { - LOG.debug("put up splitlog task at znode " + path); + LOG.debug("Put up splitlog task at znode " + path); getDataSetWatch(path, zkretries); } private void createNodeFailure(String path) { // TODO the Manager should split the log locally instead of giving up - LOG.warn("failed to create task node" + path); + LOG.warn("Failed to create task node " + path); setDone(path, FAILURE); } @@ -360,15 +360,15 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements data = ZKMetadata.removeMetaData(data); SplitLogTask slt = SplitLogTask.parseFrom(data); if (slt.isUnassigned()) { - LOG.debug("task not yet acquired " + path + " ver = " + version); + LOG.debug("Task not yet acquired " + path + ", ver=" + version); handleUnassignedTask(path); } else if (slt.isOwned()) { heartbeat(path, version, slt.getServerName()); } else if (slt.isResigned()) { - LOG.info("task " + path + " entered state: " + slt.toString()); + LOG.info("Task " + path + " entered state=" + slt.toString()); resubmitOrFail(path, FORCE); } else if (slt.isDone()) { - LOG.info("task " + path + " entered state: " + slt.toString()); + LOG.info("Task " + path + " entered state=" + slt.toString()); if (taskFinisher != null && !ZKSplitLog.isRescanNode(watcher, path)) { if (taskFinisher.finish(slt.getServerName(), ZKSplitLog.getFileName(path)) == Status.DONE) { setDone(path, SUCCESS); @@ -379,7 +379,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements setDone(path, SUCCESS); } } else if (slt.isErr()) { - LOG.info("task " + path + " entered state: " + slt.toString()); + LOG.info("Task " + path + " entered state=" + slt.toString()); resubmitOrFail(path, CHECK); } else { LOG.error(HBaseMarkers.FATAL, "logic error - unexpected zk state for path = " @@ -395,7 +395,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements } private void getDataSetWatchFailure(String path) { - LOG.warn("failed to set data watch " + path); + LOG.warn("Failed to set data watch " + path); setDone(path, FAILURE); } @@ -404,7 +404,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements if (task == null) { if (!ZKSplitLog.isRescanNode(watcher, path)) { SplitLogCounters.tot_mgr_unacquired_orphan_done.increment(); - LOG.debug("unacquired orphan task is done " + path); + LOG.debug("Unacquired orphan task is done " + path); } } else { synchronized (task) { @@ -441,7 +441,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements private Task findOrCreateOrphanTask(String path) { return computeIfAbsent(details.getTasks(), path, Task::new, () 
-> { - LOG.info("creating orphan task " + path); + LOG.info("Creating orphan task " + path); SplitLogCounters.tot_mgr_orphan_task_acquired.increment(); }); } @@ -450,7 +450,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements Task task = findOrCreateOrphanTask(path); if (new_version != task.last_version) { if (task.isUnassigned()) { - LOG.info("task " + path + " acquired by " + workerName); + LOG.info("Task " + path + " acquired by " + workerName); } task.heartbeat(EnvironmentEdgeManager.currentTime(), new_version, workerName); SplitLogCounters.tot_mgr_heartbeat.increment(); @@ -468,11 +468,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements try { orphans = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.znodePaths.splitLogZNode); if (orphans == null) { - LOG.warn("could not get children of " + this.watcher.znodePaths.splitLogZNode); + LOG.warn("Could not get children of " + this.watcher.znodePaths.splitLogZNode); return; } } catch (KeeperException e) { - LOG.warn("could not get children of " + this.watcher.znodePaths.splitLogZNode + " " + LOG.warn("Could not get children of " + this.watcher.znodePaths.splitLogZNode + " " + StringUtils.stringifyException(e)); return; } @@ -483,9 +483,9 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements String nodepath = ZNodePaths.joinZNode(watcher.znodePaths.splitLogZNode, path); if (ZKSplitLog.isRescanNode(watcher, nodepath)) { rescan_nodes++; - LOG.debug("found orphan rescan node " + path); + LOG.debug("Found orphan rescan node " + path); } else { - LOG.info("found orphan task " + path); + LOG.info("Found orphan task " + path); } getDataSetWatch(nodepath, zkretries); } @@ -511,11 +511,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements SplitLogTask slt = new SplitLogTask.Unassigned(this.details.getServerName()); if (ZKUtil.setData(this.watcher, path, slt.toByteArray(), version) == false) { - LOG.debug("failed to resubmit task " + path + " version changed"); + LOG.debug("Failed to resubmit task " + path + " version changed"); return false; } } catch (NoNodeException e) { - LOG.warn("failed to resubmit because znode doesn't exist " + path + LOG.warn("Failed to resubmit because znode doesn't exist " + path + " task done (or forced done by removing the znode)"); try { getDataSetWatchSuccess(path, null, Integer.MIN_VALUE); @@ -525,11 +525,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements } return false; } catch (KeeperException.BadVersionException e) { - LOG.debug("failed to resubmit task " + path + " version changed"); + LOG.debug("Failed to resubmit task " + path + " version changed"); return false; } catch (KeeperException e) { SplitLogCounters.tot_mgr_resubmit_failed.increment(); - LOG.warn("failed to resubmit " + path, e); + LOG.warn("Failed to resubmit " + path, e); return false; } return true; @@ -590,11 +590,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements // this manager are get-znode-data, task-finisher and delete-znode. // And all code pieces correctly handle the case of suddenly // disappearing task-znode. 
- LOG.debug("found pre-existing znode " + path); + LOG.debug("Found pre-existing znode " + path); SplitLogCounters.tot_mgr_node_already_exists.increment(); } else { Long retry_count = (Long) ctx; - LOG.warn("create rc =" + KeeperException.Code.get(rc) + " for " + path + LOG.warn("Create rc=" + KeeperException.Code.get(rc) + " for " + path + " remaining retries=" + retry_count); if (retry_count == 0) { SplitLogCounters.tot_mgr_node_create_err.increment(); @@ -625,7 +625,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements } if (rc == KeeperException.Code.NONODE.intValue()) { SplitLogCounters.tot_mgr_get_data_nonode.increment(); - LOG.warn("task znode " + path + " vanished or not created yet."); + LOG.warn("Task znode " + path + " vanished or not created yet."); // ignore since we should not end up in a case where there is in-memory task, // but no znode. The only case is between the time task is created in-memory // and the znode is created. See HBASE-11217. @@ -634,11 +634,11 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements Long retry_count = (Long) ctx; if (retry_count < 0) { - LOG.warn("getdata rc = " + KeeperException.Code.get(rc) + " " + path + LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path + ". Ignoring error. No error handling. No retrying."); return; } - LOG.warn("getdata rc = " + KeeperException.Code.get(rc) + " " + path + LOG.warn("Getdata rc=" + KeeperException.Code.get(rc) + " " + path + " remaining retries=" + retry_count); if (retry_count == 0) { SplitLogCounters.tot_mgr_get_data_err.increment(); @@ -675,10 +675,10 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements if (rc != KeeperException.Code.NONODE.intValue()) { SplitLogCounters.tot_mgr_node_delete_err.increment(); Long retry_count = (Long) ctx; - LOG.warn("delete rc=" + KeeperException.Code.get(rc) + " for " + path + LOG.warn("Delete rc=" + KeeperException.Code.get(rc) + " for " + path + " remaining retries=" + retry_count); if (retry_count == 0) { - LOG.warn("delete failed " + path); + LOG.warn("Delete failed " + path); details.getFailedDeletions().add(path); deleteNodeFailure(path); } else { @@ -691,7 +691,7 @@ public class ZKSplitLogManagerCoordination extends ZKListener implements + " in earlier retry rounds. zkretries = " + ctx); } } else { - LOG.debug("deleted " + path); + LOG.debug("Deleted " + path); } deleteNodeSuccess(path); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 05ac9f67f2..4c5605650d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -157,8 +157,7 @@ public abstract class CoprocessorHost