From f5f2588d6c18aa2a1cd8c9a3ba266c9b023c607c Mon Sep 17 00:00:00 2001 From: Sean Busbey Date: Tue, 17 Apr 2018 14:40:25 -0500 Subject: [PATCH 1/4] HBASE-20439 Clean up incorrect use of commons-logging in hbase-server --- .../org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java | 7 ++++--- .../org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java | 7 ++++--- .../org/apache/hadoop/hbase/quotas/RegionSizeReportingChore.java | 7 ++++--- .../java/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.java | 7 ++++--- .../hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java | 8 +++++--- .../java/org/apache/hadoop/hbase/TestClusterPortAssignment.java | 7 ++++--- .../java/org/apache/hadoop/hbase/client/TestFlushFromClient.java | 7 ++++--- .../apache/hadoop/hbase/client/TestSeparateClientZKCluster.java | 7 ++++--- .../org/apache/hadoop/hbase/procedure/TestFailedProcCleanup.java | 7 ++++--- .../test/java/org/apache/hadoop/hbase/wal/TestDisabledWAL.java | 7 ++++--- 10 files changed, 41 insertions(+), 30 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java index 8f735bde4d..550aea7e06 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/zksyncer/ClientZKSyncer.java @@ -25,8 +25,6 @@ import java.util.Map; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.util.Threads; @@ -37,6 +35,9 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + /** * Tracks the target znode(s) on server ZK cluster and synchronize them to client ZK cluster if * changed @@ -45,7 +46,7 @@ import org.apache.zookeeper.KeeperException; */ @InterfaceAudience.Private public abstract class ClientZKSyncer extends ZKListener { - private static final Log LOG = LogFactory.getLog(ClientZKSyncer.class); + private static final Logger LOG = LoggerFactory.getLogger(ClientZKSyncer.class); private final Server server; private final ZKWatcher clientZkWatcher; // We use queues and daemon threads to synchronize the data to client ZK cluster diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java index 8cde9c1cff..58434f7bde 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java @@ -35,8 +35,6 @@ import java.util.function.Predicate; import java.util.stream.Collectors; import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -54,6 +52,9 @@ import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.HashMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; @@ -69,7 +70,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot */ @InterfaceAudience.Private public class FileArchiverNotifierImpl 
implements FileArchiverNotifier { - private static final Log LOG = LogFactory.getLog(FileArchiverNotifierImpl.class); + private static final Logger LOG = LoggerFactory.getLogger(FileArchiverNotifierImpl.class); private final Connection conn; private final Configuration conf; private final FileSystem fs; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeReportingChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeReportingChore.java index bf525e55e6..56798fd937 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeReportingChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeReportingChore.java @@ -23,8 +23,6 @@ import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.client.RegionInfo; @@ -33,12 +31,15 @@ import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * A Chore which sends the region size reports on this RegionServer to the Master. 
*/ @InterfaceAudience.Private public class RegionSizeReportingChore extends ScheduledChore { - private static final Log LOG = LogFactory.getLog(RegionSizeReportingChore.class); + private static final Logger LOG = LoggerFactory.getLogger(RegionSizeReportingChore.class); static final String REGION_SIZE_REPORTING_CHORE_PERIOD_KEY = "hbase.regionserver.quotas.region.size.reporting.chore.period"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.java index 4b48869f6c..b7f59a3018 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.java @@ -20,13 +20,14 @@ import java.util.Iterator; import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * A {@link RegionSizeStore} implementation backed by a ConcurrentHashMap. 
We expected similar * amounts of reads and writes to the "store", so using a RWLock is not going to provide any @@ -34,7 +35,7 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private public class RegionSizeStoreImpl implements RegionSizeStore { - private static final Log LOG = LogFactory.getLog(RegionSizeStoreImpl.class); + private static final Logger LOG = LoggerFactory.getLogger(RegionSizeStoreImpl.class); private static final long sizeOfEntry = ClassSize.align( ClassSize.CONCURRENT_HASHMAP_ENTRY + ClassSize.OBJECT + Bytes.SIZEOF_LONG diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java index a237a52dc6..a0dc629ad7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java @@ -21,8 +21,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.RegionTooBusyException; @@ -32,6 +30,10 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * StoreHotnessProtector is designed to help limit the concurrency of puts with dense columns, it * does best-effort to avoid exhausting all RS's handlers. 
When a lot of clients write requests with @@ -60,7 +62,7 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti */ @InterfaceAudience.Private public class StoreHotnessProtector { - private static final Log LOG = LogFactory.getLog(StoreHotnessProtector.class); + private static final Logger LOG = LoggerFactory.getLogger(StoreHotnessProtector.class); private volatile int parallelPutToStoreThreadLimit; private volatile int parallelPreparePutToStoreThreadLimit; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClusterPortAssignment.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClusterPortAssignment.java index 0c8247f362..69541010a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClusterPortAssignment.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClusterPortAssignment.java @@ -22,8 +22,6 @@ import static org.junit.Assert.assertTrue; import java.net.BindException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -31,6 +29,9 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + @Category(MediumTests.class) public class TestClusterPortAssignment { @ClassRule @@ -38,7 +39,7 @@ public class TestClusterPortAssignment { HBaseClassTestRule.forClass(TestClusterPortAssignment.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final Log LOG = LogFactory.getLog(TestClusterPortAssignment.class); + private static final Logger LOG = LoggerFactory.getLogger(TestClusterPortAssignment.class); /** * Check that we can start an HBase cluster specifying a custom set of diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java index 207e1fc781..7afd36b77b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFlushFromClient.java @@ -25,8 +25,6 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -47,6 +45,9 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + @Category({MediumTests.class, ClientTests.class}) public class TestFlushFromClient { @@ -54,7 +55,7 @@ public class TestFlushFromClient { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestFlushFromClient.class); - private static final Log LOG = LogFactory.getLog(TestFlushFromClient.class); + private static final Logger LOG = LoggerFactory.getLogger(TestFlushFromClient.class); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static AsyncConnection asyncConn; private static final byte[][] SPLITS = new byte[][]{Bytes.toBytes("3"), Bytes.toBytes("7")}; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java index 769ac64be8..533af935b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java @@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.client; import java.io.File; import 
org.apache.commons.io.FileUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -44,9 +42,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + @Category(MediumTests.class) public class TestSeparateClientZKCluster { - private static final Log LOG = LogFactory.getLog(TestSeparateClientZKCluster.class); + private static final Logger LOG = LoggerFactory.getLogger(TestSeparateClientZKCluster.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final File clientZkDir = new File("/tmp/TestSeparateClientZKCluster"); private static final int ZK_SESSION_TIMEOUT = 5000; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestFailedProcCleanup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestFailedProcCleanup.java index bf72d331cb..3e219517d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestFailedProcCleanup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestFailedProcCleanup.java @@ -23,8 +23,6 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.List; import java.util.Optional; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -45,6 +43,9 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; /** @@ -57,7 +58,7 @@ public class 
TestFailedProcCleanup { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestFailedProcCleanup.class); - private static final Log LOG = LogFactory.getLog(TestFailedProcCleanup.class); + private static final Logger LOG = LoggerFactory.getLogger(TestFailedProcCleanup.class); protected static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static Configuration conf; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestDisabledWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestDisabledWAL.java index bd59ce9410..bee76c6d8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestDisabledWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestDisabledWAL.java @@ -22,8 +22,6 @@ import static org.junit.Assert.fail; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -45,6 +43,9 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + @Category({ RegionServerTests.class, MediumTests.class }) public class TestDisabledWAL { @@ -55,7 +56,7 @@ public class TestDisabledWAL { @Rule public TestName name = new TestName(); - private static final Log LOG = LogFactory.getLog(TestDisabledWAL.class); + private static final Logger LOG = LoggerFactory.getLogger(TestDisabledWAL.class); static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private Table table; private TableName tableName; -- 2.16.1 From c67fb610d1d26e665f84f0b741b041eb0ebfb484 Mon Sep 17 00:00:00 2001 From: Sean Busbey Date: Tue, 17 Apr 2018 15:21:49 -0500 Subject: [PATCH 2/4] HBASE-20440 Clean up incorrect use of commons-lang 2.y --- 
hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java | 2 +- .../apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java | 4 ++-- .../java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java | 2 +- .../org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java | 2 +- .../hadoop/hbase/util/compaction/TestMajorCompactionRequest.java | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java index 9d7f65c7b3..ab7fa3bcd4 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.net; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java index 3d21518957..5b6d8c14bf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierFactoryImpl.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.quotas; import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.TableName; @@ -111,4 +111,4 @@ public final class FileArchiverNotifierFactoryImpl implements FileArchiverNotifi return "CacheKey[TableName=" + tn + "]"; } } -} 
\ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java index 58434f7bde..aa916963c3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileArchiverNotifierImpl.java @@ -34,7 +34,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; import java.util.function.Predicate; import java.util.stream.Collectors; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java index feea0865a0..6c20b5ba75 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHdfsSnapshotHRegion.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; -import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java index b62648189a..adecd5c6b0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/compaction/TestMajorCompactionRequest.java @@ -22,7 +22,7 @@ import java.util.List; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; -import org.apache.commons.lang.RandomStringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -167,4 +167,4 @@ public class TestMajorCompactionRequest { doReturn(mock(Connection.class)).when(spy).getConnection(eq(configuration)); return spy; } -} \ No newline at end of file +} -- 2.16.1 From 11ab2c1d4d775030147e5c1a184394e8a24f811b Mon Sep 17 00:00:00 2001 From: Sean Busbey Date: Tue, 17 Apr 2018 16:15:11 -0500 Subject: [PATCH 3/4] HBASE-20442 clean up incorrect use of commons-collections 3 --- .../java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java | 3 ++- .../src/main/java/org/apache/hadoop/hbase/client/RowMutations.java | 3 ++- hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java | 2 +- .../apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java | 2 +- .../src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java | 2 +- .../main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java | 2 +- .../main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java | 2 +- .../java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java | 2 +- .../src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java | 4 ++-- 9 files changed, 12 insertions(+), 10 deletions(-) diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java index 093ef763af..5ce11d19c0 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java 
@@ -24,7 +24,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import org.apache.commons.collections.MapUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -42,6 +41,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; + /** * Implementation of a log cleaner that checks if a log is still scheduled for incremental backup * before deleting it when its TTL is over. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java index 4b426cf10e..345e26aa02 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java @@ -23,10 +23,11 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; + /** * Performs multiple mutations atomically on a single row. * Currently {@link Put} and {@link Delete} are supported. 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index a315fd2b39..6eb09c11df 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -38,7 +38,6 @@ import java.util.Comparator; import java.util.Iterator; import java.util.List; -import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; @@ -50,6 +49,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import com.google.protobuf.ByteString; diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java index 6d721281f2..b9ebfb9051 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java @@ -29,7 +29,6 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; -import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; @@ -53,6 +52,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** * ZK based replication queue storage. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index eccb67ec8c..1fb6afe7ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -70,7 +70,6 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.Function; -import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -188,6 +187,7 @@ import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.Service; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 922fa863c8..8828a22610 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -42,7 +42,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.LongAdder; -import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.mutable.MutableObject; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -150,6 +149,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 9a71a207dd..fa9a3de432 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -27,7 +27,6 @@ import java.util.OptionalInt; import java.util.concurrent.CountDownLatch; import java.util.concurrent.locks.ReentrantLock; -import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; @@ -54,6 +53,7 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** * Scanner scans both the memstore and the Store. 
Coalesce KeyValue stream into List<KeyValue> diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java index 4db69737b8..ac5d3ed37b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Set; import java.util.TreeSet; -import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; @@ -39,6 +38,7 @@ import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** * A WAL Entry for {@link AbstractFSWAL} implementation. Immutable. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 6c77c4ce95..dee5fb045f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -46,8 +46,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -95,6 +93,8 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; -- 2.16.1 From 6638a3d9484cf6138f6e0c130a4ff210f843c824 Mon Sep 17 00:00:00 2001 From: Sean Busbey Date: Mon, 9 Apr 2018 13:37:44 -0500 Subject: [PATCH 4/4] HBASE-20332 shaded mapreduce module shouldn't include hadoop * modify the jar checking script to take args; make hadoop stuff optional * separate out checking the artifacts that have hadoop vs those that don't. 
* * Unfortunately means we need two modules for checking things * * put in a safety check that the support script for checking jar contents is maintained in both modules * * have to carve out an exception for o.a.hadoop.metrics2. :( * fix duplicated class warning * clean up dependencies in hbase-server and some modules that depend on it. --- hbase-backup/pom.xml | 8 +- .../java/org/apache/hadoop/hbase/net/Address.java | 2 +- hbase-mapreduce/pom.xml | 18 ++ hbase-rest/pom.xml | 13 ++ hbase-server/pom.xml | 109 ++++++----- .../throttle/StoreHotnessProtector.java | 3 +- hbase-shaded/hbase-shaded-check-invariants/pom.xml | 54 ++++-- .../resources/ensure-jars-have-correct-contents.sh | 92 +++++++-- hbase-shaded/hbase-shaded-mapreduce/pom.xml | 198 ++++++++++++++++++- .../pom.xml | 215 +++++++++++++++++++++ .../resources/ensure-jars-have-correct-contents.sh | 129 +++++++++++++ hbase-shaded/pom.xml | 13 ++ pom.xml | 36 +++- 13 files changed, 790 insertions(+), 100 deletions(-) create mode 100644 hbase-shaded/hbase-shaded-with-hadoop-check-invariants/pom.xml create mode 100644 hbase-shaded/hbase-shaded-with-hadoop-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh diff --git a/hbase-backup/pom.xml b/hbase-backup/pom.xml index 7afd51eaf0..3cd3f48aaa 100644 --- a/hbase-backup/pom.xml +++ b/hbase-backup/pom.xml @@ -264,9 +264,6 @@ 3.0 - - 3.0-SNAPSHOT - org.apache.hadoop @@ -276,6 +273,11 @@ org.apache.hadoop hadoop-mapreduce-client-core + + org.apache.hadoop + hadoop-distcp + ${hadoop.version} + diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java index ab7fa3bcd4..b7931a451c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java @@ -26,7 +26,7 @@ import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; * An immutable type to hold a hostname and 
port combo, like an Endpoint * or java.net.InetSocketAddress (but without danger of our calling * resolve -- we do NOT want a resolve happening every time we want - * to hold a hostname and port combo). This class is also <>. + * to hold a hostname and port combo). This class is also {@code Comparable}. *

In implementation this class is a facade over Guava's {@link HostAndPort}. * We cannot have Guava classes in our API hence this Type. */ diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index af80737644..5a6a634f40 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -196,6 +196,15 @@ org.apache.hbase hbase-server + + + + commons-logging + commons-logging + + org.apache.hbase @@ -246,10 +255,19 @@ junit test + com.fasterxml.jackson.core jackson-databind + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-annotations + diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml index 617f254a2f..1419a35820 100644 --- a/hbase-rest/pom.xml +++ b/hbase-rest/pom.xml @@ -299,6 +299,19 @@ com.fasterxml.jackson.jaxrs jackson-jaxrs-json-provider + + + org.codehaus.jettison + jettison + + + stax + stax-api + + + org.glassfish.web diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index e35fc8336d..aefb4c8891 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -70,6 +70,14 @@ org.apache.maven.plugins maven-remote-resources-plugin 1.5 + + + + org.apache.hbase + hbase-resource-bundle + ${project.version} + + default @@ -391,12 +399,6 @@ org.apache.hbase hbase-metrics - - - org.apache.hbase - hbase-resource-bundle - true - commons-codec commons-codec @@ -436,19 +438,16 @@ jetty-webapp - + org.glassfish.web javax.servlet.jsp + - org.codehaus.jettison - jettison - - - stax - stax-api - - + javax.servlet.jsp + javax.servlet.jsp-api @@ -500,9 +499,20 @@ javax.servlet javax.servlet-api + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.core + jackson-core + - javax.ws.rs - javax.ws.rs-api + com.fasterxml.jackson.core + jackson-annotations @@ -510,11 +520,6 @@ org.apache.htrace htrace-core4 - - org.apache.htrace - htrace-core - ${htrace-hadoop.version} - com.lmax disruptor @@ -555,6 +560,15 @@ httpcore test + + + commons-logging + commons-logging + compile + org.apache.commons 
commons-crypto @@ -673,34 +687,10 @@ - - org.apache.hadoop - hadoop-distcp - ${hadoop-two.version} - org.apache.hadoop hadoop-common - - org.apache.hadoop - hadoop-auth - - - org.apache.hadoop - hadoop-annotations - ${hadoop-two.version} - - - org.apache.hadoop - hadoop-client - - - com.google.guava - guava - - - org.apache.hadoop hadoop-mapreduce-client-core @@ -755,6 +745,12 @@ ${netty.hadoop.version} test + + org.apache.htrace + htrace-core + ${htrace-hadoop.version} + test + @@ -795,21 +791,32 @@ org.apache.hadoop - hadoop-distcp - ${hadoop-three.version} + hadoop-common org.apache.hadoop - hadoop-common + hadoop-hdfs + + + org.apache.hadoop + hadoop-hdfs-client + ${hadoop.version} + + + org.apache.hadoop + hadoop-mapreduce-client-core org.apache.hadoop hadoop-hdfs + test-jar + test org.apache.hadoop - hadoop-annotations - ${hadoop-three.version} + hadoop-mapreduce-client-jobclient + test-jar + test org.apache.hadoop diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java index a0dc629ad7..71fd89b5c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/StoreHotnessProtector.java @@ -29,11 +29,12 @@ import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + /** * StoreHotnessProtector is designed to help limit the concurrency of puts with dense columns, it * does best-effort to avoid exhausting all RS's handlers. 
When a lot of clients write requests with diff --git a/hbase-shaded/hbase-shaded-check-invariants/pom.xml b/hbase-shaded/hbase-shaded-check-invariants/pom.xml index 7322769f0b..7ba4a41782 100644 --- a/hbase-shaded/hbase-shaded-check-invariants/pom.xml +++ b/hbase-shaded/hbase-shaded-check-invariants/pom.xml @@ -26,7 +26,7 @@ Enforces our invariants for our shaded artifacts. e.g. shaded clients have a specific set of transitive dependencies and shaded clients only contain classes that are in particular packages. Does the enforcement through - the maven-enforcer-plugin and and integration test. + the maven-enforcer-plugin and integration test. Apache HBase Shaded Packaging Invariants @@ -34,11 +34,15 @@ - - org.apache.hbase - hbase-shaded-client - ${project.version} - + org.apache.hbase hbase-shaded-mapreduce @@ -113,6 +117,8 @@ com.github.stephenc.findbugs:* org.apache.htrace:* + + org.apache.hadoop:* @@ -158,18 +164,37 @@ - org.codehaus.mojo exec-maven-plugin 1.6.0 + + + make-sure-validation-files-are-in-sync + validate + + exec + + + diff + false + + ../hbase-shaded-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh + ../hbase-shaded-with-hadoop-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh + + + + check-jar-contents integration-test @@ -180,6 +205,9 @@ ${shell-executable} ${project.build.testOutputDirectory} false + ensure-jars-have-correct-contents.sh ${hbase-client-artifacts} diff --git a/hbase-shaded/hbase-shaded-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh b/hbase-shaded/hbase-shaded-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh index 8bda8ce953..eff1d20302 100644 --- a/hbase-shaded/hbase-shaded-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh +++ b/hbase-shaded/hbase-shaded-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh @@ -15,33 +15,67 @@ # See the License for the specific language governing permissions 
and # limitations under the License. -# Usage: $0 [/path/to/some/example.jar:/path/to/another/example/created.jar] -# -# accepts a single command line argument with a colon separated list of -# paths to jars to check. Iterates through each such passed jar and checks -# all the contained paths to make sure they follow the below constructed -# safe list. +set -e +function usage { + echo "Usage: ${0} [options] [/path/to/some/example.jar:/path/to/another/example/created.jar]" + echo "" + echo " accepts a single command line argument with a colon separated list of" + echo " paths to jars to check. Iterates through each such passed jar and checks" + echo " all the contained paths to make sure they follow the below constructed" + echo " safe list." + echo "" + echo " --allow-hadoop Include stuff from the Apache Hadoop project in the list" + echo " of allowed jar contents. default: false" + echo " --debug print more info to stderr" + exit 1 +} +# if no args specified, show usage +if [ $# -lt 1 ]; then + usage +fi + +# Get arguments +declare allow_hadoop +declare debug +while [ $# -gt 0 ] +do + case "$1" in + --allow-hadoop) shift; allow_hadoop="true";; + --debug) shift; debug="true";; + --) shift; break;; + -*) usage ;; + *) break;; # terminate while loop + esac +done + +# should still have jars to check. 
+if [ $# -lt 1 ]; then + usage +fi +if [ -n "${debug}" ]; then + echo "[DEBUG] Checking on jars: $*" >&2 + echo "jar command is: $(which jar)" >&2 + echo "grep command is: $(which grep)" >&2 + grep -V >&2 || true +fi + +IFS=: read -r -d '' -a artifact_list < <(printf '%s\0' "$1") -# we have to allow the directories that lead to the org/apache/hadoop dir -allowed_expr="(^org/$|^org/apache/$" +# we have to allow the directories that lead to the hbase dirs +allowed_expr="(^org/$|^org/apache/$|^org/apache/hadoop/$" # We allow the following things to exist in our client artifacts: -# * classes in packages that start with org.apache.hadoop, which by -# convention should be in a path that looks like org/apache/hadoop -allowed_expr+="|^org/apache/hadoop/" +# * classes in packages that start with org.apache.hadoop.hbase, which by +# convention should be in a path that looks like org/apache/hadoop/hbase +allowed_expr+="|^org/apache/hadoop/hbase" # * classes in packages that start with org.apache.hbase allowed_expr+="|^org/apache/hbase/" # * whatever in the "META-INF" directory allowed_expr+="|^META-INF/" # * the folding tables from jcodings allowed_expr+="|^tables/" -# * Hadoop's and HBase's default configuration files, which have the form +# * HBase's default configuration files, which have the form # "_module_-default.xml" -allowed_expr+="|^[^-]*-default.xml$" -# * Hadoop's versioning properties files, which have the form -# "_module_-version-info.properties" -allowed_expr+="|^[^-]*-version-info.properties$" -# * Hadoop's application classloader properties file. 
-allowed_expr+="|^org.apache.hadoop.application-classloader.properties$" +allowed_expr+="|^hbase-default.xml$" # public suffix list used by httpcomponents allowed_expr+="|^mozilla/$" allowed_expr+="|^mozilla/public-suffix-list.txt$" @@ -51,12 +85,30 @@ allowed_expr+="|^properties.dtd$" allowed_expr+="|^PropertyList-1.0.dtd$" +if [ -n "${allow_hadoop}" ]; then + # * classes in packages that start with org.apache.hadoop, which by + # convention should be in a path that looks like org/apache/hadoop + allowed_expr+="|^org/apache/hadoop/" + # * Hadoop's default configuration files, which have the form + # "_module_-default.xml" + allowed_expr+="|^[^-]*-default.xml$" + # * Hadoop's versioning properties files, which have the form + # "_module_-version-info.properties" + allowed_expr+="|^[^-]*-version-info.properties$" + # * Hadoop's application classloader properties file. + allowed_expr+="|^org.apache.hadoop.application-classloader.properties$" +else + # We have some classes for integrating with the Hadoop Metrics2 system + # that have to be in a particular package space due to access rules. 
+ allowed_expr+="|^org/apache/hadoop/metrics2" +fi + + allowed_expr+=")" declare -i bad_artifacts=0 declare -a bad_contents -IFS=: read -r -d '' -a artifact_list < <(printf '%s\0' "$1") for artifact in "${artifact_list[@]}"; do - bad_contents=($(jar tf "${artifact}" | grep -v -E "${allowed_expr}")) + bad_contents=($(jar tf "${artifact}" | grep -v -E "${allowed_expr}" || true)) if [ ${#bad_contents[@]} -gt 0 ]; then echo "[ERROR] Found artifact with unexpected contents: '${artifact}'" echo " Please check the following and either correct the build or update" diff --git a/hbase-shaded/hbase-shaded-mapreduce/pom.xml b/hbase-shaded/hbase-shaded-mapreduce/pom.xml index cfcc357877..6c76167e57 100644 --- a/hbase-shaded/hbase-shaded-mapreduce/pom.xml +++ b/hbase-shaded/hbase-shaded-mapreduce/pom.xml @@ -62,6 +62,10 @@ + org.apache.hbase hbase-mapreduce @@ -137,10 +141,6 @@ org.eclipse.jetty jetty-webapp - - org.glassfish.web - javax.servlet.jsp - org.glassfish.jersey.core jersey-server @@ -149,6 +149,17 @@ org.glassfish.jersey.containers jersey-container-servlet-core + + + org.glassfish.web + javax.servlet.jsp + + + javax.servlet.jsp + javax.servlet.jsp-api + @@ -158,12 +169,183 @@ release - - org.apache.maven.plugins - maven-shade-plugin - + + + org.apache.maven.plugins + maven-shade-plugin + + + aggregate-into-a-jar-with-relocated-third-parties + + + + org.apache.hadoop:* + + org.apache.hbase:hbase-resource-bundle + org.slf4j:* + com.google.code.findbugs:* + com.github.stephenc.findbugs:* + org.apache.htrace:* + org.apache.yetus:* + log4j:* + commons-logging:* + + + + + + + + + + hadoop-2.0 + + + + !hadoop.profile + + + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.htrace + htrace-core + + + net.java.dev.jets3t + jets3t + + + javax.servlet.jsp + jsp-api + + + org.mortbay.jetty + jetty + + + com.sun.jersey + jersey-server + + + com.sun.jersey + jersey-core + + + com.sun.jersey + jersey-json + + + javax.servlet + servlet-api + + + tomcat + 
jasper-compiler + + + tomcat + jasper-runtime + + + com.google.code.findbugs + jsr305 + + + + + org.apache.hadoop + hadoop-hdfs + provided + + + org.apache.htrace + htrace-core + + + javax.servlet.jsp + jsp-api + + + javax.servlet + servlet-api + + + io.netty + netty + + + stax + stax-api + + + xerces + xercesImpl + + + ${hadoop-two.version} + + + org.apache.hadoop + hadoop-mapreduce-client-core + provided + + + com.google.guava + guava + + + + + org.apache.hadoop + hadoop-auth + provided + + + + + + + hadoop-3.0 + + + hadoop.profile + 3.0 + + + + ${hadoop-three.version} + + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-hdfs + provided + + + org.apache.hadoop + hadoop-auth + provided + + + diff --git a/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/pom.xml b/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/pom.xml new file mode 100644 index 0000000000..07789f4712 --- /dev/null +++ b/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/pom.xml @@ -0,0 +1,215 @@ + + + + 4.0.0 + + hbase + org.apache.hbase + 3.0.0-SNAPSHOT + ../.. + + hbase-shaded-with-hadoop-check-invariants + pom + + + Enforces our invariants for our shaded artifacts. e.g. shaded clients have + a specific set of transitive dependencies and shaded clients only contain + classes that are in particular packages. Does the enforcement through + the maven-enforcer-plugin and integration test. 
+ + Apache HBase Shaded Packaging Invariants (with Hadoop bundled) + + + + + + + + org.apache.hbase + hbase-shaded-client + ${project.version} + + + + com.github.stephenc.findbugs + findbugs-annotations + provided + + + log4j + log4j + provided + + + + junit + junit + provided + + + org.mockito + mockito-core + provided + + + + + + org.apache.maven.plugins + maven-site-plugin + + true + + + + org.apache.maven.plugins + maven-enforcer-plugin + + + org.codehaus.mojo + extra-enforcer-rules + 1.0-beta-6 + + + + + enforce-banned-dependencies + + enforce + + + true + + + + + + org.slf4j:* + log4j:* + commons-logging:* + + com.google.code.findbugs:* + com.github.stephenc.findbugs:* + + org.apache.htrace:* + + + + + true + + + + + + + + org.apache.maven.plugins + maven-resources-plugin + + + test-resources + pre-integration-test + + testResources + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + put-client-artifacts-in-a-property + pre-integration-test + + build-classpath + + + provided + true + hbase-client-artifacts + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.6.0 + + + + make-sure-validation-files-are-in-sync + validate + + exec + + + diff + false + + ../hbase-shaded-with-hadoop-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh + ../hbase-shaded-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh + + + + + + check-jar-contents-for-stuff-with-hadoop + integration-test + + exec + + + ${shell-executable} + ${project.build.testOutputDirectory} + false + + ensure-jars-have-correct-contents.sh + --allow-hadoop + ${hbase-client-artifacts} + + + + + + + + + diff --git a/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh b/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh new file mode 100644 index 0000000000..eff1d20302 --- /dev/null +++ 
b/hbase-shaded/hbase-shaded-with-hadoop-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh @@ -0,0 +1,129 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +function usage { + echo "Usage: ${0} [options] [/path/to/some/example.jar:/path/to/another/example/created.jar]" + echo "" + echo " accepts a single command line argument with a colon separated list of" + echo " paths to jars to check. Iterates through each such passed jar and checks" + echo " all the contained paths to make sure they follow the below constructed" + echo " safe list." + echo "" + echo " --allow-hadoop Include stuff from the Apache Hadoop project in the list" + echo " of allowed jar contents. default: false" + echo " --debug print more info to stderr" + exit 1 +} +# if no args specified, show usage +if [ $# -lt 1 ]; then + usage +fi + +# Get arguments +declare allow_hadoop +declare debug +while [ $# -gt 0 ] +do + case "$1" in + --allow-hadoop) shift; allow_hadoop="true";; + --debug) shift; debug="true";; + --) shift; break;; + -*) usage ;; + *) break;; # terminate while loop + esac +done + +# should still have jars to check. 
+if [ $# -lt 1 ]; then + usage +fi +if [ -n "${debug}" ]; then + echo "[DEBUG] Checking on jars: $*" >&2 + echo "jar command is: $(which jar)" >&2 + echo "grep command is: $(which grep)" >&2 + grep -V >&2 || true +fi + +IFS=: read -r -d '' -a artifact_list < <(printf '%s\0' "$1") + +# we have to allow the directories that lead to the hbase dirs +allowed_expr="(^org/$|^org/apache/$|^org/apache/hadoop/$" +# We allow the following things to exist in our client artifacts: +# * classes in packages that start with org.apache.hadoop.hbase, which by +# convention should be in a path that looks like org/apache/hadoop/hbase +allowed_expr+="|^org/apache/hadoop/hbase" +# * classes in packages that start with org.apache.hbase +allowed_expr+="|^org/apache/hbase/" +# * whatever in the "META-INF" directory +allowed_expr+="|^META-INF/" +# * the folding tables from jcodings +allowed_expr+="|^tables/" +# * HBase's default configuration files, which have the form +# "_module_-default.xml" +allowed_expr+="|^hbase-default.xml$" +# public suffix list used by httpcomponents +allowed_expr+="|^mozilla/$" +allowed_expr+="|^mozilla/public-suffix-list.txt$" +# Comes from commons-configuration, not sure if relocatable. +allowed_expr+="|^digesterRules.xml$" +allowed_expr+="|^properties.dtd$" +allowed_expr+="|^PropertyList-1.0.dtd$" + + +if [ -n "${allow_hadoop}" ]; then + # * classes in packages that start with org.apache.hadoop, which by + # convention should be in a path that looks like org/apache/hadoop + allowed_expr+="|^org/apache/hadoop/" + # * Hadoop's default configuration files, which have the form + # "_module_-default.xml" + allowed_expr+="|^[^-]*-default.xml$" + # * Hadoop's versioning properties files, which have the form + # "_module_-version-info.properties" + allowed_expr+="|^[^-]*-version-info.properties$" + # * Hadoop's application classloader properties file. 
+ allowed_expr+="|^org.apache.hadoop.application-classloader.properties$" +else + # We have some classes for integrating with the Hadoop Metrics2 system + # that have to be in a particular package space due to access rules. + allowed_expr+="|^org/apache/hadoop/metrics2" +fi + + +allowed_expr+=")" +declare -i bad_artifacts=0 +declare -a bad_contents +for artifact in "${artifact_list[@]}"; do + bad_contents=($(jar tf "${artifact}" | grep -v -E "${allowed_expr}" || true)) + if [ ${#bad_contents[@]} -gt 0 ]; then + echo "[ERROR] Found artifact with unexpected contents: '${artifact}'" + echo " Please check the following and either correct the build or update" + echo " the allowed list with reasoning." + echo "" + for bad_line in "${bad_contents[@]}"; do + echo " ${bad_line}" + done + bad_artifacts=${bad_artifacts}+1 + else + echo "[INFO] Artifact looks correct: '$(basename "${artifact}")'" + fi +done + +# if there was atleast one bad artifact, exit with failure +if [ "${bad_artifacts}" -gt 0 ]; then + exit 1 +fi diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml index 24c515844e..93b122fe08 100644 --- a/hbase-shaded/pom.xml +++ b/hbase-shaded/pom.xml @@ -42,6 +42,7 @@ hbase-shaded-client hbase-shaded-mapreduce hbase-shaded-check-invariants + hbase-shaded-with-hadoop-check-invariants @@ -118,6 +119,7 @@ maven-shade-plugin + aggregate-into-a-jar-with-relocated-third-parties package shade @@ -449,12 +451,23 @@ META-INF/ECLIPSEF.RSA + + + commons-beanutils:commons-beanutils-core + + org/apache/commons/collections/*.class + + org.apache.hbase:hbase-server hbase-webapps/* hbase-webapps/**/* + **/*_jsp.class diff --git a/pom.xml b/pom.xml index f8f1150f29..eb091c0139 100755 --- a/pom.xml +++ b/pom.xml @@ -1437,8 +1437,12 @@ 11.0.2 hbase-hadoop2-compat src/main/assembly/hadoop-two-compat.xml - 0.5.0 + 3.1.0-incubating + + 3.6.2.Final + 0.5.0 1.7.7 1.10 @@ -1466,7 +1470,6 @@ 4.12 1.3 4.2.0-incubating - 3.2.0-incubating 1.2.17 2.1.0 @@ -1594,7 +1597,8 @@ 
org.mortbay.jetty:servlet-api, javax.servlet:servlet-api: These are excluded because they are the same implementations. I chose org.mortbay.jetty:servlet-api-2.5 instead, which is a third implementation of the same, because Hadoop also uses this version - javax.servlet:jsp-api in favour of org.mortbay.jetty:jsp-api-2.1 + javax.servlet:jsp-api in favour of javax.servlet.jsp:javax.servlet.jsp-api:2.3.1 since it + is what glassfish's jspC jar uses and that's where we get our own need for a jsp-api. --> @@ -1909,6 +1913,14 @@ commons-math3 ${commons-math.version} + + + commons-logging + commons-logging + 1.2 + org.apache.zookeeper zookeeper @@ -1972,6 +1984,16 @@ jackson-jaxrs-json-provider ${jackson.version} + + com.fasterxml.jackson.core + jackson-annotations + ${jackson.version} + + + com.fasterxml.jackson.core + jackson-core + ${jackson.version} + com.fasterxml.jackson.core jackson-databind @@ -2075,6 +2097,12 @@ javax.servlet.jsp ${glassfish.jsp.version} + + + javax.servlet.jsp + javax.servlet.jsp-api + 2.3.1 + org.glassfish javax.el @@ -2472,6 +2500,7 @@ 3.6.2.Final + 3.1.0-incubating @@ -2749,6 +2778,7 @@ 3.10.5.Final + 4.1.0-incubating -- 2.16.1