diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index 449c4b7985..4f4b53c1f2 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.mapreduce.InputSplit; @@ -128,7 +128,7 @@ public class TestWALRecordReader { */ @Test public void testPartialRead() throws Exception { - final WALFactory walfactory = new WALFactory(conf, getName()); + final WALProviderFactory walfactory = new WALProviderFactory(conf, getName()); WAL log = walfactory.getWAL(info); // This test depends on timestamp being millisecond based and the filename of the WAL also // being millisecond based. @@ -187,7 +187,7 @@ public class TestWALRecordReader { */ @Test public void testWALRecordReader() throws Exception { - final WALFactory walfactory = new WALFactory(conf, getName()); + final WALProviderFactory walfactory = new WALProviderFactory(conf, getName()); WAL log = walfactory.getWAL(info); byte [] value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); @@ -249,7 +249,7 @@ public class TestWALRecordReader { */ @Test public void testWALRecordReaderActiveArchiveTolerance() throws Exception { - final WALFactory walfactory = new WALFactory(conf, getName()); + final WALProviderFactory walfactory = new WALProviderFactory(conf, getName()); WAL log = walfactory.getWAL(info); byte [] value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 4b6da53edd..45c57d37cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -170,7 +170,7 @@ import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALSplitter; @@ -4698,7 +4698,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi status.setStatus("Opening recovered edits"); WAL.Reader reader = null; try { - reader = WALFactory.createReader(fs, edits, conf); + reader = WALProviderFactory.getInstance(conf).createReader(fs, edits, null, true); long currentEditSeqId = -1; long currentReplaySeqId = -1; long firstSeqIdInLog = -1; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 7c7b4cc02e..126854bc0e 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -160,7 +160,7 @@ import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.NettyAsyncFSWALConfigHelper; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; @@ -378,7 +378,7 @@ public class HRegionServer extends HasThread implements */ ScheduledChore periodicFlusher; - protected volatile WALFactory walFactory; + protected volatile WALProviderFactory walFactory; // WAL roller. log is protected rather than private to avoid // eclipse warning when accessed by inner classes @@ -1833,8 +1833,8 @@ public class HRegionServer extends HasThread implements private void setupWALAndReplication() throws IOException { boolean isMasterNoTableOrSystemTableOnly = this instanceof HMaster && !LoadBalancer.isMasterCanHostUserRegions(conf); - WALFactory factory = - new WALFactory(conf, serverName.toString(), !isMasterNoTableOrSystemTableOnly); + WALProviderFactory factory = + new WALProviderFactory(conf, serverName.toString(), !isMasterNoTableOrSystemTableOnly); if (!isMasterNoTableOrSystemTableOnly) { // TODO Replication make assumptions here based on the default filesystem impl Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); @@ -2136,12 +2136,12 @@ public class HRegionServer extends HasThread implements @Override public List getWALs() throws IOException { - return walFactory.getWALs(); + return walFactory.getWALProvider().getWALs(); } @Override public WAL getWAL(RegionInfo regionInfo) throws IOException { - WAL wal = walFactory.getWAL(regionInfo); + WAL wal = walFactory.getWALProvider().getWAL(regionInfo); if (this.walRoller != null) { this.walRoller.addWAL(wal); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java index 4a9712cfba..b484443d91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -86,7 +86,7 @@ public class SplitLogWorker implements Runnable { } public SplitLogWorker(Configuration conf, RegionServerServices server, - LastSequenceId sequenceIdChecker, WALFactory factory) { + LastSequenceId sequenceIdChecker, WALProviderFactory factory) { this(server, conf, server, (f, p) -> splitLog(f, p, conf, server, sequenceIdChecker, factory)); } @@ -153,7 +153,7 @@ public class SplitLogWorker implements Runnable { } private static Status splitLog(String name, CancelableProgressable p, Configuration conf, - RegionServerServices server, LastSequenceId 
sequenceIdChecker, WALFactory factory) { + RegionServerServices server, LastSequenceId sequenceIdChecker, WALProviderFactory factory) { Path walDir; FileSystem fs; try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index 5beda79839..3913fa4074 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.FSWALIdentity; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALPrettyPrinter; import org.apache.hadoop.hbase.wal.WALProvider.WriterBase; @@ -97,7 +97,7 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti * (smaller) than the most-recent flush. *

<p>
 * To read an WAL, call
- * {@link WALFactory#createReader(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)}. *
+ * {@link WALProviderFactory#createReader(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)}. *
 * <h2>Failure Semantic</h2>
If an exception on append or sync, roll the WAL because the current WAL * is now a lame duck; any more appends or syncs will fail also with the same original exception. If * we have made successful appends to the WAL and we then are unable to sync them, our current @@ -1084,7 +1084,7 @@ public abstract class AbstractFSWAL implements WAL { AbstractFSWALProvider.DEFAULT_SEPARATE_OLDLOGDIR)) { archiveDir = new Path(archiveDir, p.getName()); } - WALSplitter.split(baseDir, p, archiveDir, fs, conf, WALFactory.getInstance(conf)); + WALSplitter.split(baseDir, p, archiveDir, fs, conf, WALProviderFactory.getInstance(conf)); } private static void usage() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java index 13f5d6ef35..b2e0db0943 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java @@ -34,7 +34,7 @@ import org.apache.hadoop.io.WritableUtils; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALProvider; /** @@ -72,7 +72,7 @@ public class Compressor { FileSystem inFS = input.getFileSystem(conf); FileSystem outFS = output.getFileSystem(conf); - WAL.Reader in = WALFactory.createReaderIgnoreCustomClass(inFS, input, conf); + WAL.Reader in = WALProviderFactory.getInstance(conf).createReader(inFS, input, null, false); WALProvider.Writer out = null; try { @@ -82,7 +82,7 @@ public class Compressor { } boolean compress = ((ReaderBase)in).hasCompression(); conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress); - out = WALFactory.createWALWriter(outFS, output, conf); + out = WALProviderFactory.getInstance(conf).createWALWriter(outFS, output, false); WAL.Entry e = null; while ((e = in.next()) != null) out.append(e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/FSWALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/FSWALEntryStream.java index 4fcdc2b2f0..5fbfbcc4ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/FSWALEntryStream.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/FSWALEntryStream.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.wal.FSWALIdentity; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALIdentity; import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.yetus.audience.InterfaceAudience; @@ -458,7 +458,7 @@ public class FSWALEntryStream implements WALEntryStream { private Reader createReader(WALIdentity walId, Configuration conf) throws IOException { Path path = ((FSWALIdentity) walId).getPath(); - return WALFactory.createReader(fs, path, conf); + return walProvider.createReader(fs, path, null, true); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java index 24963f1ad0..c6d0268261 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplaySyncReplicationWALCallable.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -141,7 +141,7 @@ public class ReplaySyncReplicationWALCallable implements RSProcedureCallable { long length = rs.getWALFileSystem().getFileStatus(path).getLen(); try { FSUtils.getInstance(fs, conf).recoverFileLease(fs, path, conf); - return WALFactory.createReader(rs.getWALFileSystem(), path, rs.getConfiguration()); + return WALProviderFactory.getInstance(conf).createReader(rs.getWALFileSystem(), path, null, true); } catch (EOFException e) { if (length <= 0) { LOG.warn("File is empty. Could not open {} for reading because {}", path, e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index e13feeba2e..55961e00d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -75,8 +75,8 @@ public class ReplicationSyncUp extends Configured implements Tool { new ZKWatcher(conf, "syncupReplication" + System.currentTimeMillis(), abortable, true)) { System.out.println("Start Replication Server start"); DummyServer dummyServer = new DummyServer(zkw); - WALFactory factory = - new WALFactory(conf, dummyServer.getServerName().toString()); + WALProviderFactory factory = + new WALProviderFactory(conf, dummyServer.getServerName().toString()); Replication replication = new Replication(); replication.initialize(dummyServer, factory.getWALProvider()); ReplicationSourceManager manager = replication.getReplicationManager(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 8176942364..de4249ee26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -132,7 +132,7 @@ import org.apache.hadoop.hbase.util.hbck.ReplicationChecker; import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; 
+import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -1542,7 +1542,7 @@ public class HBaseFsck extends Configured implements Closeable { // unless I pass along via the conf. Configuration confForWAL = new Configuration(c); confForWAL.set(HConstants.HBASE_DIR, rootdir.toString()); - WAL wal = new WALFactory(confForWAL, walFactoryID).getWAL(metaHRI); + WAL wal = new WALProviderFactory(confForWAL, walFactoryID).getWAL(metaHRI); HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor, wal); MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true); return meta; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index e5303b0fce..d74ec3843f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.wal; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InterruptedIOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; +import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.replication.regionserver.FSWALEntryStream; import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; @@ -46,6 +48,7 @@ import org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp; import org.apache.hadoop.hbase.replication.regionserver.WALEntryStream; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.LeaseNotRecoveredException; import org.apache.yetus.audience.InterfaceAudience; @@ -90,7 +93,7 @@ public abstract class AbstractFSWALProvider> implemen } protected volatile T wal; - protected WALFactory factory; + protected WALProviderFactory factory; protected Configuration conf; protected List listeners = new ArrayList<>(); protected String providerId; @@ -107,6 +110,16 @@ public abstract class AbstractFSWALProvider> implemen private Path rootDir; private Path oldLogDir; + + /** + * How long to attempt opening in-recovery wals + */ + private int timeoutMillis; + + /** + * Configuration-specified WAL Reader used when a custom reader is requested + */ + private Class logReaderClass; private FileSystem fs; @@ -117,7 +130,7 @@ public abstract class AbstractFSWALProvider> implemen * null */ @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALProviderFactory factory, Configuration conf, String providerId) throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -137,6 +150,10 @@ public abstract class AbstractFSWALProvider> implemen rootDir = FSUtils.getRootDir(conf); 
oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME); this.fs = CommonFSUtils.getWALFileSystem(conf); + timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000); + /* TODO Both of these are probably specific to the fs wal provider */ + logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class, + AbstractFSWALProvider.Reader.class); doInit(conf); } @@ -480,7 +497,7 @@ public abstract class AbstractFSWALProvider> implemen * @return WAL Reader instance * @throws IOException */ - public static org.apache.hadoop.hbase.wal.WAL.Reader openReader(Path path, Configuration conf) + public org.apache.hadoop.hbase.wal.WAL.Reader openReader(Path path, Configuration conf) throws IOException { @@ -493,7 +510,7 @@ public abstract class AbstractFSWALProvider> implemen try { // Detect if this is a new file, if so get a new reader else // reset the current reader so that we see the new data - reader = WALFactory.createReader(path.getFileSystem(conf), path, conf); + reader = createReader(path.getFileSystem(conf), path, null, true); return reader; } catch (FileNotFoundException fnfe) { // If the log was archived, continue reading from there @@ -656,4 +673,69 @@ public abstract class AbstractFSWALProvider> implemen LOG.error("Didn't find path for: " + path.getName()); return path; } + + @Override + public org.apache.hadoop.hbase.wal.WAL.Reader createReader(FileSystem fs, Path path, + CancelableProgressable reporter, boolean allowCustom) throws IOException { + Class lrClass = + allowCustom ? logReaderClass : ProtobufLogReader.class; + try { + // A wal file could be under recovery, so it may take several + // tries to get it open. Instead of claiming it is corrupted, retry + // to open it up to 5 minutes by default. + long startWaiting = EnvironmentEdgeManager.currentTime(); + long openTimeout = timeoutMillis + startWaiting; + int nbAttempt = 0; + AbstractFSWALProvider.Reader reader = null; + while (true) { + try { + reader = lrClass.getDeclaredConstructor().newInstance(); + reader.init(fs, path, conf, null); + return reader; + } catch (IOException e) { + if (reader != null) { + try { + reader.close(); + } catch (IOException exception) { + LOG.warn("Could not close FSDataInputStream" + exception.getMessage()); + LOG.debug("exception details", exception); + } + } + + String msg = e.getMessage(); + if (msg != null + && (msg.contains("Cannot obtain block length") + || msg.contains("Could not obtain the last block") || msg + .matches("Blocklist for [^ ]* has changed.*"))) { + if (++nbAttempt == 1) { + LOG.warn("Lease should have recovered. This is not expected. Will retry", e); + } + if (reporter != null && !reporter.progress()) { + throw new InterruptedIOException("Operation is cancelled"); + } + if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTime()) { + LOG.error("Can't open after " + nbAttempt + " attempts and " + + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + path); + } else { + try { + Thread.sleep(nbAttempt < 3 ? 
500 : 1000); + continue; // retry + } catch (InterruptedException ie) { + InterruptedIOException iioe = new InterruptedIOException(); + iioe.initCause(ie); + throw iioe; + } + } + throw new LeaseNotRecoveredException(e); + } else { + throw e; + } + } + } + } catch (IOException ie) { + throw ie; + } catch (Exception e) { + throw new IOException("Cannot get log reader", e); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java index 062b3688d3..9311e7aee7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java @@ -132,4 +132,11 @@ public class AsyncFSWALProvider extends AbstractFSWALProvider { return false; } } + + @Override + public Writer createWriter(Configuration conf, FileSystem fs, Path path, boolean overwritable) + throws IOException { + return FSHLogProvider.createWriter(conf, fs, path, overwritable, + WALUtil.getWALBlockSize(conf, fs, path, overwritable)); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java index a9e3a0a66d..5a9fbb58e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java @@ -27,6 +27,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -39,8 +40,10 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost; import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; import org.apache.hadoop.hbase.replication.regionserver.WALEntryStream; +import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,7 +64,7 @@ class DisabledWALProvider implements WALProvider { WAL disabled; @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALProviderFactory factory, Configuration conf, String providerId) throws IOException { if (null != disabled) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -351,4 +354,67 @@ class DisabledWALProvider implements WALProvider { return wal; } + public Writer createWriter(Configuration conf, FileSystem fs, Path path, boolean overwritable) + throws IOException { + return new Writer() { + + @Override + public void close() throws IOException { + + } + + @Override + public long getLength() { + return 0; + } + + @Override + public void sync(boolean forceSync) throws IOException { + + } + + @Override + public void append(Entry entry) throws IOException { + + } + }; + } + + @Override + public Reader createReader(FileSystem fs, Path path, CancelableProgressable reporter, + boolean allowCustom) throws IOException 
{ + return new Reader() { + + @Override + public void close() throws IOException { + + } + + @Override + public void seek(long pos) throws IOException { + + } + + @Override + public void reset() throws IOException { + + } + + @Override + public Entry next(Entry reuse) throws IOException { + return null; + } + + @Override + public Entry next() throws IOException { + return null; + } + + @Override + public long getPosition() throws IOException { + return 0; + } + }; + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java index b02a4d30fd..721997afbc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java @@ -58,7 +58,7 @@ public class FSHLogProvider extends AbstractFSWALProvider { * @param overwritable if the created writer can overwrite. For recovered edits, it is true and * for WAL it is false. Thus we can distinguish WAL and recovered edits by this. */ - public static Writer createWriter(final Configuration conf, final FileSystem fs, final Path path, + public Writer createWriter(final Configuration conf, final FileSystem fs, final Path path, final boolean overwritable) throws IOException { return createWriter(conf, fs, path, overwritable, WALUtil.getWALBlockSize(conf, fs, path, overwritable)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java index 8cd667af82..4d27f1c748 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java @@ -30,6 +30,8 @@ import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.locks.Lock; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; @@ -41,8 +43,10 @@ import org.apache.hadoop.hbase.replication.regionserver.FSWALEntryStream; import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; import org.apache.hadoop.hbase.replication.regionserver.WALEntryStream; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.KeyLocker; +import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,6 +69,7 @@ import org.slf4j.LoggerFactory; */ @InterfaceAudience.Private public class RegionGroupingProvider implements WALProvider { + private static final Logger LOG = LoggerFactory.getLogger(RegionGroupingProvider.class); /** @@ -129,7 +134,7 @@ public class RegionGroupingProvider implements WALProvider { /** delegate provider for WAL creation/roll/close */ public static final String DELEGATE_PROVIDER = "hbase.wal.regiongrouping.delegate.provider"; - public static final String DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider + public static final String DEFAULT_DELEGATE_PROVIDER = WALProviderFactory.Providers.defaultProvider .name(); private static final String META_WAL_GROUP_NAME = "meta"; @@ 
-140,7 +145,7 @@ public class RegionGroupingProvider implements WALProvider { private final KeyLocker createLock = new KeyLocker<>(); private RegionGroupingStrategy strategy; - private WALFactory factory; + private WALProviderFactory factory; private Configuration conf; private List listeners = new ArrayList<>(); private String providerId; @@ -148,7 +153,7 @@ public class RegionGroupingProvider implements WALProvider { private WALProvider delegateProvider; @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALProviderFactory factory, Configuration conf, String providerId) throws IOException { if (null != strategy) { throw new IllegalStateException("WALProvider.init should only be called once."); } @@ -165,12 +170,12 @@ public class RegionGroupingProvider implements WALProvider { this.providerId = sb.toString(); this.strategy = getStrategy(conf, REGION_GROUPING_STRATEGY, DEFAULT_REGION_GROUPING_STRATEGY); this.providerClass = factory.getProviderClass(DELEGATE_PROVIDER, DEFAULT_DELEGATE_PROVIDER); - delegateProvider = WALFactory.createProvider(providerClass); + delegateProvider = WALProviderFactory.createProvider(providerClass); delegateProvider.init(factory, conf, providerId); } private WALProvider createProvider(String group) throws IOException { - WALProvider provider = WALFactory.createProvider(providerClass); + WALProvider provider = WALProviderFactory.createProvider(this.providerClass); provider.init(factory, conf, META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : group); provider.addWALActionsListener(new MetricsWAL()); @@ -316,4 +321,16 @@ public class RegionGroupingProvider implements WALProvider { return delegateProvider.locateWalId(wal, server, deadRegionServers); } + @Override + public Writer createWriter(Configuration conf, FileSystem fs, Path path, boolean overwritable) + throws IOException { + return delegateProvider.createWriter(conf, fs, path, overwritable); + } + + @Override + public Reader createReader(FileSystem fs, Path path, CancelableProgressable reporter, + boolean allowCustom) throws IOException { + return delegateProvider.createReader(fs, path, reporter, allowCustom); + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java index fc287604e1..3e85ddf0f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java @@ -37,6 +37,8 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -50,10 +52,12 @@ import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; import org.apache.hadoop.hbase.replication.regionserver.PeerActionListener; import org.apache.hadoop.hbase.replication.regionserver.SyncReplicationPeerInfoProvider; import org.apache.hadoop.hbase.replication.regionserver.WALEntryStream; +import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.KeyLocker; 
import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -80,12 +84,10 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen @VisibleForTesting public static final String DUAL_WAL_IMPL = "hbase.wal.sync.impl"; - private final WALProvider provider; - private SyncReplicationPeerInfoProvider peerInfoProvider = new DefaultSyncReplicationPeerInfoProvider(); - private WALFactory factory; + private WALProviderFactory factory; private Configuration conf; @@ -105,8 +107,10 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen private final KeyLocker createLock = new KeyLocker<>(); + private WALProvider delegateProvider; + SyncReplicationWALProvider(WALProvider provider) { - this.provider = provider; + this.delegateProvider = provider; } public void setPeerInfoProvider(SyncReplicationPeerInfoProvider peerInfoProvider) { @@ -114,11 +118,11 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen } @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALProviderFactory factory, Configuration conf, String providerId) throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } - provider.init(factory, conf, providerId); + delegateProvider.init(factory, conf, providerId); this.conf = conf; this.factory = factory; Pair> eventLoopGroupAndChannelClass = @@ -198,7 +202,7 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen @Override public WAL getWAL(RegionInfo region) throws IOException { if (region == null) { - return provider.getWAL(null); + return delegateProvider.getWAL(null); } WAL wal = null; Optional> peerIdAndRemoteWALDir = @@ -207,13 +211,13 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen Pair pair = peerIdAndRemoteWALDir.get(); wal = getWAL(pair.getFirst(), pair.getSecond()); } - return wal != null ? wal : provider.getWAL(region); + return wal != null ? 
wal : delegateProvider.getWAL(region); } private Stream getWALStream() { return Streams.concat( peerId2WAL.values().stream().filter(Optional::isPresent).map(Optional::get), - provider.getWALs().stream()); + delegateProvider.getWALs().stream()); } @Override @@ -235,7 +239,7 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen } } } - provider.shutdown(); + delegateProvider.shutdown(); if (failure != null) { throw failure; } @@ -255,7 +259,7 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen } } } - provider.close(); + delegateProvider.close(); if (failure != null) { throw failure; } @@ -263,13 +267,13 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen @Override public long getNumLogFiles() { - return peerId2WAL.size() + provider.getNumLogFiles(); + return peerId2WAL.size() + delegateProvider.getNumLogFiles(); } @Override public long getLogFileSize() { return peerId2WAL.values().stream().filter(Optional::isPresent).map(Optional::get) - .mapToLong(DualAsyncFSWAL::getLogFileSize).sum() + provider.getLogFileSize(); + .mapToLong(DualAsyncFSWAL::getLogFileSize).sum() + delegateProvider.getLogFileSize(); } private void safeClose(WAL wal) { @@ -285,7 +289,7 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen @Override public void addWALActionsListener(WALActionsListener listener) { listeners.add(listener); - provider.addWALActionsListener(listener); + delegateProvider.addWALActionsListener(listener); } @Override @@ -352,7 +356,18 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen @VisibleForTesting WALProvider getWrappedProvider() { - return provider; + return delegateProvider; + } + + @Override + public WALIdentity createWalIdentity(ServerName serverName, String walName, boolean isArchive) { + return delegateProvider.createWalIdentity(serverName, walName, isArchive); + } + + @Override + public WALIdentity locateWalId(WALIdentity wal, Server server, List deadRegionServers) + throws IOException { + return delegateProvider.locateWalId(wal, server, deadRegionServers); } @Override @@ -364,14 +379,14 @@ public class SyncReplicationWALProvider implements WALProvider, PeerActionListen } @Override - public WALIdentity createWalIdentity(ServerName serverName, String walName, boolean isArchive) { - return provider.createWalIdentity(serverName, walName, isArchive); + public Writer createWriter(Configuration conf, FileSystem fs, Path path, boolean overwritable) + throws IOException { + return delegateProvider.createWriter(conf, fs, path, overwritable); } @Override - public WALIdentity locateWalId(WALIdentity wal, Server server, List deadRegionServers) - throws IOException { - return provider.locateWalId(wal, server, deadRegionServers); + public Reader createReader(FileSystem fs, Path path, CancelableProgressable reporter, + boolean allowCustom) throws IOException { + return delegateProvider.createReader(fs, path, reporter, allowCustom); } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index 281f3c9c1d..31dec36bdd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -259,7 +259,7 @@ public class WALPrettyPrinter { throw new IOException(p + " is not a file"); } - WAL.Reader log = WALFactory.createReader(fs, 
p, conf); + WAL.Reader log = WALProviderFactory.getInstance(conf).createReader(fs, p, null, false); if (log instanceof ProtobufLogReader) { List writerClsNames = ((ProtobufLogReader) log).getWriterClsNames(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java index 3ef4efa6da..1b85e1bbe3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java @@ -25,11 +25,15 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.PriorityBlockingQueue; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; import org.apache.hadoop.hbase.replication.regionserver.WALEntryStream; +import org.apache.hadoop.hbase.util.CancelableProgressable; +import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.yetus.audience.InterfaceAudience; /** @@ -48,7 +52,7 @@ public interface WALProvider { * @param conf may not be null * @param providerId differentiate between providers from one factory. may be null */ - void init(WALFactory factory, Configuration conf, String providerId) throws IOException; + void init(WALProviderFactory factory, Configuration conf, String providerId) throws IOException; /** * @param region the region which we want to get a WAL for it. Could be null. @@ -145,4 +149,28 @@ public interface WALProvider { */ WALIdentity locateWalId(WALIdentity wal, Server server, List deadRegionServers) throws IOException; + + /** + * Create a writer + * @param conf configuration + * @param fs WAL FileSystem + * @param path Path of the wal + * @param overwritable is overwritable + * @return Writer + * @throws IOException IOException + */ + Writer createWriter(Configuration conf, FileSystem fs, Path path, boolean overwritable) + throws IOException; + + /** + * Create a reader + * @param fs WAL filesystem + * @param path Path of the wal + * @param reporter CancelableProgressable + * @param allowCustom allow custom reader class + * @return Reader + * @throws IOException IOException + */ + Reader createReader(final FileSystem fs, final Path path, CancelableProgressable reporter, + boolean allowCustom) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProviderFactory.java similarity index 56% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProviderFactory.java index 8bde6d2001..39ed1ed794 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProviderFactory.java @@ -18,18 +18,13 @@ package org.apache.hadoop.hbase.wal; import java.io.IOException; -import java.io.InterruptedIOException; -import java.util.List; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.client.RegionInfo; import 
org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; -import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader; import org.apache.hadoop.hbase.util.CancelableProgressable; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.LeaseNotRecoveredException; import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.hadoop.hbase.wal.WALProvider.Writer; import org.apache.yetus.audience.InterfaceAudience; @@ -59,9 +54,9 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti * Alternatively, you may provide a custom implementation of {@link WALProvider} by class name. */ @InterfaceAudience.Private -public class WALFactory { +public class WALProviderFactory { - private static final Logger LOG = LoggerFactory.getLogger(WALFactory.class); + private static final Logger LOG = LoggerFactory.getLogger(WALProviderFactory.class); /** * Maps between configuration names for providers and implementation classes. @@ -90,35 +85,8 @@ public class WALFactory { // lazily intialized; most RegionServers don't deal with META private final AtomicReference metaProvider = new AtomicReference<>(); - /** - * Configuration-specified WAL Reader used when a custom reader is requested - */ - private final Class logReaderClass; - - /** - * How long to attempt opening in-recovery wals - */ - private final int timeoutMillis; - private final Configuration conf; - // Used for the singleton WALFactory, see below. - private WALFactory(Configuration conf) { - // this code is duplicated here so we can keep our members final. - // until we've moved reader/writer construction down into providers, this initialization must - // happen prior to provider initialization, in case they need to instantiate a reader/writer. - timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000); - /* TODO Both of these are probably specific to the fs wal provider */ - logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class, - AbstractFSWALProvider.Reader.class); - this.conf = conf; - // end required early initialization - - // this instance can't create wals, just reader/writers. - provider = null; - factoryId = SINGLETON_ID; - } - @VisibleForTesting Providers getDefaultProvider() { return Providers.defaultProvider; @@ -170,7 +138,7 @@ public class WALFactory { * @param factoryId a unique identifier for this factory. used i.e. by filesystem implementations * to make a directory */ - public WALFactory(Configuration conf, String factoryId) throws IOException { + public WALProviderFactory(Configuration conf, String factoryId) throws IOException { // default enableSyncReplicationWALProvider is true, only disable SyncReplicationWALProvider // for HMaster or HRegionServer which take system table only. See HBASE-19999 this(conf, factoryId, true); @@ -184,19 +152,14 @@ public class WALFactory { * @param enableSyncReplicationWALProvider whether wrap the wal provider to a * {@link SyncReplicationWALProvider} */ - public WALFactory(Configuration conf, String factoryId, boolean enableSyncReplicationWALProvider) + public WALProviderFactory(Configuration conf, String factoryId, boolean enableSyncReplicationWALProvider) throws IOException { - // until we've moved reader/writer construction down into providers, this initialization must - // happen prior to provider initialization, in case they need to instantiate a reader/writer. 
- timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000); - /* TODO Both of these are probably specific to the fs wal provider */ - logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class, - AbstractFSWALProvider.Reader.class); this.conf = conf; this.factoryId = factoryId; // end required early initialization if (conf.getBoolean("hbase.regionserver.hlog.enabled", true)) { - WALProvider provider = createProvider(getProviderClass(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); + WALProvider provider = + createProvider(getProviderClass(WAL_PROVIDER, DEFAULT_WAL_PROVIDER)); if (enableSyncReplicationWALProvider) { provider = new SyncReplicationWALProvider(provider); } @@ -249,9 +212,6 @@ public class WALFactory { } } - public List getWALs() { - return provider.getWALs(); - } @VisibleForTesting WALProvider getMetaProvider() throws IOException { @@ -296,121 +256,24 @@ public class WALFactory { } } - public Reader createReader(final FileSystem fs, final Path path) throws IOException { - return createReader(fs, path, (CancelableProgressable)null); - } - - /** - * Create a reader for the WAL. If you are reading from a file that's being written to and need - * to reopen it multiple times, use {@link WAL.Reader#reset()} instead of this method - * then just seek back to the last known good position. - * @return A WAL reader. Close when done with it. - * @throws IOException - */ - public Reader createReader(final FileSystem fs, final Path path, - CancelableProgressable reporter) throws IOException { - return createReader(fs, path, reporter, true); - } public Reader createReader(final FileSystem fs, final Path path, CancelableProgressable reporter, boolean allowCustom) throws IOException { - Class lrClass = - allowCustom ? logReaderClass : ProtobufLogReader.class; - try { - // A wal file could be under recovery, so it may take several - // tries to get it open. Instead of claiming it is corrupted, retry - // to open it up to 5 minutes by default. - long startWaiting = EnvironmentEdgeManager.currentTime(); - long openTimeout = timeoutMillis + startWaiting; - int nbAttempt = 0; - AbstractFSWALProvider.Reader reader = null; - while (true) { - try { - reader = lrClass.getDeclaredConstructor().newInstance(); - reader.init(fs, path, conf, null); - return reader; - } catch (IOException e) { - if (reader != null) { - try { - reader.close(); - } catch (IOException exception) { - LOG.warn("Could not close FSDataInputStream" + exception.getMessage()); - LOG.debug("exception details", exception); - } - } - - String msg = e.getMessage(); - if (msg != null - && (msg.contains("Cannot obtain block length") - || msg.contains("Could not obtain the last block") || msg - .matches("Blocklist for [^ ]* has changed.*"))) { - if (++nbAttempt == 1) { - LOG.warn("Lease should have recovered. This is not expected. Will retry", e); - } - if (reporter != null && !reporter.progress()) { - throw new InterruptedIOException("Operation is cancelled"); - } - if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTime()) { - LOG.error("Can't open after " + nbAttempt + " attempts and " - + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + path); - } else { - try { - Thread.sleep(nbAttempt < 3 ? 
500 : 1000); - continue; // retry - } catch (InterruptedException ie) { - InterruptedIOException iioe = new InterruptedIOException(); - iioe.initCause(ie); - throw iioe; - } - } - throw new LeaseNotRecoveredException(e); - } else { - throw e; - } - } - } - } catch (IOException ie) { - throw ie; - } catch (Exception e) { - throw new IOException("Cannot get log reader", e); - } - } - - /** - * Create a writer for the WAL. - * Uses defaults. - *
<p>
- * Should be package-private. public only for tests and - * {@link org.apache.hadoop.hbase.regionserver.wal.Compressor} - * @return A WAL writer. Close when done with it. - */ - public Writer createWALWriter(final FileSystem fs, final Path path) throws IOException { - return FSHLogProvider.createWriter(conf, fs, path, false); - } - - /** - * Should be package-private, visible for recovery testing. - * Uses defaults. - * @return an overwritable writer for recovered edits. caller should close. - */ - @VisibleForTesting - public Writer createRecoveredEditsWriter(final FileSystem fs, final Path path) - throws IOException { - return FSHLogProvider.createWriter(conf, fs, path, true); + return provider.createReader(fs, path, reporter, allowCustom); } // These static methods are currently used where it's impractical to // untangle the reliance on state in the filesystem. They rely on singleton // WALFactory that just provides Reader / Writers. // For now, first Configuration object wins. Practically this just impacts the reader/writer class - private static final AtomicReference singleton = new AtomicReference<>(); - private static final String SINGLETON_ID = WALFactory.class.getName(); + private static final AtomicReference singleton = new AtomicReference<>(); + private static final String SINGLETON_ID = WALProviderFactory.class.getName(); // Public only for FSHLog - public static WALFactory getInstance(Configuration configuration) { - WALFactory factory = singleton.get(); + public static WALProviderFactory getInstance(Configuration configuration) throws IOException { + WALProviderFactory factory = singleton.get(); if (null == factory) { - WALFactory temp = new WALFactory(configuration); + WALProviderFactory temp = new WALProviderFactory(configuration, SINGLETON_ID); if (singleton.compareAndSet(null, temp)) { factory = temp; } else { @@ -426,58 +289,9 @@ public class WALFactory { return factory; } - /** - * Create a reader for the given path, accept custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. - * @return a WAL Reader, caller must close. - */ - public static Reader createReader(final FileSystem fs, final Path path, - final Configuration configuration) throws IOException { - return getInstance(configuration).createReader(fs, path); - } - - /** - * Create a reader for the given path, accept custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. - * @return a WAL Reader, caller must close. - */ - static Reader createReader(final FileSystem fs, final Path path, - final Configuration configuration, final CancelableProgressable reporter) throws IOException { - return getInstance(configuration).createReader(fs, path, reporter); - } - - /** - * Create a reader for the given path, ignore custom reader classes from conf. - * If you already have a WALFactory, you should favor the instance method. - * only public pending move of {@link org.apache.hadoop.hbase.regionserver.wal.Compressor} - * @return a WAL Reader, caller must close. - */ - public static Reader createReaderIgnoreCustomClass(final FileSystem fs, final Path path, - final Configuration configuration) throws IOException { - return getInstance(configuration).createReader(fs, path, null, false); - } - - /** - * If you already have a WALFactory, you should favor the instance method. - * Uses defaults. - * @return a Writer that will overwrite files. Caller must close. 
- */ - static Writer createRecoveredEditsWriter(final FileSystem fs, final Path path, - final Configuration configuration) - throws IOException { - return FSHLogProvider.createWriter(configuration, fs, path, true); - } - - /** - * If you already have a WALFactory, you should favor the instance method. - * Uses defaults. - * @return a writer that won't overwrite files. Caller must close. - */ - @VisibleForTesting - public static Writer createWALWriter(final FileSystem fs, final Path path, - final Configuration configuration) + public Writer createWALWriter(final FileSystem fs, final Path path, boolean overwritable) throws IOException { - return FSHLogProvider.createWriter(configuration, fs, path, false); + return provider.createWriter(conf, fs, path, overwritable); } public final WALProvider getWALProvider() { @@ -487,4 +301,8 @@ public class WALFactory { public final WALProvider getMetaWALProvider() { return this.metaProvider.get(); } + + public Reader createReader(FileSystem fs, Path path) throws IOException { + return provider.createReader(fs, path, null, true); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index bc67d9863d..6612b88f7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -126,7 +126,7 @@ public class WALSplitter { private EntryBuffers entryBuffers; private SplitLogWorkerCoordination splitLogWorkerCoordination; - private final WALFactory walFactory; + private final WALProviderFactory walFactory; private MonitoredTask status; @@ -149,7 +149,7 @@ public class WALSplitter { @VisibleForTesting - WALSplitter(final WALFactory factory, Configuration conf, Path walDir, + WALSplitter(final WALProviderFactory factory, Configuration conf, Path walDir, FileSystem walFS, LastSequenceId idChecker, SplitLogWorkerCoordination splitLogWorkerCoordination) { this.conf = HBaseConfiguration.create(conf); @@ -189,7 +189,7 @@ public class WALSplitter { */ public static boolean splitLogFile(Path walDir, FileStatus logfile, FileSystem walFS, Configuration conf, CancelableProgressable reporter, LastSequenceId idChecker, - SplitLogWorkerCoordination splitLogWorkerCoordination, final WALFactory factory) + SplitLogWorkerCoordination splitLogWorkerCoordination, final WALProviderFactory factory) throws IOException { WALSplitter s = new WALSplitter(factory, conf, walDir, walFS, idChecker, splitLogWorkerCoordination); @@ -202,7 +202,7 @@ public class WALSplitter { // which uses this method to do log splitting. 
@VisibleForTesting public static List split(Path rootDir, Path logDir, Path oldLogDir, - FileSystem walFS, Configuration conf, final WALFactory factory) throws IOException { + FileSystem walFS, Configuration conf, final WALProviderFactory factory) throws IOException { final FileStatus[] logfiles = SplitLogManager.getFileList(conf, Collections.singletonList(logDir), null); List splits = new ArrayList<>(); @@ -796,7 +796,7 @@ public class WALSplitter { */ protected Writer createWriter(Path logfile) throws IOException { - return walFactory.createRecoveredEditsWriter(walFS, logfile); + return walFactory.createWALWriter(walFS, logfile, true); } /** @@ -804,7 +804,7 @@ public class WALSplitter { * @return new Reader instance, caller should close */ protected Reader getReader(Path curLogFile, CancelableProgressable reporter) throws IOException { - return walFactory.createReader(walFS, curLogFile, reporter); + return walFactory.createReader(walFS, curLogFile, reporter, true); } /** @@ -1282,7 +1282,7 @@ public class WALSplitter { private void deleteOneWithFewerEntries(WriterAndPath wap, Path dst) throws IOException { long dstMinLogSeqNum = -1L; - try (WAL.Reader reader = walFactory.createReader(walFS, dst)) { + try (WAL.Reader reader = walFactory.createReader(walFS, dst, null, true)) { WAL.Entry entry = reader.next(); if (entry != null) { dstMinLogSeqNum = entry.getKey().getSequenceId(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 31a7cad3f5..edce6e7998 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -132,7 +132,7 @@ import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm; import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.zookeeper.EmptyWatcher; import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -2509,7 +2509,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { // unless I pass along via the conf. 
Configuration confForWAL = new Configuration(conf); confForWAL.set(HConstants.HBASE_DIR, rootDir.toString()); - return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri); + return new WALProviderFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java index e657d9c74a..150a94ea89 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSequenceIdMonotonicallyIncreasing.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -93,8 +93,8 @@ public class TestSequenceIdMonotonicallyIncreasing { private long getMaxSeqId(HRegionServer rs, RegionInfo region) throws IOException { Path walFile = ((AbstractFSWAL) rs.getWAL(null)).getCurrentFileName(); long maxSeqId = -1L; - try (WAL.Reader reader = - WALFactory.createReader(UTIL.getTestFileSystem(), walFile, UTIL.getConfiguration())) { + try (WAL.Reader reader = WALProviderFactory.getInstance(UTIL.getConfiguration()) + .createReader(UTIL.getTestFileSystem(), walFile, null, true)) { for (;;) { WAL.Entry entry = reader.next(); if (entry == null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index dde020d432..b10d21f226 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -61,7 +61,7 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALSplitter; import org.junit.After; @@ -109,7 +109,7 @@ public class TestWALObserver { private Path hbaseWALRootDir; private Path oldLogDir; private Path logDir; - private WALFactory wals; + private WALProviderFactory wals; @BeforeClass public static void setupBeforeClass() throws Exception { @@ -155,7 +155,7 @@ public class TestWALObserver { if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseWALRootDir)) { TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseWALRootDir, true); } - this.wals = new WALFactory(conf, serverName); + this.wals = new WALProviderFactory(conf, serverName); } @After @@ -353,7 +353,7 @@ public class TestWALObserver { Path p = runWALSplit(newConf); LOG.info("WALSplit path == " + p); // Make a new wal for new region open. 
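The test utilities in this area build a throwaway WALProviderFactory and ask it for a region's WAL. A minimal sketch of that pattern (the table name, root directory, and factory id are assumptions, not taken from the patch):

// Sketch: one WALProviderFactory per test, handing out the WAL for a region.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALProviderFactory;

public class RegionWalSketch {
  public static WAL walFor(Configuration conf, Path rootDir) throws IOException {
    // Point the factory's configuration at the intended root dir, as the test helper does.
    Configuration confForWAL = new Configuration(conf);
    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf("sketch")).build();
    // Same construction the test utilities use, minus the random numeric suffix.
    return new WALProviderFactory(confForWAL, "sketch-wals").getWAL(hri);
  }
}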
- final WALFactory wals2 = new WALFactory(conf, + final WALProviderFactory wals2 = new WALProviderFactory(conf, ServerName.valueOf(currentTest.getMethodName() + "2", 16010, System.currentTimeMillis()) .toString()); WAL wal2 = wals2.getWAL(null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java index 388c53dc05..a4b6e9fe78 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java @@ -78,7 +78,7 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -639,7 +639,7 @@ public abstract class AbstractTestDLS { private int countWAL(Path log, FileSystem fs, Configuration conf) throws IOException { int count = 0; - try (WAL.Reader in = WALFactory.createReader(fs, log, conf)) { + try (WAL.Reader in = WALProviderFactory.getInstance(conf).createReader(fs, log, null, true)) { WAL.Entry e; while ((e = in.next()) != null) { if (!WALEdit.isMetaEditFamily(e.getEdit().getCells().get(0))) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index dc51dae9a1..a517871c73 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -133,7 +133,7 @@ public class TestCacheOnWriteInSchema { private final String testDescription; private HRegion region; private HStore store; - private WALFactory walFactory; + private WALProviderFactory walFactory; private FileSystem fs; public TestCacheOnWriteInSchema(CacheOnWriteType cowType) { @@ -179,7 +179,7 @@ public class TestCacheOnWriteInSchema { fs.delete(logdir, true); RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); - walFactory = new WALFactory(conf, id); + walFactory = new WALProviderFactory(conf, id); region = TEST_UTIL.createLocalHRegion(info, htd, walFactory.getWAL(info)); store = new HStore(region, hcd, conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java index 4263de5695..524c96328a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java @@ -45,7 +45,7 @@ import 
org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -175,7 +175,7 @@ public class TestCompactionArchiveConcurrentClose { ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null); final Configuration walConf = new Configuration(conf); FSUtils.setRootDir(walConf, tableDir); - final WALFactory wals = new WALFactory(walConf, "log_" + info.getEncodedName()); + final WALProviderFactory wals = new WALProviderFactory(walConf, "log_" + info.getEncodedName()); HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null); region.initialize(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java index 1199c6fc86..1f75ca58cd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -196,7 +196,7 @@ public class TestCompactionArchiveIOException { HRegionFileSystem fs = new HRegionFileSystem(conf, errFS, tableDir, info); final Configuration walConf = new Configuration(conf); FSUtils.setRootDir(walConf, tableDir); - final WALFactory wals = new WALFactory(walConf, "log_" + info.getEncodedName()); + final WALProviderFactory wals = new WALProviderFactory(walConf, "log_" + info.getEncodedName()); HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null); region.initialize(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java index 78042cc976..87faebee41 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.wal.AsyncFSWALProvider; import org.apache.hadoop.hbase.wal.FSHLogProvider; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; @@ -105,7 +105,7 @@ public class TestCompactionInDeadRegionServer { @Before public void setUp() throws Exception { - UTIL.getConfiguration().setClass(WALFactory.WAL_PROVIDER, walProvider, WALProvider.class); + UTIL.getConfiguration().setClass(WALProviderFactory.WAL_PROVIDER, 
walProvider, WALProvider.class); UTIL.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, 2000); UTIL.getConfiguration().setClass(HConstants.REGION_SERVER_IMPL, IgnoreYouAreDeadRS.class, HRegionServer.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 77f796f8c3..7f50debe91 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -57,7 +57,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.AfterClass; import org.junit.Before; import org.junit.ClassRule; @@ -947,7 +947,7 @@ public class TestDefaultMemStore { EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest(); EnvironmentEdgeManager.injectEdge(edge); edge.setCurrentTimeMillis(1234); - WALFactory wFactory = new WALFactory(conf, "1234"); + WALProviderFactory wFactory = new WALProviderFactory(conf, "1234"); HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir, conf, FSTableDescriptors.createMetaTableDescriptor(conf), wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java index 9c5a667bd0..e65e7536bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java @@ -66,7 +66,7 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.Assert; import org.junit.Before; import org.junit.ClassRule; @@ -165,7 +165,7 @@ public class TestHMobStore { ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null); final Configuration walConf = new Configuration(conf); FSUtils.setRootDir(walConf, basedir); - final WALFactory wals = new WALFactory(walConf, methodName); + final WALProviderFactory wals = new WALProviderFactory(walConf, methodName); region = new HRegion(tableDir, wals.getWAL(info), fs, conf, info, htd, null); store = new HMobStore(region, hcd, conf); if(testStore) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index b2d9a1bff2..e8cfc6ba5f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -157,7 +157,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.FaultyFSLog; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import 
org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.wal.WALProvider.Writer; @@ -382,7 +382,7 @@ public class TestHRegion { final Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log"); final Configuration walConf = new Configuration(conf); FSUtils.setRootDir(walConf, logDir); - return new WALFactory(walConf, callingMethod) + return new WALProviderFactory(walConf, callingMethod) .getWAL(RegionInfoBuilder.newBuilder(tableName).build()); } @@ -435,7 +435,7 @@ public class TestHRegion { public void testFlushAndMemstoreSizeCounting() throws Exception { byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, CONF, family); - final WALFactory wals = new WALFactory(CONF, method); + final WALProviderFactory wals = new WALProviderFactory(CONF, method); try { for (byte[] row : HBaseTestingUtility.ROWS) { Put put = new Put(row); @@ -675,7 +675,7 @@ public class TestHRegion { public void testSkipRecoveredEditsReplay() throws Exception { byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, CONF, family); - final WALFactory wals = new WALFactory(CONF, method); + final WALProviderFactory wals = new WALProviderFactory(CONF, method); try { Path regiondir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); @@ -689,7 +689,7 @@ public class TestHRegion { for (long i = minSeqId; i <= maxSeqId; i += 10) { Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i)); fs.create(recoveredEdits); - WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits); + WALProvider.Writer writer = wals.createWALWriter(fs, recoveredEdits, true); long time = System.nanoTime(); WALEdit edit = new WALEdit(); @@ -726,7 +726,7 @@ public class TestHRegion { public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception { byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, CONF, family); - final WALFactory wals = new WALFactory(CONF, method); + final WALProviderFactory wals = new WALProviderFactory(CONF, method); try { Path regiondir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); @@ -740,7 +740,7 @@ public class TestHRegion { for (long i = minSeqId; i <= maxSeqId; i += 10) { Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i)); fs.create(recoveredEdits); - WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits); + WALProvider.Writer writer = wals.createWALWriter(fs, recoveredEdits, true); long time = System.nanoTime(); WALEdit edit = new WALEdit(); @@ -809,7 +809,7 @@ public class TestHRegion { public void testSkipRecoveredEditsReplayTheLastFileIgnored() throws Exception { byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, CONF, family); - final WALFactory wals = new WALFactory(CONF, method); + final WALProviderFactory wals = new WALProviderFactory(CONF, method); try { Path regiondir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); @@ -826,7 +826,7 @@ public class TestHRegion { for (long i = minSeqId; i <= maxSeqId; i += 10) { Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i)); fs.create(recoveredEdits); - WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, 
recoveredEdits); + WALProvider.Writer writer = wals.createWALWriter(fs, recoveredEdits, true); long time = System.nanoTime(); WALEdit edit = null; @@ -878,7 +878,7 @@ public class TestHRegion { CONF.setClass(HConstants.REGION_IMPL, HRegionForTesting.class, Region.class); byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, CONF, family); - final WALFactory wals = new WALFactory(CONF, method); + final WALProviderFactory wals = new WALProviderFactory(CONF, method); try { Path regiondir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); @@ -938,7 +938,7 @@ public class TestHRegion { Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000)); fs.create(recoveredEdits); - WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits); + WALProvider.Writer writer = wals.createWALWriter(fs, recoveredEdits, true); long time = System.nanoTime(); @@ -989,7 +989,7 @@ public class TestHRegion { Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log"); final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration()); FSUtils.setRootDir(walConf, logDir); - final WALFactory wals = new WALFactory(walConf, method); + final WALProviderFactory wals = new WALProviderFactory(walConf, method); final WAL wal = wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build()); this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, @@ -1018,8 +1018,8 @@ public class TestHRegion { // now verify that the flush markers are written wal.shutdown(); - WAL.Reader reader = WALFactory.createReader(fs, AbstractFSWALProvider.getCurrentFileName(wal), - TEST_UTIL.getConfiguration()); + WAL.Reader reader = WALProviderFactory.getInstance(TEST_UTIL.getConfiguration()).createReader(fs, + AbstractFSWALProvider.getCurrentFileName(wal), null, true); try { List flushDescriptors = new ArrayList<>(); long lastFlushSeqId = -1; @@ -1063,7 +1063,7 @@ public class TestHRegion { Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000)); fs.create(recoveredEdits); - WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits); + WALProvider.Writer writer = wals.createWALWriter(fs, recoveredEdits, true); for (WAL.Entry entry : flushDescriptors) { writer.append(entry); @@ -4477,8 +4477,8 @@ public class TestHRegion { FSUtils.setRootDir(walConf, logDir); // XXX: The spied AsyncFSWAL can not work properly because of a Mockito defect that can not // deal with classes which have a field of an inner class. See discussions in HBASE-15536. 
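The recovered-edits call sites above now use createWALWriter(fs, path, true) where they previously used the dedicated recovered-edits helper. A sketch of producing such a file by copying entries from an existing WAL (paths and the factory id are assumptions; whether every provider's reader/writer pair round-trips entries this way is not established by the patch):

// Sketch: writing a recovered.edits file with the overwrite-capable writer,
// using entries read back from another WAL file.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALProvider;
import org.apache.hadoop.hbase.wal.WALProviderFactory;

public class RecoveredEditsSketch {
  public static void copy(Configuration conf, FileSystem fs, Path source, Path recoveredEdits)
      throws IOException {
    WALProviderFactory wals = new WALProviderFactory(conf, "recovered-edits-sketch");
    // true = overwritable, the same flag the recovered-edits call sites above pass.
    WALProvider.Writer writer = wals.createWALWriter(fs, recoveredEdits, true);
    try (WAL.Reader reader = wals.createReader(fs, source)) {
      for (WAL.Entry entry; (entry = reader.next()) != null;) {
        writer.append(entry);
      }
    } finally {
      writer.close();
    }
  }
}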
- walConf.set(WALFactory.WAL_PROVIDER, "filesystem"); - final WALFactory wals = new WALFactory(walConf, TEST_UTIL.getRandomUUID().toString()); + walConf.set(WALProviderFactory.WAL_PROVIDER, "filesystem"); + final WALProviderFactory wals = new WALProviderFactory(walConf, TEST_UTIL.getRandomUUID().toString()); final WAL wal = spy(wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build())); this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, tableDurability, wal, @@ -4625,10 +4625,10 @@ public class TestHRegion { } } - static WALFactory createWALFactory(Configuration conf, Path rootDir) throws IOException { + static WALProviderFactory createWALFactory(Configuration conf, Path rootDir) throws IOException { Configuration confForWAL = new Configuration(conf); confForWAL.set(HConstants.HBASE_DIR, rootDir.toString()); - return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)); + return new WALProviderFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index 3b3b8c39db..34c71d242c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -75,7 +75,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay; import org.apache.hadoop.util.StringUtils; @@ -179,7 +179,7 @@ public class TestHRegionReplayEvents { secondaryHri = RegionInfoBuilder.newBuilder(htd.getTableName()).setRegionId(time).setReplicaId(1).build(); - WALFactory wals = TestHRegion.createWALFactory(CONF, rootDir); + WALProviderFactory wals = TestHRegion.createWALFactory(CONF, rootDir); walPrimary = wals.getWAL(primaryHri); walSecondary = wals.getWAL(secondaryHri); @@ -317,9 +317,9 @@ public class TestHRegionReplayEvents { } WAL.Reader createWALReaderForPrimary() throws FileNotFoundException, IOException { - return WALFactory.createReader(TEST_UTIL.getTestFileSystem(), - AbstractFSWALProvider.getCurrentFileName(walPrimary), - TEST_UTIL.getConfiguration()); + return WALProviderFactory.getInstance(TEST_UTIL.getConfiguration()).createReader( + TEST_UTIL.getTestFileSystem(), AbstractFSWALProvider.getCurrentFileName(walPrimary), null, + true); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java index ca7629b936..ab2ba86958 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL; -import 
org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; @@ -70,7 +70,7 @@ public class TestHRegionWithInMemoryFlush extends TestHRegion { public void testFlushAndMemstoreSizeCounting() throws Exception { byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, CONF, family); - final WALFactory wals = new WALFactory(CONF, method); + final WALProviderFactory wals = new WALProviderFactory(CONF, method); int count = 0; try { for (byte[] row : HBaseTestingUtility.ROWS) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index 72a9c75f6d..0a60d2afa8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -102,7 +102,7 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.util.Progressable; import org.junit.After; import org.junit.AfterClass; @@ -217,7 +217,7 @@ public class TestHStore { RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); Configuration walConf = new Configuration(conf); FSUtils.setRootDir(walConf, basedir); - WALFactory wals = new WALFactory(walConf, methodName); + WALProviderFactory wals = new WALProviderFactory(walConf, methodName); region = new HRegion(new HRegionFileSystem(conf, fs, tableDir, info), wals.getWAL(info), conf, htd, null); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java index 543126e5b5..974cc9cf59 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java @@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALSplitter; import org.junit.ClassRule; @@ -174,7 +174,7 @@ public class TestRecoveredEdits { // Based on HRegion#replayRecoveredEdits WAL.Reader reader = null; try { - reader = WALFactory.createReader(fs, edits, conf); + reader = WALProviderFactory.getInstance(conf).createReader(fs, edits, null, true); WAL.Entry entry; while ((entry = reader.next()) != null) { WALKey key = entry.getKey(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java index 7aeff84a82..1d7c067441 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java @@ 
-43,7 +43,7 @@ import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.wal.WALSplitter; @@ -139,7 +139,7 @@ public class TestRecoveredEditsReplayAndAbort { region = HRegion.newHRegion(tableDir, wal, TEST_UTIL.getTestFileSystem(), CONF, info, htd, rs); //create some recovered.edits - final WALFactory wals = new WALFactory(CONF, method); + final WALProviderFactory wals = new WALProviderFactory(CONF, method); try { Path regiondir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); @@ -155,8 +155,7 @@ public class TestRecoveredEditsReplayAndAbort { String.format("%019d", i)); LOG.info("Begin to write recovered.edits : " + recoveredEdits); fs.create(recoveredEdits); - WALProvider.Writer writer = wals - .createRecoveredEditsWriter(fs, recoveredEdits); + WALProvider.Writer writer = wals.createWALWriter(fs, recoveredEdits, true); for (long j = i; j < i + 100; j++) { long time = System.nanoTime(); WALEdit edit = new WALEdit(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java index f1b090d4a6..9489712acc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.zookeeper.KeeperException.SessionExpiredException; import org.junit.After; import org.junit.Before; @@ -115,7 +115,7 @@ public class TestShutdownWhileWALBroken { public void setUp() throws Exception { UTIL.getConfiguration().setClass(HConstants.REGION_SERVER_IMPL, MyRegionServer.class, HRegionServer.class); - UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, walType); + UTIL.getConfiguration().set(WALProviderFactory.WAL_PROVIDER, walType); UTIL.startMiniCluster(2); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java index 7ce2418d30..3d623a7fec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java @@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.StoppableImplementation; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.Before; import org.junit.ClassRule; import org.junit.Rule; @@ -117,7 +117,7 @@ public class 
TestStoreFileRefresherChore { new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info); final Configuration walConf = new Configuration(conf); FSUtils.setRootDir(walConf, tableDir); - final WALFactory wals = new WALFactory(walConf, "log_" + replicaId); + final WALProviderFactory wals = new WALProviderFactory(walConf, "log_" + replicaId); ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null); HRegion region = new HRegion(fs, wals.getWAL(info), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java index 599260be9d..e93312929a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALMonotonicallyIncreasingSeqId.java @@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -84,7 +84,7 @@ public class TestWALMonotonicallyIncreasingSeqId { private final Logger LOG = LoggerFactory.getLogger(getClass()); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static Path testDir = TEST_UTIL.getDataTestDir("TestWALMonotonicallyIncreasingSeqId"); - private WALFactory wals; + private WALProviderFactory wals; private FileSystem fileSystem; private Configuration walConf; private HRegion region; @@ -121,7 +121,7 @@ public class TestWALMonotonicallyIncreasingSeqId { final Configuration walConf = new Configuration(conf); FSUtils.setRootDir(walConf, tableDir); this.walConf = walConf; - wals = new WALFactory(walConf, "log_" + replicaId); + wals = new WALProviderFactory(walConf, "log_" + replicaId); ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null); HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDefaultRootDirPath(), conf, htd, wals.getWAL(info)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java index 4c19aa0a82..a206e5bf71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java @@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.After; import org.junit.Assert; @@ -164,8 +164,8 @@ public abstract class AbstractTestLogRolling { @Test public void testLogRollOnNothingWritten() throws Exception { final Configuration conf = TEST_UTIL.getConfiguration(); - final WALFactory wals = - new WALFactory(conf, ServerName.valueOf("test.com", 8080, 1).toString()); + final WALProviderFactory wals = + new WALProviderFactory(conf, ServerName.valueOf("test.com", 8080, 
1).toString()); final WAL newLog = wals.getWAL(null); try { // Now roll the log before we write anything. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java index 5098609a69..7e37c96553 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java @@ -25,7 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.junit.After; import org.junit.AfterClass; @@ -43,7 +43,7 @@ public abstract class AbstractTestProtobufLog { protected FileSystem fs; protected Path dir; - protected WALFactory wals; + protected WALProviderFactory wals; @Rule public final TestName currentTest = new TestName(); @@ -52,7 +52,7 @@ public abstract class AbstractTestProtobufLog { public void setUp() throws Exception { fs = TEST_UTIL.getDFSCluster().getFileSystem(); dir = new Path(TEST_UTIL.createRootDir(), currentTest.getMethodName()); - wals = new WALFactory(TEST_UTIL.getConfiguration(), currentTest.getMethodName()); + wals = new WALProviderFactory(TEST_UTIL.getConfiguration(), currentTest.getMethodName()); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java index 3f9040be74..102c4f49c5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java @@ -94,7 +94,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hdfs.DFSInputStream; @@ -124,7 +124,7 @@ public abstract class AbstractTestWALReplay { private Path logDir; private FileSystem fs; private Configuration conf; - private WALFactory wals; + private WALProviderFactory wals; @Rule public final TestName currentTest = new TestName(); @@ -161,7 +161,7 @@ public abstract class AbstractTestWALReplay { if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) { TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true); } - this.wals = new WALFactory(conf, currentTest.getMethodName()); + this.wals = new WALProviderFactory(conf, currentTest.getMethodName()); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRollPeriod.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRollPeriod.java index c0a6162540..5340b15b78 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRollPeriod.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRollPeriod.java @@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -36,7 +36,7 @@ public class TestAsyncLogRollPeriod extends AbstractTestLogRollPeriod { @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration conf = AbstractTestLogRollPeriod.TEST_UTIL.getConfiguration(); - conf.set(WALFactory.WAL_PROVIDER, "asyncfs"); + conf.set(WALProviderFactory.WAL_PROVIDER, "asyncfs"); AbstractTestLogRollPeriod.setUpBeforeClass(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java index 8afae061be..4cf71d88d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.wal.AsyncFSWALProvider; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.junit.BeforeClass; @@ -47,7 +47,7 @@ public class TestAsyncLogRolling extends AbstractTestLogRolling { public static void setUpBeforeClass() throws Exception { Configuration conf = TestAsyncLogRolling.TEST_UTIL.getConfiguration(); conf.setInt(FanOutOneBlockAsyncDFSOutputHelper.ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, 100); - conf.set(WALFactory.WAL_PROVIDER, "asyncfs"); + conf.set(WALProviderFactory.WAL_PROVIDER, "asyncfs"); AbstractTestLogRolling.setUpBeforeClass(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java index 0740954f90..b0a89a5c06 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -54,7 +54,7 @@ public class TestAsyncWALReplay extends AbstractTestWALReplay { GROUP = new NioEventLoopGroup(1, Threads.newDaemonThreadFactory("TestAsyncWALReplay")); CHANNEL_CLASS = NioSocketChannel.class; Configuration conf = AbstractTestWALReplay.TEST_UTIL.getConfiguration(); - conf.set(WALFactory.WAL_PROVIDER, 
"asyncfs"); + conf.set(WALProviderFactory.WAL_PROVIDER, "asyncfs"); AbstractTestWALReplay.setUpBeforeClass(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java index f73b4f159e..83c4cbe2ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.AsyncFSWALProvider; import org.apache.hadoop.hbase.wal.AsyncFSWALProvider.AsyncWriter; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -56,7 +56,7 @@ public class TestCombinedAsyncWriter { private static Class CHANNEL_CLASS; - private static WALFactory WALS; + private static WALProviderFactory WALS; @Rule public final TestName name = new TestName(); @@ -67,7 +67,7 @@ public class TestCombinedAsyncWriter { CHANNEL_CLASS = NioSocketChannel.class; UTIL.startMiniDFSCluster(3); UTIL.getTestFileSystem().mkdirs(UTIL.getDataTestDirOnTestFS()); - WALS = new WALFactory(UTIL.getConfiguration(), TestCombinedAsyncWriter.class.getSimpleName()); + WALS = new WALProviderFactory(UTIL.getConfiguration(), TestCombinedAsyncWriter.class.getSimpleName()); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java index 4effa6de11..7480601284 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.After; import org.junit.AfterClass; @@ -113,7 +113,7 @@ public class TestDurability { @Before public void setUp() { - CONF.set(WALFactory.WAL_PROVIDER, walProvider); + CONF.set(WALProviderFactory.WAL_PROVIDER, walProvider); } @After @@ -123,7 +123,7 @@ public class TestDurability { @Test public void testDurability() throws Exception { - WALFactory wals = new WALFactory(CONF, + WALProviderFactory wals = new WALProviderFactory(CONF, ServerName.valueOf("TestDurability", 16010, System.currentTimeMillis()).toString()); HRegion region = createHRegion(wals, Durability.USE_DEFAULT); WAL wal = region.getWAL(); @@ -187,7 +187,7 @@ public class TestDurability { byte[] col3 = Bytes.toBytes("col3"); // Setting up region - WALFactory wals = new WALFactory(CONF, + WALProviderFactory wals = new WALProviderFactory(CONF, ServerName.valueOf("TestIncrement", 16010, System.currentTimeMillis()).toString()); HRegion region = createHRegion(wals, Durability.USE_DEFAULT); WAL wal = region.getWAL(); @@ -253,7 +253,7 @@ public class TestDurability { byte[] col1 = Bytes.toBytes("col1"); // Setting up region - WALFactory wals = new WALFactory(CONF, 
+ WALProviderFactory wals = new WALProviderFactory(CONF, ServerName .valueOf("testIncrementWithReturnResultsSetToFalse", 16010, System.currentTimeMillis()) .toString()); @@ -275,7 +275,7 @@ public class TestDurability { return p; } - private void verifyWALCount(WALFactory wals, WAL log, int expected) throws Exception { + private void verifyWALCount(WALProviderFactory wals, WAL log, int expected) throws Exception { Path walPath = AbstractFSWALProvider.getCurrentFileName(log); WAL.Reader reader = wals.createReader(FS, walPath); int count = 0; @@ -288,7 +288,7 @@ public class TestDurability { } // lifted from TestAtomicOperation - private HRegion createHRegion(WALFactory wals, Durability durability) throws IOException { + private HRegion createHRegion(WALProviderFactory wals, Durability durability) throws IOException { TableName tableName = TableName.valueOf(name.getMethodName().replaceAll("[^A-Za-z0-9-_]", "_")); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java index 3eed1372a1..549445dc21 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -104,7 +104,7 @@ public class TestLogRollAbort { // the namenode might still try to choose the recently-dead datanode // for a pipeline, so try to a new pipeline multiple times TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 10); - TEST_UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, "filesystem"); + TEST_UTIL.getConfiguration().set(WALProviderFactory.WAL_PROVIDER, "filesystem"); } private Configuration conf; @@ -191,7 +191,7 @@ public class TestLogRollAbort { String logName = ServerName.valueOf("testLogRollAfterSplitStart", 16010, System.currentTimeMillis()).toString(); Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName)); - final WALFactory wals = new WALFactory(conf, logName); + final WALProviderFactory wals = new WALProviderFactory(conf, logName); try { // put some entries in an WAL diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollPeriod.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollPeriod.java index 0d6a1831b0..f92640d032 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollPeriod.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollPeriod.java @@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.wal.WALFactory; +import 
org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -36,7 +36,7 @@ public class TestLogRollPeriod extends AbstractTestLogRollPeriod { @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration conf = AbstractTestLogRollPeriod.TEST_UTIL.getConfiguration(); - conf.set(WALFactory.WAL_PROVIDER, "filesystem"); + conf.set(WALProviderFactory.WAL_PROVIDER, "filesystem"); AbstractTestLogRollPeriod.setUpBeforeClass(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index 8e7cacf0b9..85de3fae29 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -54,7 +54,7 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.FSWALIdentity; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALIdentity; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -91,7 +91,7 @@ public class TestLogRolling extends AbstractTestLogRolling { conf.setInt("dfs.client.block.write.retries", 30); conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2); conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 3); - conf.set(WALFactory.WAL_PROVIDER, "filesystem"); + conf.set(WALProviderFactory.WAL_PROVIDER, "filesystem"); AbstractTestLogRolling.setUpBeforeClass(); } @@ -326,7 +326,8 @@ public class TestLogRolling extends AbstractTestLogRolling { LOG.debug("Reading WAL " + FSUtils.getPath(p)); WAL.Reader reader = null; try { - reader = WALFactory.createReader(fs, p, TEST_UTIL.getConfiguration()); + reader = + WALProviderFactory.getInstance(TEST_UTIL.getConfiguration()).createReader(fs, p, null, true); WAL.Entry entry; while ((entry = reader.next()) != null) { LOG.debug("#" + entry.getKey().getSequenceId() + ": " + entry.getEdit().getCells()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index 819df673c9..0331490aba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.ClassRule; import org.junit.Test; @@ -88,10 +88,10 @@ public class TestLogRollingNoCluster { // The implementation needs to know the 'handler' count. 
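The WAL_PROVIDER key now hangs off WALProviderFactory, and the tests in these hunks select a provider either by name or by class. A small configuration sketch (the provider names and the class form are taken from calls in this patch):

// Sketch: choosing the WAL implementation through the relocated WAL_PROVIDER key.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.FSHLogProvider;
import org.apache.hadoop.hbase.wal.WALProvider;
import org.apache.hadoop.hbase.wal.WALProviderFactory;

public class WalProviderConfigSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // By name, as the log-rolling and replay tests do ("filesystem" or "asyncfs").
    conf.set(WALProviderFactory.WAL_PROVIDER, "filesystem");
    // Or by class (this overrides the string form above), as
    // TestCompactionInDeadRegionServer does.
    conf.setClass(WALProviderFactory.WAL_PROVIDER, FSHLogProvider.class, WALProvider.class);
    return conf;
  }
}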
TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, NUM_THREADS); final Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); - conf.set(WALFactory.WAL_PROVIDER, "filesystem"); + conf.set(WALProviderFactory.WAL_PROVIDER, "filesystem"); FSUtils.setRootDir(conf, dir); conf.set("hbase.regionserver.hlog.writer.impl", HighLatencySyncWriter.class.getName()); - final WALFactory wals = new WALFactory(conf, TestLogRollingNoCluster.class.getName()); + final WALProviderFactory wals = new WALProviderFactory(conf, TestLogRollingNoCluster.class.getName()); final WAL wal = wals.getWAL(null); Appender [] appenders = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java index d429a01fdb..21ab43bd2f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestProtobufLog.java @@ -1,19 +1,12 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. 
*/ package org.apache.hadoop.hbase.regionserver.wal; @@ -22,7 +15,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.wal.FSHLogProvider; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALProvider.Writer; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -36,6 +29,6 @@ public class TestProtobufLog extends AbstractTestProtobufLog { @Override protected Writer createWriter(Path path) throws IOException { - return FSHLogProvider.createWriter(TEST_UTIL.getConfiguration(), fs, path, false); + return WALProviderFactory.getInstance(TEST_UTIL.getConfiguration()).createWALWriter(fs, path, false); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java index 880dea7891..356c6b58b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALIdentity; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.junit.After; @@ -98,7 +98,7 @@ public class TestWALActionsListener { @Test public void testActionListener() throws Exception { DummyWALActionsListener observer = new DummyWALActionsListener(); - final WALFactory wals = new WALFactory(conf, "testActionListener"); + final WALProviderFactory wals = new WALProviderFactory(conf, "testActionListener"); wals.getWALProvider().addWALActionsListener(observer); DummyWALActionsListener laterobserver = new DummyWALActionsListener(); RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(SOME_BYTES)) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALConfiguration.java index 4f03be5b94..9f380cbe0b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALConfiguration.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.junit.Before; import org.junit.ClassRule; @@ -71,7 +71,7 @@ public class TestWALConfiguration { @Before public void before() { - TEST_UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, walProvider); + TEST_UTIL.getConfiguration().set(WALProviderFactory.WAL_PROVIDER, walProvider); } /** @@ -82,7 +82,7 @@ public class TestWALConfiguration { @Test public void testBlocksizeDefaultsToTwiceHDFSBlockSize() throws IOException { TableName tableName 
= TableName.valueOf("test"); - final WALFactory walFactory = new WALFactory(TEST_UTIL.getConfiguration(), this.walProvider); + final WALProviderFactory walFactory = new WALProviderFactory(TEST_UTIL.getConfiguration(), this.walProvider); Configuration conf = TEST_UTIL.getConfiguration(); WALProvider provider = walFactory.getWALProvider(); // Get a WAL instance from the provider. Check its blocksize. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java index 66e19a8cfd..5bac4d2cb6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -41,7 +41,7 @@ public class TestWALReplay extends AbstractTestWALReplay { @BeforeClass public static void setUpBeforeClass() throws Exception { Configuration conf = AbstractTestWALReplay.TEST_UTIL.getConfiguration(); - conf.set(WALFactory.WAL_PROVIDER, "filesystem"); + conf.set(WALProviderFactory.WAL_PROVIDER, "filesystem"); AbstractTestWALReplay.setUpBeforeClass(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java index 1b98518728..8030703006 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WAL.Entry; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.junit.After; import org.junit.AfterClass; @@ -184,7 +184,7 @@ public class SerialReplicationTestBase { protected final void setupWALWriter() throws IOException { logPath = new Path(LOG_DIR, name.getMethodName()); - WRITER = WALFactory.createWALWriter(FS, logPath, UTIL.getConfiguration()); + WRITER = WALProviderFactory.getInstance(UTIL.getConfiguration()).createWALWriter(FS, logPath, false); } protected final void waitUntilReplicationDone(int expectedEntries) throws Exception { @@ -192,7 +192,8 @@ public class SerialReplicationTestBase { @Override public boolean evaluate() throws Exception { - try (WAL.Reader reader = WALFactory.createReader(FS, logPath, UTIL.getConfiguration())) { + try (WAL.Reader reader = + WALProviderFactory.getInstance(UTIL.getConfiguration()).createReader(FS, logPath, null, true)) { int count = 0; while (reader.next() != null) { count++; @@ -225,7 +226,8 @@ public class SerialReplicationTestBase { protected final void checkOrder(int expectedEntries) throws IOException { try (WAL.Reader reader = - WALFactory.createReader(UTIL.getTestFileSystem(), logPath, 
UTIL.getConfiguration())) { + WALProviderFactory.getInstance(UTIL.getConfiguration()).createReader(UTIL.getTestFileSystem(), + logPath, null, true)) { long seqId = -1L; int count = 0; for (Entry entry;;) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java index 0cfd8335f4..adaad39d0e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.FSWALIdentity; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALIdentity; import org.junit.Before; import org.junit.ClassRule; @@ -101,7 +101,7 @@ public class TestReplicationEmptyWALRecovery extends TestReplicationBase { emptyWalPaths.add(emptyWalPath); } - WALFactory factory = new WALFactory(conf1, "empty-wal-recovery"); + WALProviderFactory factory = new WALProviderFactory(conf1, "empty-wal-recovery"); // inject our empty wal into the replication queue, and then roll the original wal, which // enqueues a new wal behind our empty wal. We must roll the wal here as now we use the WAL to // determine if the file being replicated currently is still opened for write, so just inject a diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java index 07e626b3c8..e22244ed5b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WAL.Entry; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; @@ -108,7 +108,8 @@ public class TestSerialReplication extends SerialReplicationTestBase { regionsToSeqId.put(region.getEncodedName(), -1L); regions.stream().map(RegionInfo::getEncodedName).forEach(n -> regionsToSeqId.put(n, -1L)); try (WAL.Reader reader = - WALFactory.createReader(UTIL.getTestFileSystem(), logPath, UTIL.getConfiguration())) { + WALProviderFactory.getInstance(UTIL.getConfiguration()).createReader(UTIL.getTestFileSystem(), + logPath, null, true)) { int count = 0; for (Entry entry;;) { entry = reader.next(); @@ -169,7 +170,8 @@ public class TestSerialReplication extends SerialReplicationTestBase { regionsToSeqId.put(region.getEncodedName(), -1L); regions.stream().map(RegionInfo::getEncodedName).forEach(n -> regionsToSeqId.put(n, -1L)); try (WAL.Reader reader = - WALFactory.createReader(UTIL.getTestFileSystem(), logPath, UTIL.getConfiguration())) { + WALProviderFactory.getInstance(UTIL.getConfiguration()).createReader(UTIL.getTestFileSystem(), + logPath, null, true)) { int count = 0; for (Entry entry;;) { entry = 
reader.next(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java index 42adab60b5..ee2a80a7a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WAL.Reader; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; @@ -124,7 +124,8 @@ public class TestSyncReplicationActive extends SyncReplicationTestBase { Assert.assertTrue(files.length > 0); for (FileStatus file : files) { try ( - Reader reader = WALFactory.createReader(fs2, file.getPath(), utility.getConfiguration())) { + Reader reader = WALProviderFactory.getInstance(utility.getConfiguration()).createReader(fs2, + file.getPath(), null, true)) { Entry entry = reader.next(); Assert.assertTrue(entry != null); while (entry != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java index 594aac0b5c..8fb93be82a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.replication.TestReplicationEndpoint; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.RegionGroupingProvider; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -36,7 +36,7 @@ public class TestReplicationEndpointWithMultipleAsyncWAL extends TestReplication @BeforeClass public static void setUpBeforeClass() throws Exception { - conf1.set(WALFactory.WAL_PROVIDER, "multiwal"); + conf1.set(WALProviderFactory.WAL_PROVIDER, "multiwal"); conf1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "asyncfs"); TestReplicationEndpoint.setUpBeforeClass(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java index 68b41be457..7806457724 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleWAL.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.replication.TestReplicationEndpoint; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import 
org.apache.hadoop.hbase.wal.RegionGroupingProvider; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -36,7 +36,7 @@ public class TestReplicationEndpointWithMultipleWAL extends TestReplicationEndpo @BeforeClass public static void setUpBeforeClass() throws Exception { - conf1.set(WALFactory.WAL_PROVIDER, "multiwal"); + conf1.set(WALProviderFactory.WAL_PROVIDER, "multiwal"); conf1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "filesystem"); TestReplicationEndpoint.setUpBeforeClass(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java index 4685f24c0d..032b5767ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.replication.TestReplicationKillMasterRSCompressed import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.RegionGroupingProvider; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -37,7 +37,7 @@ public class TestReplicationKillMasterRSCompressedWithMultipleAsyncWAL extends @BeforeClass public static void setUpBeforeClass() throws Exception { - conf1.set(WALFactory.WAL_PROVIDER, "multiwal"); + conf1.set(WALProviderFactory.WAL_PROVIDER, "multiwal"); conf1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "asyncfs"); TestReplicationKillMasterRSCompressed.setUpBeforeClass(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java index 82fef3aa58..9139aa7d1e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationKillMasterRSCompressedWithMultipleWAL.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.replication.TestReplicationKillMasterRSCompressed import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.RegionGroupingProvider; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -37,7 +37,7 @@ public class TestReplicationKillMasterRSCompressedWithMultipleWAL extends @BeforeClass public static void setUpBeforeClass() throws Exception { - conf1.set(WALFactory.WAL_PROVIDER, "multiwal"); + conf1.set(WALProviderFactory.WAL_PROVIDER, "multiwal"); 
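For orientation, the multiwal test setups in these hunks all pair the renamed WALProviderFactory.WAL_PROVIDER key with RegionGroupingProvider.DELEGATE_PROVIDER to pick the provider that the region-grouping WAL delegates to. Below is a minimal sketch of that configuration outside a test; the two keys, the provider names, and the WALProviderFactory constructor and getWAL call follow this patch, while the class name, factory identity, and method shape are illustrative placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.wal.RegionGroupingProvider;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALProviderFactory;

public class MultiwalConfigSketch {
  // Builds a region-grouping ("multiwal") factory whose groups delegate to the
  // filesystem-based provider, mirroring the conf1.set(...) calls in these tests.
  static WAL walForRegion(RegionInfo regionInfo) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set(WALProviderFactory.WAL_PROVIDER, "multiwal");
    conf.set(RegionGroupingProvider.DELEGATE_PROVIDER, "filesystem"); // "asyncfs" in the async variants
    // "sketch-wals" is a placeholder factory identity; the tests pass a server or method name.
    WALProviderFactory walFactory = new WALProviderFactory(conf, "sketch-wals");
    return walFactory.getWAL(regionInfo);
  }
}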
conf1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "filesystem"); TestReplicationKillMasterRSCompressed.setUpBeforeClass(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java index 1451499347..3688871bba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleAsyncWAL.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.replication.TestReplicationSyncUpTool; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.RegionGroupingProvider; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -37,7 +37,7 @@ public class TestReplicationSyncUpToolWithMultipleAsyncWAL extends TestReplicati @BeforeClass public static void setUpBeforeClass() throws Exception { - conf1.set(WALFactory.WAL_PROVIDER, "multiwal"); + conf1.set(WALProviderFactory.WAL_PROVIDER, "multiwal"); conf1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "asyncfs"); TestReplicationBase.setUpBeforeClass(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java index e487039dcd..f17353fd93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/multiwal/TestReplicationSyncUpToolWithMultipleWAL.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.replication.TestReplicationSyncUpTool; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.wal.RegionGroupingProvider; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -37,7 +37,7 @@ public class TestReplicationSyncUpToolWithMultipleWAL extends TestReplicationSyn @BeforeClass public static void setUpBeforeClass() throws Exception { - conf1.set(WALFactory.WAL_PROVIDER, "multiwal"); + conf1.set(WALProviderFactory.WAL_PROVIDER, "multiwal"); conf1.set(RegionGroupingProvider.DELEGATE_PROVIDER, "filesystem"); TestReplicationBase.setUpBeforeClass(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRaceWhenCreatingReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRaceWhenCreatingReplicationSource.java index bd800a841f..3a39cf1df4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRaceWhenCreatingReplicationSource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRaceWhenCreatingReplicationSource.java @@ -44,7 +44,7 @@ import 
org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WAL.Entry; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALProvider; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -131,14 +131,14 @@ public class TestRaceWhenCreatingReplicationSource { @BeforeClass public static void setUpBeforeClass() throws Exception { - UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, "multiwal"); + UTIL.getConfiguration().set(WALProviderFactory.WAL_PROVIDER, "multiwal"); // make sure that we will create a new group for the table UTIL.getConfiguration().setInt("hbase.wal.regiongrouping.numgroups", 8); UTIL.startMiniCluster(3); Path dir = UTIL.getDataTestDirOnTestFS(); FS = UTIL.getTestFileSystem(); LOG_PATH = new Path(dir, "replicated"); - WRITER = WALFactory.createWALWriter(FS, LOG_PATH, UTIL.getConfiguration()); + WRITER = WALProviderFactory.getInstance(UTIL.getConfiguration()).createWALWriter(FS, LOG_PATH, false); UTIL.getAdmin().addReplicationPeer(PEER_ID, ReplicationPeerConfig.newBuilder().setClusterKey("127.0.0.1:2181:/hbase") .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName()).build(), @@ -184,7 +184,8 @@ public class TestRaceWhenCreatingReplicationSource { @Override public boolean evaluate() throws Exception { - try (WAL.Reader reader = WALFactory.createReader(FS, LOG_PATH, UTIL.getConfiguration())) { + try (WAL.Reader reader = WALProviderFactory.getInstance(UTIL.getConfiguration()).createReader(FS, + LOG_PATH, null, true)) { return reader.next() != null; } catch (IOException e) { return false; @@ -196,7 +197,8 @@ public class TestRaceWhenCreatingReplicationSource { return "Replication has not catched up"; } }); - try (WAL.Reader reader = WALFactory.createReader(FS, LOG_PATH, UTIL.getConfiguration())) { + try (WAL.Reader reader = + WALProviderFactory.getInstance(UTIL.getConfiguration()).createReader(FS, LOG_PATH, null, true)) { Cell cell = reader.next().getEdit().getCells().get(0); assertEquals(1, Bytes.toInt(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); assertArrayEquals(CF, CellUtil.cloneFamily(cell)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java index 6322f7903b..ab9f71a2ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java @@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.FSWALIdentity; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALIdentity; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider; @@ -113,8 +113,8 @@ public class TestReplicationSource { Path logPath = new Path(logDir, "log"); if (!FS.exists(logDir)) FS.mkdirs(logDir); if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir); - WALProvider.Writer writer = WALFactory.createWALWriter(FS, logPath, - TEST_UTIL.getConfiguration()); + WALProvider.Writer writer = + 
WALProviderFactory.getInstance(TEST_UTIL.getConfiguration()).createWALWriter(FS, logPath, false); for(int i = 0; i < 3; i++) { byte[] b = Bytes.toBytes(Integer.toString(i)); KeyValue kv = new KeyValue(b,b,b); @@ -127,7 +127,8 @@ public class TestReplicationSource { } writer.close(); - WAL.Reader reader = WALFactory.createReader(FS, logPath, TEST_UTIL.getConfiguration()); + WAL.Reader reader = + WALProviderFactory.getInstance(TEST_UTIL.getConfiguration()).createReader(FS, logPath, null, true); WAL.Entry entry = reader.next(); assertNotNull(entry); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index f816ea694e..9a172da790 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -88,7 +88,7 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.FSWALIdentity; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALIdentity; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider; @@ -199,8 +199,8 @@ public abstract class TestReplicationSourceManager { remoteLogDir = utility.getDataTestDir(ReplicationUtils.REMOTE_WAL_DIR_NAME); replication = new Replication(); DummyServer dummyServer = new DummyServer(); - WALFactory factory = - new WALFactory(conf, dummyServer.getServerName().toString()); + WALProviderFactory factory = + new WALProviderFactory(conf, dummyServer.getServerName().toString()); replication.initialize(dummyServer, factory.getWALProvider()); managerOfCluster = getManagerFromCluster(); if (managerOfCluster != null) { @@ -288,8 +288,8 @@ public abstract class TestReplicationSourceManager { WALEdit edit = new WALEdit(); edit.add(kv); - WALFactory wals = - new WALFactory(utility.getConfiguration(), URLEncoder.encode("regionserver:60020", "UTF8")); + WALProviderFactory wals = + new WALProviderFactory(utility.getConfiguration(), URLEncoder.encode("regionserver:60020", "UTF8")); ReplicationSourceManager replicationManager = replication.getReplicationManager(); wals.getWALProvider() .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java index 4f2f163ccd..14da1a8c42 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java @@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.wal.FSWALIdentity; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALProviderFactory; import org.apache.hadoop.hbase.wal.WALIdentity; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider; @@ -128,7 +128,7 @@ public class 
TestWALEntryStream { public void setUp() throws Exception { walQueue = new PriorityBlockingQueue<>(); pathWatcher = new PathWatcher(); - WALFactory wals = new WALFactory(CONF, tn.getMethodName()); + WALProviderFactory wals = new WALProviderFactory(CONF, tn.getMethodName()); walProvider = wals.getWALProvider(); walProvider.addWALActionsListener(pathWatcher); log = wals.getWAL(info); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java index e2cf6f5af6..b3c0ad36a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java @@ -43,9 +43,11 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.replication.regionserver.FSWALEntryStream; import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; import org.apache.hadoop.hbase.replication.regionserver.WALEntryStream; +import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,7 +79,9 @@ import org.slf4j.LoggerFactory; */ @InterfaceAudience.Private public class IOTestProvider implements WALProvider { + private static final Logger LOG = LoggerFactory.getLogger(IOTestProvider.class); + /** delegate provider for WAL creation/roll/close */ private static final String ALLOWED_OPERATIONS = "hbase.wal.iotestprovider.operations"; private enum AllowedOperations { @@ -88,7 +92,7 @@ public class IOTestProvider implements WALProvider { none } - private WALFactory factory; + private WALProviderFactory factory; private Configuration conf; @@ -98,10 +102,7 @@ public class IOTestProvider implements WALProvider { protected AtomicBoolean initialized = new AtomicBoolean(false); private List listeners = new ArrayList<>(); - - private Path oldLogDir; - - private Path rootDir; + private WALProvider delegateProvider; /** * @param factory factory that made us, identity used for FS layout. may not be null @@ -110,15 +111,14 @@ public class IOTestProvider implements WALProvider { * null */ @Override - public void init(WALFactory factory, Configuration conf, String providerId) throws IOException { + public void init(WALProviderFactory factory, Configuration conf, String providerId) throws IOException { if (!initialized.compareAndSet(false, true)) { throw new IllegalStateException("WALProvider.init should only be called once."); } this.factory = factory; this.conf = conf; this.providerId = providerId != null ? 
providerId : DEFAULT_PROVIDER_ID; - rootDir = FSUtils.getRootDir(conf); - oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME); + this.delegateProvider = WALProviderFactory.createProvider(WALProviderFactory.Providers.defaultProvider.clazz); } @Override @@ -312,21 +312,25 @@ public class IOTestProvider implements WALProvider { @Override public WALIdentity createWalIdentity(ServerName serverName, String walName, boolean isArchive) { - Path walPath; - if (isArchive) { - walPath = new Path(oldLogDir, walName); - } else { - Path logDir = - new Path(rootDir, AbstractFSWALProvider.getWALDirectoryName(serverName.toString())); - walPath = new Path(logDir, walName); - } - return new FSWALIdentity(walPath); + return delegateProvider.createWalIdentity(serverName, walName, isArchive); } @Override public WALIdentity locateWalId(WALIdentity wal, Server server, List deadRegionServers) throws IOException { - return wal; + return delegateProvider.locateWalId(wal, server, deadRegionServers); + } + + @Override + public Writer createWriter(Configuration conf, FileSystem fs, Path path, boolean overwritable) + throws IOException { + return delegateProvider.createWriter(conf, fs, path, overwritable); + } + + @Override + public Reader createReader(FileSystem fs, Path path, CancelableProgressable reporter, + boolean allowCustom) throws IOException { + return delegateProvider.createReader(fs, path, reporter, allowCustom); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java index b24daa1299..6677acdf84 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestBoundedRegionGroupingStrategy.java @@ -21,7 +21,7 @@ import static org.apache.hadoop.hbase.wal.BoundedGroupingStrategy.DEFAULT_NUM_RE import static org.apache.hadoop.hbase.wal.BoundedGroupingStrategy.NUM_REGION_GROUPS; import static org.apache.hadoop.hbase.wal.RegionGroupingProvider.DELEGATE_PROVIDER; import static org.apache.hadoop.hbase.wal.RegionGroupingProvider.REGION_GROUPING_STRATEGY; -import static org.apache.hadoop.hbase.wal.WALFactory.WAL_PROVIDER; +import static org.apache.hadoop.hbase.wal.WALProviderFactory.WAL_PROVIDER; import static org.junit.Assert.assertEquals; import java.io.IOException; @@ -179,13 +179,13 @@ public class TestBoundedRegionGroupingStrategy { @Test public void setMembershipDedups() throws IOException { final int temp = CONF.getInt(NUM_REGION_GROUPS, DEFAULT_NUM_REGION_GROUPS); - WALFactory wals = null; + WALProviderFactory wals = null; try { CONF.setInt(NUM_REGION_GROUPS, temp * 4); // Set HDFS root directory for storing WAL FSUtils.setRootDir(CONF, TEST_UTIL.getDataTestDirOnTestFS()); - wals = new WALFactory(CONF, "setMembershipDedups"); + wals = new WALProviderFactory(CONF, "setMembershipDedups"); Set seen = new HashSet<>(temp * 4); int count = 0; // we know that this should see one of the wals more than once diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java index 3205d7328e..ddcb906918 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestFSHLogProvider.java @@ -199,8 +199,8 @@ public class TestFSHLogProvider { scopes2.put(fam, 0); 
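The IOTestProvider overrides just above spell out the two per-provider entry points this change standardizes on, Writer createWriter(Configuration, FileSystem, Path, boolean) and Reader createReader(FileSystem, Path, CancelableProgressable, boolean), and the converted call sites reach them through WALProviderFactory.getInstance(conf) rather than the old static WALFactory helpers. A rough sketch of that singleton path follows, assuming the caller already holds a Configuration, FileSystem, and Path; the helper class and method names are placeholders, and only the factory calls themselves follow the patch.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALProvider;
import org.apache.hadoop.hbase.wal.WALProviderFactory;

public class SingletonWalIoSketch {
  // Opens a writer through whatever provider the configuration selects;
  // the converted tests pass false for the trailing overwritable flag.
  static WALProvider.Writer openWriter(Configuration conf, FileSystem fs, Path walFile)
      throws IOException {
    return WALProviderFactory.getInstance(conf).createWALWriter(fs, walFile, false);
  }

  // Reads a WAL file back and counts its entries; reporter == null and
  // allowCustom == true match the createReader call sites in this patch.
  static int countEntries(Configuration conf, FileSystem fs, Path walFile) throws IOException {
    int count = 0;
    try (WAL.Reader reader =
        WALProviderFactory.getInstance(conf).createReader(fs, walFile, null, true)) {
      while (reader.next() != null) {
        count++;
      }
    }
    return count;
  }
}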
} Configuration localConf = new Configuration(conf); - localConf.set(WALFactory.WAL_PROVIDER, FSHLogProvider.class.getName()); - WALFactory wals = new WALFactory(localConf, currentTest.getMethodName()); + localConf.set(WALProviderFactory.WAL_PROVIDER, FSHLogProvider.class.getName()); + WALProviderFactory wals = new WALProviderFactory(localConf, currentTest.getMethodName()); try { RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); RegionInfo hri2 = RegionInfoBuilder.newBuilder(htd2.getTableName()).build(); @@ -280,8 +280,8 @@ public class TestFSHLogProvider { scopes2.put(fam, 0); } Configuration localConf = new Configuration(conf); - localConf.set(WALFactory.WAL_PROVIDER, FSHLogProvider.class.getName()); - WALFactory wals = new WALFactory(localConf, currentTest.getMethodName()); + localConf.set(WALProviderFactory.WAL_PROVIDER, FSHLogProvider.class.getName()); + WALProviderFactory wals = new WALProviderFactory(localConf, currentTest.getMethodName()); try { WAL wal = wals.getWAL(null); assertEquals(0, AbstractFSWALProvider.getNumRolledLogFiles(wal)); @@ -354,8 +354,8 @@ public class TestFSHLogProvider { @Test public void setMembershipDedups() throws IOException { Configuration localConf = new Configuration(conf); - localConf.set(WALFactory.WAL_PROVIDER, FSHLogProvider.class.getName()); - WALFactory wals = new WALFactory(localConf, currentTest.getMethodName()); + localConf.set(WALProviderFactory.WAL_PROVIDER, FSHLogProvider.class.getName()); + WALProviderFactory wals = new WALProviderFactory(localConf, currentTest.getMethodName()); try { final Set seen = new HashSet<>(1); assertTrue("first attempt to add WAL from default provider should work.", diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java index 26ff11836f..396828d54a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java @@ -26,8 +26,11 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ForkJoinPool; import java.util.concurrent.Future; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; +import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Threads; @@ -65,6 +68,13 @@ public class TestRaceBetweenGetWALAndGetWALs { @Override protected void doInit(Configuration conf) throws IOException { } + + @Override + public Writer createWriter(Configuration conf, FileSystem fs, Path path, boolean overwritable) + throws IOException { + return FSHLogProvider.createWriter(conf, fs, path, overwritable, + WALUtil.getWALBlockSize(conf, fs, path)); + } } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java index 81938065bb..c1afa1efba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSecureWAL.java @@ -104,7 +104,7 @@ public class TestSecureWAL { @Before 
public void setUp() { - TEST_UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, walProvider); + TEST_UTIL.getConfiguration().set(WALProviderFactory.WAL_PROVIDER, walProvider); } @Test @@ -118,8 +118,8 @@ public class TestSecureWAL { final byte[] family = Bytes.toBytes("family"); final byte[] value = Bytes.toBytes("Test value"); FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); - final WALFactory wals = - new WALFactory(TEST_UTIL.getConfiguration(), tableName.getNameAsString()); + final WALProviderFactory wals = + new WALProviderFactory(TEST_UTIL.getConfiguration(), tableName.getNameAsString()); // Write the WAL final WAL wal = wals.getWAL(regionInfo); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java index 8189cef081..271254eb37 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java @@ -70,7 +70,7 @@ public class TestSyncReplicationWALProvider { private static RegionInfo REGION_NO_REP = RegionInfoBuilder.newBuilder(TABLE_NO_REP).build(); - private static WALFactory FACTORY; + private static WALProviderFactory FACTORY; public static final class InfoProvider implements SyncReplicationPeerInfoProvider { @@ -93,7 +93,7 @@ public class TestSyncReplicationWALProvider { @BeforeClass public static void setUpBeforeClass() throws Exception { UTIL.startMiniDFSCluster(3); - FACTORY = new WALFactory(UTIL.getConfiguration(), "test"); + FACTORY = new WALProviderFactory(UTIL.getConfiguration(), "test"); ((SyncReplicationWALProvider) FACTORY.getWALProvider()).setPeerInfoProvider(new InfoProvider()); UTIL.getTestFileSystem().mkdirs(new Path(REMOTE_WAL_DIR, PEER_ID)); } @@ -162,11 +162,11 @@ public class TestSyncReplicationWALProvider { WAL walNoRep = FACTORY.getWAL(REGION_NO_REP); assertThat(walNoRep, not(instanceOf(DualAsyncFSWAL.class))); DualAsyncFSWAL wal = (DualAsyncFSWAL) FACTORY.getWAL(REGION); - assertEquals(2, FACTORY.getWALs().size()); + assertEquals(2, FACTORY.getWALProvider().getWALs().size()); testReadWrite(wal); SyncReplicationWALProvider walProvider = (SyncReplicationWALProvider) FACTORY.getWALProvider(); walProvider.peerSyncReplicationStateChange(PEER_ID, SyncReplicationState.ACTIVE, SyncReplicationState.DOWNGRADE_ACTIVE, 1); - assertEquals(1, FACTORY.getWALs().size()); + assertEquals(1, FACTORY.getWALProvider().getWALs().size()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java index 3e73300077..7515186c79 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.wal; -import static org.apache.hadoop.hbase.wal.WALFactory.META_WAL_PROVIDER; -import static org.apache.hadoop.hbase.wal.WALFactory.WAL_PROVIDER; +import static org.apache.hadoop.hbase.wal.WALProviderFactory.META_WAL_PROVIDER; +import static org.apache.hadoop.hbase.wal.WALProviderFactory.WAL_PROVIDER; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -62,7 +62,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import 
org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.wal.WALFactory.Providers; +import org.apache.hadoop.hbase.wal.WALProviderFactory.Providers; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -98,7 +98,7 @@ public class TestWALFactory { protected FileSystem fs; protected Path dir; - protected WALFactory wals; + protected WALProviderFactory wals; private ServerName currentServername; @Rule @@ -109,7 +109,7 @@ public class TestWALFactory { fs = cluster.getFileSystem(); dir = new Path(hbaseDir, currentTest.getMethodName()); this.currentServername = ServerName.valueOf(currentTest.getMethodName(), 16010, 1); - wals = new WALFactory(conf, this.currentServername.toString()); + wals = new WALProviderFactory(conf, this.currentServername.toString()); } @After @@ -165,7 +165,7 @@ public class TestWALFactory { @Test public void canCloseSingleton() throws IOException { - WALFactory.getInstance(conf).close(); + WALProviderFactory.getInstance(conf).close(); } /** @@ -681,55 +681,55 @@ public class TestWALFactory { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); // if providers are not set but enable SyncReplicationWALProvider by default for master node // with not only system tables - WALFactory walFactory = new WALFactory(conf, this.currentServername.toString()); + WALProviderFactory walFactory = new WALProviderFactory(conf, this.currentServername.toString()); assertEquals(SyncReplicationWALProvider.class, walFactory.getWALProvider().getClass()); WALProvider wrappedWALProvider = ((SyncReplicationWALProvider) walFactory.getWALProvider()) .getWrappedProvider(); assertEquals(wrappedWALProvider.getClass(), walFactory.getMetaProvider().getClass()); // if providers are not set and do not enable SyncReplicationWALProvider - walFactory = new WALFactory(conf, this.currentServername.toString(), false); + walFactory = new WALProviderFactory(conf, this.currentServername.toString(), false); assertEquals(walFactory.getWALProvider().getClass(), walFactory.getMetaProvider().getClass()); } @Test public void testOnlySetWALProvider() throws IOException { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); - conf.set(WAL_PROVIDER, WALFactory.Providers.multiwal.name()); - WALFactory walFactory = new WALFactory(conf, this.currentServername.toString()); + conf.set(WAL_PROVIDER, WALProviderFactory.Providers.multiwal.name()); + WALProviderFactory walFactory = new WALProviderFactory(conf, this.currentServername.toString()); WALProvider wrappedWALProvider = ((SyncReplicationWALProvider) walFactory.getWALProvider()) .getWrappedProvider(); assertEquals(SyncReplicationWALProvider.class, walFactory.getWALProvider().getClass()); // class of WALProvider and metaWALProvider are the same when metaWALProvider is not set - assertEquals(WALFactory.Providers.multiwal.clazz, wrappedWALProvider.getClass()); - assertEquals(WALFactory.Providers.multiwal.clazz, walFactory.getMetaProvider().getClass()); + assertEquals(WALProviderFactory.Providers.multiwal.clazz, wrappedWALProvider.getClass()); + assertEquals(WALProviderFactory.Providers.multiwal.clazz, walFactory.getMetaProvider().getClass()); } @Test public void testOnlySetMetaWALProvider() throws IOException { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); - conf.set(META_WAL_PROVIDER, WALFactory.Providers.asyncfs.name()); - WALFactory walFactory = new 
WALFactory(conf, this.currentServername.toString()); + conf.set(META_WAL_PROVIDER, WALProviderFactory.Providers.asyncfs.name()); + WALProviderFactory walFactory = new WALProviderFactory(conf, this.currentServername.toString()); WALProvider wrappedWALProvider = ((SyncReplicationWALProvider) walFactory.getWALProvider()) .getWrappedProvider(); assertEquals(SyncReplicationWALProvider.class, walFactory.getWALProvider().getClass()); - assertEquals(WALFactory.Providers.defaultProvider.clazz, wrappedWALProvider.getClass()); - assertEquals(WALFactory.Providers.asyncfs.clazz, walFactory.getMetaProvider().getClass()); + assertEquals(WALProviderFactory.Providers.defaultProvider.clazz, wrappedWALProvider.getClass()); + assertEquals(WALProviderFactory.Providers.asyncfs.clazz, walFactory.getMetaProvider().getClass()); } @Test public void testDefaultProvider() throws IOException { final Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); // AsyncFSWal is the default, we should be able to request any WAL. - final WALFactory normalWalFactory = new WALFactory(conf, this.currentServername.toString()); + final WALProviderFactory normalWalFactory = new WALProviderFactory(conf, this.currentServername.toString()); Class fshLogProvider = normalWalFactory.getProviderClass( - WALFactory.WAL_PROVIDER, Providers.filesystem.name()); + WALProviderFactory.WAL_PROVIDER, Providers.filesystem.name()); assertEquals(Providers.filesystem.clazz, fshLogProvider); // Imagine a world where MultiWAL is the default - final WALFactory customizedWalFactory = new WALFactory( + final WALProviderFactory customizedWalFactory = new WALProviderFactory( conf, this.currentServername.toString()) { @Override Providers getDefaultProvider() { @@ -738,17 +738,17 @@ public class TestWALFactory { }; // If we don't specify a WALProvider, we should get the default implementation. 
Class multiwalProviderClass = customizedWalFactory.getProviderClass( - WALFactory.WAL_PROVIDER, Providers.multiwal.name()); + WALProviderFactory.WAL_PROVIDER, Providers.multiwal.name()); assertEquals(Providers.multiwal.clazz, multiwalProviderClass); } @Test public void testCustomProvider() throws IOException { final Configuration config = new Configuration(TEST_UTIL.getConfiguration()); - config.set(WALFactory.WAL_PROVIDER, IOTestProvider.class.getName()); - final WALFactory walFactory = new WALFactory(config, this.currentServername.toString()); + config.set(WALProviderFactory.WAL_PROVIDER, IOTestProvider.class.getName()); + final WALProviderFactory walFactory = new WALProviderFactory(config, this.currentServername.toString()); Class walProvider = walFactory.getProviderClass( - WALFactory.WAL_PROVIDER, Providers.filesystem.name()); + WALProviderFactory.WAL_PROVIDER, Providers.filesystem.name()); assertEquals(IOTestProvider.class, walProvider); WALProvider metaWALProvider = walFactory.getMetaProvider(); assertEquals(IOTestProvider.class, metaWALProvider.getClass()); @@ -757,10 +757,10 @@ public class TestWALFactory { @Test public void testCustomMetaProvider() throws IOException { final Configuration config = new Configuration(TEST_UTIL.getConfiguration()); - config.set(WALFactory.META_WAL_PROVIDER, IOTestProvider.class.getName()); - final WALFactory walFactory = new WALFactory(config, this.currentServername.toString()); + config.set(WALProviderFactory.META_WAL_PROVIDER, IOTestProvider.class.getName()); + final WALProviderFactory walFactory = new WALProviderFactory(config, this.currentServername.toString()); Class walProvider = walFactory.getProviderClass( - WALFactory.WAL_PROVIDER, Providers.filesystem.name()); + WALProviderFactory.WAL_PROVIDER, Providers.filesystem.name()); assertEquals(Providers.filesystem.clazz, walProvider); WALProvider metaWALProvider = walFactory.getMetaProvider(); assertEquals(IOTestProvider.class, metaWALProvider.getClass()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java index b20b3a5902..dfc7d89b80 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALMethods.java @@ -106,7 +106,7 @@ public class TestWALMethods { final Configuration walConf = new Configuration(util.getConfiguration()); FSUtils.setRootDir(walConf, regiondir); - (new WALFactory(walConf, "dummyLogName")).getWAL(null); + (new WALProviderFactory(walConf, "dummyLogName")).getWAL(null); NavigableSet files = WALSplitter.getSplitEditFilesSorted(fs, regiondir); assertEquals(7, files.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java index 7d7896c3f9..f070670b16 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALOpenAfterDNRollingStart.java @@ -122,8 +122,8 @@ public class TestWALOpenAfterDNRollingStart { currentFile = new Path(oldLogDir, currentFile.getName()); } // if the log is not rolled, then we can never open this wal forever. 
- try (WAL.Reader reader = WALFactory.createReader(TEST_UTIL.getTestFileSystem(), currentFile, - TEST_UTIL.getConfiguration())) { + try (WAL.Reader reader = WALProviderFactory.getInstance(TEST_UTIL.getConfiguration()) + .createReader(TEST_UTIL.getTestFileSystem(), currentFile, null, true)) { reader.next(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java index bc21a65b16..77b5a75acc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java @@ -90,7 +90,7 @@ public class TestWALReaderOnSecureWAL { FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir()); } - private Path writeWAL(final WALFactory wals, final String tblName, boolean offheap) throws IOException { + private Path writeWAL(final WALProviderFactory wals, final String tblName, boolean offheap) throws IOException { Configuration conf = TEST_UTIL.getConfiguration(); String clsName = conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName()); conf.setClass(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, SecureWALCellCodec.class, @@ -152,7 +152,7 @@ public class TestWALReaderOnSecureWAL { WALProvider.AsyncWriter.class); conf.setBoolean(WAL_ENCRYPTION, true); FileSystem fs = TEST_UTIL.getTestFileSystem(); - final WALFactory wals = new WALFactory(conf, currentTest.getMethodName()); + final WALProviderFactory wals = new WALProviderFactory(conf, currentTest.getMethodName()); Path walPath = writeWAL(wals, currentTest.getMethodName(), offheap); // Insure edits are not plaintext @@ -195,7 +195,7 @@ public class TestWALReaderOnSecureWAL { WALProvider.Writer.class); conf.setBoolean(WAL_ENCRYPTION, false); FileSystem fs = TEST_UTIL.getTestFileSystem(); - final WALFactory wals = new WALFactory(conf, ServerName + final WALProviderFactory wals = new WALProviderFactory(conf, ServerName .valueOf(currentTest.getMethodName(), 16010, System.currentTimeMillis()).toString()); Path walPath = writeWAL(wals, currentTest.getMethodName(), false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALRootDir.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALRootDir.java index 40fad6ad52..4ca93aceb0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALRootDir.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALRootDir.java @@ -64,7 +64,7 @@ public class TestWALRootDir { private static final byte [] family = Bytes.toBytes("column"); private static Path walRootDir; private static Path rootDir; - private static WALFactory wals; + private static WALProviderFactory wals; @Before public void setUp() throws Exception { @@ -90,7 +90,7 @@ public class TestWALRootDir { @Test public void testWALRootDir() throws Exception { RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build(); - wals = new WALFactory(conf, "testWALRootDir"); + wals = new WALProviderFactory(conf, "testWALRootDir"); WAL log = wals.getWAL(regionInfo); assertEquals(1, getWALFiles(walFs, walRootDir).size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java index e6644f07dd..5e73356127 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java @@ -181,7 +181,7 @@ public class TestWALSplit { @Rule public TestName name = new TestName(); - private WALFactory wals = null; + private WALProviderFactory wals = null; @Before public void setUp() throws Exception { @@ -197,7 +197,7 @@ public class TestWALSplit { REGIONS.clear(); Collections.addAll(REGIONS, "bbb", "ccc"); InstrumentedLogWriter.activateFailure = false; - wals = new WALFactory(conf, name.getMethodName()); + wals = new WALProviderFactory(conf, name.getMethodName()); WALDIR = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(ServerName.valueOf(name.getMethodName(), 16010, System.currentTimeMillis()).toString())); @@ -423,7 +423,7 @@ public class TestWALSplit { FILENAME_BEING_SPLIT, TMPDIRNAME, conf); String parentOfParent = p.getParent().getParent().getName(); assertEquals(parentOfParent, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName()); - WALFactory.createRecoveredEditsWriter(fs, p, conf).close(); + wals.createWALWriter(fs, p, false).close(); } private void useDifferentDFSClient() throws IOException { @@ -632,7 +632,7 @@ public class TestWALSplit { LOG.debug("no previous CORRUPTDIR to clean."); } // change to the faulty reader - wals = new WALFactory(conf, name.getMethodName()); + wals = new WALProviderFactory(conf, name.getMethodName()); generateWALs(-1); // Our reader will render all of these files corrupt. final Set walDirContents = new HashSet<>(); @@ -687,7 +687,7 @@ public class TestWALSplit { assertEquals(1, splitLog.length); int actualCount = 0; - Reader in = wals.createReader(fs, splitLog[0]); + Reader in = wals.createReader(fs, splitLog[0], null, true); @SuppressWarnings("unused") Entry entry; while ((entry = in.next()) != null) ++actualCount; @@ -1151,7 +1151,7 @@ public class TestWALSplit { @Override protected Writer createWriter(Path logfile) throws IOException { - Writer writer = wals.createRecoveredEditsWriter(this.walFS, logfile); + Writer writer = wals.createWALWriter(this.walFS, logfile, false); // After creating writer, simulate region's // replayRecoveredEditsIfAny() which gets SplitEditFiles of this // region and delete them, excluding files with '.temp' suffix. 
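In the TestWALSplit hunks above, the dedicated recovered-edits entry points (WALFactory.createRecoveredEditsWriter and wals.createRecoveredEditsWriter) collapse into the factory instance's createWALWriter(fs, path, false), and readers are opened with the extra (null, true) reporter/allowCustom arguments. Below is a small sketch of the read side against an explicitly constructed factory, reduced to comparing the entry counts of two logs; the class and method names are placeholders, and the comparison is deliberately simpler than the test's logsAreEqual.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALProviderFactory;

public class SplitLogReadSketch {
  // Opens both files through one constructed factory, the way the converted
  // TestWALSplit helpers do, and reports whether they hold the same number of entries.
  static boolean sameEntryCount(Configuration conf, FileSystem fs, Path p1, Path p2)
      throws IOException {
    WALProviderFactory wals = new WALProviderFactory(conf, "sketch-split");
    try (WAL.Reader in1 = wals.createReader(fs, p1, null, true);
        WAL.Reader in2 = wals.createReader(fs, p2, null, true)) {
      WAL.Entry e1;
      WAL.Entry e2;
      do {
        e1 = in1.next();
        e2 = in2.next();
      } while (e1 != null && e2 != null);
      // Both readers must run out at the same time for the counts to match.
      return e1 == null && e2 == null;
    }
  }
}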
@@ -1210,7 +1210,7 @@ public class TestWALSplit { int seq = 0; int numRegionEventsAdded = 0; for (int i = 0; i < writers; i++) { - ws[i] = wals.createWALWriter(fs, new Path(WALDIR, WAL_FILE_PREFIX + i)); + ws[i] = wals.createWALWriter(fs, new Path(WALDIR, WAL_FILE_PREFIX + i), false); for (int j = 0; j < entries; j++) { int prefix = 0; for (String region : REGIONS) { @@ -1339,7 +1339,7 @@ public class TestWALSplit { private int countWAL(Path log) throws IOException { int count = 0; - Reader in = wals.createReader(fs, log); + Reader in = wals.createReader(fs, log, null, true); while (in.next() != null) { count++; } @@ -1409,8 +1409,7 @@ public class TestWALSplit { } private void injectEmptyFile(String suffix, boolean closeFile) throws IOException { - Writer writer = - WALFactory.createWALWriter(fs, new Path(WALDIR, WAL_FILE_PREFIX + suffix), conf); + Writer writer = wals.createWALWriter(fs, new Path(WALDIR, WAL_FILE_PREFIX + suffix), false); if (closeFile) { writer.close(); } @@ -1418,8 +1417,8 @@ public class TestWALSplit { private boolean logsAreEqual(Path p1, Path p2) throws IOException { Reader in1, in2; - in1 = wals.createReader(fs, p1); - in2 = wals.createReader(fs, p2); + in1 = wals.createReader(fs, p1, null, true); + in2 = wals.createReader(fs, p2, null, true); Entry entry1; Entry entry2; while ((entry1 = in1.next()) != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java index 861b289f14..3fe1718e57 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java @@ -317,7 +317,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool { rootRegionDir = rootRegionDir.makeQualified(fs.getUri(), fs.getWorkingDirectory()); cleanRegionRootDir(fs, rootRegionDir); FSUtils.setRootDir(getConf(), rootRegionDir); - final WALFactory wals = new WALFactory(getConf(), "wals"); + final WALProviderFactory wals = new WALProviderFactory(getConf(), "wals"); final HRegion[] regions = new HRegion[numRegions]; final Runnable[] benchmarks = new Runnable[numRegions]; final MockRegionServerServices mockServices = new MockRegionServerServices(getConf()); @@ -413,7 +413,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool { * @return Count of edits. * @throws IOException */ - private long verify(final WALFactory wals, final Path wal, final boolean verbose) + private long verify(final WALProviderFactory wals, final Path wal, final boolean verbose) throws IOException { WAL.Reader reader = wals.createReader(wal.getFileSystem(getConf()), wal); long count = 0; @@ -490,7 +490,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool { private final Set walsListenedTo = new HashSet<>(); private HRegion openRegion(final FileSystem fs, final Path dir, final TableDescriptor htd, - final WALFactory wals, final long whenToRoll, final LogRoller roller) throws IOException { + final WALProviderFactory wals, final long whenToRoll, final LogRoller roller) throws IOException { // Initialize HRegion RegionInfo regionInfo = RegionInfoBuilder.newBuilder(htd.getTableName()).build(); // Initialize WAL