From 6a0775433f9346d3a661ab8b780129ed8ddd592b Mon Sep 17 00:00:00 2001
From: Alex Newman
Date: Wed, 27 Oct 2010 21:25:12 -0700
Subject: [PATCH] HBASE-2935. fix testCorruptedFileGetsArchivedIfSkipErrors

Re-enable the disabled corrupted-WAL tests in TestHLogSplit. Instead of
corrupting log files on disk, the tests now plug in a
FaultySequenceFileLogReader that throws a fake IOException at the
beginning, middle, or end of a log file. HLog gains a package-private
resetLogReaderClass() so tests can swap the reader implementation, and
SequenceFileLogReader opens up WALReader, keyClass and
addFileInfoToException for subclassing.
---
 .../apache/hadoop/hbase/regionserver/wal/HLog.java |    7 +
 .../regionserver/wal/SequenceFileLogReader.java    |    6 +-
 .../wal/FaultySequenceFileLogReader.java           |   79 ++++++++++++
 .../hbase/regionserver/wal/TestHLogSplit.java      |  130 ++++++++++++--------
 4 files changed, 169 insertions(+), 53 deletions(-)
 create mode 100644 src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java

diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
index c8a10af..24c76a1 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
@@ -135,6 +135,10 @@ public class HLog implements Syncable {
   private static Class<? extends Writer> logWriterClass;
   private static Class<? extends Reader> logReaderClass;
 
+  static void resetLogReaderClass() {
+    HLog.logReaderClass = null;
+  }
+
   private OutputStream hdfs_out;     // OutputStream associated with the current SequenceFile.writer
   private int initialReplication;    // initial replication factor of SequenceFile.writer
   private Method getNumCurrentReplicas; // refers to DFSOutputStream.getNumCurrentReplicas
@@ -557,11 +561,14 @@ public class HLog implements Syncable {
     final Path path, Configuration conf)
   throws IOException {
     try {
+      if (logReaderClass == null) {
+        logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
+          SequenceFileLogReader.class, Reader.class);
+      }
 
       HLog.Reader reader = logReaderClass.newInstance();
       reader.init(fs, path, conf);
       return reader;
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
index 0b7dc78..497c5d0 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
@@ -50,7 +50,7 @@ public class SequenceFileLogReader implements HLog.Reader {
    * this.end = in.getPos() + length;
    *
    */
-  private static class WALReader extends SequenceFile.Reader {
+  static class WALReader extends SequenceFile.Reader {
 
     WALReader(final FileSystem fs, final Path p, final Configuration c)
     throws IOException {
@@ -131,7 +131,7 @@ public class SequenceFileLogReader implements HLog.Reader {
 
   int edit = 0;
   long entryStart = 0;
-  private Class<? extends HLogKey> keyClass;
+  protected Class<? extends HLogKey> keyClass;
 
   /**
    * Default constructor.
@@ -217,7 +217,7 @@ public class SequenceFileLogReader implements HLog.Reader {
     return reader.getPosition();
   }
 
-  private IOException addFileInfoToException(final IOException ioe)
+  protected IOException addFileInfoToException(final IOException ioe)
   throws IOException {
     long pos = -1;
     try {
diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java b/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
new file mode 100644
index 0000000..63ead7b
--- /dev/null
+++ b/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultySequenceFileLogReader.java
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.Queue;
+
+import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
+
+public class FaultySequenceFileLogReader extends SequenceFileLogReader {
+
+  enum FailureType {
+    BEGINNING, MIDDLE, END, NONE
+  }
+
+  Queue<Entry> nextQueue = new LinkedList<Entry>();
+  int numberOfFileEntries = 0;
+
+  FailureType getFailureType() {
+    return FailureType.valueOf(conf.get("faultysequencefilelogreader.failuretype", FailureType.NONE.name()));
+  }
+
+  @Override
+  public HLog.Entry next(HLog.Entry reuse) throws IOException {
+    this.entryStart = this.reader.getPosition();
+    boolean b = true;
+
+    if (nextQueue.isEmpty()) { // Read the whole thing at once and fake reading
+      while (b == true) {
+        HLogKey key = HLog.newKey(conf);
+        WALEdit val = new WALEdit();
+        HLog.Entry e = new HLog.Entry(key, val);
+        b = this.reader.next(e.getKey(), e.getEdit());
+        nextQueue.offer(e);
+        numberOfFileEntries++;
+      }
+    }
+
+    if (nextQueue.size() == this.numberOfFileEntries
+        && getFailureType() == FailureType.BEGINNING) {
+      throw this.addFileInfoToException(new IOException("fake Exception"));
+    } else if (nextQueue.size() == this.numberOfFileEntries / 2
+        && getFailureType() == FailureType.MIDDLE) {
+      throw this.addFileInfoToException(new IOException("fake Exception"));
+    } else if (nextQueue.size() == 1 && getFailureType() == FailureType.END) {
+      throw this.addFileInfoToException(new IOException("fake Exception"));
+    }
+
+    if (nextQueue.peek() != null) {
+      edit++;
+    }
+
+    Entry e = nextQueue.poll();
+
+    if (e.getEdit().isEmpty()) {
+      return null;
+    }
+    return e;
+  }
+}
diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java b/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
index 473c359..55e04f7 100644
--- a/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
+++ b/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -300,29 +301,91 @@ public class TestHLogSplit {
     }
   }
 
-  // TODO: fix this test (HBASE-2935)
-  //@Test
+  @Test
   public void testCorruptedFileGetsArchivedIfSkipErrors() throws IOException {
     conf.setBoolean(HBASE_SKIP_ERRORS, true);
+    Class backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
+        Reader.class);
+    InstrumentedSequenceFileLogWriter.activateFailure = false;
+    HLog.resetLogReaderClass();
 
-    Path c1 = new Path(hlogDir, HLOG_FILE_PREFIX + "0");
-    Path c2 = new Path(hlogDir, HLOG_FILE_PREFIX + "5");
-    Path c3 = new Path(hlogDir, HLOG_FILE_PREFIX + (NUM_WRITERS - 1));
-    generateHLogs(-1);
-    corruptHLog(c1, Corruptions.INSERT_GARBAGE_IN_THE_MIDDLE, false, fs);
-    corruptHLog(c2, Corruptions.APPEND_GARBAGE, true, fs);
-    corruptHLog(c3, Corruptions.INSERT_GARBAGE_ON_FIRST_LINE, true, fs);
+    try {
+      Path c1 = new Path(hlogDir, HLOG_FILE_PREFIX + "0");
+      conf.setClass("hbase.regionserver.hlog.reader.impl",
+          FaultySequenceFileLogReader.class, HLog.Reader.class);
+      String[] failureTypes = { "begin", "middle", "end" };
+      for (FaultySequenceFileLogReader.FailureType failureType : FaultySequenceFileLogReader.FailureType.values()) {
+        conf.set("faultysequencefilelogreader.failuretype", failureType.name());
+        generateHLogs(1, ENTRIES, -1);
+        fs.initialize(fs.getUri(), conf);
+        HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf);
+        logSplitter.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+        FileStatus[] archivedLogs = fs.listStatus(corruptDir);
+        assertEquals("expected a different file", c1.getName(), archivedLogs[0]
+            .getPath().getName());
+        assertEquals(archivedLogs.length, 1);
+        fs.delete(new Path(oldLogDir, HLOG_FILE_PREFIX + "0"), false);
+      }
+    } finally {
+      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
+          Reader.class);
+      HLog.resetLogReaderClass();
+    }
+  }
 
-    fs.initialize(fs.getUri(), conf);
-    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf);
-    logSplitter.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+  @Test(expected = IOException.class)
+  public void testTrailingGarbageCorruptionLogFileSkipErrorsFalseThrows()
+      throws IOException {
+    conf.setBoolean(HBASE_SKIP_ERRORS, false);
+    Class backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
+        Reader.class);
+    InstrumentedSequenceFileLogWriter.activateFailure = false;
+    HLog.resetLogReaderClass();
 
-    FileStatus[] archivedLogs = fs.listStatus(corruptDir);
+    try {
+      conf.setClass("hbase.regionserver.hlog.reader.impl",
+          FaultySequenceFileLogReader.class, HLog.Reader.class);
+      conf.set("faultysequencefilelogreader.failuretype", FaultySequenceFileLogReader.FailureType.BEGINNING.name());
+      generateHLogs(Integer.MAX_VALUE);
+      fs.initialize(fs.getUri(), conf);
+      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf);
+      logSplitter.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+    } finally {
+      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
+          Reader.class);
+      HLog.resetLogReaderClass();
+    }
 
-    assertEquals("expected a different file", c1.getName(), archivedLogs[0].getPath().getName());
-    assertEquals("expected a different file", c2.getName(), archivedLogs[1].getPath().getName());
-    assertEquals("expected a different file", c3.getName(), archivedLogs[2].getPath().getName());
-    assertEquals(archivedLogs.length, 3);
+  }
+
+  @Test
+  public void testCorruptedLogFilesSkipErrorsFalseDoesNotTouchLogs()
+      throws IOException {
+    conf.setBoolean(HBASE_SKIP_ERRORS, false);
+    Class backupClass = conf.getClass("hbase.regionserver.hlog.reader.impl",
+        Reader.class);
+    InstrumentedSequenceFileLogWriter.activateFailure = false;
+    HLog.resetLogReaderClass();
+
+    try {
+      conf.setClass("hbase.regionserver.hlog.reader.impl",
+          FaultySequenceFileLogReader.class, HLog.Reader.class);
+      conf.set("faultysequencefilelogreader.failuretype", FaultySequenceFileLogReader.FailureType.BEGINNING.name());
+      generateHLogs(-1);
+      fs.initialize(fs.getUri(), conf);
+      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf);
+      try {
+        logSplitter.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
+      } catch (IOException e) {
+        assertEquals(
+            "if skip.errors is false all files should remain in place",
+            NUM_WRITERS, fs.listStatus(hlogDir).length);
+      }
+    } finally {
+      conf.setClass("hbase.regionserver.hlog.reader.impl", backupClass,
+          Reader.class);
+      HLog.resetLogReaderClass();
+    }
   }
@@ -372,39 +435,6 @@ public class TestHLogSplit {
     assertEquals("wrong number of files in the archive log", NUM_WRITERS, archivedLogs.length);
   }
 
-
-
-  // TODO: fix this test (HBASE-2935)
-  //@Test(expected = IOException.class)
-  public void testTrailingGarbageCorruptionLogFileSkipErrorsFalseThrows() throws IOException {
-    conf.setBoolean(HBASE_SKIP_ERRORS, false);
-    generateHLogs(Integer.MAX_VALUE);
-    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
-            Corruptions.APPEND_GARBAGE, true, fs);
-
-    fs.initialize(fs.getUri(), conf);
-    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf);
-    logSplitter.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
-  }
-
-  // TODO: fix this test (HBASE-2935)
-  //@Test
-  public void testCorruptedLogFilesSkipErrorsFalseDoesNotTouchLogs() throws IOException {
-    conf.setBoolean(HBASE_SKIP_ERRORS, false);
-    generateHLogs(-1);
-    corruptHLog(new Path(hlogDir, HLOG_FILE_PREFIX + "5"),
-            Corruptions.APPEND_GARBAGE, true, fs);
-    fs.initialize(fs.getUri(), conf);
-    try {
-      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf);
-      logSplitter.splitLog(hbaseDir, hlogDir, oldLogDir, fs, conf);
-    } catch (IOException e) {/* expected */}
-
-    assertEquals("if skip.errors is false all files should remain in place",
-      NUM_WRITERS, fs.listStatus(hlogDir).length);
-  }
-
-
   @Test
   public void testSplit() throws IOException {
     generateHLogs(-1);
-- 
1.6.6.2