diff --git pom.xml pom.xml
index cb0399a..5c83b5a 100644
--- pom.xml
+++ pom.xml
@@ -238,9 +238,9 @@
https://repository.apache.org/content/repositories/releases/
- hadoop-non-releases
- Hadoop non-releases
- http://people.apache.org/~rawson/repo/
+ apache non-releases
+ Apache non-releases
+ http://people.apache.org/~stack/m2/repository
false
@@ -505,7 +505,7 @@
com.agilejava.docbkx
docbkx-maven-plugin
- 2.0.11
+ 2.0.13
multipage
@@ -871,6 +871,7 @@
2.5
1.1.1
2.1
+ 1.6
r09
1.5.5
5.5.23
@@ -878,7 +879,7 @@
6.1.26
6.1.14
1.4
- 1.6.0
+ 1.6.5
4.10-HBASE-1
1.2.16
1.8.5
@@ -886,7 +887,7 @@
1.5.8
1.0.1
0.7.0
- 3.4.0
+ 3.4.1rc0
0.0.1-SNAPSHOT
/usr
@@ -941,6 +942,11 @@
${commons-cli.version}
+ commons-configuration
+ commons-configuration
+ ${commons-configuration.version}
+
+
com.github.stephenc.high-scale-lib
high-scale-lib
1.1.1
@@ -1359,16 +1365,16 @@
-
+
- hadoop-0.20
+ hadoop-1.0
!hadoop.profile
- 0.20.205.0
+ 1.0.0rc2
@@ -1399,7 +1405,6 @@
-
org.apache.hadoop
hadoop-test
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
index 1592f0f..90f9243 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
@@ -707,7 +707,6 @@ public class HLog implements Syncable {
writer.init(fs, path, conf);
return writer;
} catch (Exception e) {
- IOException ie = new IOException("cannot get log writer");
- ie.initCause(e);
+ IOException ie = new IOException("cannot get log writer", e);
throw ie;
}
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 00bb18c..e2b3a8c 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -140,6 +140,118 @@ public class TestHRegion extends HBaseTestCase {
// /tmp/testtable
//////////////////////////////////////////////////////////////////////////////
+
+ public void testSkipRecoveredEditsReplay() throws Exception {
+ String method = "testSkipRecoveredEditsReplay";
+ byte[] tableName = Bytes.toBytes(method);
+ byte[] family = Bytes.toBytes("family");
+ Configuration conf = HBaseConfiguration.create();
+ initHRegion(tableName, method, conf, family);
+ Path regiondir = region.getRegionDir();
+ FileSystem fs = region.getFilesystem();
+ byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
+
+ Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+
+ long maxSeqId = 1050;
+ long minSeqId = 1000;
+
+ for (long i = minSeqId; i <= maxSeqId; i += 10) {
+ Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
+ fs.create(recoveredEdits).close();
+ HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
+
+ long time = System.nanoTime();
+ WALEdit edit = new WALEdit();
+ edit.add(new KeyValue(row, family, Bytes.toBytes(i),
+ time, KeyValue.Type.Put, Bytes.toBytes(i)));
+ writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
+ i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
+
+ writer.close();
+ }
+ MonitoredTask status = TaskMonitor.get().createStatus(method);
+ long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId-1, null, status);
+ assertEquals(maxSeqId, seqId);
+ Get get = new Get(row);
+ Result result = region.get(get, null);
+ for (long i = minSeqId; i <= maxSeqId; i += 10) {
+ List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
+ assertEquals(1, kvs.size());
+ assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
+ }
+ }
+
+ public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
+ String method = "testSkipRecoveredEditsReplaySomeIgnored";
+ byte[] tableName = Bytes.toBytes(method);
+ byte[] family = Bytes.toBytes("family");
+ initHRegion(tableName, method, HBaseConfiguration.create(), family);
+ Path regiondir = region.getRegionDir();
+ FileSystem fs = region.getFilesystem();
+ byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
+
+ Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+
+ long maxSeqId = 1050;
+ long minSeqId = 1000;
+
+ for (long i = minSeqId; i <= maxSeqId; i += 10) {
+ Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
+ fs.create(recoveredEdits).close();
+ HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
+
+ long time = System.nanoTime();
+ WALEdit edit = new WALEdit();
+ edit.add(new KeyValue(row, family, Bytes.toBytes(i),
+ time, KeyValue.Type.Put, Bytes.toBytes(i)));
+ writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
+ i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
+
+ writer.close();
+ }
+ long recoverSeqId = 1030;
+ MonitoredTask status = TaskMonitor.get().createStatus(method);
+ long seqId = region.replayRecoveredEditsIfAny(regiondir, recoverSeqId-1, null, status);
+ assertEquals(maxSeqId, seqId);
+ Get get = new Get(row);
+ Result result = region.get(get, null);
+ for (long i = minSeqId; i <= maxSeqId; i += 10) {
+ List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
+ if (i < recoverSeqId) {
+ assertEquals(0, kvs.size());
+ } else {
+ assertEquals(1, kvs.size());
+ assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
+ }
+ }
+ }
+
+ public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
+ String method = "testSkipRecoveredEditsReplayAllIgnored";
+ byte[] tableName = Bytes.toBytes(method);
+ byte[] family = Bytes.toBytes("family");
+ initHRegion(tableName, method, HBaseConfiguration.create(), family);
+ Path regiondir = region.getRegionDir();
+ FileSystem fs = region.getFilesystem();
+
+ Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
+ for (int i = 1000; i < 1050; i += 10) {
+ Path recoveredEdits = new Path(
+ recoveredEditsDir, String.format("%019d", i));
+ FSDataOutputStream dos= fs.create(recoveredEdits);
+ dos.writeInt(i);
+ dos.close();
+ }
+ long minSeqId = 2000;
+ Path recoveredEdits = new Path(
+ recoveredEditsDir, String.format("%019d", minSeqId-1));
+ FSDataOutputStream dos= fs.create(recoveredEdits);
+ dos.close();
+ long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId, null, null);
+ assertEquals(minSeqId, seqId);
+ }
+
public void testGetWhileRegionClose() throws IOException {
Configuration hc = initSplit();
int numRows = 100;
@@ -2828,115 +2940,6 @@ public class TestHRegion extends HBaseTestCase {
region.get(g, null);
}
- public void testSkipRecoveredEditsReplay() throws Exception {
- String method = "testSkipRecoveredEditsReplay";
- byte[] tableName = Bytes.toBytes(method);
- byte[] family = Bytes.toBytes("family");
- Configuration conf = HBaseConfiguration.create();
- initHRegion(tableName, method, conf, family);
- Path regiondir = region.getRegionDir();
- FileSystem fs = region.getFilesystem();
- byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
-
- Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
-
- long maxSeqId = 1050;
- long minSeqId = 1000;
-
- for (long i = minSeqId; i <= maxSeqId; i += 10) {
- Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
- HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
-
- long time = System.nanoTime();
- WALEdit edit = new WALEdit();
- edit.add(new KeyValue(row, family, Bytes.toBytes(i),
- time, KeyValue.Type.Put, Bytes.toBytes(i)));
- writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
- i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
-
- writer.close();
- }
- MonitoredTask status = TaskMonitor.get().createStatus(method);
- long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId-1, null, status);
- assertEquals(maxSeqId, seqId);
- Get get = new Get(row);
- Result result = region.get(get, null);
- for (long i = minSeqId; i <= maxSeqId; i += 10) {
- List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
- assertEquals(1, kvs.size());
- assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
- }
- }
-
- public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
- String method = "testSkipRecoveredEditsReplaySomeIgnored";
- byte[] tableName = Bytes.toBytes(method);
- byte[] family = Bytes.toBytes("family");
- initHRegion(tableName, method, HBaseConfiguration.create(), family);
- Path regiondir = region.getRegionDir();
- FileSystem fs = region.getFilesystem();
- byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
-
- Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
-
- long maxSeqId = 1050;
- long minSeqId = 1000;
-
- for (long i = minSeqId; i <= maxSeqId; i += 10) {
- Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
- HLog.Writer writer = HLog.createWriter(fs, recoveredEdits, conf);
-
- long time = System.nanoTime();
- WALEdit edit = new WALEdit();
- edit.add(new KeyValue(row, family, Bytes.toBytes(i),
- time, KeyValue.Type.Put, Bytes.toBytes(i)));
- writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
- i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
-
- writer.close();
- }
- long recoverSeqId = 1030;
- MonitoredTask status = TaskMonitor.get().createStatus(method);
- long seqId = region.replayRecoveredEditsIfAny(regiondir, recoverSeqId-1, null, status);
- assertEquals(maxSeqId, seqId);
- Get get = new Get(row);
- Result result = region.get(get, null);
- for (long i = minSeqId; i <= maxSeqId; i += 10) {
- List<KeyValue> kvs = result.getColumn(family, Bytes.toBytes(i));
- if (i < recoverSeqId) {
- assertEquals(0, kvs.size());
- } else {
- assertEquals(1, kvs.size());
- assertEquals(Bytes.toBytes(i), kvs.get(0).getValue());
- }
- }
- }
-
- public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
- String method = "testSkipRecoveredEditsReplayAllIgnored";
- byte[] tableName = Bytes.toBytes(method);
- byte[] family = Bytes.toBytes("family");
- initHRegion(tableName, method, HBaseConfiguration.create(), family);
- Path regiondir = region.getRegionDir();
- FileSystem fs = region.getFilesystem();
-
- Path recoveredEditsDir = HLog.getRegionDirRecoveredEditsDir(regiondir);
- for (int i = 1000; i < 1050; i += 10) {
- Path recoveredEdits = new Path(
- recoveredEditsDir, String.format("%019d", i));
- FSDataOutputStream dos= fs.create(recoveredEdits);
- dos.writeInt(i);
- dos.close();
- }
- long minSeqId = 2000;
- Path recoveredEdits = new Path(
- recoveredEditsDir, String.format("%019d", minSeqId-1));
- FSDataOutputStream dos= fs.create(recoveredEdits);
- dos.close();
- long seqId = region.replayRecoveredEditsIfAny(regiondir, minSeqId, null, null);
- assertEquals(minSeqId, seqId);
- }
-
public void testIndexesScanWithOneDeletedRow() throws IOException {
byte[] tableName = Bytes.toBytes("testIndexesScanWithOneDeletedRow");
byte[] family = Bytes.toBytes("family");
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
index 92ab06c..17783f8 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
@@ -587,8 +587,16 @@ public class TestStore extends TestCase {
Progressable progress) throws IOException {
return new FaultyOutputStream(super.create(f, permission,
overwrite, bufferSize, replication, blockSize, progress), faultPos);
- }
+ }
+ @Override
+ public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
+ int bufferSize, short replication, long blockSize, Progressable progress)
+ throws IOException {
+ // Fake it. Call create instead. The default implementation throws an IOE
+ // that this is not supported.
+ return create(f, overwrite, bufferSize, replication, blockSize, progress);
+ }
}
static class FaultyOutputStream extends FSDataOutputStream {
diff --git src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
index a061ee6..18b00ba 100644
--- src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
+++ src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
@@ -19,24 +19,27 @@
*/
package org.apache.hadoop.hbase.replication;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.BeforeClass;
-import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
@Category(MediumTests.class)
public class TestReplicationSource {
@@ -44,7 +47,7 @@ public class TestReplicationSource {
LogFactory.getLog(TestReplicationSource.class);
private final static HBaseTestingUtility TEST_UTIL =
new HBaseTestingUtility();
- private static FileSystem fs;
+ private static FileSystem FS;
private static Path oldLogDir;
private static Path logDir;
private static Configuration conf = HBaseConfiguration.create();
@@ -55,11 +58,13 @@ public class TestReplicationSource {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniDFSCluster(1);
- fs = TEST_UTIL.getDFSCluster().getFileSystem();
- oldLogDir = new Path(fs.getHomeDirectory(),
+ FS = TEST_UTIL.getDFSCluster().getFileSystem();
+ oldLogDir = new Path(FS.getHomeDirectory(),
HConstants.HREGION_OLDLOGDIR_NAME);
- logDir = new Path(fs.getHomeDirectory(),
+ if (FS.exists(oldLogDir)) FS.delete(oldLogDir, true);
+ logDir = new Path(FS.getHomeDirectory(),
HConstants.HREGION_LOGDIR_NAME);
+ if (FS.exists(logDir)) FS.delete(logDir, true);
}
/**
@@ -71,7 +76,9 @@ public class TestReplicationSource {
@Test
public void testLogMoving() throws Exception{
Path logPath = new Path(logDir, "log");
- HLog.Writer writer = HLog.createWriter(fs, logPath, conf);
+ if (!FS.exists(logDir)) FS.mkdirs(logDir);
+ if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir);
+ HLog.Writer writer = HLog.createWriter(FS, logPath, conf);
for(int i = 0; i < 3; i++) {
byte[] b = Bytes.toBytes(Integer.toString(i));
KeyValue kv = new KeyValue(b,b,b);
@@ -83,12 +90,12 @@ public class TestReplicationSource {
}
writer.close();
- HLog.Reader reader = HLog.getReader(fs, logPath, conf);
+ HLog.Reader reader = HLog.getReader(FS, logPath, conf);
HLog.Entry entry = reader.next();
assertNotNull(entry);
Path oldLogPath = new Path(oldLogDir, "log");
- fs.rename(logPath, oldLogPath);
+ FS.rename(logPath, oldLogPath);
entry = reader.next();
assertNotNull(entry);