Index: src/docbkx/book.xml
===================================================================
--- src/docbkx/book.xml (revision 1208106)
+++ src/docbkx/book.xml (working copy)
@@ -256,8 +256,8 @@
Scan scan = new Scan();
scan.addColumn(Bytes.toBytes("cf"),Bytes.toBytes("attr"));
-scan.setStartRow( Bytes.toBytes("row"));
-scan.setStopRow( Bytes.toBytes("row" + new byte[] {0})); // note: stop key != start key
+scan.setStartRow( Bytes.toBytes("row")); // start key is inclusive
+scan.setStopRow( Bytes.add(Bytes.toBytes("row"), new byte[] {0})); // stop key is exclusive
for(Result result : htable.getScanner(scan)) {
// process Result instance
}
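
For reference, a minimal standalone version of the scan in the hunk above, assuming an
existing table named "myTable" with column family "cf" (the table name and connection
setup are illustrative, not part of the patch). It uses Bytes.add() to append the
trailing zero byte, because concatenating a Java String with a byte[] only appends the
array's toString() value rather than the byte itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanRangeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable htable = new HTable(conf, "myTable");          // illustrative table name
    try {
      Scan scan = new Scan();
      scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("attr"));
      // Start key is inclusive, stop key is exclusive.  Appending a trailing
      // zero byte makes the stop key the smallest key strictly greater than
      // "row", so this scan returns exactly the row "row".
      scan.setStartRow(Bytes.toBytes("row"));
      scan.setStopRow(Bytes.add(Bytes.toBytes("row"), new byte[] {0}));
      ResultScanner scanner = htable.getScanner(scan);
      try {
        for (Result result : scanner) {
          // process Result instance
        }
      } finally {
        scanner.close();
      }
    } finally {
      htable.close();
    }
  }
}
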
Index: src/docbkx/troubleshooting.xml
===================================================================
--- src/docbkx/troubleshooting.xml (revision 1208106)
+++ src/docbkx/troubleshooting.xml (working copy)
@@ -523,8 +523,41 @@
-
+
+
+ MapReduce
+
+ You Think You're On The Cluster, But You're Actually Local
+ The following stacktrace occurred while using ImportTsv, but a similar failure
+ can happen on any job that is misconfigured.
+
+ WARN mapred.LocalJobRunner: job_local_0001
+java.lang.IllegalArgumentException: Can't read partitions file
+ at org.apache.hadoop.hbase.mapreduce.hadoopbackport.TotalOrderPartitioner.setConf(TotalOrderPartitioner.java:111)
+ at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:62)
+ at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:117)
+ at org.apache.hadoop.mapred.MapTask$NewOutputCollector.<init>(MapTask.java:560)
+ at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:639)
+ at org.apache.hadoop.mapred.MapTask.run(MapTask.java:323)
+ at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:210)
+Caused by: java.io.FileNotFoundException: File _partition.lst does not exist.
+ at org.apache.hadoop.fs.RawLocalFileSystem.getFileStatus(RawLocalFileSystem.java:383)
+ at org.apache.hadoop.fs.FilterFileSystem.getFileStatus(FilterFileSystem.java:251)
+ at org.apache.hadoop.fs.FileSystem.getLength(FileSystem.java:776)
+ at org.apache.hadoop.io.SequenceFile$Reader.<init>(SequenceFile.java:1424)
+ at org.apache.hadoop.io.SequenceFile$Reader.<init>(SequenceFile.java:1419)
+ at org.apache.hadoop.hbase.mapreduce.hadoopbackport.TotalOrderPartitioner.readPartitions(TotalOrderPartitioner.java:296)
+
+ ... see the critical portion of the stack trace? It's this line:
+
+ at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:210)
+
+ LocalJobRunner means the job is running locally, not on the cluster.
+
+
+
+
NameNode
For more information on the NameNode, see .
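
Not part of the patch, but a minimal pre-flight check for the LocalJobRunner problem
described above, assuming the pre-YARN (Hadoop 0.20/1.x) configuration keys that match
the stack trace; the class name is illustrative. On those versions the LocalJobRunner is
selected whenever mapred.job.tracker is left at its default value of "local", which
usually means the Hadoop *-site.xml files are not on the job's classpath:

import org.apache.hadoop.mapred.JobConf;

public class ClusterCheck {
  public static void main(String[] args) {
    // JobConf loads mapred-default.xml and mapred-site.xml from the classpath,
    // the same way a submitted job would.
    JobConf conf = new JobConf();

    // On Hadoop 0.20/1.x the LocalJobRunner is used whenever mapred.job.tracker
    // is left at its default value of "local".
    String jobTracker = conf.get("mapred.job.tracker", "local");
    if ("local".equals(jobTracker)) {
      System.err.println("mapred.job.tracker is 'local': jobs submitted with this"
          + " classpath will run in the LocalJobRunner, not on the cluster."
          + " Check that the directory containing mapred-site.xml is on the classpath.");
    } else {
      System.out.println("Jobs will be submitted to the JobTracker at " + jobTracker);
    }
  }
}

Running this with the same classpath as the failing job shows immediately whether a
submitted job will go to the JobTracker or silently fall back to the LocalJobRunner.
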