diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
index 2fde925..04bfbb5 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
@@ -37,6 +37,6 @@ public class SecureBulkLoadUtil {
   }
 
   public static Path getBaseStagingDir(Configuration conf) {
-    return new Path(conf.get(BULKLOAD_STAGING_DIR, "/tmp/hbase-staging"));
+    return new Path(conf.get(BULKLOAD_STAGING_DIR));
   }
 }
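With the hard-coded "/tmp/hbase-staging" fallback removed, getBaseStagingDir() relies entirely on hbase.bulkload.staging.dir being resolvable from configuration; the hbase-default.xml entries added below supply ${hbase.fs.tmp.dir} as that default. A minimal sketch of the resulting behaviour (not part of the patch; the class name is illustrative, and it assumes the bundled hbase-default.xml is on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;

public class StagingDirCheck {
  public static void main(String[] args) {
    // hbase-default.xml supplies hbase.bulkload.staging.dir=${hbase.fs.tmp.dir};
    // Configuration expands ${hbase.fs.tmp.dir} and then the ${user.name} JVM
    // system property, so this typically prints /user/<current user>/hbase-staging.
    Configuration conf = HBaseConfiguration.create();
    System.out.println(SecureBulkLoadUtil.getBaseStagingDir(conf));
    // With the fallback gone, a configuration that lacks the key would hand
    // null to new Path(...), which throws IllegalArgumentException.
  }
}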
diff --git hbase-common/src/main/resources/hbase-default.xml hbase-common/src/main/resources/hbase-default.xml
index 69b1f8b..f773d21 100644
--- hbase-common/src/main/resources/hbase-default.xml
+++ hbase-common/src/main/resources/hbase-default.xml
@@ -63,6 +63,20 @@ possible configurations would overwhelm and obscure the important.
     machine restart.</description>
   </property>
   <property>
+    <name>hbase.fs.tmp.dir</name>
+    <value>/user/${user.name}/hbase-staging</value>
+    <description>A staging directory in default file system (HDFS)
+    for keeping temporary data.
+    </description>
+  </property>
+  <property>
+    <name>hbase.bulkload.staging.dir</name>
+    <value>${hbase.fs.tmp.dir}</value>
+    <description>A staging directory in default file system (HDFS)
+    for bulk loading.
+    </description>
+  </property>
+  <property>
     <name>hbase.cluster.distributed</name>
     <value>false</value>
     <description>The mode the cluster will be in. Possible values are
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 465be56..5935b86 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -588,7 +588,7 @@ public class HFileOutputFormat2
     Configuration conf = job.getConfiguration();
     // create the partitions file
     FileSystem fs = FileSystem.get(conf);
-    Path partitionsPath = new Path(conf.get("hadoop.tmp.dir"), "partitions_" + UUID.randomUUID());
+    Path partitionsPath = new Path(conf.get("hbase.fs.tmp.dir"), "partitions_" + UUID.randomUUID());
     fs.makeQualified(partitionsPath);
     writePartitions(conf, partitionsPath, splitPoints);
     fs.deleteOnExit(partitionsPath);
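HFileOutputFormat2 now keys the partitions file off hbase.fs.tmp.dir instead of hadoop.tmp.dir. Since the path is resolved against FileSystem.get(conf) (the default filesystem, typically HDFS), the new default of /user/${user.name}/hbase-staging keeps it in a per-user staging location rather than whatever hadoop.tmp.dir happens to point at, which is frequently a node-local directory. A rough sketch of where the file ends up (not part of the patch; the class name is illustrative, and it assumes the new hbase-default.xml entry is visible to the client):

import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class PartitionsPathCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // FileSystem.get(conf) is the default filesystem (HDFS on a real cluster),
    // so the partitions path no longer depends on hadoop.tmp.dir.
    FileSystem fs = FileSystem.get(conf);
    Path partitionsPath =
        new Path(conf.get("hbase.fs.tmp.dir"), "partitions_" + UUID.randomUUID());
    // Prints e.g. hdfs://<namenode>/user/<current user>/hbase-staging/partitions_<uuid>
    System.out.println(fs.makeQualified(partitionsPath));
  }
}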