diff --git a/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index b7b8001..971a98d 100644
--- a/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -63,6 +63,7 @@ public class ImportTsv {
   final static String BULK_OUTPUT_CONF_KEY = "importtsv.bulk.output";
   final static String COLUMNS_CONF_KEY = "importtsv.columns";
   final static String SEPARATOR_CONF_KEY = "importtsv.separator";
+  final static String TIMESTAMP_CONF_KEY = "importtsv.timestamp";
   final static String DEFAULT_SEPARATOR = "\t";

   static class TsvParser {
@@ -220,7 +221,7 @@ public class ImportTsv {
       if (parser.getRowKeyColumnIndex() == -1) {
         throw new RuntimeException("No row key column specified");
       }
-      ts = System.currentTimeMillis();
+      ts = context.getConfiguration().getLong(TIMESTAMP_CONF_KEY, System.currentTimeMillis());

       skipBadLines = context.getConfiguration().getBoolean(
         SKIP_LINES_CONF_KEY, true);
@@ -346,7 +347,8 @@ public class ImportTsv {
       "\n" +
       "Other options that may be specified with -D include:\n" +
       "  -D" + SKIP_LINES_CONF_KEY + "=false - fail if encountering an invalid line\n" +
-      "  '-D" + SEPARATOR_CONF_KEY + "=|' - eg separate on pipes instead of tabs";
+      "  '-D" + SEPARATOR_CONF_KEY + "=|' - eg separate on pipes instead of tabs\n" +
+      "  -D" + TIMESTAMP_CONF_KEY + "=currentTimeAsLong - use the specified timestamp for the import";

     System.err.println(usage);
   }
diff --git a/src/site/xdoc/bulk-loads.xml b/src/site/xdoc/bulk-loads.xml
index 99e163b..885bccc 100644
--- a/src/site/xdoc/bulk-loads.xml
+++ b/src/site/xdoc/bulk-loads.xml
@@ -105,6 +105,7 @@ In order to prepare data for a bulk data load, pass the option:

   Other options that may be specified with -D include:
     -Dimporttsv.skip.bad.lines=false - fail if encountering an invalid line
+    -Dimporttsv.timestamp=currentTimeAsLong - use the specified timestamp for the import