From 796d34567dce6da6099a8eabc96d026c5062a197 Mon Sep 17 00:00:00 2001 From: Andrey Elenskiy Date: Thu, 3 Jan 2019 14:15:17 -0800 Subject: [PATCH] HBASE-21476 Support for nanosecond timestamps --- .../apache/hadoop/hbase/HTableDescriptor.java | 27 ++++++- .../hadoop/hbase/client/TableDescriptor.java | 13 ++- .../hbase/client/TableDescriptorBuilder.java | 41 +++++++++- .../hbase/util/DefaultEnvironmentEdge.java | 14 ++++ .../hadoop/hbase/util/EnvironmentEdge.java | 9 +++ .../hbase/util/EnvironmentEdgeManager.java | 12 ++- .../apache/hadoop/hbase/master/HMaster.java | 6 +- .../master/MasterMobCompactionThread.java | 26 +++--- .../hbase/master/MasterRpcServices.java | 5 +- .../hbase/master/MobCompactionChore.java | 2 +- .../org/apache/hadoop/hbase/mob/MobUtils.java | 10 +-- .../hbase/mob/compactions/MobCompactor.java | 13 +-- .../compactions/PartitionedMobCompactor.java | 27 ++++--- .../hadoop/hbase/regionserver/HRegion.java | 27 +++++-- .../hadoop/hbase/regionserver/HStore.java | 48 +++-------- .../hadoop/hbase/regionserver/ScanInfo.java | 79 ++++++++++++++++--- .../hbase/regionserver/StoreScanner.java | 3 +- .../hadoop/hbase/HBaseTestingUtility.java | 63 ++++++++++----- .../mob/compactions/TestMobCompactor.java | 11 ++- .../TestPartitionedMobCompactor.java | 33 ++++---- .../regionserver/TestCompactingMemStore.java | 3 +- .../regionserver/TestDefaultMemStore.java | 5 +- .../regionserver/TestMobStoreCompaction.java | 5 +- .../regionserver/TestReversibleScanners.java | 3 +- .../hbase/regionserver/TestStoreScanner.java | 11 +-- .../compactions/TestDateTieredCompactor.java | 5 +- .../compactions/TestStripeCompactor.java | 5 +- .../TestCompactionScanQueryMatcher.java | 2 +- .../TestUserScanQueryMatcher.java | 18 ++--- hbase-shell/src/main/ruby/hbase/admin.rb | 2 +- 30 files changed, 357 insertions(+), 171 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 19a5e03b72..f239fe6aed 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -58,6 +58,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable DEFAULT_VALUES = new HashMap<>(); private final static Set RESERVED_KEYWORDS = new HashSet<>(); @@ -216,6 +227,7 @@ public class TableDescriptorBuilder { DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION)); DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED)); DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY)); + DEFAULT_VALUES.put(NANOSECOND_TIMESTAMPS, String.valueOf(DEFAULT_NANOSECOND_TIMESTAMPS)); DEFAULT_VALUES.keySet().stream() .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add); RESERVED_KEYWORDS.add(IS_META_KEY); @@ -452,6 +464,11 @@ public class TableDescriptorBuilder { return this; } + public TableDescriptorBuilder setNanosecondTimestamps(final boolean isNano) { + desc.setNanosecondTimestamps(isNano); + return this; + } + @Deprecated public TableDescriptorBuilder setOwner(User owner) { desc.setOwner(owner); @@ -765,6 +782,27 @@ public class TableDescriptorBuilder { return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable)); } + /** + * Check if table handles timestamps with nanoseconds precision. + * + * @return true if table uses nanosecond precision for timestamps. 
+ */ + @Override + public boolean isNanosecondTimestamps() { + return getOrDefault(NANOSECOND_TIMESTAMPS_KEY, Boolean::valueOf, + DEFAULT_NANOSECOND_TIMESTAMPS); + } + + /** + * Setting the NANOSECOND_TIMESTAMPS flag + * + * @param isNano True to use nanosecond precision for timestamps. + * @return the modifyable TD + */ + public ModifyableTableDescriptor setNanosecondTimestamps(final boolean isNano) { + return setValue(NANOSECOND_TIMESTAMPS_KEY, Boolean.toString(isNano)); + } + /** * Check if normalization enable flag of the table is true. If flag is false * then no region normalizer won't attempt to normalize this table. @@ -773,7 +811,8 @@ public class TableDescriptorBuilder { */ @Override public boolean isNormalizationEnabled() { - return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, DEFAULT_NORMALIZATION_ENABLED); + return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, + DEFAULT_NORMALIZATION_ENABLED); } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java index 422cc16507..d4abd833f0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java @@ -18,6 +18,8 @@ */ package org.apache.hadoop.hbase.util; +import java.time.Instant; + import org.apache.yetus.audience.InterfaceAudience; /** @@ -35,4 +37,16 @@ public class DefaultEnvironmentEdge implements EnvironmentEdge { public long currentTime() { return System.currentTimeMillis(); } + + /** + * {@inheritDoc} + *
<p>
+ * This implementation returns current time in nanoseconds via {@link java.time.Instant#now()} + *
</p>
+ */ + @Override + public long currentTimeNano() { + Instant now = Instant.now(); + return now.getEpochSecond() * 1000000000 + now.getNano(); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java index 635c2764d2..6528edbc4e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java @@ -34,4 +34,13 @@ public interface EnvironmentEdge { * @return Current time. */ long currentTime(); + + /** + * Returns current time in nanosecond precision. + * + * @return current time in nanoseconds. + */ + default long currentTimeNano() { + return currentTime() * 1000000; + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java index c38ddf7f86..2c1a27e2e1 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java @@ -60,7 +60,7 @@ import org.apache.yetus.audience.InterfaceAudience; is advised not to inject any other {@link org.apache.hadoop.hbase.util.EnvironmentEdge}. */ @InterfaceAudience.Private -public class EnvironmentEdgeManager { +public final class EnvironmentEdgeManager { private static volatile EnvironmentEdge delegate = new DefaultEnvironmentEdge(); private EnvironmentEdgeManager() { @@ -108,4 +108,14 @@ public class EnvironmentEdgeManager { public static long currentTime() { return getDelegate().currentTime(); } + + /** + * Defers to the delegate and calls the + * {@link EnvironmentEdge#currentTimeNano()} method. + * + * @return current time in nanoseconds according to the delegate. + */ + public static long currentTimeNano() { + return getDelegate().currentTimeNano(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 60d0441aab..cadc65d938 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -3510,13 +3510,13 @@ public class HMaster extends HRegionServer implements MasterServices { /** * Requests mob compaction. - * @param tableName The table the compact. + * @param td The descriptor of the table to compact. * @param columns The compacted columns. * @param allFiles Whether add all mob files into the compaction.
*/ - public void requestMobCompaction(TableName tableName, + public void requestMobCompaction(TableDescriptor td, List columns, boolean allFiles) throws IOException { - mobCompactThread.requestMobCompaction(conf, fs, tableName, columns, allFiles); + mobCompactThread.requestMobCompaction(conf, fs, td, columns, allFiles); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java index 9d6da0c1ff..a811ac2f79 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java @@ -29,11 +29,11 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.locking.LockManager; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure2.LockType; @@ -74,20 +74,20 @@ public class MasterMobCompactionThread { * Requests mob compaction * @param conf The Configuration * @param fs The file system - * @param tableName The table the compact + * @param td The descriptor of the table to compact * @param columns The column descriptors * @param allFiles Whether add all mob files into the compaction. */ - public void requestMobCompaction(Configuration conf, FileSystem fs, TableName tableName, + public void requestMobCompaction(Configuration conf, FileSystem fs, TableDescriptor td, List columns, boolean allFiles) throws IOException { - master.reportMobCompactionStart(tableName); + master.reportMobCompactionStart(td.getTableName()); try { - masterMobPool.execute(new CompactionRunner(fs, tableName, columns, + masterMobPool.execute(new CompactionRunner(fs, td, columns, allFiles, mobCompactorPool)); } catch (RejectedExecutionException e) { // in case the request is rejected by the pool try { - master.reportMobCompactionEnd(tableName); + master.reportMobCompactionEnd(td.getTableName()); } catch (IOException e1) { LOG.error("Failed to mark end of mob compaction", e1); } @@ -95,22 +95,22 @@ public class MasterMobCompactionThread { } if (LOG.isDebugEnabled()) { LOG.debug("The mob compaction is requested for the columns " + columns - + " of the table " + tableName.getNameAsString()); + + " of the table " + td.getTableName().getNameAsString()); } } private class CompactionRunner implements Runnable { private FileSystem fs; - private TableName tableName; + private TableDescriptor tableDescriptor; private List hcds; private boolean allFiles; private ExecutorService pool; - public CompactionRunner(FileSystem fs, TableName tableName, List hcds, + public CompactionRunner(FileSystem fs, TableDescriptor td, List hcds, boolean allFiles, ExecutorService pool) { super(); this.fs = fs; - this.tableName = tableName; + this.tableDescriptor = td; this.hcds = hcds; this.allFiles = allFiles; this.pool = pool; @@ -120,17 +120,17 @@ public class MasterMobCompactionThread { public void run() { // These locks are on dummy table names, and only used for compaction/mob file cleaning. 
final LockManager.MasterLock lock = master.getLockManager().createMasterLock( - MobUtils.getTableLockName(tableName), LockType.EXCLUSIVE, + MobUtils.getTableLockName(tableDescriptor.getTableName()), LockType.EXCLUSIVE, this.getClass().getName() + ": mob compaction"); try { for (ColumnFamilyDescriptor hcd : hcds) { - MobUtils.doMobCompaction(conf, fs, tableName, hcd, pool, allFiles, lock); + MobUtils.doMobCompaction(conf, fs, tableDescriptor, hcd, pool, allFiles, lock); } } catch (IOException e) { LOG.error("Failed to perform the mob compaction", e); } finally { try { - master.reportMobCompactionEnd(tableName); + master.reportMobCompactionEnd(tableDescriptor.getTableName()); } catch (IOException e) { LOG.error("Failed to mark end of mob compaction", e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 778a4379b1..d8d4fe19f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1703,7 +1703,8 @@ public class MasterRpcServices extends RSRpcServices } boolean allFiles = false; List compactedColumns = new ArrayList<>(); - ColumnFamilyDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies(); + TableDescriptor td = master.getTableDescriptors().get(tableName); + ColumnFamilyDescriptor[] hcds = td.getColumnFamilies(); byte[] family = null; if (request.hasFamily()) { family = request.getFamily().toByteArray(); @@ -1737,7 +1738,7 @@ public class MasterRpcServices extends RSRpcServices LOG.trace("User-triggered mob compaction requested for table: " + tableName.getNameAsString() + " for column family: " + familyLogMsg); } - master.requestMobCompaction(tableName, compactedColumns, allFiles); + master.requestMobCompaction(td, compactedColumns, allFiles); return CompactRegionResponse.newBuilder().build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java index 6c5d677a86..a826f0a935 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java @@ -75,7 +75,7 @@ public class MobCompactionChore extends ScheduledChore { reported = true; } MobUtils.doMobCompaction(master.getConfiguration(), master.getFileSystem(), - htd.getTableName(), hcd, pool, false, lock); + htd, hcd, pool, false, lock); } } finally { if (reported) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index 304a62e890..d88e12e2d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -831,12 +831,12 @@ public final class MobUtils { * Performs the mob compaction. * @param conf the Configuration * @param fs the file system - * @param tableName the table the compact + * @param td the descriptor of the table to compact * @param hcd the column descriptor * @param pool the thread pool * @param allFiles Whether add all mob files into the compaction. 
*/ - public static void doMobCompaction(Configuration conf, FileSystem fs, TableName tableName, + public static void doMobCompaction(Configuration conf, FileSystem fs, TableDescriptor td, ColumnFamilyDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock) throws IOException { String className = conf.get(MobConstants.MOB_COMPACTOR_CLASS_KEY, @@ -845,8 +845,8 @@ public final class MobUtils { MobCompactor compactor = null; try { compactor = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] { - Configuration.class, FileSystem.class, TableName.class, ColumnFamilyDescriptor.class, - ExecutorService.class }, new Object[] { conf, fs, tableName, hcd, pool }); + Configuration.class, FileSystem.class, TableDescriptor.class, ColumnFamilyDescriptor.class, + ExecutorService.class }, new Object[] { conf, fs, td, hcd, pool }); } catch (Exception e) { throw new IOException("Unable to load configured mob file compactor '" + className + "'", e); } @@ -858,7 +858,7 @@ public final class MobUtils { compactor.compact(allFiles); } catch (Exception e) { LOG.error("Failed to compact the mob files for the column " + hcd.getNameAsString() - + " in the table " + tableName.getNameAsString(), e); + + " in the table " + td.getTableName().getNameAsString(), e); } finally { lock.release(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java index c5d93ea6e0..74eec2ca0a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java @@ -27,9 +27,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.util.FSUtils; @@ -41,22 +41,23 @@ public abstract class MobCompactor { protected FileSystem fs; protected Configuration conf; - protected TableName tableName; + protected TableDescriptor tableDescriptor; protected ColumnFamilyDescriptor column; protected Path mobTableDir; protected Path mobFamilyDir; protected ExecutorService pool; - public MobCompactor(Configuration conf, FileSystem fs, TableName tableName, + public MobCompactor(Configuration conf, FileSystem fs, TableDescriptor td, ColumnFamilyDescriptor column, ExecutorService pool) { this.conf = conf; this.fs = fs; - this.tableName = tableName; + this.tableDescriptor = td; this.column = column; this.pool = pool; - mobTableDir = FSUtils.getTableDir(MobUtils.getMobHome(conf), tableName); - mobFamilyDir = MobUtils.getMobFamilyPath(conf, tableName, column.getNameAsString()); + mobTableDir = FSUtils.getTableDir(MobUtils.getMobHome(conf), tableDescriptor.getTableName()); + mobFamilyDir = MobUtils.getMobFamilyPath(conf, tableDescriptor.getTableName(), + column.getNameAsString()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java index 9f1ab965a1..26de0583bf 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -72,7 +73,6 @@ import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.C import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition; import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartitionId; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.hadoop.hbase.regionserver.ScanType; @@ -82,11 +82,11 @@ import org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.regionserver.StoreScanner; import org.apache.hadoop.hbase.security.EncryptionUtil; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -110,13 +110,14 @@ public class PartitionedMobCompactor extends MobCompactor { private final byte[] refCellTags; private Encryption.Context cryptoContext = Encryption.Context.NONE; - public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName, + public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableDescriptor td, ColumnFamilyDescriptor column, ExecutorService pool) throws IOException { - super(conf, fs, tableName, column, pool); + super(conf, fs, td, column, pool); mergeableSize = conf.getLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD); delFileMaxCount = conf.getInt(MobConstants.MOB_DELFILE_MAX_COUNT, MobConstants.DEFAULT_MOB_DELFILE_MAX_COUNT); + TableName tableName = td.getTableName(); // default is 100 compactionBatchSize = conf.getInt(MobConstants.MOB_COMPACTION_BATCH_SIZE, MobConstants.DEFAULT_MOB_COMPACTION_BATCH_SIZE); @@ -364,7 +365,8 @@ public class PartitionedMobCompactor extends MobCompactor { for (CompactionDelPartition delPartition : request.getDelPartitions()) { LOG.info(Objects.toString(delPartition.listDelFiles())); try { - MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), + MobUtils.removeMobFiles(conf, fs, tableDescriptor.getTableName(), + mobTableDir, column.getName(), delPartition.getStoreFiles()); } catch (IOException e) { LOG.error("Failed to archive the del files " + delPartition.getStoreFiles(), e); @@ -466,7 +468,7 @@ public class PartitionedMobCompactor extends MobCompactor { } List paths = new ArrayList<>(); final 
Connection c = ConnectionFactory.createConnection(conf); - final Table table = c.getTable(tableName); + final Table tb = c.getTable(tableDescriptor.getTableName()); try { Map>> results = new HashMap<>(); @@ -484,7 +486,7 @@ public class PartitionedMobCompactor extends MobCompactor { @Override public List call() throws Exception { LOG.info("Compacting mob files for partition " + partition.getPartitionId()); - return compactMobFilePartition(request, partition, delFiles, c, table); + return compactMobFilePartition(request, partition, delFiles, c, tb); } })); } @@ -505,7 +507,7 @@ public class PartitionedMobCompactor extends MobCompactor { } } finally { try { - table.close(); + tb.close(); } catch (IOException e) { LOG.error("Failed to close the Table", e); } @@ -684,7 +686,8 @@ public class PartitionedMobCompactor extends MobCompactor { try { closeStoreFileReaders(mobFilesToCompact); closeReaders = false; - MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), mobFilesToCompact); + MobUtils.removeMobFiles(conf, fs, tableDescriptor.getTableName(), mobTableDir, + column.getName(), mobFilesToCompact); } catch (IOException e) { LOG.error("Failed to archive the files " + mobFilesToCompact, e); } @@ -793,7 +796,8 @@ public class PartitionedMobCompactor extends MobCompactor { Path path = MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig); // archive the old del files try { - MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), delFiles); + MobUtils.removeMobFiles(conf, fs, tableDescriptor.getTableName(), mobTableDir, + column.getName(), delFiles); } catch (IOException e) { LOG.error("Failed to archive the old del files " + delFiles, e); } @@ -811,8 +815,7 @@ public class PartitionedMobCompactor extends MobCompactor { throws IOException { List scanners = StoreFileScanner.getScannersForStoreFiles(filesToCompact, false, true, false, false, HConstants.LATEST_TIMESTAMP); - long ttl = HStore.determineTTLFromFamily(column); - ScanInfo scanInfo = new ScanInfo(conf, column, ttl, 0, CellComparator.getInstance()); + ScanInfo scanInfo = new ScanInfo(conf, tableDescriptor, column, CellComparator.getInstance()); return new StoreScanner(scanInfo, scanType, scanners); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index afa7b84b86..0734346dee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -648,6 +648,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi final long timestampSlop; final long rowProcessorTimeout; + private final boolean isNanosecondTimestamps; + // Last flush time for each Store. Useful when we are flushing for each column private final ConcurrentMap lastStoreFlushTimeMap = new ConcurrentHashMap<>(); @@ -787,15 +789,21 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi this.maxBusyWaitDuration = conf.getLong("hbase.ipc.client.call.purge.timeout", 2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.isNanosecondTimestamps = this.htableDescriptor.isNanosecondTimestamps(); /* * timestamp.slop provides a server-side constraint on the timestamp. This * assumes that you base your TS around currentTimeMillis(). In this case, * throw an error to the user if the user-specified TS is newer than now + * slop. 
LATEST_TIMESTAMP == don't use this functionality */ - this.timestampSlop = conf.getLong( + long tSlop = conf.getLong( "hbase.hregion.keyvalue.timestamp.slop.millisecs", HConstants.LATEST_TIMESTAMP); + if (tSlop != HConstants.LATEST_TIMESTAMP && this.isNanosecondTimestamps) { + // Scale slop to nanosecond precision if this table is configured to handle nanoseconds. + tSlop *= 1000000; + } + this.timestampSlop = tSlop; /** * Timeout for the process time in processRowsWithLocks(). @@ -1273,6 +1281,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } } + private static long currentTimeForCell(final boolean isNano) { + return isNano ? + EnvironmentEdgeManager.currentTimeNano() : EnvironmentEdgeManager.currentTime(); + } + @Override public RegionInfo getRegionInfo() { return this.fs.getRegionInfo(); } @@ -3527,7 +3540,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi public void checkAndPrepare() throws IOException { final int[] metrics = {0, 0}; // index 0: puts, index 1: deletes visitBatchOperations(true, this.size(), new Visitor() { - private long now = EnvironmentEdgeManager.currentTime(); + private long now = currentTimeForCell(region.isNanosecondTimestamps); private WALEdit walEdit; @Override public boolean visit(int index) throws IOException { @@ -3852,7 +3865,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi @Override public void checkAndPrepare() throws IOException { - long now = EnvironmentEdgeManager.currentTime(); + long now = currentTimeForCell(region.isNanosecondTimestamps); visitBatchOperations(true, this.size(), (int index) -> { checkAndPrepareMutation(index, now); return true; }); @@ -4009,7 +4022,7 @@ // STEP 2. Update mini batch of all operations in progress with LATEST_TIMESTAMP timestamp // We should record the timestamp only after we have acquired the rowLock, // otherwise, newer puts/deletes are not guaranteed to have a newer timestamp - long now = EnvironmentEdgeManager.currentTime(); + long now = currentTimeForCell(isNanosecondTimestamps); batchOp.prepareMiniBatchOperations(miniBatchOp, now, acquiredRowLocks); // STEP 3. Build WAL edit @@ -4153,7 +4166,7 @@ // non-decreasing (see HBASE-14070) we should make sure that the mutation has a // larger timestamp than what was observed via Get. doBatchMutate already does this, but // there is no way to pass the cellTs. See HBASE-14054. - long now = EnvironmentEdgeManager.currentTime(); + long now = currentTimeForCell(isNanosecondTimestamps); long ts = Math.max(now, cellTs); // ensure write is not eclipsed byte[] byteTs = Bytes.toBytes(ts); if (mutation != null) { @@ -7520,6 +7533,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Short circuit the read only case if (processor.readOnly()) { try { + // TODO: should this be currentTimeForCell? long now = EnvironmentEdgeManager.currentTime(); doProcessRowWithTimeout(processor, now, this, null, null, timeout); processor.postProcess(this, walEdit, true); @@ -7555,6 +7569,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // STEP 3. Region lock lock(this.updatesLock.readLock(), acquiredRowLocks.isEmpty() ? 1 : acquiredRowLocks.size()); locked = true; + // TODO: should this be currentTimeForCell? long now = EnvironmentEdgeManager.currentTime(); // STEP 4.
Let the processor scan the rows, generate mutations and add waledits doProcessRowWithTimeout(processor, now, this, mutations, walEdit, timeout); @@ -7880,7 +7895,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi private WALEdit reckonDeltas(Operation op, Mutation mutation, Durability effectiveDurability, Map> forMemStore, List results) throws IOException { WALEdit walEdit = null; - long now = EnvironmentEdgeManager.currentTime(); + long now = currentTimeForCell(isNanosecondTimestamps); final boolean writeToWAL = effectiveDurability != Durability.SKIP_WAL; // Process a Store/family at a time. for (Map.Entry> entry: mutation.getFamilyCellMap().entrySet()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index a2e097dadb..5e8b9249a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -266,15 +266,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat this.dataBlockEncoder = new HFileDataBlockEncoderImpl(family.getDataBlockEncoding()); this.comparator = region.getCellComparator(); - // used by ScanQueryMatcher - long timeToPurgeDeletes = - Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); - LOG.trace("Time to purge deletes set to {}ms in store {}", timeToPurgeDeletes, this); - // Get TTL - long ttl = determineTTLFromFamily(family); - // Why not just pass a HColumnDescriptor in here altogether? Even if have - // to clone it? - scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator); + + this.scanInfo = new ScanInfo(conf, region.getTableDescriptor(), family, this.comparator); + this.memstore = getMemstore(); this.offPeakHours = OffPeakHours.getInstance(conf); @@ -390,25 +384,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat return StoreEngine.create(store, conf, comparator); } - /** - * @param family - * @return TTL in seconds of the specified family - */ - public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) { - // HCD.getTimeToLive returns ttl in seconds. Convert to milliseconds. - long ttl = family.getTimeToLive(); - if (ttl == HConstants.FOREVER) { - // Default is unlimited ttl. 
- ttl = Long.MAX_VALUE; - } else if (ttl == -1) { - ttl = Long.MAX_VALUE; - } else { - // Second -> ms adjust for user data - ttl *= 1000; - } - return ttl; - } - @Override public String getColumnFamilyName() { return this.family.getNameAsString(); @@ -1799,10 +1774,12 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat } private void removeUnneededFiles() throws IOException { - if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) return; + if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) { + return; + } if (getColumnFamilyDescriptor().getMinVersions() > 0) { LOG.debug("Skipping expired store file removal due to min version being {}", - getColumnFamilyDescriptor().getMinVersions()); + getColumnFamilyDescriptor().getMinVersions()); return; } this.lock.readLock().lock(); @@ -1811,8 +1788,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat synchronized (filesCompacting) { long cfTtl = getStoreFileTtl(); if (cfTtl != Long.MAX_VALUE) { - delSfs = storeEngine.getStoreFileManager().getUnneededFiles( - EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting); + long now = scanInfo.isNanosecondTimestamps() ? + EnvironmentEdgeManager.currentTimeNano() : EnvironmentEdgeManager.currentTime(); + delSfs = storeEngine.getStoreFileManager().getUnneededFiles(now - cfTtl, filesCompacting); addToCompactingFiles(delSfs); } } @@ -1829,9 +1807,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat replaceStoreFiles(delSfs, newFiles); completeCompaction(delSfs); LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in " - + this + " of " + this.getRegionInfo().getRegionNameAsString() - + "; total size for store is " - + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1)); + + this + " of " + this.getRegionInfo().getRegionNameAsString() + + "; total size for store is " + + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1)); } public void cancelRequestedCompaction(CompactionContext compaction) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java index 2fde311ccf..9552ceb790 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java @@ -22,6 +22,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.util.Bytes; @@ -50,6 +51,7 @@ public class ScanInfo { private boolean parallelSeekEnabled; private final long preadMaxBytes; private final boolean newVersionBehavior; + private final boolean nanosecondTimestamps; public static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT + (2 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_INT) @@ -57,17 +59,17 @@ public class ScanInfo { /** * @param conf + * @param table {@link TableDescriptor} describing the table * @param family {@link ColumnFamilyDescriptor} describing the column family - * @param ttl Store's TTL (in ms) - * @param timeToPurgeDeletes duration in ms after which a delete marker can be purged during a - * major 
compaction. * @param comparator The store's comparator */ - public ScanInfo(Configuration conf, ColumnFamilyDescriptor family, long ttl, - long timeToPurgeDeletes, CellComparator comparator) { - this(conf, family.getName(), family.getMinVersions(), family.getMaxVersions(), ttl, - family.getKeepDeletedCells(), family.getBlocksize(), timeToPurgeDeletes, comparator, - family.isNewVersionBehavior()); + public ScanInfo(Configuration conf, TableDescriptor table, ColumnFamilyDescriptor family, + CellComparator comparator) { + this(conf, family.getName(), family.getMinVersions(), family.getMaxVersions(), + determineTTL(family, table.isNanosecondTimestamps()), + family.getKeepDeletedCells(), family.getBlocksize(), + determineTimeToPurgeDeletes(conf, table.isNanosecondTimestamps()), comparator, + family.isNewVersionBehavior(), table.isNanosecondTimestamps()); } private static long getCellsPerTimeoutCheck(Configuration conf) { @@ -88,21 +90,25 @@ * be purged during a major compaction. * @param keepDeletedCells Store's keepDeletedCells setting * @param comparator The store's comparator + * @param newVersionBehavior whether to compare cells by MVCC when scanning + * @param nanosecondTimestamps whether to treat timestamps as nanoseconds */ public ScanInfo(Configuration conf, byte[] family, int minVersions, int maxVersions, long ttl, KeepDeletedCells keepDeletedCells, long blockSize, long timeToPurgeDeletes, - CellComparator comparator, boolean newVersionBehavior) { + CellComparator comparator, boolean newVersionBehavior, boolean nanosecondTimestamps) { this(family, minVersions, maxVersions, ttl, keepDeletedCells, timeToPurgeDeletes, comparator, conf.getLong(HConstants.TABLE_MAX_ROWSIZE_KEY, HConstants.TABLE_MAX_ROWSIZE_DEFAULT), conf.getBoolean("hbase.storescanner.use.pread", false), getCellsPerTimeoutCheck(conf), conf.getBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, false), - conf.getLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 4 * blockSize), newVersionBehavior); + conf.getLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 4 * blockSize), newVersionBehavior, + nanosecondTimestamps); } private ScanInfo(byte[] family, int minVersions, int maxVersions, long ttl, KeepDeletedCells keepDeletedCells, long timeToPurgeDeletes, CellComparator comparator, long tableMaxRowSize, boolean usePread, long cellsPerTimeoutCheck, - boolean parallelSeekEnabled, long preadMaxBytes, boolean newVersionBehavior) { + boolean parallelSeekEnabled, long preadMaxBytes, boolean newVersionBehavior, + boolean nanosecondTimestamps) { this.family = family; this.minVersions = minVersions; this.maxVersions = maxVersions; @@ -116,6 +122,47 @@ this.parallelSeekEnabled = parallelSeekEnabled; this.preadMaxBytes = preadMaxBytes; this.newVersionBehavior = newVersionBehavior; + this.nanosecondTimestamps = nanosecondTimestamps; + } + + /** + * @param conf the global configuration + * @param isNanosecondTimestamps whether timestamps are treated as nanoseconds + * @return the delay after which delete markers can be purged during a major compaction, + * in milliseconds by default, or in nanoseconds if the NANOSECOND_TIMESTAMPS table attribute is set.
*/ + private static long determineTimeToPurgeDeletes(final Configuration conf, + final boolean isNanosecondTimestamps) { + long ttpd = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); + if (isNanosecondTimestamps) { + ttpd *= 1000000; + } + return ttpd; + } + + /** + * @param family the store's column family + * @param isNanosecondTimestamps whether timestamps are treated as nanoseconds + * @return TTL of the specified column family, in milliseconds by default, or + * in nanoseconds if the NANOSECOND_TIMESTAMPS table attribute is set. + */ + private static long determineTTL(final ColumnFamilyDescriptor family, + final boolean isNanosecondTimestamps) { + // ColumnFamilyDescriptor.getTimeToLive() returns ttl in seconds. + long ttl = family.getTimeToLive(); + if (ttl == HConstants.FOREVER) { + // Default is unlimited ttl. + ttl = Long.MAX_VALUE; + } else if (ttl == -1) { + ttl = Long.MAX_VALUE; + } else if (isNanosecondTimestamps) { + // Second -> ns adjust for user data + ttl *= 1000000000; + } else { + // Second -> ms adjust for user data + ttl *= 1000; + } + return ttl; + } long getTableMaxRowSize() { @@ -170,12 +217,20 @@ return newVersionBehavior; } + /** + * Whether this ScanInfo is for a table that assumes nanosecond timestamps. + * @return true if nanosecond timestamps. + */ + public boolean isNanosecondTimestamps() { + return nanosecondTimestamps; + } + /** * Used for CP users for customizing max versions, ttl and keepDeletedCells. */ ScanInfo customize(int maxVersions, long ttl, KeepDeletedCells keepDeletedCells) { return new ScanInfo(family, minVersions, maxVersions, ttl, keepDeletedCells, timeToPurgeDeletes, comparator, tableMaxRowSize, usePread, cellsPerTimeoutCheck, parallelSeekEnabled, - preadMaxBytes, newVersionBehavior); + preadMaxBytes, newVersionBehavior, nanosecondTimestamps); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index be226fed04..fc4879054c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -171,7 +171,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner get = scan.isGetScan(); explicitColumnQuery = numColumns > 0; this.scan = scan; - this.now = EnvironmentEdgeManager.currentTime(); + this.now = scanInfo.isNanosecondTimestamps() ? + EnvironmentEdgeManager.currentTimeNano() : EnvironmentEdgeManager.currentTime(); this.oldestUnexpiredTS = scan.isRaw() ? 0L : now - scanInfo.getTtl(); this.minVersions = scanInfo.getMinVersions(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 512cca0c10..099467b790 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -711,11 +711,24 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { new Path(root, "mapreduce-am-staging-root-dir").toString()); } + /** + * Check whether the tests should assume NANOSECOND_TIMESTAMPS when creating + * new tables. Default is false.
+ */ + public boolean isNanosecondTimestampsEnabled() { + final String propName = "hbase.tests.nanosecond.timestamps"; + String v = System.getProperty(propName); + if (v != null){ + return Boolean.parseBoolean(v); + } + return false; + } + /** * Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating - * new column families. Default to false. + * new column families. Default is false. */ - public boolean isNewVersionBehaviorEnabled(){ + public boolean isNewVersionBehaviorEnabled() { final String propName = "hbase.tests.new.version.behavior"; String v = System.getProperty(propName); if (v != null){ @@ -1464,7 +1477,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } builder.setColumnFamily(cfdb.build()); } - TableDescriptor td = builder.build(); + TableDescriptor td = builder.setNanosecondTimestamps(isNanosecondTimestampsEnabled()).build(); getAdmin().createTable(td, splitKeys); // HBaseAdmin only waits for regions to appear in hbase:meta // we should wait until they are assigned @@ -1488,7 +1501,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { .setNewVersionBehavior(true).build()); } } - getAdmin().createTable(builder.build(), splitRows); + getAdmin().createTable( + builder.setNanosecondTimestamps(isNanosecondTimestampsEnabled()).build(), splitRows); // HBaseAdmin only waits for regions to appear in hbase:meta // we should wait until they are assigned waitUntilAllRegionsAssigned(htd.getTableName()); @@ -1550,6 +1564,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public Table createTable(TableName tableName, byte[][] families, int numVersions, byte[][] splitKeys) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions); if (isNewVersionBehaviorEnabled()) { @@ -1589,6 +1604,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family) .setMaxVersions(numVersions) @@ -1607,25 +1623,26 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize, String cpName) throws IOException { - HTableDescriptor desc = new HTableDescriptor(tableName); - for (byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family) - .setMaxVersions(numVersions) - .setBlocksize(blockSize); - if (isNewVersionBehaviorEnabled()) { - hcd.setNewVersionBehavior(true); - } - desc.addFamily(hcd); - } - if(cpName != null) { - desc.addCoprocessor(cpName); + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); + for (byte[] family : families) { + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions) + .setBlocksize(blockSize); + if (isNewVersionBehaviorEnabled()) { + hcd.setNewVersionBehavior(true); } - getAdmin().createTable(desc); - // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are - // assigned - waitUntilAllRegionsAssigned(tableName); - return 
getConnection().getTable(tableName); + desc.addFamily(hcd); + } + if(cpName != null) { + desc.addCoprocessor(cpName); } + getAdmin().createTable(desc); + // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are + // assigned + waitUntilAllRegionsAssigned(tableName); + return getConnection().getTable(tableName); + } /** * Create a table. @@ -1639,6 +1656,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { int[] numVersions) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); int i = 0; for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family) @@ -1667,6 +1685,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public Table createTable(TableName tableName, byte[] family, byte[][] splitRows) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); HColumnDescriptor hcd = new HColumnDescriptor(family); if (isNewVersionBehaviorEnabled()) { hcd.setNewVersionBehavior(true); @@ -1791,6 +1810,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public HTableDescriptor createTableDescriptor(final TableName name, final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) { HTableDescriptor htd = new HTableDescriptor(name); + htd.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) { HColumnDescriptor hcd = new HColumnDescriptor(cfName) .setMinVersions(minVersions) @@ -1824,6 +1844,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public HTableDescriptor createTableDescriptor(final TableName tableName, byte[][] families, int maxVersions) { HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family) .setMaxVersions(maxVersions); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java index 7c67cd8e79..71ffc4cd73 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java @@ -319,7 +319,7 @@ public class TestMobCompactor { countFiles(tableName, false, family2)); // do the mob file compaction - MobCompactor compactor = new PartitionedMobCompactor(conf, fs, tableName, hcd1, pool); + MobCompactor compactor = new PartitionedMobCompactor(conf, fs, desc, hcd1, pool); compactor.compact(); assertEquals("After compaction: mob rows count", regionNum * (rowNumPerRegion - delRowNum), @@ -459,7 +459,7 @@ public class TestMobCompactor { countFiles(tableName, false, family2)); // do the mob compaction - MobCompactor compactor = new PartitionedMobCompactor(conf, fs, tableName, hcd1, pool); + MobCompactor compactor = new PartitionedMobCompactor(conf, fs, desc, hcd1, pool); compactor.compact(); assertEquals("After first compaction: mob rows count", regionNum @@ -1169,11 +1169,10 @@ public class TestMobCompactor { Bytes.toBytes(mobValue8))); } - private void commonPolicyTestLogic (final String tableNameAsString, + private void commonPolicyTestLogic(final String tableNameAsString, final MobCompactPartitionPolicy 
pType, final boolean majorCompact, final int expectedFileNumbers, final String[] expectedFileNames, - final boolean setupAndLoadData - ) throws Exception { + final boolean setupAndLoadData) throws Exception { if (setupAndLoadData) { setUpForPolicyTest(tableNameAsString, pType); @@ -1197,7 +1196,7 @@ public class TestMobCompactor { Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableName, family1); FileStatus[] fileList = fs.listStatus(mobDirPath); - assertTrue(fileList.length == expectedFileNumbers); + assertEquals(expectedFileNumbers, fileList.length); // the file names are expected ArrayList fileNames = new ArrayList<>(expectedFileNumbers); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java index 2cf741ed9b..943fbd34d0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java @@ -37,6 +37,7 @@ import java.util.concurrent.RejectedExecutionHandler; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -48,11 +49,13 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; @@ -63,7 +66,6 @@ import org.apache.hadoop.hbase.mob.compactions.MobCompactionRequest.CompactionTy import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionDelPartition; import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; import org.apache.hadoop.hbase.regionserver.ScanInfo; @@ -380,7 +382,8 @@ public class TestPartitionedMobCompactor { listFiles(); TableName tName = TableName.valueOf(tableName); - MobCompactor compactor = new PartitionedMobCompactor(conf, faultyFs, tName, hcd, pool); + MobCompactor compactor = new PartitionedMobCompactor(conf, faultyFs, + new HTableDescriptor(tName), hcd, pool); faultyFs.setThrowException(true); try { compactor.compact(allFiles, true); @@ -485,8 +488,8 @@ public class TestPartitionedMobCompactor { listFiles(); - PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, - TableName.valueOf(tableName), hcd, pool) { + HTableDescriptor td = new HTableDescriptor(TableName.valueOf(tableName)); + PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, td, hcd, pool) { @Override 
public List compact(List files, boolean isForceAllFiles) throws IOException { @@ -516,11 +519,11 @@ public class TestPartitionedMobCompactor { int PartitionsIncludeDelFiles = 0; CacheConfig cacheConfig = null; - MyPartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName, + MyPartitionedMobCompactor(Configuration conf, FileSystem fs, TableDescriptor td, ColumnFamilyDescriptor column, ExecutorService pool, final int delPartitionSize, final CacheConfig cacheConf, final int PartitionsIncludeDelFiles) throws IOException { - super(conf, fs, tableName, column, pool); + super(conf, fs, td, column, pool); this.delPartitionSize = delPartitionSize; this.cacheConfig = cacheConf; this.PartitionsIncludeDelFiles = PartitionsIncludeDelFiles; @@ -618,8 +621,9 @@ public class TestPartitionedMobCompactor { listFiles(); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); MyPartitionedMobCompactor compactor = new MyPartitionedMobCompactor(conf, fs, - TableName.valueOf(tableName), hcd, pool, 1, cacheConf, 1); + htd, hcd, pool, 1, cacheConf, 1); compactor.compact(allFiles, true); } @@ -639,7 +643,7 @@ public class TestPartitionedMobCompactor { listFiles(); MyPartitionedMobCompactor compactor = new MyPartitionedMobCompactor(conf, fs, - TableName.valueOf(tableName), hcd, pool, 3, cacheConf, 3); + new HTableDescriptor(TableName.valueOf(tableName)), hcd, pool, 3, cacheConf, 3); compactor.compact(allFiles, true); } @@ -669,8 +673,8 @@ public class TestPartitionedMobCompactor { */ private void testSelectFiles(String tableName, final CompactionType type, final boolean isForceAllFiles, final List expected) throws IOException { - PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, - TableName.valueOf(tableName), hcd, pool) { + HTableDescriptor td = new HTableDescriptor(TableName.valueOf(tableName)); + PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, td, hcd, pool) { @Override public List compact(List files, boolean isForceAllFiles) throws IOException { @@ -739,8 +743,8 @@ public class TestPartitionedMobCompactor { */ private void testCompactDelFiles(String tableName, final int expectedFileCount, final int expectedCellCount, boolean isForceAllFiles) throws IOException { - PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, - TableName.valueOf(tableName), hcd, pool) { + HTableDescriptor td = new HTableDescriptor(TableName.valueOf(tableName)); + PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, td, hcd, pool) { @Override protected List performCompaction(PartitionedMobCompactionRequest request) throws IOException { @@ -884,9 +888,8 @@ public class TestPartitionedMobCompactor { } List scanners = new ArrayList<>(StoreFileScanner.getScannersForStoreFiles(sfs, false, true, false, false, HConstants.LATEST_TIMESTAMP)); - long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); - long ttl = HStore.determineTTLFromFamily(hcd); - ScanInfo scanInfo = new ScanInfo(conf, hcd, ttl, timeToPurgeDeletes, CellComparatorImpl.COMPARATOR); + ScanInfo scanInfo = new ScanInfo(conf, new HTableDescriptor(TableName.valueOf("test")), + hcd, CellComparatorImpl.COMPARATOR); StoreScanner scanner = new StoreScanner(scanInfo, ScanType.COMPACT_RETAIN_DELETES, scanners); List results = new ArrayList<>(); boolean hasMore = true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index 8dbddb9e44..8b8df88af5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -213,7 +213,8 @@ public class TestCompactingMemStore extends TestDefaultMemStore { Configuration conf = HBaseConfiguration.create(); for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) { ScanInfo scanInfo = new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE, - KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false); + KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), + false, false); try (InternalScanner scanner = new StoreScanner(new Scan().withStartRow(Bytes.toBytes(startRowId)), scanInfo, null, memstore.getScanners(0))) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index ad0d2ffe98..940d18d15c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -182,7 +182,8 @@ public class TestDefaultMemStore { List result = new ArrayList<>(); Configuration conf = HBaseConfiguration.create(); ScanInfo scanInfo = new ScanInfo(conf, null, 0, 1, HConstants.LATEST_TIMESTAMP, - KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false); + KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), + false, false); int count = 0; try (StoreScanner s = new StoreScanner(scan, scanInfo, null, memstorescanners)) { while (s.next(result)) { @@ -604,7 +605,7 @@ public class TestDefaultMemStore { for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) { ScanInfo scanInfo = new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE, KeepDeletedCells.FALSE, - HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false); + HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false, false); try (InternalScanner scanner = new StoreScanner(new Scan().withStartRow(Bytes.toBytes(startRowId)), scanInfo, null, memstore.getScanners(0))) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java index cc9e3850ff..571f15b097 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java @@ -449,10 +449,7 @@ public class TestMobStoreCompaction { List scanners = StoreFileScanner.getScannersForStoreFiles(sfs, false, true, false, false, HConstants.LATEST_TIMESTAMP); - long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); - long ttl = HStore.determineTTLFromFamily(hcd); - ScanInfo scanInfo = new ScanInfo(copyOfConf, hcd, ttl, timeToPurgeDeletes, - CellComparatorImpl.COMPARATOR); + ScanInfo scanInfo = new ScanInfo(copyOfConf, htd, hcd, CellComparatorImpl.COMPARATOR); StoreScanner scanner = new StoreScanner(scanInfo, ScanType.COMPACT_DROP_DELETES, scanners); try { size += UTIL.countRows(scanner); diff --git 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
index 66e18472ea..4a2c40122b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
@@ -270,7 +270,8 @@ public class TestReversibleScanners {
     ScanInfo scanInfo =
         new ScanInfo(TEST_UTIL.getConfiguration(), FAMILYNAME, 0, Integer.MAX_VALUE, Long.MAX_VALUE,
-            KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR, false);
+            KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR,
+            false, false);
 
     // Case 1.Test a full reversed scan
     Scan scan = new Scan();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
index d5775da83a..b6536b5746 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
@@ -75,7 +75,8 @@ public class TestStoreScanner {
   private static final byte[] CF = Bytes.toBytes(CF_STR);
   static Configuration CONF = HBaseConfiguration.create();
   private ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, Integer.MAX_VALUE, Long.MAX_VALUE,
-      KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
+      KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(),
+      false, false);
 
   /**
    * From here on down, we have a bunch of defines and specific CELL_GRID of Cells. The
@@ -814,7 +815,7 @@ public class TestStoreScanner {
     Scan scan = new Scan();
     scan.readVersions(1);
     ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
-        HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
+        HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false, false);
     try (StoreScanner scanner = new StoreScanner(scan, scanInfo, null, scanners)) {
       List<Cell> results = new ArrayList<>();
       assertEquals(true, scanner.next(results));
@@ -878,7 +879,7 @@ public class TestStoreScanner {
     scan.readVersions(1);
     // scanner with ttl equal to 500
     ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
-        HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
+        HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false, false);
     try (StoreScanner scanner = new StoreScanner(scan, scanInfo, null, scanners)) {
       List<Cell> results = new ArrayList<>();
       assertEquals(true, scanner.next(results));
@@ -941,7 +942,7 @@ public class TestStoreScanner {
         KeepDeletedCells.FALSE /* keepDeletedCells */,
         HConstants.DEFAULT_BLOCKSIZE /* block size */,
         200, /* timeToPurgeDeletes */
-        CellComparator.getInstance(), false);
+        CellComparator.getInstance(), false, false);
     try (StoreScanner scanner =
         new StoreScanner(scanInfo, OptionalInt.of(2), ScanType.COMPACT_DROP_DELETES, scanners)) {
       List<Cell> results = new ArrayList<>();
@@ -970,7 +971,7 @@ public class TestStoreScanner {
       create("R1", "cf", "a", now - 10, KeyValue.Type.Put, "dont-care"), };
     List<KeyValueScanner> scanners = scanFixture(kvs);
     ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
-        HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
+        HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false, false);
     try (StoreScanner storeScanner = new StoreScanner(scanInfo, OptionalInt.empty(),
         ScanType.COMPACT_RETAIN_DELETES, scanners)) {
       assertFalse(storeScanner.isScanUsePread());
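Every `false);` to `false, false);` edit above (and in the matcher tests further down) is the same
mechanical change: the long-form ScanInfo constructor used by the scanner tests grows one trailing
boolean for the per-table nanosecond-timestamps flag. The parameter names below are an assumption,
since the hunks only show positional arguments; the inline comments follow the style already used
in the @@ -941 hunk:

    ScanInfo scanInfo = new ScanInfo(CONF, CF,
        0,                            /* minVersions */
        1,                            /* maxVersions */
        500,                          /* ttl */
        KeepDeletedCells.FALSE,       /* keepDeletedCells */
        HConstants.DEFAULT_BLOCKSIZE, /* block size */
        0,                            /* timeToPurgeDeletes */
        CellComparator.getInstance(),
        false,                        /* newVersionBehavior */
        false);                       /* nanosecond timestamps, added by this patch */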
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
index 698dc816c1..f90753dd0d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
@@ -32,6 +32,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.OptionalLong;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -97,8 +99,9 @@ public class TestDateTieredCompactor {
     conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
     final Scanner scanner = new Scanner(input);
     // Create store mock that is satisfactory for compactor.
+    HTableDescriptor td = new HTableDescriptor(TABLE_NAME);
     HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
-    ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparatorImpl.COMPARATOR);
+    ScanInfo si = new ScanInfo(conf, td, col, CellComparatorImpl.COMPARATOR);
     HStore store = mock(HStore.class);
     when(store.getStorefiles()).thenReturn(storefiles);
     when(store.getColumnFamilyDescriptor()).thenReturn(col);
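Both compactor tests (this one and the stripe test that follows) wire the descriptor-based
ScanInfo into a mocked store the same way. A condensed sketch of that wiring, where TABLE_NAME and
NAME_OF_THINGS are the tests' own constants and mock/when are the Mockito helpers the tests
already import statically:

    HTableDescriptor td = new HTableDescriptor(TABLE_NAME);
    HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
    ScanInfo si = new ScanInfo(conf, td, col, CellComparatorImpl.COMPARATOR);
    HStore store = mock(HStore.class);
    when(store.getColumnFamilyDescriptor()).thenReturn(col);
    // The compactor under test reads table-level flags through the store's ScanInfo.
    when(store.getScanInfo()).thenReturn(si);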
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
index 8b5df7262a..c6581e1e27 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
@@ -30,6 +30,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -198,8 +200,9 @@ public class TestStripeCompactor {
     final Scanner scanner = new Scanner(input);
 
     // Create store mock that is satisfactory for compactor.
+    HTableDescriptor td = new HTableDescriptor(TABLE_NAME);
     HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
-    ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparatorImpl.COMPARATOR);
+    ScanInfo si = new ScanInfo(conf, td, col, CellComparatorImpl.COMPARATOR);
     HStore store = mock(HStore.class);
     when(store.getColumnFamilyDescriptor()).thenReturn(col);
     when(store.getScanInfo()).thenReturn(si);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java
index 223508f2a9..83a1e5b6df 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java
@@ -79,7 +79,7 @@ public class TestCompactionScanQueryMatcher extends AbstractTestScanQueryMatcher
     long now = EnvironmentEdgeManager.currentTime();
     // Set time to purge deletes to negative value to avoid it ever happening.
     ScanInfo scanInfo = new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE,
-        HConstants.DEFAULT_BLOCKSIZE, -1L, rowComparator, false);
+        HConstants.DEFAULT_BLOCKSIZE, -1L, rowComparator, false, false);
     CompactionScanQueryMatcher qm = CompactionScanQueryMatcher.create(scanInfo,
         ScanType.COMPACT_RETAIN_DELETES, Long.MAX_VALUE, HConstants.OLDEST_TIMESTAMP,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
index 861b83e2fe..0358e6a6b2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
@@ -63,7 +63,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     // Do with fam2 which has a col2 qualifier.
     UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan,
         new ScanInfo(this.conf, fam2, 10, 1, ttl, KeepDeletedCells.FALSE,
-            HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+            HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
         get.getFamilyMap().get(fam2), now - ttl, now, null);
     Cell kv = new KeyValue(row1, fam2, col2, 1, data);
     Cell cell = PrivateCellUtil.createLastOnRowCol(kv);
@@ -90,7 +90,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     // 2,4,5
     UserScanQueryMatcher qm = UserScanQueryMatcher.create(
       scan, new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE,
-          HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+          HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
       get.getFamilyMap().get(fam2), now - ttl, now, null);
 
     List<KeyValue> memstore = new ArrayList<>(6);
@@ -133,7 +133,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     long now = EnvironmentEdgeManager.currentTime();
     UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan, new ScanInfo(this.conf, fam2, 0, 1,
-        ttl, KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+        ttl, KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
       null, now - ttl, now, null);
 
     List<KeyValue> memstore = new ArrayList<>(6);
@@ -179,7 +179,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     long now = EnvironmentEdgeManager.currentTime();
     UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan,
         new ScanInfo(this.conf, fam2, 0, 1, testTTL, KeepDeletedCells.FALSE,
-            HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+            HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
         get.getFamilyMap().get(fam2), now - testTTL, now, null);
 
     KeyValue[] kvs = new KeyValue[] { new KeyValue(row1, fam2, col1, now - 100, data),
@@ -223,7 +223,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan,
         new ScanInfo(this.conf, fam2, 0, 1, testTTL, KeepDeletedCells.FALSE,
-            HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+            HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
         null, now - testTTL, now, null);
 
     KeyValue[] kvs = new KeyValue[] { new KeyValue(row1, fam2, col1, now - 100, data),
@@ -268,7 +268,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     // scan with column 2,4,5
     UserScanQueryMatcher qm = UserScanQueryMatcher.create(
       scanWithFilter, new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE,
-          HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+          HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
       get.getFamilyMap().get(fam2), now - ttl, now, null);
 
     List<KeyValue> memstore = new ArrayList<>();
@@ -318,7 +318,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     // scan with column 2,4,5, the family with maxVersion = 3
     UserScanQueryMatcher qm = UserScanQueryMatcher.create(
       scanWithFilter, new ScanInfo(this.conf, fam2, 0, 3, ttl, KeepDeletedCells.FALSE,
-          HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+          HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
       get.getFamilyMap().get(fam2), now - ttl, now, null);
 
     List<KeyValue> memstore = new ArrayList<>();
@@ -338,7 +338,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     scanWithFilter = new Scan(scan).setFilter(new AlwaysIncludeFilter()).readVersions(1);
     qm = UserScanQueryMatcher.create(
       scanWithFilter, new ScanInfo(this.conf, fam2, 0, 2, ttl, KeepDeletedCells.FALSE,
-          HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+          HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
       get.getFamilyMap().get(fam2), now - ttl, now, null);
 
     List<KeyValue> memstore2 = new ArrayList<>();
@@ -378,7 +378,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     // scan with column 2,4,5, the family with maxVersion = 5
     UserScanQueryMatcher qm = UserScanQueryMatcher.create(
       scanWithFilter, new ScanInfo(this.conf, fam2, 0, 5, ttl, KeepDeletedCells.FALSE,
-          HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+          HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
       get.getFamilyMap().get(fam2), now - ttl, now, null);
 
     List<KeyValue> memstore = new ArrayList<>();
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 68bb3535d8..60b2624cb2 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -29,7 +29,6 @@ java_import org.apache.hadoop.hbase.TableName
 
 # Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin
 module Hbase
-  # rubocop:disable Metrics/ClassLength
   class Admin
     include HBaseConstants
@@ -1237,6 +1236,7 @@ module Hbase
       htd.setRegionMemstoreReplication(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION)
       htd.setRegionSplitPolicyClassName(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY)
       htd.setRegionReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::REGION_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::REGION_REPLICATION)
+      htd.setNanosecondTimestamps(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NANOSECOND_TIMESTAMPS))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NANOSECOND_TIMESTAMPS)
       set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
       set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
     end
-- 
2.18.0
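With the admin.rb hook above, the new flag rides the same code path as the other table-scope
attributes (REGION_REPLICATION and friends), so shell usage should follow the existing
convention. The exact spelling below is an assumption based on that convention, and the table and
family names are illustrative:

    hbase> create 't1', {NAME => 'f1'}, {'NANOSECOND_TIMESTAMPS' => 'true'}
    hbase> alter 't1', {'NANOSECOND_TIMESTAMPS' => 'false'}
    hbase> describe 't1'   # the attribute should be listed among the table attributes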