From 23cbade3c6124915d8540d34f9f2e45c7f674ebe Mon Sep 17 00:00:00 2001 From: Andrey Elenskiy Date: Tue, 5 Jan 2021 11:18:20 -0800 Subject: [PATCH] HBASE-21476 Support for nanosecond timestamps --- .../apache/hadoop/hbase/HTableDescriptor.java | 23 ++++++ .../hadoop/hbase/client/TableDescriptor.java | 7 ++ .../hbase/client/TableDescriptorBuilder.java | 38 +++++++++ .../hbase/util/DefaultEnvironmentEdge.java | 14 ++++ .../hadoop/hbase/util/EnvironmentEdge.java | 9 +++ .../hbase/util/EnvironmentEdgeManager.java | 12 ++- .../apache/hadoop/hbase/master/HMaster.java | 6 +- .../master/MasterMobCompactionThread.java | 26 +++--- .../hbase/master/MasterRpcServices.java | 5 +- .../hbase/master/MobCompactionChore.java | 2 +- .../org/apache/hadoop/hbase/mob/MobUtils.java | 11 +-- .../hbase/mob/compactions/MobCompactor.java | 13 +-- .../compactions/PartitionedMobCompactor.java | 24 +++--- .../hadoop/hbase/regionserver/HRegion.java | 26 ++++-- .../hadoop/hbase/regionserver/HStore.java | 34 ++------ .../hadoop/hbase/regionserver/ScanInfo.java | 81 ++++++++++++++++--- .../hbase/regionserver/StoreScanner.java | 3 +- .../hadoop/hbase/HBaseTestingUtility.java | 25 +++++- .../TestRegionCoprocessorHost.java | 2 +- .../mob/compactions/TestMobCompactor.java | 4 +- .../TestPartitionedMobCompactor.java | 33 ++++---- .../regionserver/TestBulkLoadReplication.java | 7 +- .../regionserver/TestCompactingMemStore.java | 3 +- .../regionserver/TestDefaultMemStore.java | 5 +- .../hbase/regionserver/TestHRegion.java | 5 +- .../regionserver/TestMajorCompaction.java | 4 +- .../regionserver/TestMobStoreCompaction.java | 5 +- .../regionserver/TestReversibleScanners.java | 3 +- .../hbase/regionserver/TestStoreScanner.java | 16 ++-- .../regionserver/TestStoreScannerClosure.java | 3 +- .../compactions/TestDateTieredCompactor.java | 5 +- .../compactions/TestStripeCompactor.java | 5 +- .../TestCompactionScanQueryMatcher.java | 2 +- .../TestUserScanQueryMatcher.java | 18 ++--- hbase-shell/src/main/ruby/hbase/admin.rb | 1 + 35 files changed, 335 insertions(+), 145 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index c537c5038d..1f14f588d8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -62,6 +62,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable DEFAULT_VALUES = new HashMap<>(); private final static Set RESERVED_KEYWORDS = new HashSet<>(); @@ -247,6 +258,7 @@ public class TableDescriptorBuilder { DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION)); DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY)); + DEFAULT_VALUES.put(NANOSECOND_TIMESTAMPS, String.valueOf(DEFAULT_NANOSECOND_TIMESTAMPS)); DEFAULT_VALUES.keySet().stream() .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add); RESERVED_KEYWORDS.add(IS_META_KEY); @@ -497,6 +509,11 @@ public class TableDescriptorBuilder { return this; } + public TableDescriptorBuilder setNanosecondTimestamps(final boolean isNano) { + desc.setNanosecondTimestamps(isNano); + return this; + } + /** * @deprecated since 2.0.0 and will be removed in 3.0.0. 
* @see HBASE-15583 @@ -871,6 +888,27 @@ public class TableDescriptorBuilder { return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable)); } + /** + * Check if table handles timestamps with nanoseconds precision. + * + * @return true if table uses nanosecond precision for timestamps. + */ + @Override + public boolean isNanosecondTimestamps() { + return getOrDefault(NANOSECOND_TIMESTAMPS_KEY, Boolean::valueOf, + DEFAULT_NANOSECOND_TIMESTAMPS); + } + + /** + * Setting the NANOSECOND_TIMESTAMPS flag + * + * @param isNano True to use nanosecond precision for timestamps. + * @return the modifyable TD + */ + public ModifyableTableDescriptor setNanosecondTimestamps(final boolean isNano) { + return setValue(NANOSECOND_TIMESTAMPS_KEY, Boolean.toString(isNano)); + } + /** * Check if normalization enable flag of the table is true. If flag is false * then no region normalizer won't attempt to normalize this table. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java index 422cc16507..d4abd833f0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java @@ -18,6 +18,8 @@ */ package org.apache.hadoop.hbase.util; +import java.time.Instant; + import org.apache.yetus.audience.InterfaceAudience; /** @@ -35,4 +37,16 @@ public class DefaultEnvironmentEdge implements EnvironmentEdge { public long currentTime() { return System.currentTimeMillis(); } + + /** + * {@inheritDoc} + *

+ * This implementation returns current time in nanoseconds via {@link java.time.Instant#now()}
+ */ + @Override + public long currentTimeNano() { + Instant now = Instant.now(); + return now.getEpochSecond() * 1000000000 + now.getNano(); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java index 635c2764d2..6528edbc4e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java @@ -34,4 +34,13 @@ public interface EnvironmentEdge { * @return Current time. */ long currentTime(); + + /** + * Returns current time in nanosecond precision. + * + * @return current time in nanoseconds. + */ + default long currentTimeNano() { + return currentTime() * 1000000; + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java index c38ddf7f86..2c1a27e2e1 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java @@ -60,7 +60,7 @@ import org.apache.yetus.audience.InterfaceAudience; is advised not to inject any other {@link org.apache.hadoop.hbase.util.EnvironmentEdge}. */ @InterfaceAudience.Private -public class EnvironmentEdgeManager { +final public class EnvironmentEdgeManager { private static volatile EnvironmentEdge delegate = new DefaultEnvironmentEdge(); private EnvironmentEdgeManager() { @@ -108,4 +108,14 @@ public class EnvironmentEdgeManager { public static long currentTime() { return getDelegate().currentTime(); } + + /** + * Defers to the delegate and calls the + * {@link EnvironmentEdge#currentTimeNano()} method. + * + * @return current time in nanoseconds according to the delegate. + */ + public static long currentTimeNano() { + return getDelegate().currentTimeNano(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 9e7942b2d5..0696104930 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -3384,13 +3384,13 @@ public class HMaster extends HRegionServer implements MasterServices { /** * Requests mob compaction. - * @param tableName The table the compact. + * @param td The descriptor of the table to compact. * @param columns The compacted columns. * @param allFiles Whether add all mob files into the compaction. 
*/ - public void requestMobCompaction(TableName tableName, + public void requestMobCompaction(TableDescriptor td, List columns, boolean allFiles) throws IOException { - mobCompactThread.requestMobCompaction(conf, getFileSystem(), tableName, columns, allFiles); + mobCompactThread.requestMobCompaction(conf, getFileSystem(), td, columns, allFiles); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java index 0779eeafe8..71750c24c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java @@ -28,8 +28,8 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.master.locking.LockManager; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure2.LockType; @@ -71,20 +71,20 @@ public class MasterMobCompactionThread { * Requests mob compaction * @param conf The Configuration * @param fs The file system - * @param tableName The table the compact + * @param td The descriptor of the table to compact * @param columns The column descriptors * @param allFiles Whether add all mob files into the compaction. */ - public void requestMobCompaction(Configuration conf, FileSystem fs, TableName tableName, + public void requestMobCompaction(Configuration conf, FileSystem fs, TableDescriptor td, List columns, boolean allFiles) throws IOException { - master.reportMobCompactionStart(tableName); + master.reportMobCompactionStart(td.getTableName()); try { - masterMobPool.execute(new CompactionRunner(fs, tableName, columns, + masterMobPool.execute(new CompactionRunner(fs, td, columns, allFiles, mobCompactorPool)); } catch (RejectedExecutionException e) { // in case the request is rejected by the pool try { - master.reportMobCompactionEnd(tableName); + master.reportMobCompactionEnd(td.getTableName()); } catch (IOException e1) { LOG.error("Failed to mark end of mob compaction", e1); } @@ -92,22 +92,22 @@ public class MasterMobCompactionThread { } if (LOG.isDebugEnabled()) { LOG.debug("The mob compaction is requested for the columns " + columns - + " of the table " + tableName.getNameAsString()); + + " of the table " + td.getTableName().getNameAsString()); } } private class CompactionRunner implements Runnable { private FileSystem fs; - private TableName tableName; + private TableDescriptor tableDescriptor; private List hcds; private boolean allFiles; private ExecutorService pool; - public CompactionRunner(FileSystem fs, TableName tableName, List hcds, + public CompactionRunner(FileSystem fs, TableDescriptor td, List hcds, boolean allFiles, ExecutorService pool) { super(); this.fs = fs; - this.tableName = tableName; + this.tableDescriptor = td; this.hcds = hcds; this.allFiles = allFiles; this.pool = pool; @@ -117,17 +117,17 @@ public class MasterMobCompactionThread { public void run() { // These locks are on dummy table names, and only used for compaction/mob file cleaning. 
final LockManager.MasterLock lock = master.getLockManager().createMasterLock( - MobUtils.getTableLockName(tableName), LockType.EXCLUSIVE, + MobUtils.getTableLockName(tableDescriptor.getTableName()), LockType.EXCLUSIVE, this.getClass().getName() + ": mob compaction"); try { for (ColumnFamilyDescriptor hcd : hcds) { - MobUtils.doMobCompaction(conf, fs, tableName, hcd, pool, allFiles, lock); + MobUtils.doMobCompaction(conf, fs, tableDescriptor, hcd, pool, allFiles, lock); } } catch (IOException e) { LOG.error("Failed to perform the mob compaction", e); } finally { try { - master.reportMobCompactionEnd(tableName); + master.reportMobCompactionEnd(tableDescriptor.getTableName()); } catch (IOException e) { LOG.error("Failed to mark end of mob compaction", e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 15ade550ca..fa230a0e27 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1868,7 +1868,8 @@ public class MasterRpcServices extends RSRpcServices implements } boolean allFiles = false; List compactedColumns = new ArrayList<>(); - ColumnFamilyDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies(); + TableDescriptor td = master.getTableDescriptors().get(tableName); + ColumnFamilyDescriptor[] hcds = td.getColumnFamilies(); byte[] family = null; if (request.hasFamily()) { family = request.getFamily().toByteArray(); @@ -1902,7 +1903,7 @@ public class MasterRpcServices extends RSRpcServices implements LOG.trace("User-triggered mob compaction requested for table: " + tableName.getNameAsString() + " for column family: " + familyLogMsg); } - master.requestMobCompaction(tableName, compactedColumns, allFiles); + master.requestMobCompaction(td, compactedColumns, allFiles); return CompactRegionResponse.newBuilder().build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java index 6c5d677a86..a826f0a935 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java @@ -75,7 +75,7 @@ public class MobCompactionChore extends ScheduledChore { reported = true; } MobUtils.doMobCompaction(master.getConfiguration(), master.getFileSystem(), - htd.getTableName(), hcd, pool, false, lock); + htd, hcd, pool, false, lock); } } finally { if (reported) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index c2c0d8102a..d03851a0e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -817,12 +817,12 @@ public final class MobUtils { * Performs the mob compaction. * @param conf the Configuration * @param fs the file system - * @param tableName the table the compact + * @param td the descriptor of the table to compact * @param hcd the column descriptor * @param pool the thread pool * @param allFiles Whether add all mob files into the compaction. 
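
One implication of the MobUtils change below: the compactor named by MobConstants.MOB_COMPACTOR_CLASS_KEY is now instantiated reflectively with a TableDescriptor argument instead of a TableName, so a custom compactor must expose the new constructor shape to stay loadable. A hedged sketch (the subclass itself is hypothetical):

    import java.io.IOException;
    import java.util.concurrent.ExecutorService;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactor;

    // Hypothetical custom compactor; only the constructor signature matters for the
    // ReflectionUtils.instantiateWithCustomCtor call in the MobUtils hunk below.
    public class MyMobCompactor extends PartitionedMobCompactor {
      public MyMobCompactor(Configuration conf, FileSystem fs, TableDescriptor td,
          ColumnFamilyDescriptor column, ExecutorService pool) throws IOException {
        super(conf, fs, td, column, pool);
      }
    }
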
*/ - public static void doMobCompaction(Configuration conf, FileSystem fs, TableName tableName, + public static void doMobCompaction(Configuration conf, FileSystem fs, TableDescriptor td, ColumnFamilyDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock) throws IOException { @@ -830,10 +830,11 @@ public final class MobUtils { PartitionedMobCompactor.class.getName()); // instantiate the mob compactor. MobCompactor compactor = null; + final TableName tableName = td.getTableName(); try { compactor = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] { - Configuration.class, FileSystem.class, TableName.class, ColumnFamilyDescriptor.class, - ExecutorService.class }, new Object[] { conf, fs, tableName, hcd, pool }); + Configuration.class, FileSystem.class, TableDescriptor.class, ColumnFamilyDescriptor.class, + ExecutorService.class }, new Object[] { conf, fs, td, hcd, pool }); } catch (Exception e) { throw new IOException("Unable to load configured mob file compactor '" + className + "'", e); } @@ -847,7 +848,7 @@ public final class MobUtils { compactor.compact(allFiles); } catch (Exception e) { LOG.error("Failed to compact the mob files for the column " + hcd.getNameAsString() - + " in the table " + tableName.getNameAsString(), e); + + " in the table " + td.getTableName().getNameAsString(), e); } finally { LOG.info("end MOB compaction of files for table='{}', column='{}', allFiles={}, " + "compactor='{}'", tableName, hcd.getNameAsString(), allFiles, compactor.getClass()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java index 8b5fa74ec5..d42f363187 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/MobCompactor.java @@ -26,8 +26,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -40,22 +40,23 @@ public abstract class MobCompactor { protected FileSystem fs; protected Configuration conf; - protected TableName tableName; + protected TableDescriptor tableDescriptor; protected ColumnFamilyDescriptor column; protected Path mobTableDir; protected Path mobFamilyDir; protected ExecutorService pool; - public MobCompactor(Configuration conf, FileSystem fs, TableName tableName, + public MobCompactor(Configuration conf, FileSystem fs, TableDescriptor td, ColumnFamilyDescriptor column, ExecutorService pool) { this.conf = conf; this.fs = fs; - this.tableName = tableName; + this.tableDescriptor = td; this.column = column; this.pool = pool; - mobTableDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), tableName); - mobFamilyDir = MobUtils.getMobFamilyPath(conf, tableName, column.getNameAsString()); + mobTableDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), tableDescriptor.getTableName()); + mobFamilyDir = MobUtils.getMobFamilyPath(conf, tableDescriptor.getTableName(), + column.getNameAsString()); } /** diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java index fc323eaf37..6858062f3f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -72,7 +73,6 @@ import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.C import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition; import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartitionId; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.hadoop.hbase.regionserver.ScanType; @@ -109,13 +109,14 @@ public class PartitionedMobCompactor extends MobCompactor { private final byte[] refCellTags; private Encryption.Context cryptoContext = Encryption.Context.NONE; - public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName, + public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableDescriptor td, ColumnFamilyDescriptor column, ExecutorService pool) throws IOException { - super(conf, fs, tableName, column, pool); + super(conf, fs, td, column, pool); mergeableSize = conf.getLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD); delFileMaxCount = conf.getInt(MobConstants.MOB_DELFILE_MAX_COUNT, MobConstants.DEFAULT_MOB_DELFILE_MAX_COUNT); + TableName tableName = td.getTableName(); // default is 100 compactionBatchSize = conf.getInt(MobConstants.MOB_COMPACTION_BATCH_SIZE, MobConstants.DEFAULT_MOB_COMPACTION_BATCH_SIZE); @@ -306,7 +307,7 @@ public class PartitionedMobCompactor extends MobCompactor { } LOG.info("The compaction type is {}, the request has {} del files, {} selected files, and {} " + "irrelevant files table '{}' and column '{}'", request.getCompactionType(), totalDelFiles, - selectedFileCount, irrelevantFileCount, tableName, column.getNameAsString()); + selectedFileCount, irrelevantFileCount, tableDescriptor.getTableName(), column.getNameAsString()); return request; } @@ -332,6 +333,8 @@ public class PartitionedMobCompactor extends MobCompactor { delPartition.addDelFileList(newDelPaths); } + final TableName tableName = tableDescriptor.getTableName(); + List paths = null; int totalDelFileCount = 0; try { @@ -462,6 +465,7 @@ public class PartitionedMobCompactor extends MobCompactor { protected List compactMobFiles(final PartitionedMobCompactionRequest request) throws IOException { Collection partitions = request.compactionPartitions; + final TableName tableName = tableDescriptor.getTableName(); if (partitions == null || partitions.isEmpty()) { LOG.info("No partitions of mob files in table='{}' and column='{}'", tableName, 
column.getNameAsString()); @@ -575,7 +579,7 @@ public class PartitionedMobCompactor extends MobCompactor { } LOG.info("Compaction is finished. The number of mob files is changed from {} to {} for " + "partition={} for table='{}' and column='{}'", files.size(), newFiles.size(), - partition.getPartitionId(), tableName, column.getNameAsString()); + partition.getPartitionId(), table.getName(), column.getNameAsString()); return newFiles; } @@ -630,6 +634,7 @@ public class PartitionedMobCompactor extends MobCompactor { boolean cleanupBulkloadDirOfPartition = false; boolean cleanupCommittedMobFile = false; boolean closeReaders= true; + final TableName tableName = table.getName(); try { try { @@ -695,7 +700,8 @@ public class PartitionedMobCompactor extends MobCompactor { try { closeStoreFileReaders(mobFilesToCompact); closeReaders = false; - MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), mobFilesToCompact); + MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, + column.getName(), mobFilesToCompact); } catch (IOException e) { LOG.error("Failed to archive the files " + mobFilesToCompact, e); } @@ -808,7 +814,8 @@ public class PartitionedMobCompactor extends MobCompactor { Path path = MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig); // archive the old del files try { - MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), delFiles); + MobUtils.removeMobFiles(conf, fs, tableDescriptor.getTableName(), mobTableDir, + column.getName(), delFiles); } catch (IOException e) { LOG.error("Failed to archive the old del files " + delFiles, e); } @@ -826,8 +833,7 @@ public class PartitionedMobCompactor extends MobCompactor { throws IOException { List scanners = StoreFileScanner.getScannersForStoreFiles(filesToCompact, false, true, false, false, HConstants.LATEST_TIMESTAMP); - long ttl = HStore.determineTTLFromFamily(column); - ScanInfo scanInfo = new ScanInfo(conf, column, ttl, 0, CellComparator.getInstance()); + ScanInfo scanInfo = new ScanInfo(conf, tableDescriptor, column, CellComparator.getInstance()); return new StoreScanner(scanInfo, scanType, scanners); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 401c16e3dc..b34d878266 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -860,9 +860,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi * throw an error to the user if the user-specified TS is newer than now + * slop. LATEST_TIMESTAMP == don't use this functionality */ - this.timestampSlop = conf.getLong( + long tSlop = conf.getLong( "hbase.hregion.keyvalue.timestamp.slop.millisecs", HConstants.LATEST_TIMESTAMP); + if (tSlop != HConstants.LATEST_TIMESTAMP && this.htableDescriptor.isNanosecondTimestamps()) { + // Scale slob to nanosecond precision if this table configured to handle nanoseconds. + tSlop *= 1000000; + } + this.timestampSlop = tSlop; /** * Timeout for the process time in processRowsWithLocks(). @@ -1389,6 +1394,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } } + private static long currentTimeForCell(final boolean isNano) { + return isNano ? 
+ EnvironmentEdgeManager.currentTimeNano() : EnvironmentEdgeManager.currentTime(); + } + @Override public RegionInfo getRegionInfo() { return this.fs.getRegionInfo(); @@ -3822,7 +3832,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi final int[] metrics = {0, 0, 0, 0}; visitBatchOperations(true, this.size(), new Visitor() { - private long now = EnvironmentEdgeManager.currentTime(); + private long now = currentTimeForCell(region.htableDescriptor.isNanosecondTimestamps()); private WALEdit walEdit; @Override public boolean visit(int index) throws IOException { @@ -4485,7 +4495,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi @Override public void checkAndPrepare() throws IOException { - long now = EnvironmentEdgeManager.currentTime(); + long now = currentTimeForCell(region.htableDescriptor.isNanosecondTimestamps()); visitBatchOperations(true, this.size(), (int index) -> { checkAndPrepareMutation(index, now); return true; @@ -4659,8 +4669,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // We should record the timestamp only after we have acquired the rowLock, // otherwise, newer puts/deletes/increment/append are not guaranteed to have a newer // timestamp - - long now = EnvironmentEdgeManager.currentTime(); + long now = currentTimeForCell(this.htableDescriptor.isNanosecondTimestamps()); batchOp.prepareMiniBatchOperations(miniBatchOp, now, acquiredRowLocks); // STEP 3. Build WAL edit @@ -4915,7 +4924,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // non-decreasing (see HBASE-14070) we should make sure that the mutation has a // larger timestamp than what was observed via Get. doBatchMutate already does this, but // there is no way to pass the cellTs. See HBASE-14054. - long now = EnvironmentEdgeManager.currentTime(); + long now = currentTimeForCell(this.htableDescriptor.isNanosecondTimestamps()); long ts = Math.max(now, cellTs); // ensure write is not eclipsed byte[] byteTs = Bytes.toBytes(ts); if (mutation != null) { @@ -8424,7 +8433,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Short circuit the read only case if (processor.readOnly()) { try { - long now = EnvironmentEdgeManager.currentTime(); + long now = currentTimeForCell(this.htableDescriptor.isNanosecondTimestamps()); doProcessRowWithTimeout(processor, now, this, null, null, timeout); processor.postProcess(this, walEdit, true); } finally { @@ -8474,7 +8483,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // From this point until memstore update this operation should not be interrupted. disableInterrupts(); - long now = EnvironmentEdgeManager.currentTime(); + long now = currentTimeForCell(this.htableDescriptor.isNanosecondTimestamps()); + // STEP 4. 
Let the processor scan the rows, generate mutations and add waledits doProcessRowWithTimeout(processor, now, this, mutations, walEdit, timeout); if (!mutations.isEmpty()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index fe2e635593..e6e2adc5a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -262,15 +262,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, this.dataBlockEncoder = new HFileDataBlockEncoderImpl(family.getDataBlockEncoding()); - // used by ScanQueryMatcher - long timeToPurgeDeletes = - Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); - LOG.trace("Time to purge deletes set to {}ms in {}", timeToPurgeDeletes, this); - // Get TTL - long ttl = determineTTLFromFamily(family); - // Why not just pass a HColumnDescriptor in here altogether? Even if have - // to clone it? - scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, region.getCellComparator()); + this.scanInfo = new ScanInfo(conf, region.getTableDescriptor(), family, region.getCellComparator()); + this.memstore = getMemstore(); this.offPeakHours = OffPeakHours.getInstance(conf); @@ -414,24 +407,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, return StoreEngine.create(store, conf, kvComparator); } - /** - * @return TTL in seconds of the specified family - */ - public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) { - // HCD.getTimeToLive returns ttl in seconds. Convert to milliseconds. - long ttl = family.getTimeToLive(); - if (ttl == HConstants.FOREVER) { - // Default is unlimited ttl. - ttl = Long.MAX_VALUE; - } else if (ttl == -1) { - ttl = Long.MAX_VALUE; - } else { - // Second -> ms adjust for user data - ttl *= 1000; - } - return ttl; - } - StoreContext getStoreContext() { return storeContext; } @@ -1985,8 +1960,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, synchronized (filesCompacting) { long cfTtl = getStoreFileTtl(); if (cfTtl != Long.MAX_VALUE) { - delSfs = storeEngine.getStoreFileManager().getUnneededFiles( - EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting); + long now = scanInfo.isNanosecondTimestamps() ? 
+ EnvironmentEdgeManager.currentTimeNano() : EnvironmentEdgeManager.currentTime(); + delSfs = storeEngine.getStoreFileManager().getUnneededFiles(now - cfTtl, filesCompacting); addToCompactingFiles(delSfs); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java index 831ce01f87..dd4265051a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.util.Bytes; @@ -47,6 +48,7 @@ public class ScanInfo { private boolean parallelSeekEnabled; private final long preadMaxBytes; private final boolean newVersionBehavior; + private final boolean nanosecondTimestamps; public static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT + (2 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_INT) @@ -54,17 +56,17 @@ public class ScanInfo { /** * @param conf + * @param table {@link TableDescriptor} describing the table * @param family {@link ColumnFamilyDescriptor} describing the column family - * @param ttl Store's TTL (in ms) - * @param timeToPurgeDeletes duration in ms after which a delete marker can be purged during a - * major compaction. * @param comparator The store's comparator */ - public ScanInfo(Configuration conf, ColumnFamilyDescriptor family, long ttl, - long timeToPurgeDeletes, CellComparator comparator) { - this(conf, family.getName(), family.getMinVersions(), family.getMaxVersions(), ttl, - family.getKeepDeletedCells(), family.getBlocksize(), timeToPurgeDeletes, comparator, - family.isNewVersionBehavior()); + public ScanInfo(Configuration conf, TableDescriptor table, ColumnFamilyDescriptor family, + CellComparator comparator) { + this(conf, family.getName(), family.getMinVersions(), family.getMaxVersions(), + determineTTL(family, table.isNanosecondTimestamps()), + family.getKeepDeletedCells(), family.getBlocksize(), + determineTimeToPurgeDeletes(conf, table.isNanosecondTimestamps()), comparator, + family.isNewVersionBehavior(), table.isNanosecondTimestamps()); } private static long getCellsPerTimeoutCheck(Configuration conf) { @@ -85,21 +87,25 @@ public class ScanInfo { * be purged during a major compaction. 
* @param keepDeletedCells Store's keepDeletedCells setting * @param comparator The store's comparator + * @param newVersionBehavior whether compare cells by MVCC when scanning + * @param nanosecondTimestamps whether treat timestamps as nanoseconds */ public ScanInfo(Configuration conf, byte[] family, int minVersions, int maxVersions, long ttl, KeepDeletedCells keepDeletedCells, long blockSize, long timeToPurgeDeletes, - CellComparator comparator, boolean newVersionBehavior) { + CellComparator comparator, boolean newVersionBehavior, boolean nanosecondTimestamps) { this(family, minVersions, maxVersions, ttl, keepDeletedCells, timeToPurgeDeletes, comparator, conf.getLong(HConstants.TABLE_MAX_ROWSIZE_KEY, HConstants.TABLE_MAX_ROWSIZE_DEFAULT), conf.getBoolean("hbase.storescanner.use.pread", false), getCellsPerTimeoutCheck(conf), conf.getBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, false), - conf.getLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 4 * blockSize), newVersionBehavior); + conf.getLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 4 * blockSize), newVersionBehavior, + nanosecondTimestamps); } private ScanInfo(byte[] family, int minVersions, int maxVersions, long ttl, KeepDeletedCells keepDeletedCells, long timeToPurgeDeletes, CellComparator comparator, long tableMaxRowSize, boolean usePread, long cellsPerTimeoutCheck, - boolean parallelSeekEnabled, long preadMaxBytes, boolean newVersionBehavior) { + boolean parallelSeekEnabled, long preadMaxBytes, boolean newVersionBehavior, + boolean nanosecondTimestamps) { this.family = family; this.minVersions = minVersions; this.maxVersions = maxVersions; @@ -113,6 +119,47 @@ public class ScanInfo { this.parallelSeekEnabled = parallelSeekEnabled; this.preadMaxBytes = preadMaxBytes; this.newVersionBehavior = newVersionBehavior; + this.nanosecondTimestamps = nanosecondTimestamps; + } + + /** + * @param conf is global configuration + * @param isNanosecondTimestamps whether timestamps are treated as nanoseconds + * @return TTL for deleted cells. Default in milliseconds and + * in nanoseconds if NANOSECOND_TIMESTAMPS table attribute is provided. + */ + private static long determineTimeToPurgeDeletes(final Configuration conf, + final boolean isNanosecondTimestamps) { + long ttpd = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); + if (isNanosecondTimestamps) { + ttpd *= 1000000; + } + return ttpd; + } + + /** + * @param family is store's family + * @param isNanosecondTimestamps whether timestamps are treated as nanoseconds + * @return TTL of the specified column family. Default in milliseconds and + * in nanoseconds if NANOSECOND_TIMESTAMPS table attribute is provided. + */ + private static long determineTTL(final ColumnFamilyDescriptor family, + final boolean isNanosecondTimestamps) { + // ColumnFamilyDescriptor.getTimeToLive() returns ttl in seconds. + long ttl = family.getTimeToLive(); + if (ttl == HConstants.FOREVER) { + // Default is unlimited ttl. + ttl = Long.MAX_VALUE; + } else if (ttl == -1) { + ttl = Long.MAX_VALUE; + } else if (isNanosecondTimestamps) { + // Second -> ns adjust for user data + ttl *= 1000000000; + } else { + // Second -> ms adjust for user data + ttl *= 1000; + } + return ttl; } long getTableMaxRowSize() { @@ -168,7 +215,15 @@ public class ScanInfo { } /** - * Used by CP users for customizing max versions, ttl and keepDeletedCells. + * Whether ScanInfo is for table that assumes nanosecond timestamps. + * @return true if nanosecond timestamps. 
+ */ + public boolean isNanosecondTimestamps() { + return nanosecondTimestamps; + } + + /** + * Used for CP users for customizing max versions, ttl and keepDeletedCells. */ ScanInfo customize(int maxVersions, long ttl, KeepDeletedCells keepDeletedCells) { return customize(maxVersions, ttl, keepDeletedCells, minVersions, timeToPurgeDeletes); @@ -182,6 +237,6 @@ public class ScanInfo { int minVersions, long timeToPurgeDeletes) { return new ScanInfo(family, minVersions, maxVersions, ttl, keepDeletedCells, timeToPurgeDeletes, comparator, tableMaxRowSize, usePread, cellsPerTimeoutCheck, parallelSeekEnabled, - preadMaxBytes, newVersionBehavior); + preadMaxBytes, newVersionBehavior, nanosecondTimestamps); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 72bd41931a..be90655919 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -171,7 +171,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner get = scan.isGetScan(); explicitColumnQuery = numColumns > 0; this.scan = scan; - this.now = EnvironmentEdgeManager.currentTime(); + this.now = scanInfo.isNanosecondTimestamps() ? + EnvironmentEdgeManager.currentTimeNano() : EnvironmentEdgeManager.currentTime(); this.oldestUnexpiredTS = scan.isRaw() ? 0L : now - scanInfo.getTtl(); this.minVersions = scanInfo.getMinVersions(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 204c75a237..d7ad49d1e9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -745,6 +745,19 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { createDirAndSetProperty("fs.s3a.committer.staging.tmp.path"); } + /** + * Check whether the tests should assume NANOSECOND_TIMESTAMPS when creating + * new column tables. Default is false. + */ + public boolean isNanosecondTimestampsEnabled() { + final String propName = "hbase.tests.nanosecond.timestamps"; + String v = System.getProperty(propName); + if (v != null){ + return Boolean.parseBoolean(v); + } + return false; + } + /** * Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating * new column families. Default to false. 
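
For context on the test-utility toggle above: a table opts into nanosecond timestamps through the builder method added earlier in this patch, and the test suites can enable the same behaviour by running with -Dhbase.tests.nanosecond.timestamps=true. A minimal client-side sketch (table and family names are placeholders):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class NanoTableSketch {
      public static TableDescriptor build() {
        // NANOSECOND_TIMESTAMPS is a table-level attribute; once set, cell timestamps in this
        // table are interpreted as nanoseconds rather than milliseconds.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setNanosecondTimestamps(true)
            .build();
      }
    }
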
@@ -1685,7 +1698,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } builder.setColumnFamily(cfdb.build()); } - TableDescriptor td = builder.build(); + TableDescriptor td = builder.setNanosecondTimestamps(isNanosecondTimestampsEnabled()).build(); getAdmin().createTable(td, splitKeys); // HBaseAdmin only waits for regions to appear in hbase:meta // we should wait until they are assigned @@ -1709,7 +1722,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { .setNewVersionBehavior(true).build()); } } - getAdmin().createTable(builder.build(), splitRows); + getAdmin().createTable( + builder.setNanosecondTimestamps(isNanosecondTimestampsEnabled()).build(), splitRows); // HBaseAdmin only waits for regions to appear in hbase:meta // we should wait until they are assigned waitUntilAllRegionsAssigned(htd.getTableName()); @@ -1771,6 +1785,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public Table createTable(TableName tableName, byte[][] families, int numVersions, byte[][] splitKeys) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions); if (isNewVersionBehaviorEnabled()) { @@ -1810,6 +1825,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family) .setMaxVersions(numVersions) @@ -1829,6 +1845,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize, String cpName) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family) .setMaxVersions(numVersions) @@ -1860,6 +1877,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { int[] numVersions) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); int i = 0; for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family) @@ -1888,6 +1906,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public Table createTable(TableName tableName, byte[] family, byte[][] splitRows) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); HColumnDescriptor hcd = new HColumnDescriptor(family); if (isNewVersionBehaviorEnabled()) { hcd.setNewVersionBehavior(true); @@ -2020,6 +2039,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public HTableDescriptor createTableDescriptor(final TableName name, final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) { HTableDescriptor htd = new HTableDescriptor(name); + htd.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) { HColumnDescriptor hcd = new HColumnDescriptor(cfName) .setMinVersions(minVersions) @@ -2053,6 +2073,7 @@ 
public class HBaseTestingUtility extends HBaseZKTestingUtility { public HTableDescriptor createTableDescriptor(final TableName tableName, byte[][] families, int maxVersions) { HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setNanosecondTimestamps(isNanosecondTimestampsEnabled()); for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family) .setMaxVersions(maxVersions); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java index 66077619cf..5916ac4169 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionCoprocessorHost.java @@ -214,7 +214,7 @@ public class TestRegionCoprocessorHost { return new ScanInfo(conf, Bytes.toBytes("cf"), oldMinVersions, oldMaxVersions, oldTTL, KeepDeletedCells.FALSE, HConstants.FOREVER, 1000, - CellComparator.getInstance(), true); + CellComparator.getInstance(), true, false); } /* diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java index f14ceaede0..c0fb58a6e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java @@ -319,7 +319,7 @@ public class TestMobCompactor { countFiles(tableName, false, family2)); // do the mob file compaction - MobCompactor compactor = new PartitionedMobCompactor(conf, fs, tableName, hcd1, pool); + MobCompactor compactor = new PartitionedMobCompactor(conf, fs, desc, hcd1, pool); compactor.compact(); assertEquals("After compaction: mob rows count", regionNum * (rowNumPerRegion - delRowNum), @@ -459,7 +459,7 @@ public class TestMobCompactor { countFiles(tableName, false, family2)); // do the mob compaction - MobCompactor compactor = new PartitionedMobCompactor(conf, fs, tableName, hcd1, pool); + MobCompactor compactor = new PartitionedMobCompactor(conf, fs, desc, hcd1, pool); compactor.compact(); assertEquals("After first compaction: mob rows count", regionNum diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java index 0aabd3114e..51ca7e241e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java @@ -36,6 +36,7 @@ import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -47,11 +48,13 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; @@ -62,7 +65,6 @@ import org.apache.hadoop.hbase.mob.compactions.MobCompactionRequest.CompactionTy import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionDelPartition; import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStoreFile; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; import org.apache.hadoop.hbase.regionserver.ScanInfo; @@ -380,7 +382,8 @@ public class TestPartitionedMobCompactor { listFiles(); TableName tName = TableName.valueOf(tableName); - MobCompactor compactor = new PartitionedMobCompactor(conf, faultyFs, tName, hcd, pool); + MobCompactor compactor = new PartitionedMobCompactor(conf, faultyFs, + new HTableDescriptor(tName), hcd, pool); faultyFs.setThrowException(true); try { compactor.compact(allFiles, true); @@ -485,8 +488,8 @@ public class TestPartitionedMobCompactor { listFiles(); - PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, - TableName.valueOf(tableName), hcd, pool) { + HTableDescriptor td = new HTableDescriptor(TableName.valueOf(tableName)); + PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, td, hcd, pool) { @Override public List compact(List files, boolean isForceAllFiles) throws IOException { @@ -516,11 +519,11 @@ public class TestPartitionedMobCompactor { int PartitionsIncludeDelFiles = 0; CacheConfig cacheConfig = null; - MyPartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName, + MyPartitionedMobCompactor(Configuration conf, FileSystem fs, TableDescriptor td, ColumnFamilyDescriptor column, ExecutorService pool, final int delPartitionSize, final CacheConfig cacheConf, final int PartitionsIncludeDelFiles) throws IOException { - super(conf, fs, tableName, column, pool); + super(conf, fs, td, column, pool); this.delPartitionSize = delPartitionSize; this.cacheConfig = cacheConf; this.PartitionsIncludeDelFiles = PartitionsIncludeDelFiles; @@ -618,8 +621,9 @@ public class TestPartitionedMobCompactor { listFiles(); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); MyPartitionedMobCompactor compactor = new MyPartitionedMobCompactor(conf, fs, - TableName.valueOf(tableName), hcd, pool, 1, cacheConf, 1); + htd, hcd, pool, 1, cacheConf, 1); compactor.compact(allFiles, true); } @@ -639,7 +643,7 @@ public class TestPartitionedMobCompactor { listFiles(); MyPartitionedMobCompactor compactor = new MyPartitionedMobCompactor(conf, fs, - TableName.valueOf(tableName), hcd, pool, 3, cacheConf, 3); + new HTableDescriptor(TableName.valueOf(tableName)), hcd, pool, 3, cacheConf, 3); compactor.compact(allFiles, true); } @@ -669,8 +673,8 @@ public class TestPartitionedMobCompactor { */ private void testSelectFiles(String tableName, final CompactionType type, final boolean isForceAllFiles, final List expected) throws IOException { - PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, - TableName.valueOf(tableName), hcd, pool) { + HTableDescriptor td = new 
HTableDescriptor(TableName.valueOf(tableName)); + PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, td, hcd, pool) { @Override public List compact(List files, boolean isForceAllFiles) throws IOException { @@ -739,8 +743,8 @@ public class TestPartitionedMobCompactor { */ private void testCompactDelFiles(String tableName, final int expectedFileCount, final int expectedCellCount, boolean isForceAllFiles) throws IOException { - PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, - TableName.valueOf(tableName), hcd, pool) { + HTableDescriptor td = new HTableDescriptor(TableName.valueOf(tableName)); + PartitionedMobCompactor compactor = new PartitionedMobCompactor(conf, fs, td, hcd, pool) { @Override protected List performCompaction(PartitionedMobCompactionRequest request) throws IOException { @@ -884,9 +888,8 @@ public class TestPartitionedMobCompactor { } List scanners = new ArrayList<>(StoreFileScanner.getScannersForStoreFiles(sfs, false, true, false, false, HConstants.LATEST_TIMESTAMP)); - long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); - long ttl = HStore.determineTTLFromFamily(hcd); - ScanInfo scanInfo = new ScanInfo(conf, hcd, ttl, timeToPurgeDeletes, CellComparatorImpl.COMPARATOR); + ScanInfo scanInfo = new ScanInfo(conf, new HTableDescriptor(TableName.valueOf("test")), + hcd, CellComparatorImpl.COMPARATOR); StoreScanner scanner = new StoreScanner(scanInfo, ScanType.COMPACT_RETAIN_DELETES, scanners); List results = new ArrayList<>(); boolean hasMore = true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java index 9161d2370c..5607f75c28 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java @@ -263,12 +263,15 @@ public class TestBulkLoadReplication extends TestReplicationBase { Path path = createMobFiles(UTIL3); ColumnFamilyDescriptor descriptor = new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(famName); + TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(descriptor).build(); + ExecutorService pool = null; try { pool = Executors.newFixedThreadPool(1); PartitionedMobCompactor compactor = - new PartitionedMobCompactor(UTIL3.getConfiguration(), UTIL3.getTestFileSystem(), tableName, - descriptor, pool); + new PartitionedMobCompactor(UTIL3.getConfiguration(), UTIL3.getTestFileSystem(), + td, descriptor, pool); BULK_LOAD_LATCH = new CountDownLatch(1); BULK_LOADS_COUNT.set(0); compactor.compact(Arrays.asList(UTIL3.getTestFileSystem().listStatus(path)), true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index 828061e295..f578436449 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -247,7 +247,8 @@ public class TestCompactingMemStore extends TestDefaultMemStore { Configuration conf = HBaseConfiguration.create(); for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) { ScanInfo scanInfo = new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE, - 
KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false); + KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), + false, false); try (InternalScanner scanner = new StoreScanner(new Scan().withStartRow(Bytes.toBytes(startRowId)), scanInfo, null, memstore.getScanners(0))) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 31a652a29e..038c2a5439 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -168,7 +168,8 @@ public class TestDefaultMemStore { List result = new ArrayList<>(); Configuration conf = HBaseConfiguration.create(); ScanInfo scanInfo = new ScanInfo(conf, null, 0, 1, HConstants.LATEST_TIMESTAMP, - KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false); + KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), + false, false); int count = 0; try (StoreScanner s = new StoreScanner(scan, scanInfo, null, memstorescanners)) { while (s.next(result)) { @@ -607,7 +608,7 @@ public class TestDefaultMemStore { for (int startRowId = 0; startRowId < ROW_COUNT; startRowId++) { ScanInfo scanInfo = new ScanInfo(conf, FAMILY, 0, 1, Integer.MAX_VALUE, KeepDeletedCells.FALSE, - HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false); + HConstants.DEFAULT_BLOCKSIZE, 0, this.memstore.getComparator(), false, false); try (InternalScanner scanner = new StoreScanner(new Scan().withStartRow(Bytes.toBytes(startRowId)), scanInfo, null, memstore.getScanners(0))) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index bf5d782525..c4dfe9c39e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -3465,10 +3465,11 @@ public class TestHRegion { return null; } }).when(mockedCPHost).preBatchMutate(Mockito.isA(MiniBatchOperationInProgress.class)); + HTableDescriptor td = new HTableDescriptor(TableName.valueOf("testDataInMemoryWithoutWAL")); ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder. 
newBuilder(COLUMN_FAMILY_BYTES); - ScanInfo info = new ScanInfo(CONF, builder.build(), Long.MAX_VALUE, - Long.MAX_VALUE, region.getCellComparator()); + ScanInfo info = new ScanInfo(CONF, td, builder.build(), region.getCellComparator()); + Mockito.when(mockedCPHost.preFlushScannerOpen(Mockito.any(HStore.class), Mockito.any())).thenReturn(info); Mockito.when(mockedCPHost.preFlush(Mockito.any(), Mockito.any(StoreScanner.class), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java index 308a338e36..8e1b1688c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPoli import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL; import org.junit.After; import org.junit.Before; @@ -245,7 +246,8 @@ public class TestMajorCompaction { // should result in a compacted store file that has no references to the // deleted row. LOG.debug("Adding deletes to memstore and flushing"); - Delete delete = new Delete(secondRowBytes, System.currentTimeMillis()); + final long now = htd.isNanosecondTimestamps() ? EnvironmentEdgeManager.currentTimeNano() : EnvironmentEdgeManager.currentTime(); + Delete delete = new Delete(secondRowBytes, now); byte [][] famAndQf = {COLUMN_FAMILY, null}; delete.addFamily(famAndQf[0]); r.delete(delete); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java index 25e1223a40..905093366c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java @@ -442,10 +442,7 @@ public class TestMobStoreCompaction { List scanners = StoreFileScanner.getScannersForStoreFiles(sfs, false, true, false, false, HConstants.LATEST_TIMESTAMP); - long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0); - long ttl = HStore.determineTTLFromFamily(hcd); - ScanInfo scanInfo = new ScanInfo(copyOfConf, hcd, ttl, timeToPurgeDeletes, - CellComparatorImpl.COMPARATOR); + ScanInfo scanInfo = new ScanInfo(copyOfConf, htd, hcd, CellComparatorImpl.COMPARATOR); StoreScanner scanner = new StoreScanner(scanInfo, ScanType.COMPACT_DROP_DELETES, scanners); try { size += UTIL.countRows(scanner); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java index 88f776268a..692e0ac48b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java @@ -271,7 +271,8 @@ public class TestReversibleScanners { ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(), FAMILYNAME, 0, Integer.MAX_VALUE, Long.MAX_VALUE, - KeepDeletedCells.FALSE, 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
index 88f776268a..692e0ac48b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
@@ -271,7 +271,8 @@ public class TestReversibleScanners {
 ScanInfo scanInfo =
 new ScanInfo(TEST_UTIL.getConfiguration(), FAMILYNAME, 0, Integer.MAX_VALUE, Long.MAX_VALUE,
- KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR, false);
+ KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR,
+ false, false);
 // Case 1.Test a full reversed scan
 Scan scan = new Scan();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
index 21a2bfbb99..04c0960f77 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
@@ -81,9 +81,9 @@ public class TestStoreScanner {
 private static final String CF_STR = "cf";
 private static final byte[] CF = Bytes.toBytes(CF_STR);
 static Configuration CONF = HBaseConfiguration.create();
- private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 private ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, Integer.MAX_VALUE, Long.MAX_VALUE,
- KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
+ KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(),
+ false, false);
 /**
 * From here on down, we have a bunch of defines and specific CELL_GRID of Cells. The
@@ -840,7 +840,7 @@ public class TestStoreScanner {
 Scan scan = new Scan();
 scan.readVersions(1);
 ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
+ HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false, false);
 try (StoreScanner scanner = new StoreScanner(scan, scanInfo, null, scanners)) {
 List results = new ArrayList<>();
 assertEquals(true, scanner.next(results));
@@ -903,7 +903,7 @@ public class TestStoreScanner {
 scan.readVersions(1);
 // scanner with ttl equal to 500
 ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
+ HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false, false);
 try (StoreScanner scanner = new StoreScanner(scan, scanInfo, null, scanners)) {
 List results = new ArrayList<>();
 assertEquals(true, scanner.next(results));
@@ -966,7 +966,7 @@ public class TestStoreScanner {
 KeepDeletedCells.FALSE /* keepDeletedCells */,
 HConstants.DEFAULT_BLOCKSIZE /* block size */,
 200, /* timeToPurgeDeletes */
- CellComparator.getInstance(), false);
+ CellComparator.getInstance(), false, false);
 try (StoreScanner scanner =
 new StoreScanner(scanInfo, 2, ScanType.COMPACT_DROP_DELETES, scanners)) {
 List results = new ArrayList<>();
@@ -995,7 +995,7 @@ public class TestStoreScanner {
 create("R1", "cf", "a", now - 10, KeyValue.Type.Put, "dont-care"), };
 List scanners = scanFixture(kvs);
 ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
+ HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false, false);
 try (StoreScanner storeScanner = new StoreScanner(scanInfo, -1,
 ScanType.COMPACT_RETAIN_DELETES, scanners)) {
 assertFalse(storeScanner.isScanUsePread());
@@ -1006,7 +1006,7 @@ public void testReadVersionWithRawAndFilter() throws IOException {
 ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, Long.MAX_VALUE, KeepDeletedCells.FALSE,
 HConstants.DEFAULT_BLOCKSIZE, 0
- , CellComparator.getInstance(), false);
+ , CellComparator.getInstance(), false, false);
 KeyValue [] kvs = new KeyValue[] {
 create("R1", "cf", "a", 3, KeyValue.Type.Put, "dont-care"),
 create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"),
@@ -1053,7 +1053,7 @@ public class TestStoreScanner {
 ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, Long.MAX_VALUE, KeepDeletedCells.FALSE,
 HConstants.DEFAULT_BLOCKSIZE, 0
- , CellComparator.getInstance(), false);
+ , CellComparator.getInstance(), false, false);
 InternalScan scan = new InternalScan(new Scan());
 scan.checkOnlyMemStore();
 MyCollectionBackedScanner fileScanner = new MyCollectionBackedScanner(true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java
index 23b16930d4..3d0255263b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java
@@ -86,7 +86,8 @@ public class TestStoreScannerClosure {
 private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 private static String ROOT_DIR = TEST_UTIL.getDataTestDir("TestHFile").toString();
 private ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, Integer.MAX_VALUE, Long.MAX_VALUE,
- KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
+ KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(),
+ false, false);
 private final static byte[] fam = Bytes.toBytes("cf_1");
 private static final KeyValue[] kvs = new KeyValue[] {
 create("R1", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
index 92ba76d411..b04470f2c6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
@@ -34,6 +34,7 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.OptionalLong;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -99,8 +101,9 @@ public class TestDateTieredCompactor {
 conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
 final Scanner scanner = new Scanner(input);
 // Create store mock that is satisfactory for compactor.
+ HTableDescriptor td = new HTableDescriptor(TABLE_NAME);
 HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
- ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparatorImpl.COMPARATOR);
+ ScanInfo si = new ScanInfo(conf, td, col, CellComparatorImpl.COMPARATOR);
 HStore store = mock(HStore.class);
 when(store.getStorefiles()).thenReturn(storefiles);
 when(store.getColumnFamilyDescriptor()).thenReturn(col);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
index 6e8b19fdd0..d22078a94d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
@@ -31,6 +31,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -199,8 +201,9 @@ public class TestStripeCompactor {
 final Scanner scanner = new Scanner(input);
 // Create store mock that is satisfactory for compactor.
+ HTableDescriptor td = new HTableDescriptor(TABLE_NAME);
 HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
- ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparatorImpl.COMPARATOR);
+ ScanInfo si = new ScanInfo(conf, td, col, CellComparatorImpl.COMPARATOR);
 HStore store = mock(HStore.class);
 when(store.getColumnFamilyDescriptor()).thenReturn(col);
 when(store.getScanInfo()).thenReturn(si);
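The compactor tests above stop spelling out TTL and time-to-purge-deletes and instead hand ScanInfo the table and family descriptors. Reading this together with the earlier TestMobStoreCompaction hunk (which drops HStore.determineTTLFromFamily and the hbase.hstore.time.to.purge.deletes lookup), the four-argument constructor apparently derives those values, plus the nanosecond flag, from the descriptors. A sketch under that assumption, with placeholder table and family names:

package org.apache.hadoop.hbase.regionserver; // illustrative placement, mirroring where these tests live

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class ScanInfoFromDescriptorsSketch {
  static ScanInfo build(Configuration conf) {
    HTableDescriptor td = new HTableDescriptor(TableName.valueOf("demo")); // placeholder table
    HColumnDescriptor col = new HColumnDescriptor("f");                    // placeholder family
    // Pre-patch shape: new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparatorImpl.COMPARATOR);
    // Post-patch shape: TTL, purge delay and the nanosecond flag come from the descriptors.
    return new ScanInfo(conf, td, col, CellComparatorImpl.COMPARATOR);
  }

  public static void main(String[] args) {
    System.out.println(build(HBaseConfiguration.create()));
  }
}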
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java
index 223508f2a9..83a1e5b6df 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestCompactionScanQueryMatcher.java
@@ -79,7 +79,7 @@ public class TestCompactionScanQueryMatcher extends AbstractTestScanQueryMatcher
 long now = EnvironmentEdgeManager.currentTime();
 // Set time to purge deletes to negative value to avoid it ever happening.
 ScanInfo scanInfo = new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, -1L, rowComparator, false);
+ HConstants.DEFAULT_BLOCKSIZE, -1L, rowComparator, false, false);
 CompactionScanQueryMatcher qm = CompactionScanQueryMatcher.create(scanInfo,
 ScanType.COMPACT_RETAIN_DELETES, Long.MAX_VALUE, HConstants.OLDEST_TIMESTAMP,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
index 861b83e2fe..0358e6a6b2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
@@ -63,7 +63,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
 // Do with fam2 which has a col2 qualifier.
 UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan,
 new ScanInfo(this.conf, fam2, 10, 1, ttl, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+ HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
 get.getFamilyMap().get(fam2), now - ttl, now, null);
 Cell kv = new KeyValue(row1, fam2, col2, 1, data);
 Cell cell = PrivateCellUtil.createLastOnRowCol(kv);
@@ -90,7 +90,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
 // 2,4,5
 UserScanQueryMatcher qm = UserScanQueryMatcher.create(
 scan, new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+ HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
 get.getFamilyMap().get(fam2), now - ttl, now, null);
 List memstore = new ArrayList<>(6);
@@ -133,7 +133,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
 long now = EnvironmentEdgeManager.currentTime();
 UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan, new ScanInfo(this.conf, fam2, 0, 1,
- ttl, KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+ ttl, KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
 null, now - ttl, now, null);
 List memstore = new ArrayList<>(6);
@@ -179,7 +179,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
 long now = EnvironmentEdgeManager.currentTime();
 UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan,
 new ScanInfo(this.conf, fam2, 0, 1, testTTL, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+ HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
 get.getFamilyMap().get(fam2), now - testTTL, now, null);
 KeyValue[] kvs = new KeyValue[] { new KeyValue(row1, fam2, col1, now - 100, data),
@@ -223,7 +223,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
 UserScanQueryMatcher qm = UserScanQueryMatcher.create(scan,
 new ScanInfo(this.conf, fam2, 0, 1, testTTL, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+ HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
 null, now - testTTL, now, null);
 KeyValue[] kvs = new KeyValue[] { new KeyValue(row1, fam2, col1, now - 100, data),
@@ -268,7 +268,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
 // scan with column 2,4,5
 UserScanQueryMatcher qm = UserScanQueryMatcher.create(
 scanWithFilter, new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+ HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
 get.getFamilyMap().get(fam2), now - ttl, now, null);
 List memstore = new ArrayList<>();
@@ -318,7 +318,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
 // scan with column 2,4,5, the family with maxVersion = 3
 UserScanQueryMatcher qm = UserScanQueryMatcher.create(
 scanWithFilter, new ScanInfo(this.conf, fam2, 0, 3, ttl, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+ HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
 get.getFamilyMap().get(fam2), now - ttl, now, null);
 List memstore = new ArrayList<>();
@@ -338,7 +338,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
 scanWithFilter = new Scan(scan).setFilter(new AlwaysIncludeFilter()).readVersions(1);
 qm = UserScanQueryMatcher.create(
 scanWithFilter, new ScanInfo(this.conf, fam2, 0, 2, ttl, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+ HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
 get.getFamilyMap().get(fam2), now - ttl, now, null);
 List memstore2 = new ArrayList<>();
@@ -378,7 +378,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
 // scan with column 2,4,5, the family with maxVersion = 5
 UserScanQueryMatcher qm = UserScanQueryMatcher.create(
 scanWithFilter, new ScanInfo(this.conf, fam2, 0, 5, ttl, KeepDeletedCells.FALSE,
- HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false),
+ HConstants.DEFAULT_BLOCKSIZE, 0, rowComparator, false, false),
 get.getFamilyMap().get(fam2), now - ttl, now, null);
 List memstore = new ArrayList<>();
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index a0bea3aa29..28c1b8b1c4 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -1436,6 +1436,7 @@ module Hbase
 htd.setRegionMemstoreReplication(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION)
 htd.setRegionSplitPolicyClassName(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY)
 htd.setRegionReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::REGION_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::REGION_REPLICATION)
+ htd.setNanosecondTimestamps(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NANOSECOND_TIMESTAMPS))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NANOSECOND_TIMESTAMPS)
 set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
 set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
 end
-- 
2.34.1
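To connect the shell hook above with the client API: the new attribute is an ordinary table-descriptor flag, so a table can opt in when it is created and then write cells whose timestamps come from the nanosecond clock. In the shell this presumably surfaces as a table attribute (something like NANOSECOND_TIMESTAMPS => 'true' on create or alter), which the hunk above wires into HTableDescriptor#setNanosecondTimestamps. The Java sketch below uses only calls visible in this patch plus standard client API; the table, family, row and value are made up for the example.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class NanoTimestampTableExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("nano_demo");          // hypothetical table name
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      HTableDescriptor htd = new HTableDescriptor(name);
      htd.addFamily(new HColumnDescriptor("f"));
      htd.setNanosecondTimestamps(true);                      // flag added by this patch
      admin.createTable(htd);
      try (Table table = conn.getTable(name)) {
        long ts = EnvironmentEdgeManager.currentTimeNano();   // nanosecond clock from this patch
        Put put = new Put(Bytes.toBytes("r1"));
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), ts, Bytes.toBytes("v"));
        table.put(put);
      }
    }
  }
}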