From a41a084a5fbfe5f38d3e49ebfaabc90987cebe47 Mon Sep 17 00:00:00 2001 From: Josh Elser Date: Wed, 22 Mar 2017 17:39:54 -0400 Subject: [PATCH] HBASE-17753 Include serialized snapshot sizes in table/ns usages Now that the information is being captured in the quota table, the QuotaObserverChore needs to use these sizes. --- .../apache/hadoop/hbase/quotas/QuotaTableUtil.java | 104 +++++++++++++++++---- .../hbase/master/MetricsMasterQuotaSource.java | 6 ++ .../hbase/master/MetricsMasterQuotaSourceImpl.java | 17 ++++ .../hbase/quotas/NamespaceQuotaSnapshotStore.java | 7 +- .../hadoop/hbase/quotas/QuotaSnapshotStore.java | 2 +- .../hbase/quotas/SnapshotQuotaObserverChore.java | 51 ++++++++-- .../hbase/quotas/TableQuotaSnapshotStore.java | 54 ++++++++++- .../hbase/quotas/SpaceQuotaHelperForTests.java | 12 ++- .../quotas/TestNamespaceQuotaViolationStore.java | 15 ++- .../hadoop/hbase/quotas/TestQuotaTableUtil.java | 59 ++++++++++++ .../quotas/TestSnapshotQuotaObserverChore.java | 25 ++++- .../hbase/quotas/TestSpaceQuotasWithSnapshots.java | 74 ++++++++++++++- .../hbase/quotas/TestTableQuotaViolationStore.java | 17 +++- 13 files changed, 409 insertions(+), 34 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java index d537bd95f4..10fca7c1cf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.QuotaStatusCalls; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; import org.apache.hadoop.hbase.filter.CompareFilter; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterList; @@ -75,7 +76,7 @@ import 
org.apache.hadoop.hbase.util.Strings; * ROW-KEYFAM/QUALDATA * n.<namespace>q:s<global-quotas> * n.<namespace>u:p<namespace-quota policy> - * n.<namespace>u:s.<snapshot name><SpaceQuotaSnapshot> + * n.<namespace>u:s<:SpaceQuotaSnapshot> * t.<table>q:s<global-quotas> * t.<table>u:p<table-quota policy> * t.<table>u:s.<snapshot name><SpaceQuotaSnapshot> @@ -233,23 +234,26 @@ public class QuotaTableUtil { * Creates a {@link Scan} which returns only quota snapshots from the quota table. */ public static Scan makeQuotaSnapshotScan() { - Scan s = new Scan(); - // Limit to "u:v" column - s.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); - // Limit rowspace to the "t:" prefix - s.setRowPrefixFilter(QUOTA_TABLE_ROW_KEY_PREFIX); - return s; + return makeQuotaSnapshotScanForTable(null); } /** - * Creates a {@link Get} for the snapshot size against the given table. + * Creates a {@link Scan} which returns only {@link SpaceQuotaSnapshot} from the quota table for a + * specific table. + * @param tn Optionally, a table name to limit the scan's rowkey space. Can be null. */ - public static Get makeGetForSnapshotSize(TableName tn, String snapshot) { - Get g = new Get(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(tn.toString()))); - g.addColumn( - QUOTA_FAMILY_USAGE, - Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); - return g; + public static Scan makeQuotaSnapshotScanForTable(TableName tn) { + Scan s = new Scan(); + // Limit to "u:v" column + s.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); + if (null == tn) { + s.setRowPrefixFilter(QUOTA_TABLE_ROW_KEY_PREFIX); + } else { + byte[] row = getTableRowKey(tn); + // Limit rowspace to the "t:" prefix + s.withStartRow(row, true).withStopRow(row, true); + } + return s; } /** @@ -391,6 +395,17 @@ public class QuotaTableUtil { } /** + * Creates a {@link Get} for the HBase snapshot's size against the given table. 
+ */ + public static Get makeGetForSnapshotSize(TableName tn, String snapshot) { + Get g = new Get(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(tn.toString()))); + g.addColumn( + QUOTA_FAMILY_USAGE, + Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); + return g; + } + + /** * Creates a {@link Put} to persist the current size of the {@code snapshot} with respect to * the given {@code table}. */ @@ -399,11 +414,68 @@ public class QuotaTableUtil { // SpaceQuotaSnapshot message instead of creating a new one. Put p = new Put(getTableRowKey(tableName)); p.addColumn(QUOTA_FAMILY_USAGE, getSnapshotSizeQualifier(snapshot), - org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot - .newBuilder().setUsage(size).build().toByteArray()); + QuotaProtos.SpaceQuotaSnapshot.newBuilder().setUsage(size).build().toByteArray()); + return p; + } + + /** + * Creates a {@code Put} for the namespace's total snapshot size. + */ + public static Put createPutNamespaceSnapshotSize(String namespace, long size) { + Put p = new Put(getNamespaceRowKey(namespace)); + p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_SNAPSHOT_SIZE_QUALIFIER, + QuotaProtos.SpaceQuotaSnapshot.newBuilder().setUsage(size).build().toByteArray()); return p; } + public static long getNamespaceSnapshotSize( + Connection conn, String namespace) throws IOException { + try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { + Result r = quotaTable.get(createGetNamespaceSnapshotSize(namespace)); + if (r.isEmpty()) { + return 0L; + } + r.advance(); + Cell c = r.current(); + try { + return parseSnapshotSize(c); + } catch (InvalidProtocolBufferException e) { + LOG.warn("Could not parse snapshot size value for namespace " + namespace, e); + return 0L; + } + } + } + + /** + * Creates a {@code Get} to fetch the namespace's total snapshot size. 
+ */ + static Get createGetNamespaceSnapshotSize(String namespace) { + Get g = new Get(getNamespaceRowKey(namespace)); + g.addColumn(QUOTA_FAMILY_USAGE, QUOTA_SNAPSHOT_SIZE_QUALIFIER); + return g; + } + + /** + * Parses the snapshot size from the given Cell's value. + */ + public static long parseSnapshotSize(Cell c) throws InvalidProtocolBufferException { + ByteString bs = UnsafeByteOperations.unsafeWrap( + c.getValueArray(), c.getValueOffset(), c.getValueLength()); + return QuotaProtos.SpaceQuotaSnapshot.parseFrom(bs).getUsage(); + } + + public static Scan createScanForSpaceSnapshotSizes(TableName table) { + byte[] rowkey = getTableRowKey(table); + return new Scan() + // Fetch just this one row + .withStartRow(rowkey) + .withStopRow(rowkey, true) + // Just the usage family + .addFamily(QUOTA_FAMILY_USAGE) + // Only the snapshot size qualifiers + .setFilter(new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); + } + /* ========================================================================= * Space quota status RPC helpers */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java index 432583035b..b426fcc429 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java @@ -42,6 +42,12 @@ public interface MetricsMasterQuotaSource extends BaseSource { String SNAPSHOT_OBSERVER_CHORE_TIME_NAME = "snapshotObserverChoreTime"; String SNAPSHOT_OBSERVER_CHORE_TIME_DESC = "Histogram for the time in millis for the SnapshotQuotaObserverChore"; + String SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME = "snapshotObserverSizeComputationTime"; + String SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC = + "Histogram for the time in millis to compute the size of each snapshot"; + String 
SNAPSHOT_OBSERVER_FETCH_TIME_NAME = "snapshotObserverSnapshotFetchTime"; + String SNAPSHOT_OBSERVER_FETCH_TIME_DESC = + "Histogram for the time in millis to fetch all snapshots from HBase"; String TABLE_QUOTA_USAGE_NAME = "tableSpaceQuotaOverview"; String TABLE_QUOTA_USAGE_DESC = "A JSON summary of the usage of all tables with space quotas"; String NS_QUOTA_USAGE_NAME = "namespaceSpaceQuotaOverview"; diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java index c160ef90b4..9140c1c237 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java @@ -36,6 +36,8 @@ public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl implements Metr private final MutableGaugeLong regionSpaceReportsGauge; private final MetricHistogram quotaObserverTimeHisto; private final MetricHistogram snapshotObserverTimeHisto; + private final MetricHistogram snapshotObserverSizeComputationTimeHisto; + private final MetricHistogram snapshotObserverSnapshotFetchTimeHisto; public MetricsMasterQuotaSourceImpl(MetricsMasterWrapper wrapper) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, wrapper); @@ -60,6 +62,11 @@ public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl implements Metr QUOTA_OBSERVER_CHORE_TIME_NAME, QUOTA_OBSERVER_CHORE_TIME_DESC); snapshotObserverTimeHisto = getMetricsRegistry().newTimeHistogram( SNAPSHOT_OBSERVER_CHORE_TIME_NAME, SNAPSHOT_OBSERVER_CHORE_TIME_DESC); + + snapshotObserverSizeComputationTimeHisto = getMetricsRegistry().newTimeHistogram( + SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME, SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC); + snapshotObserverSnapshotFetchTimeHisto = 
getMetricsRegistry().newTimeHistogram( + SNAPSHOT_OBSERVER_FETCH_TIME_NAME, SNAPSHOT_OBSERVER_FETCH_TIME_DESC); } @Override @@ -134,4 +141,14 @@ public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl implements Metr sb.insert(0, "[").append("]"); return sb.toString(); } + + @Override + public void incrementSnapshotObserverSnapshotComputationTime(long time) { + snapshotObserverSizeComputationTimeHisto.add(time); + } + + @Override + public void incrementSnapshotObserverSnapshotFetchTime(long time) { + snapshotObserverSnapshotFetchTimeHisto.add(time); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NamespaceQuotaSnapshotStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NamespaceQuotaSnapshotStore.java index 75550f3105..22ccff88e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NamespaceQuotaSnapshotStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NamespaceQuotaSnapshotStore.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; import java.util.Map; -import java.util.Objects; import java.util.Map.Entry; +import java.util.Objects; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; @@ -77,7 +77,8 @@ public class NamespaceQuotaSnapshotStore implements QuotaSnapshotStore { } @Override - public SpaceQuotaSnapshot getTargetState(String subject, SpaceQuota spaceQuota) { + public SpaceQuotaSnapshot getTargetState( + String subject, SpaceQuota spaceQuota) throws IOException { rlock.lock(); try { final long sizeLimitInBytes = spaceQuota.getSoftLimit(); @@ -85,6 +86,8 @@ public class NamespaceQuotaSnapshotStore implements QuotaSnapshotStore { for (Entry entry : filterBySubject(subject)) { sum += entry.getValue(); } + // Add in the size for any snapshots against this table + sum += 
QuotaTableUtil.getNamespaceSnapshotSize(conn, subject); // Observance is defined as the size of the table being less than the limit SpaceQuotaStatus status = sum <= sizeLimitInBytes ? SpaceQuotaStatus.notInViolation() : new SpaceQuotaStatus(ProtobufUtil.toViolationPolicy(spaceQuota.getViolationPolicy())); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSnapshotStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSnapshotStore.java index 8b0b3a7212..2b5ba59ab6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSnapshotStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSnapshotStore.java @@ -69,7 +69,7 @@ public interface QuotaSnapshotStore { * @param subject The object which to determine the target SpaceQuotaSnapshot of * @param spaceQuota The quota "definition" for the {@code subject} */ - SpaceQuotaSnapshot getTargetState(T subject, SpaceQuota spaceQuota); + SpaceQuotaSnapshot getTargetState(T subject, SpaceQuota spaceQuota) throws IOException; /** * Filters the provided regions, returning those which match the given diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java index 403dcfa268..2b18bb6ea8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.java @@ -200,9 +200,9 @@ public class SnapshotQuotaObserverChore extends ScheduledChore { final Path rootDir = FSUtils.getRootDir(conf); // Get the map of store file names to store file path for this table // TODO is the store-file name unique enough? Does this need to be region+family+storefile? 
- final Map tableReferencedStoreFiles; + final Set tableReferencedStoreFiles; try { - tableReferencedStoreFiles = FSUtils.getTableStoreFilePathMap(fs, rootDir); + tableReferencedStoreFiles = FSUtils.getTableStoreFilePathMap(fs, rootDir).keySet(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); return null; @@ -214,7 +214,7 @@ public class SnapshotQuotaObserverChore extends ScheduledChore { // For each snapshot on this table, get the files which the snapshot references which // the table does not. - Set snapshotReferencedFiles = new HashSet<>(); + Set snapshotReferencedFiles = new HashSet<>(); for (String snapshotName : snapshotNames) { final long start = System.nanoTime(); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); @@ -228,7 +228,7 @@ public class SnapshotQuotaObserverChore extends ScheduledChore { // Get the set of files from the manifest that this snapshot references which are not also // referenced by the originating table. 
Set unreferencedStoreFileNames = getStoreFilesFromSnapshot( - manifest, (sfn) -> !tableReferencedStoreFiles.containsKey(sfn) + manifest, (sfn) -> !tableReferencedStoreFiles.contains(sfn) && !snapshotReferencedFiles.contains(sfn)); if (LOG.isTraceEnabled()) { @@ -246,7 +246,11 @@ public class SnapshotQuotaObserverChore extends ScheduledChore { snapshotSizes.put(tn, new SnapshotWithSize(snapshotName, size)); // Make sure that we don't double-count the same file - snapshotReferencedFiles.addAll(unreferencedStoreFileNames); + for (StoreFileReference ref : unreferencedStoreFileNames) { + for (String fileNames : ref.getFamilyToFilesMapping().values()) { + snapshotReferencedFiles.add(fileNames); + } + } // Update the amount of time it took to compute the snapshot's size if (null != metrics) { metrics.incrementSnapshotSizeComputationTime((System.nanoTime() - start) / 1_000_000); @@ -267,7 +271,6 @@ public class SnapshotQuotaObserverChore extends ScheduledChore { for (SnapshotRegionManifest rm : manifest.getRegionManifests()) { StoreFileReference regionReference = new StoreFileReference( HRegionInfo.convert(rm.getRegionInfo()).getEncodedName()); - references.add(regionReference); // For each column family in this region for (FamilyFiles ff : rm.getFamilyFilesList()) { @@ -275,11 +278,17 @@ public class SnapshotQuotaObserverChore extends ScheduledChore { // And each store file in that family for (StoreFile sf : ff.getStoreFilesList()) { String storeFileName = sf.getName(); + // A snapshot only "inherits" a files size if it uniquely refers to it (no table + // and no other snapshot references it). if (filter.test(storeFileName)) { regionReference.addFamilyStoreFile(familyName, storeFileName); } } } + // Only add this Region reference if we retained any files. 
+ if (!regionReference.getFamilyToFilesMapping().isEmpty()) { + references.add(regionReference); + } } return references; } @@ -351,7 +360,10 @@ public class SnapshotQuotaObserverChore extends ScheduledChore { void persistSnapshotSizes( Multimap snapshotsWithSize) throws IOException { try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { + // Write each snapshot size for the table persistSnapshotSizes(quotaTable, snapshotsWithSize); + // Write a size entry for all snapshots in a namespace + persistSnapshotSizesByNS(quotaTable, snapshotsWithSize); } } @@ -369,6 +381,33 @@ public class SnapshotQuotaObserverChore extends ScheduledChore { } /** + * Rolls up the snapshot sizes by namespace and writes a single record for each namespace + * which is the size of all snapshots in that namespace. + */ + void persistSnapshotSizesByNS( + Table quotaTable, Multimap snapshotsWithSize) throws IOException { + Map namespaceSnapshotSizes = groupSnapshotSizesByNamespace(snapshotsWithSize); + quotaTable.put(namespaceSnapshotSizes.entrySet().stream() + .map(e -> QuotaTableUtil.createPutNamespaceSnapshotSize( + e.getKey(), e.getValue())) + .collect(Collectors.toList())); + } + + /** + * Sums the snapshot sizes for each namespace. + */ + Map groupSnapshotSizesByNamespace( + Multimap snapshotsWithSize) { + return snapshotsWithSize.entries().stream() + .collect(Collectors.groupingBy( + // Convert TableName into the namespace string + (e) -> e.getKey().getNamespaceAsString(), + // Sum the values for namespace + Collectors.mapping( + Map.Entry::getValue, Collectors.summingLong((sws) -> sws.getSize())))); + } + + /** * A struct encapsulating the name of a snapshot and its "size" on the filesystem. This size is * defined as the amount of filesystem space taken by the files the snapshot refers to which * the originating table no longer refers to. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java index 82d3684c83..5157058e17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableQuotaSnapshotStore.java @@ -18,17 +18,26 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; -import java.util.Map.Entry; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; @@ -41,6 +50,8 @@ import com.google.common.collect.Iterables; */ @InterfaceAudience.Private public class TableQuotaSnapshotStore implements QuotaSnapshotStore { + private static final Log LOG = LogFactory.getLog(TableQuotaSnapshotStore.class); + private 
final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); private final ReadLock rlock = lock.readLock(); private final WriteLock wlock = lock.writeLock(); @@ -77,7 +88,8 @@ public class TableQuotaSnapshotStore implements QuotaSnapshotStore { } @Override - public SpaceQuotaSnapshot getTargetState(TableName table, SpaceQuota spaceQuota) { + public SpaceQuotaSnapshot getTargetState( + TableName table, SpaceQuota spaceQuota) throws IOException { rlock.lock(); try { final long sizeLimitInBytes = spaceQuota.getSoftLimit(); @@ -85,6 +97,8 @@ public class TableQuotaSnapshotStore implements QuotaSnapshotStore { for (Entry entry : filterBySubject(table)) { sum += entry.getValue(); } + // Add in the size for any snapshots against this table + sum += getSnapshotSizesForTable(table); // Observance is defined as the size of the table being less than the limit SpaceQuotaStatus status = sum <= sizeLimitInBytes ? SpaceQuotaStatus.notInViolation() : new SpaceQuotaStatus(ProtobufUtil.toViolationPolicy(spaceQuota.getViolationPolicy())); @@ -94,6 +108,42 @@ public class TableQuotaSnapshotStore implements QuotaSnapshotStore { } } + /** + * Fetches any serialized snapshot sizes from the quota table for the {@code tn} provided. Any + * malformed records are skipped with a warning printed out. 
+ */ + long getSnapshotSizesForTable(TableName tn) throws IOException { + try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { + Scan s = QuotaTableUtil.createScanForSpaceSnapshotSizes(tn); + ResultScanner rs = quotaTable.getScanner(s); + try { + long size = 0L; + // Should just be a single row (for our table) + for (Result result : rs) { + // May have multiple columns, one for each snapshot + CellScanner cs = result.cellScanner(); + while (cs.advance()) { + Cell current = cs.current(); + try { + long snapshotSize = QuotaTableUtil.parseSnapshotSize(current); + if (LOG.isTraceEnabled()) { + LOG.trace("Saw snapshot size of " + snapshotSize + " for " + current); + } + size += snapshotSize; + } catch (InvalidProtocolBufferException e) { + LOG.warn("Failed to parse snapshot size from cell: " + current); + } + } + } + return size; + } finally { + if (null != rs) { + rs.close(); + } + } + } + } + @Override public Iterable> filterBySubject(TableName table) { rlock.lock(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java index 0034e3d6e7..b0d3e30e04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/SpaceQuotaHelperForTests.java @@ -88,6 +88,8 @@ public class SpaceQuotaHelperForTests { conf.setInt(SpaceQuotaRefresherChore.POLICY_REFRESHER_CHORE_PERIOD_KEY, 1000); conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_DELAY_KEY, 1000); conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_PERIOD_KEY, 1000); + // The period at which we check for compacted files that should be deleted from HDFS + conf.setInt("hbase.hfile.compaction.discharger.interval", 5 * 1000); conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true); } @@ -122,6 +124,14 @@ public class SpaceQuotaHelperForTests { } void 
writeData(TableName tn, long sizeInBytes) throws IOException { + writeData(tn, sizeInBytes, Bytes.toBytes("q1")); + } + + void writeData(TableName tn, long sizeInBytes, String qual) throws IOException { + writeData(tn, sizeInBytes, Bytes.toBytes(qual)); + } + + void writeData(TableName tn, long sizeInBytes, byte[] qual) throws IOException { final Connection conn = testUtil.getConnection(); final Table table = conn.getTable(tn); try { @@ -137,7 +147,7 @@ public class SpaceQuotaHelperForTests { Put p = new Put(Bytes.toBytes(sb.reverse().toString())); byte[] value = new byte[SIZE_PER_VALUE]; r.nextBytes(value); - p.addColumn(Bytes.toBytes(F1), Bytes.toBytes("q1"), value); + p.addColumn(Bytes.toBytes(F1), qual, value); updates.add(p); // Batch ~13KB worth of updates diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java index 4a7258fd98..385f8c4d72 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestNamespaceQuotaViolationStore.java @@ -23,6 +23,7 @@ import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; @@ -31,6 +32,9 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; @@ -91,7 +95,8 @@ public class TestNamespaceQuotaViolationStore { } @Test - public void testTargetViolationState() { + public void testTargetViolationState() throws IOException { + mockNoSnapshotSizes(); final String NS = "ns"; TableName tn1 = TableName.valueOf(NS, "tn1"); TableName tn2 = TableName.valueOf(NS, "tn2"); @@ -123,7 +128,8 @@ public class TestNamespaceQuotaViolationStore { // Exceeds the quota, should be in violation assertEquals(true, store.getTargetState(NS, quota).getQuotaStatus().isInViolation()); - assertEquals(SpaceViolationPolicy.DISABLE, store.getTargetState(NS, quota).getQuotaStatus().getPolicy()); + assertEquals( + SpaceViolationPolicy.DISABLE, store.getTargetState(NS, quota).getQuotaStatus().getPolicy()); } @Test @@ -153,4 +159,9 @@ public class TestNamespaceQuotaViolationStore { assertEquals(18, size(store.filterBySubject("ns"))); } + void mockNoSnapshotSizes() throws IOException { + Table quotaTable = mock(Table.class); + when(conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)).thenReturn(quotaTable); + when(quotaTable.get(any(Get.class))).thenReturn(new Result()); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java index f10cdef487..6eae900d45 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java @@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.quotas; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -27,8 +29,11 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.Cell; +import 
org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -37,7 +42,9 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -232,7 +239,59 @@ public class TestQuotaTableUtil { assertEquals(expectedPolicies, actualPolicies); } + @Test + public void testSerdeTableSnapshotSizes() throws Exception { + TableName tn1 = TableName.valueOf("tn1"); + TableName tn2 = TableName.valueOf("tn2"); + try (Table quotaTable = connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { + for (int i = 0; i < 3; i++) { + Put p = QuotaTableUtil.createPutSnapshotSize(tn1, "tn1snap" + i, 1024L * (1+i)); + quotaTable.put(p); + } + for (int i = 0; i < 3; i++) { + Put p = QuotaTableUtil.createPutSnapshotSize(tn2, "tn2snap" + i, 2048L * (1+i)); + quotaTable.put(p); + } + + verifyTableSnapshotSize(quotaTable, tn1, "tn1snap0", 1024L); + verifyTableSnapshotSize(quotaTable, tn1, "tn1snap1", 2048L); + verifyTableSnapshotSize(quotaTable, tn1, "tn1snap2", 3072L); + + verifyTableSnapshotSize(quotaTable, tn2, "tn2snap0", 2048L); + verifyTableSnapshotSize(quotaTable, tn2, "tn2snap1", 4096L); + verifyTableSnapshotSize(quotaTable, tn2, 
"tn2snap2", 6144L); + } + } + + @Test + public void testReadNamespaceSnapshotSizes() throws Exception { + String ns1 = "ns1"; + String ns2 = "ns2"; + String defaultNs = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR; + try (Table quotaTable = connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { + quotaTable.put(QuotaTableUtil.createPutNamespaceSnapshotSize(ns1, 1024L)); + quotaTable.put(QuotaTableUtil.createPutNamespaceSnapshotSize(ns2, 2048L)); + quotaTable.put(QuotaTableUtil.createPutNamespaceSnapshotSize(defaultNs, 8192L)); + + assertEquals(1024L, QuotaTableUtil.getNamespaceSnapshotSize(connection, ns1)); + assertEquals(2048L, QuotaTableUtil.getNamespaceSnapshotSize(connection, ns2)); + assertEquals(8192L, QuotaTableUtil.getNamespaceSnapshotSize(connection, defaultNs)); + } + } + private TableName getUniqueTableName() { return TableName.valueOf(testName.getMethodName() + "_" + tableNameCounter++); } + + private void verifyTableSnapshotSize( + Table quotaTable, TableName tn, String snapshotName, long expectedSize) throws IOException { + Result r = quotaTable.get(QuotaTableUtil.makeGetForSnapshotSize(tn, snapshotName)); + CellScanner cs = r.cellScanner(); + assertTrue(cs.advance()); + Cell c = cs.current(); + assertEquals(expectedSize, QuotaProtos.SpaceQuotaSnapshot.parseFrom( + UnsafeByteOperations.unsafeWrap( + c.getValueArray(), c.getValueOffset(), c.getValueLength())).getUsage()); + assertFalse(cs.advance()); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java index 8be251b996..8bdda109a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSnapshotQuotaObserverChore.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertNotNull; import java.io.IOException; import 
java.util.Arrays; import java.util.HashSet; +import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -46,8 +47,8 @@ import org.apache.hadoop.hbase.client.SnapshotType; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.SnapshotWithSize; -import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate; import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.NoFilesToDischarge; +import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.AfterClass; import org.junit.Before; @@ -298,6 +299,28 @@ public class TestSnapshotQuotaObserverChore { assertEquals(lastSeenSize.get().longValue(), sws.getSize()); } + @Test + public void testPersistingSnapshotsForNamespaces() throws Exception { + Multimap snapshotsWithSizes = HashMultimap.create(); + TableName tn1 = TableName.valueOf("ns1:tn1"); + TableName tn2 = TableName.valueOf("ns1:tn2"); + TableName tn3 = TableName.valueOf("ns2:tn1"); + TableName tn4 = TableName.valueOf("ns2:tn2"); + TableName tn5 = TableName.valueOf("tn1"); + + snapshotsWithSizes.put(tn1, new SnapshotWithSize("", 1024L)); + snapshotsWithSizes.put(tn2, new SnapshotWithSize("", 1024L)); + snapshotsWithSizes.put(tn3, new SnapshotWithSize("", 512L)); + snapshotsWithSizes.put(tn4, new SnapshotWithSize("", 1024L)); + snapshotsWithSizes.put(tn5, new SnapshotWithSize("", 3072L)); + + Map nsSizes = testChore.groupSnapshotSizesByNamespace(snapshotsWithSizes); + assertEquals(3, nsSizes.size()); + assertEquals(2048L, (long) nsSizes.get("ns1")); + assertEquals(1536L, (long) nsSizes.get("ns2")); + assertEquals(3072L, (long) nsSizes.get(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR)); + } + private long count(Table t) throws IOException { try (ResultScanner 
rs = t.getScanner(new Scan())) { long sum = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithSnapshots.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithSnapshots.java index 2c0477d6c1..28c763f327 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithSnapshots.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceQuotasWithSnapshots.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hbase.quotas; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; @@ -25,13 +26,21 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.SnapshotType; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.AfterClass; import org.junit.Before; @@ -41,6 +50,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import 
org.junit.rules.TestName; +import com.google.common.collect.Iterables; + /** * Test class to exercise the inclusion of snapshots in space quotas */ @@ -156,6 +167,7 @@ public class TestSpaceQuotasWithSnapshots { // The size of the table (WRT quotas) should now be approximately double what it was previously TEST_UTIL.waitFor(30 * 1000, 1000, new SpaceQuotaSnapshotPredicate(conn, tn) { @Override boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception { + LOG.debug("Checking for " + expectedFinalSize + " == " + snapshot.getUsage()); return expectedFinalSize == snapshot.getUsage(); } }); @@ -172,9 +184,10 @@ public class TestSpaceQuotasWithSnapshots { ns, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS); admin.setQuota(settings); - // Write some data + // Write some data and flush it to disk final long initialSize = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE; helper.writeData(tn, initialSize); + admin.flush(tn); LOG.info("Waiting until namespace size reflects written data"); // Wait until that data is seen by the master @@ -247,4 +260,61 @@ public class TestSpaceQuotasWithSnapshots { } }); } + + @Test + public void testTablesWithSnapshots() throws Exception { + final Connection conn = TEST_UTIL.getConnection(); + final SpaceViolationPolicy policy = SpaceViolationPolicy.NO_INSERTS; + final TableName tn = helper.createTableWithRegions(10); + + // 3MB limit on the table + final long tableLimit = 3L * SpaceQuotaHelperForTests.ONE_MEGABYTE; + TEST_UTIL.getAdmin().setQuota(QuotaSettingsFactory.limitTableSpace(tn, tableLimit, policy)); + + LOG.info("Writing first data set"); + // Write more data than should be allowed and flush it to disk + helper.writeData(tn, 1L * SpaceQuotaHelperForTests.ONE_MEGABYTE, "q1"); + + LOG.info("Creating snapshot"); + TEST_UTIL.getAdmin().snapshot(tn.toString() + "snap1", tn, SnapshotType.FLUSH); + + LOG.info("Writing second data set"); + // Write some more data + helper.writeData(tn, 1L * 
SpaceQuotaHelperForTests.ONE_MEGABYTE, "q2"); + + LOG.info("Flushing and major compacting table"); + // Compact the table to force the snapshot to own all of its files + TEST_UTIL.getAdmin().flush(tn); + TEST_UTIL.compact(tn, true); + + LOG.info("Checking for quota violation"); + // Wait to observe the quota moving into violation + TEST_UTIL.waitFor(60_000, 1_000, new Predicate<Exception>() { + @Override + public boolean evaluate() throws Exception { + Scan s = QuotaTableUtil.makeQuotaSnapshotScanForTable(tn); + try (Table t = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { + ResultScanner rs = t.getScanner(s); + try { + Result r = Iterables.getOnlyElement(rs); + CellScanner cs = r.cellScanner(); + assertTrue(cs.advance()); + Cell c = cs.current(); + SpaceQuotaSnapshot snapshot = SpaceQuotaSnapshot.toSpaceQuotaSnapshot( + QuotaProtos.SpaceQuotaSnapshot.parseFrom( + UnsafeByteOperations.unsafeWrap( + c.getValueArray(), c.getValueOffset(), c.getValueLength()))); + LOG.info( + snapshot.getUsage() + "/" + snapshot.getLimit() + " " + snapshot.getQuotaStatus()); + // We expect to see the table move to violation + return snapshot.getQuotaStatus().isInViolation(); + } finally { + if (null != rs) { + rs.close(); + } + } + } + } + }); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java index cefed67d9f..5a4969a4a6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestTableQuotaViolationStore.java @@ -23,6 +23,8 @@ import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; @@ -30,6 +32,10 @@ import
java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; @@ -87,7 +93,8 @@ public class TestTableQuotaViolationStore { } @Test - public void testTargetViolationState() { + public void testTargetViolationState() throws IOException { + mockNoSnapshotSizes(); TableName tn1 = TableName.valueOf("violation1"); TableName tn2 = TableName.valueOf("observance1"); TableName tn3 = TableName.valueOf("observance2"); @@ -154,4 +161,12 @@ public class TestTableQuotaViolationStore { quotaRef.set(quotaWithoutSpace); assertNull(mockStore.getSpaceQuota(TableName.valueOf("foo"))); } + + void mockNoSnapshotSizes() throws IOException { + Table quotaTable = mock(Table.class); + ResultScanner scanner = mock(ResultScanner.class); + when(conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)).thenReturn(quotaTable); + when(quotaTable.getScanner(any(Scan.class))).thenReturn(scanner); + when(scanner.iterator()).thenReturn(Collections.<Result> emptyList().iterator()); + } } -- 2.12.0