diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 19e1235..25c5fd8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -77,9 +77,12 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellScanner;
@@ -128,6 +131,7 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.FilterWrapper;
 import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
+import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -188,7 +192,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
 
-
 @SuppressWarnings("deprecation")
 @InterfaceAudience.Private
 public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region {
@@ -1108,20 +1111,81 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region {
     FileSystem fs = tablePath.getFileSystem(conf);
     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
 
-    for (HColumnDescriptor family: tableDescriptor.getFamilies()) {
-      Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
-      if (storeFiles == null) continue;
-      for (StoreFileInfo storeFileInfo : storeFiles) {
-        try {
-          hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
-        } catch (IOException ioe) {
-          LOG.warn("Error getting hdfs block distribution for " + storeFileInfo);
+    for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
+      List<LocatedFileStatus> locatedFileStatusList = getStoreFilesLocatedStatus(
+          regionFs, family.getNameAsString(), true);
+      if (locatedFileStatusList == null) {
+        continue;
+      }
+
+      for (LocatedFileStatus status : locatedFileStatusList) {
+        Path p = status.getPath();
+        if (StoreFileInfo.isReference(p) || HFileLink.isHFileLink(p)) {
+          // Only construct a StoreFileInfo object if this is not a plain
+          // HFile; this saves object creation in the common case.
+          StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, status);
+          hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
+        } else if (StoreFileInfo.isHFile(p)) {
+          // For a plain HFile, add the block locations that came back with
+          // the listing directly; no need to create another
+          // HDFSBlocksDistribution here.
+          addToHDFSBlocksDistribution(hdfsBlocksDistribution, status.getBlockLocations());
+        } else {
+          throw new IOException("path=" + p + " doesn't look like a valid StoreFile");
         }
       }
     }
     return hdfsBlocksDistribution;
   }
 
+  /**
+   * Returns the located statuses of the store files of a given column family,
+   * optionally dropping entries that are not valid store files.
+   * @return the valid located statuses, or null if the family has no files
+   */
+  public static List<LocatedFileStatus> getStoreFilesLocatedStatus(
+      final HRegionFileSystem regionfs, final String familyName,
+      final boolean validate) throws IOException {
+    Path familyDir = regionfs.getStoreDir(familyName);
+    List<LocatedFileStatus> locatedFileStatuses = listLocatedStatus(
+        regionfs.getFileSystem(), familyDir);
+    if (locatedFileStatuses == null) {
+      LOG.debug("No StoreFiles for: " + familyDir);
+      return null;
+    }
+
+    List<LocatedFileStatus> validStoreFiles = Lists.newArrayList();
+    for (LocatedFileStatus status : locatedFileStatuses) {
+      if (validate && !StoreFileInfo.isValid(status)) {
+        LOG.warn("Invalid StoreFile: " + status.getPath());
+      } else {
+        validStoreFiles.add(status);
+      }
+    }
+    // Return the filtered list, not the raw listing, so that invalid
+    // entries are actually excluded when validate is true.
+    return validStoreFiles;
+  }
+
+  /**
+   * Lists the located statuses of the files directly under the given
+   * directory, or returns null if the directory is empty or missing.
+   */
+  public static List<LocatedFileStatus> listLocatedStatus(final FileSystem fs,
+      final Path dir) throws IOException {
+    List<LocatedFileStatus> status = null;
+    try {
+      RemoteIterator<LocatedFileStatus> locatedFileStatusRemoteIterator = fs
+          .listFiles(dir, false);
+      while (locatedFileStatusRemoteIterator.hasNext()) {
+        if (status == null) {
+          status = Lists.newArrayList();
+        }
+        status.add(locatedFileStatusRemoteIterator.next());
+      }
+    } catch (FileNotFoundException fnfe) {
+      // A missing family directory means no store files; mirror the old
+      // getStoreFiles() behavior by returning null instead of throwing.
+      LOG.debug(dir + " doesn't exist");
+    }
+    return status;
+  }
+
+  /**
+   * Adds the weight of each block location (its hosts and length) to the
+   * given HDFSBlocksDistribution.
+   */
+  public static void addToHDFSBlocksDistribution(
+      HDFSBlocksDistribution blocksDistribution, BlockLocation[] blockLocations)
+      throws IOException {
+    for (BlockLocation bl : blockLocations) {
+      String[] hosts = bl.getHosts();
+      long len = bl.getLength();
+      blocksDistribution.addHostsAndBlockWeight(hosts, len);
+    }
+  }
+
   /**
    * Increase the size of mem store in this region and the size of global mem
    * store
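
Note for reviewers (not part of the patch): the win here is that FileSystem.listFiles() returns LocatedFileStatus entries whose block locations are fetched as part of the listing itself, so for plain HFiles the old per-file getFileBlockLocations() round trip to the NameNode disappears. A minimal standalone sketch of the same pattern follows; the class and method names (BlockWeightDemo, hostWeights) are illustrative only.

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class BlockWeightDemo {
      /**
       * Sums per-host block weight (bytes) for all files directly under dir,
       * using the block locations that come back with the listing itself.
       */
      static Map<String, Long> hostWeights(FileSystem fs, Path dir) throws IOException {
        Map<String, Long> weights = new HashMap<String, Long>();
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(dir, false);
        while (it.hasNext()) {
          LocatedFileStatus status = it.next();
          // No extra getFileBlockLocations() RPC: locations are already here.
          for (BlockLocation bl : status.getBlockLocations()) {
            for (String host : bl.getHosts()) {
              Long prev = weights.get(host);
              weights.put(host, (prev == null ? 0L : prev) + bl.getLength());
            }
          }
        }
        return weights;
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        for (Map.Entry<String, Long> e : hostWeights(fs, new Path(args[0])).entrySet()) {
          System.out.println(e.getKey() + "\t" + e.getValue());
        }
      }
    }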