diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
index f71b62a..247124e 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
@@ -369,6 +369,9 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
         BackupSystemTable table = new BackupSystemTable(conn)) {
       for (String backupId : backupIds) {
         BackupInfo bInfo = table.readBackupInfo(backupId);
+        if (bInfo == null) {
+          continue;
+        }
         allSet.addAll(bInfo.getTableNames());
       }
     }
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 094fc1d..f5b4cea 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -577,7 +577,11 @@ public class RSGroupAdminServer implements RSGroupAdmin {
       checkServersAndTables(servers, tables, targetGroup);
 
       //Move servers and tables to a new group.
-      String srcGroup = getRSGroupOfServer(servers.iterator().next()).getName();
+      RSGroupInfo rsGroupInfo = getRSGroupOfServer(servers.iterator().next());
+      if (rsGroupInfo == null) {
+        throw new ConstraintException("The group info of servers being moved cannot be null.");
+      }
+      String srcGroup = rsGroupInfo.getName();
       rsGroupInfoManager.moveServersAndTables(servers, tables, srcGroup, targetGroup);
 
       //move regions which should not belong to these tables
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index ce76b05..faef95c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -229,7 +229,9 @@ public class TableStateManager {
       @Override
       public boolean visit(Result r) throws IOException {
         TableState state = MetaTableAccessor.getTableState(r);
-        states.put(state.getTableName().getNameAsString(), state);
+        if (state != null) {
+          states.put(state.getTableName().getNameAsString(), state);
+        }
         return true;
       }
     });
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 57e71f8..bf6d565 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -618,18 +618,20 @@ public class MergeTableRegionsProcedure
     final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
     final Configuration conf = env.getMasterConfiguration();
     final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
-
-    for (String family: regionFs.getFamilies()) {
-      final ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(family));
-      final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
-
-      if (storeFiles != null && storeFiles.size() > 0) {
-        final CacheConfig cacheConf = new CacheConfig(conf, hcd);
-        for (StoreFileInfo storeFileInfo: storeFiles) {
-          // Create reference file(s) of the region in mergedDir
-          regionFs.mergeStoreFile(mergedRegion, family, new HStoreFile(mfs.getFileSystem(),
-            storeFileInfo, conf, cacheConf, hcd.getBloomFilterType(), true),
-            mergedDir);
+    Collection<String> familyNames = regionFs.getFamilies();
+    if (familyNames != null) {
+      for (String family: familyNames) {
+        final ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(family));
+        final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
+
+        if (storeFiles != null && storeFiles.size() > 0) {
+          final CacheConfig cacheConf = new CacheConfig(conf, hcd);
+          for (StoreFileInfo storeFileInfo: storeFiles) {
+            // Create reference file(s) of the region in mergedDir
+            regionFs.mergeStoreFile(mergedRegion, family, new HStoreFile(mfs.getFileSystem(),
+              storeFileInfo, conf, cacheConf, hcd.getBloomFilterType(), true),
+              mergedDir);
+          }
         }
       }
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 341affb..ce4f899 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -577,9 +577,13 @@ public class SplitTableRegionProcedure
     // Nothing to unroll here if failure -- re-run createSplitsDir will
     // clean this up.
     int nbFiles = 0;
+    Collection<String> familyNames = regionFs.getFamilies();
+    if (familyNames == null || familyNames.isEmpty()) {
+      return new Pair<Integer, Integer>(0, 0);
+    }
     final Map<String, Collection<StoreFileInfo>> files =
-      new HashMap<String, Collection<StoreFileInfo>>(regionFs.getFamilies().size());
-    for (String family: regionFs.getFamilies()) {
+      new HashMap<String, Collection<StoreFileInfo>>(familyNames.size());
+    for (String family: familyNames) {
       Collection<StoreFileInfo> sfis = regionFs.getStoreFiles(family);
       if (sfis == null) continue;
       Collection<StoreFileInfo> filteredSfis = null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index 12bda2d..a5805a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 
 @InterfaceAudience.Private
 public class CloneSnapshotProcedure
@@ -335,9 +336,10 @@ public class CloneSnapshotProcedure
         mfs.getFileSystem(),
         SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()),
         snapshot);
-
+      Map<String, SnapshotRegionManifest> regionManifests = manifest.getRegionManifestsMap();
+      int size = (regionManifests == null ? 0 : regionManifests.size());
       ProcedureSyncWait.getMasterQuotaManager(env)
-        .checkNamespaceTableAndRegionQuota(getTableName(), manifest.getRegionManifestsMap().size());
+        .checkNamespaceTableAndRegionQuota(getTableName(), size);
     }
 
     final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
index 09f6259..c7c83d1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 
 @InterfaceAudience.Private
 public class RestoreSnapshotProcedure
@@ -351,7 +352,8 @@ public class RestoreSnapshotProcedure
         mfs.getFileSystem(),
         SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()),
         snapshot);
-    int snapshotRegionCount = manifest.getRegionManifestsMap().size();
+    Map<String, SnapshotRegionManifest> regionManifests = manifest.getRegionManifestsMap();
+    int snapshotRegionCount = (regionManifests == null ? 0 : regionManifests.size());
     int tableRegionCount =
       ProcedureSyncWait.getMasterQuotaManager(env).getRegionCountOfTable(tableName);
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 219625b..a53b347 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -28,6 +28,7 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.NavigableMap;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
@@ -316,7 +317,11 @@ public class AccessControlLists {
     try {
       scanner = table.getScanner(scan);
       for (Result res : scanner) {
-        for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) {
+        NavigableMap<byte[], byte[]> familyMap = res.getFamilyMap(ACL_LIST_FAMILY);
+        if (familyMap == null || familyMap.isEmpty()) {
+          continue;
+        }
+        for (byte[] q : familyMap.navigableKeySet()) {
           qualifierSet.add(q);
         }
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 28c7ec3..7a0e204 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -620,6 +620,9 @@ public class FSTableDescriptors implements TableDescriptors {
   private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId)
       throws IOException {
     FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
+    if (status == null) {
+      return;
+    }
     for (FileStatus file : status) {
       Path path = file.getPath();
       int sequenceId = getTableInfoSequenceId(path);