Index: src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
===================================================================
--- src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java	(revision 693663)
+++ src/test/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java	(working copy)
@@ -228,7 +228,7 @@
       // file, one from the top and the other from the bottom.
       // Test bottom half first.
       bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
-        this.conf, HStoreFile.Range.bottom, midkey);
+        this.conf, HStoreFile.Range.bottom, midkey, null);
       boolean first = true;
       while (bottom.next(key, value)) {
         previous = key.toString();
@@ -243,7 +243,7 @@
       }
       // Now test reading from the top.
       top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(), this.conf,
-        HStoreFile.Range.top, midkey);
+        HStoreFile.Range.top, midkey, null);
       first = true;
       while (top.next(key, value)) {
         assertTrue(key.compareTo(midkey) >= 0);
@@ -264,12 +264,12 @@
       // properly.
       WritableComparable badkey = new HStoreKey(" ");
       bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
-        this.conf, HStoreFile.Range.bottom, badkey);
+        this.conf, HStoreFile.Range.bottom, badkey, null);
       // When badkey is < than the bottom, should return no values.
       assertFalse(bottom.next(key, value));
       // Now read from the top.
       top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(), this.conf,
-        HStoreFile.Range.top, badkey);
+        HStoreFile.Range.top, badkey, null);
       first = true;
       while (top.next(key, value)) {
         assertTrue(key.compareTo(badkey) >= 0);
@@ -291,7 +291,7 @@
       // Test when badkey is > than last key in file ('||' > 'zz').
       badkey = new HStoreKey("|||");
       bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
-        this.conf, HStoreFile.Range.bottom, badkey);
+        this.conf, HStoreFile.Range.bottom, badkey, null);
       first = true;
       while (bottom.next(key, value)) {
         if (first) {
@@ -310,7 +310,7 @@
      }
      // Now look at top. Should not return any values.
      top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(), this.conf,
-       HStoreFile.Range.top, badkey);
+       HStoreFile.Range.top, badkey, null);
      assertFalse(top.next(key, value));
    } finally {
@@ -343,12 +343,12 @@
       // properly.
       HStoreKey midkey = new HStoreKey(" ");
       bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
-        this.conf, HStoreFile.Range.bottom, midkey);
+        this.conf, HStoreFile.Range.bottom, midkey, null);
       // When midkey is < than the bottom, should return no values.
       assertFalse(bottom.next(key, value));
       // Now read from the top.
       top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
-        this.conf, HStoreFile.Range.top, midkey);
+        this.conf, HStoreFile.Range.top, midkey, null);
       boolean first = true;
       while (top.next(key, value)) {
         assertTrue(key.compareTo(midkey) >= 0);
@@ -364,7 +364,7 @@
       // Test when midkey is > than last key in file ('||' > 'zz').
       midkey = new HStoreKey("|||");
       bottom = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
-        this.conf, HStoreFile.Range.bottom, midkey);
+        this.conf, HStoreFile.Range.bottom, midkey, null);
       first = true;
       while (bottom.next(key, value)) {
         if (first) {
@@ -377,7 +377,7 @@
       assertEquals("zz", Bytes.toString(key.getRow()));
       // Now look at top. Should not return any values.
       top = new HStoreFile.HalfMapFileReader(this.fs, p.toString(),
-        this.conf, HStoreFile.Range.top, midkey);
+        this.conf, HStoreFile.Range.top, midkey, null);
       assertFalse(top.next(key, value));
     } finally {
       if (top != null) {
Index: src/java/org/apache/hadoop/hbase/HMerge.java
===================================================================
--- src/java/org/apache/hadoop/hbase/HMerge.java	(revision 693663)
+++ src/java/org/apache/hadoop/hbase/HMerge.java	(working copy)
@@ -333,6 +333,7 @@
       TreeMap results = new TreeMap(Bytes.BYTES_COMPARATOR);
       while(rootScanner.next(key, results)) {
+        key.setHRegionInfo(HRegionInfo.ROOT_REGIONINFO);
         for(Cell c: results.values()) {
           HRegionInfo info = Writables.getHRegionInfoOrNull(c.getValue());
           if (info != null) {
@@ -389,4 +390,4 @@
       }
     }
   }
-}
\ No newline at end of file
+}
Index: src/java/org/apache/hadoop/hbase/HStoreKey.java
===================================================================
--- src/java/org/apache/hadoop/hbase/HStoreKey.java	(revision 693663)
+++ src/java/org/apache/hadoop/hbase/HStoreKey.java	(working copy)
@@ -24,6 +24,7 @@
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.regionserver.BeforeThisStoreKey;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableComparator;
@@ -203,7 +204,7 @@
   /** @return Approximate size in bytes of this key. */
   public long getSize() {
-    return this.row.length + this.column.length + Bytes.SIZEOF_LONG;
+    return getRow().length + getColumn().length + Bytes.SIZEOF_LONG;
   }
 
   /**
@@ -212,7 +213,8 @@
    * @param other the source key
    */
   public HStoreKey(HStoreKey other) {
-    this(other.row, other.column, other.timestamp, other.regionInfo);
+    this(other.getRow(), other.getColumn(), other.getTimestamp(),
+      other.getHRegionInfo());
   }
 
   /**
@@ -274,6 +276,13 @@
   }
 
   /**
+   * @param hri
+   */
+  public void setHRegionInfo(final HRegionInfo hri) {
+    this.regionInfo = hri;
+  }
+
+  /**
    * Compares the row and column of two keys
    * @param other Key to compare against. Compares row and column.
    * @return True if same row and column.
@@ -281,8 +290,8 @@
    * @see #matchesRowFamily(HStoreKey)
    */
   public boolean matchesRowCol(HStoreKey other) {
-    return HStoreKey.equalsTwoRowKeys(this.regionInfo, this.row, other.row) &&
-      Bytes.equals(column, other.column);
+    return HStoreKey.equalsTwoRowKeys(getHRegionInfo(), getRow(), other.getRow()) &&
+      Bytes.equals(getColumn(), other.getColumn());
   }
 
   /**
@@ -295,8 +304,8 @@
    * @see #matchesRowFamily(HStoreKey)
    */
   public boolean matchesWithoutColumn(HStoreKey other) {
-    return equalsTwoRowKeys(this.regionInfo, this.row, other.row) &&
-      this.timestamp >= other.getTimestamp();
+    return equalsTwoRowKeys(getHRegionInfo(), getRow(), other.getRow()) &&
+      getTimestamp() >= other.getTimestamp();
   }
 
   /**
@@ -309,9 +318,9 @@
    * @see #matchesWithoutColumn(HStoreKey)
    */
   public boolean matchesRowFamily(HStoreKey that) {
-    int delimiterIndex = getFamilyDelimiterIndex(this.column);
-    return equalsTwoRowKeys(this.regionInfo, this.row, that.row) &&
-      Bytes.compareTo(this.column, 0, delimiterIndex, that.column, 0,
+    int delimiterIndex = getFamilyDelimiterIndex(getColumn());
+    return equalsTwoRowKeys(getHRegionInfo(), getRow(), that.getRow()) &&
+      Bytes.compareTo(getColumn(), 0, delimiterIndex, that.getColumn(), 0,
        delimiterIndex) == 0;
   }
@@ -331,9 +340,9 @@
   /** {@inheritDoc} */
   @Override
   public int hashCode() {
-    int result = Bytes.hashCode(this.row);
-    result ^= Bytes.hashCode(this.column);
-    result ^= this.timestamp;
+    int result = Bytes.hashCode(getRow());
+    result ^= Bytes.hashCode(getColumn());
+    result ^= getTimestamp();
     return result;
   }
@@ -366,7 +375,9 @@
     } else if (left.getTimestamp() > right.getTimestamp()) {
       result = -1;
     }
-    return result;
+    // Because of HBASE-877, our BeforeThisStoreKey trick no longer works and
+    // so instead we need to do this check here below.
+    return result == 0 && left instanceof BeforeThisStoreKey? -1: result;
   }
 
   /**
@@ -581,8 +592,9 @@
       this.hri = hri;
     }
 
-    public int compare(final WritableComparable left, final WritableComparable right) {
+    public int compare(final WritableComparable left,
+        final WritableComparable right) {
       return compareTo(this.hri, (HStoreKey)left, (HStoreKey)right);
     }
   }
-}
+}
\ No newline at end of file
Index: src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/Memcache.java	(revision 693663)
+++ src/java/org/apache/hadoop/hbase/regionserver/Memcache.java	(working copy)
@@ -383,20 +383,21 @@
       final Set deletes) {
     // We want the earliest possible to start searching from. Start before
     // the candidate key in case it turns out a delete came in later.
-    HStoreKey search_key = candidateKeys.isEmpty()? new HStoreKey(row, this.regionInfo):
+    HStoreKey search_key = candidateKeys.isEmpty()?
+      new HStoreKey(row, this.regionInfo):
       new HStoreKey(candidateKeys.firstKey().getRow(), this.regionInfo);
     List victims = new ArrayList();
     long now = System.currentTimeMillis();
-    
+
     // Get all the entries that come equal or after our search key
    SortedMap tailMap = map.tailMap(search_key);
-    
+
    // if there are items in the tail map, there's either a direct match to
    // the search key, or a range of values between the first candidate key
    // and the ultimate search key (or the end of the cache)
    if (!tailMap.isEmpty() &&
-      HStoreKey.compareTwoRowKeys(regionInfo,
+      HStoreKey.compareTwoRowKeys(this.regionInfo,
        tailMap.firstKey().getRow(), search_key.getRow()) <= 0) {
      Iterator key_iterator = tailMap.keySet().iterator();
 
      // Keep looking at cells as long as they are no greater than the
@@ -404,10 +405,10 @@
       HStoreKey deletedOrExpiredRow = null;
       for (HStoreKey found_key = null; key_iterator.hasNext() &&
           (found_key == null ||
-            HStoreKey.compareTwoRowKeys(regionInfo,
+            HStoreKey.compareTwoRowKeys(this.regionInfo,
               found_key.getRow(), row) <= 0);) {
         found_key = key_iterator.next();
-        if (HStoreKey.compareTwoRowKeys(regionInfo,
+        if (HStoreKey.compareTwoRowKeys(this.regionInfo,
             found_key.getRow(), row) <= 0) {
           if (HLogEdit.isDeleted(tailMap.get(found_key))) {
             HStore.handleDeleted(found_key, candidateKeys, deletes);
@@ -462,7 +463,7 @@
     if (headMap.isEmpty()) {
       return;
     }
-    
+
     // If there aren't any candidate keys at this point, we need to search
     // backwards until we find at least one candidate or run out of headMap.
     if (candidateKeys.isEmpty()) {
@@ -476,7 +477,7 @@
         // not a delete record.
         boolean deleted = HLogEdit.isDeleted(headMap.get(found_key));
         if (lastRowFound != null &&
-            !HStoreKey.equalsTwoRowKeys(regionInfo, lastRowFound,
+            !HStoreKey.equalsTwoRowKeys(this.regionInfo, lastRowFound,
               found_key.getRow()) && !deleted) {
           break;
         }
Index: src/java/org/apache/hadoop/hbase/regionserver/HStore.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/HStore.java	(revision 693663)
+++ src/java/org/apache/hadoop/hbase/regionserver/HStore.java	(working copy)
@@ -298,7 +298,7 @@
     }
     long maxSeqIdInLog = -1;
     TreeMap reconstructedCache =
-      new TreeMap();
+      new TreeMap(new HStoreKey.HStoreKeyWritableComparator(this.info));
     SequenceFile.Reader logReader = new SequenceFile.Reader(this.fs,
       reconstructionLog, this.conf);
@@ -934,7 +934,7 @@
     ImmutableBytesWritable[] vals = new ImmutableBytesWritable[rdrs.length];
     boolean[] done = new boolean[rdrs.length];
     for(int i = 0; i < rdrs.length; i++) {
-      keys[i] = new HStoreKey();
+      keys[i] = new HStoreKey(HConstants.EMPTY_BYTE_ARRAY, this.info);
       vals[i] = new ImmutableBytesWritable();
       done[i] = false;
     }
@@ -1451,7 +1451,8 @@
     // most closely matches what we're looking for. We'll have to update it as
     // deletes are found all over the place as we go along before finally
     // reading the best key out of it at the end.
-    SortedMap candidateKeys = new TreeMap();
+    SortedMap candidateKeys = new TreeMap(
+      new HStoreKey.HStoreKeyWritableComparator(info));
 
     // Keep a list of deleted cell keys. We need this because as we go through
     // the store files, the cell with the delete marker may be in one file and
@@ -1470,7 +1471,7 @@
     // Process each store file. Run through from newest to oldest.
     // This code below is very close to the body of the getKeys method.
     MapFile.Reader[] maparray = getReaders();
-    for(int i = maparray.length - 1; i >= 0; i--) {
+    for (int i = maparray.length - 1; i >= 0; i--) {
       // Update the candidate keys from the current map file
       rowAtOrBeforeFromMapFile(maparray[i], row, candidateKeys, deletes);
     }
@@ -1503,9 +1504,10 @@
     if (!map.next(startKey, startValue)) {
       return;
     }
+    startKey.setHRegionInfo(this.info);
     // If start row for this file is beyond passed in row, return; nothing
     // in here is of use to us.
-    if (HStoreKey.compareTwoRowKeys(info,startKey.getRow(), row) > 0) {
+    if (HStoreKey.compareTwoRowKeys(this.info, startKey.getRow(), row) > 0) {
       return;
     }
     long now = System.currentTimeMillis();
@@ -1538,15 +1540,15 @@
     // up to the row before and return that.
     HStoreKey finalKey = getFinalKey(map);
     HStoreKey searchKey = null;
-    if (HStoreKey.compareTwoRowKeys(info,finalKey.getRow(), row) < 0) {
+    if (HStoreKey.compareTwoRowKeys(this.info, finalKey.getRow(), row) < 0) {
       searchKey = finalKey;
     } else {
-      searchKey = new HStoreKey(row);
+      searchKey = new HStoreKey(row, this.info);
       if (searchKey.compareTo(startKey) < 0) {
         searchKey = startKey;
       }
     }
-    rowAtOrBeforeCandidate(map, searchKey, candidateKeys, deletes, now);
+    rowAtOrBeforeCandidate(map, searchKey, candidateKeys, deletes, now, finalKey);
   }
 
   /*
@@ -1582,10 +1584,13 @@
    */
   private void rowAtOrBeforeCandidate(final MapFile.Reader map,
     final HStoreKey sk, final SortedMap candidateKeys,
-    final Set deletes, final long now)
+    final Set deletes, final long now, final HStoreKey finalKey)
   throws IOException {
     HStoreKey searchKey = sk;
-    HStoreKey readkey = new HStoreKey();
+    if (searchKey.getHRegionInfo() == null) {
+      searchKey.setHRegionInfo(this.info);
+    }
+    HStoreKey readkey = null;
     ImmutableBytesWritable readval = new ImmutableBytesWritable();
     HStoreKey knownNoGoodKey = null;
     for (boolean foundCandidate = false; !foundCandidate;) {
@@ -1597,9 +1602,12 @@
       }
       HStoreKey deletedOrExpiredRow = null;
       do {
+        readkey.setHRegionInfo(this.info);
+        // Set this region into the readkey.
        // If we have an exact match on row, and it's not a delete, save this
        // as a candidate key
-        if (HStoreKey.equalsTwoRowKeys(info,readkey.getRow(), searchKey.getRow())) {
+        if (HStoreKey.equalsTwoRowKeys(this.info, readkey.getRow(),
+            searchKey.getRow())) {
          if (!HLogEdit.isDeleted(readval.get())) {
            if (handleNonDelete(readkey, now, deletes, candidateKeys)) {
              foundCandidate = true;
@@ -1611,7 +1619,8 @@
           if (deletedOrExpiredRow == null) {
             deletedOrExpiredRow = copy;
           }
-        } else if (HStoreKey.compareTwoRowKeys(info,readkey.getRow(), searchKey.getRow()) > 0) {
+        } else if (HStoreKey.compareTwoRowKeys(this.info, readkey.getRow(),
+            searchKey.getRow()) > 0) {
           // if the row key we just read is beyond the key we're searching for,
           // then we're done.
           break;
@@ -1630,7 +1639,7 @@
         if (deletedOrExpiredRow == null) {
           deletedOrExpiredRow = copy;
         }
-        } 
+        }
       } while(map.next(readkey, readval) &&
         (knownNoGoodKey == null ||
           readkey.compareTo(knownNoGoodKey) < 0));
@@ -1667,7 +1676,7 @@
     final SortedMap candidateKeys,
     final Set deletes, final long now)
   throws IOException {
-    HStoreKey readkey = new HStoreKey();
+    HStoreKey readkey = null;
     ImmutableBytesWritable readval = new ImmutableBytesWritable();
 
     // if there are already candidate keys, we need to start our search
@@ -1676,7 +1685,7 @@
     // of the row in case there are deletes for this candidate in this mapfile
     // BUT do not backup before the first key in the mapfile else getClosest
     // will return null
-    HStoreKey searchKey = new HStoreKey(candidateKeys.firstKey().getRow());
+    HStoreKey searchKey = new HStoreKey(candidateKeys.firstKey().getRow(), this.info);
     if (searchKey.compareTo(startKey) < 0) {
       searchKey = startKey;
     }
@@ -1772,6 +1781,7 @@
   private HStoreKey getFinalKey(final MapFile.Reader mf) throws IOException {
     HStoreKey finalKey = new HStoreKey();
     mf.finalKey(finalKey);
+    finalKey.setHRegionInfo(this.info);
     return finalKey;
   }
Index: src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java	(revision 693663)
+++ src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java	(working copy)
@@ -408,10 +408,10 @@
       return new HStoreFile.HalfMapFileReader(fs,
         getMapFilePath(reference).toString(), conf,
         reference.getFileRegion(), reference.getMidkey(), bloomFilter,
-        blockCacheEnabled);
+        blockCacheEnabled, this.hri);
     }
     return new BloomFilterMapFile.Reader(fs, getMapFilePath().toString(),
-      conf, bloomFilter, blockCacheEnabled);
+      conf, bloomFilter, blockCacheEnabled, this.hri);
   }
 
   /**
@@ -612,9 +612,10 @@
      * @param conf
      * @throws IOException
      */
-    public HbaseReader(FileSystem fs, String dirName, Configuration conf)
+    public HbaseReader(FileSystem fs, String dirName, Configuration conf,
+        HRegionInfo hri)
    throws IOException {
-      this(fs, dirName, conf, false);
+      this(fs, dirName, conf, false, hri);
    }
 
    /**
@@ -622,14 +623,16 @@
      * @param dirName
      * @param conf
      * @param blockCacheEnabled
+     * @param hri
      * @throws IOException
      */
     public HbaseReader(FileSystem fs, String dirName, Configuration conf,
-        boolean blockCacheEnabled)
+        boolean blockCacheEnabled, HRegionInfo hri)
     throws IOException {
-      super(fs, dirName, null, conf, false); // defer opening streams
+      super(fs, dirName, new HStoreKey.HStoreKeyWritableComparator(hri),
+        conf, false); // defer opening streams
       this.blockCacheEnabled = blockCacheEnabled;
-      open(fs, dirName, null, conf);
+      open(fs, dirName, new HStoreKey.HStoreKeyWritableComparator(hri), conf);
       // Force reading of the mapfile index by calling midKey.
       // Reading the index will bring the index into memory over
@@ -671,6 +674,7 @@
      * @param fs
      * @param dirName
      * @param compression
+     * @param hri
      * @throws IOException
      */
     public HbaseWriter(Configuration conf, FileSystem fs, String dirName,
@@ -703,12 +707,14 @@
      * @param conf
      * @param filter
      * @param blockCacheEnabled
+     * @param hri
      * @throws IOException
      */
     public Reader(FileSystem fs, String dirName, Configuration conf,
-        final boolean filter, final boolean blockCacheEnabled)
+        final boolean filter, final boolean blockCacheEnabled,
+        HRegionInfo hri)
     throws IOException {
-      super(fs, dirName, conf, blockCacheEnabled);
+      super(fs, dirName, conf, blockCacheEnabled, hri);
       if (filter) {
         this.bloomFilter = loadBloomFilter(fs, dirName);
       } else {
@@ -894,17 +900,19 @@
     HalfMapFileReader(final FileSystem fs, final String dirName,
         final Configuration conf, final Range r,
-        final WritableComparable midKey)
+        final WritableComparable midKey,
+        final HRegionInfo hri)
     throws IOException {
-      this(fs, dirName, conf, r, midKey, false, false);
+      this(fs, dirName, conf, r, midKey, false, false, hri);
     }
 
     HalfMapFileReader(final FileSystem fs, final String dirName,
         final Configuration conf, final Range r,
         final WritableComparable midKey, final boolean filter,
-        final boolean blockCacheEnabled)
+        final boolean blockCacheEnabled,
+        final HRegionInfo hri)
     throws IOException {
-      super(fs, dirName, conf, filter, blockCacheEnabled);
+      super(fs, dirName, conf, filter, blockCacheEnabled, hri);
       top = isTopFileRegion(r);
       midkey = midKey;
     }
Index: src/java/org/apache/hadoop/hbase/regionserver/BeforeThisStoreKey.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/BeforeThisStoreKey.java	(revision 693663)
+++ src/java/org/apache/hadoop/hbase/regionserver/BeforeThisStoreKey.java	(working copy)
@@ -23,6 +23,7 @@
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HStoreKey;
 
 /**
@@ -116,4 +117,14 @@
   public void write(DataOutput out) throws IOException {
     this.beforeThisKey.write(out);
   }
+
+  @Override
+  public HRegionInfo getHRegionInfo() {
+    return this.beforeThisKey.getHRegionInfo();
+  }
+
+  @Override
+  public void setHRegionInfo(final HRegionInfo hri) {
+    this.beforeThisKey.setHRegionInfo(hri);
+  }
 }
\ No newline at end of file
Index: src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(revision 693663)
+++ src/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(working copy)
@@ -803,7 +803,7 @@
       // Reference to top half of the hsf store file.
       HStoreFile.Reference bReference = new HStoreFile.Reference(
         this.regionInfo.getEncodedName(), h.getFileId(),
-        new HStoreKey(midKey), HStoreFile.Range.top);
+        new HStoreKey(midKey, this.regionInfo), HStoreFile.Range.top);
       HStoreFile b = new HStoreFile(this.conf, fs, splits,
         regionBInfo, h.getColFamily(), -1, bReference);
       h.splitStoreFile(a, b, this.fs);
@@ -1518,7 +1518,8 @@
       for (HStore store : stores.values()) {
         List keys = store.getKeys(new HStoreKey(row, ts, this.regionInfo),
           ALL_VERSIONS, now);
-        TreeMap edits = new TreeMap();
+        TreeMap edits = new TreeMap(
+          new HStoreKey.HStoreKeyWritableComparator(regionInfo));
         for (HStoreKey key: keys) {
           edits.put(key, HLogEdit.deleteBytes.get());
         }
@@ -1550,7 +1551,8 @@
         List keys = store.getKeys(
          new HStoreKey(row, timestamp, this.regionInfo), ALL_VERSIONS, now);
         // delete all the cells
-        TreeMap edits = new TreeMap();
+        TreeMap edits = new TreeMap(
+          new HStoreKey.HStoreKeyWritableComparator(regionInfo));
         for (HStoreKey key: keys) {
           edits.put(key, HLogEdit.deleteBytes.get());
         }
@@ -1578,7 +1580,8 @@
       HStoreKey origin = new HStoreKey(row, column, ts, this.regionInfo);
       Set keys = getKeys(origin, versions);
       if (keys.size() > 0) {
-        TreeMap edits = new TreeMap();
+        TreeMap edits = new TreeMap(
+          new HStoreKey.HStoreKeyWritableComparator(regionInfo));
         for (HStoreKey key: keys) {
           edits.put(key, HLogEdit.deleteBytes.get());
         }
@@ -1614,7 +1617,8 @@
     checkReadOnly();
     TreeMap targets = this.targetColumns.get(lockid);
     if (targets == null) {
-      targets = new TreeMap();
+      targets = new TreeMap(
+        new HStoreKey.HStoreKeyWritableComparator(regionInfo));
       this.targetColumns.put(lockid, targets);
     }
     targets.put(key, val);
@@ -2151,7 +2155,8 @@
     try {
       HStoreKey key = new HStoreKey(row, COL_REGIONINFO,
         System.currentTimeMillis(), r.getRegionInfo());
-      TreeMap edits = new TreeMap();
+      TreeMap edits = new TreeMap(
+        new HStoreKey.HStoreKeyWritableComparator(meta.getRegionInfo()));
       edits.put(key, Writables.getBytes(r.getRegionInfo()));
       meta.update(edits);
     } finally {
Index: src/java/org/apache/hadoop/hbase/HTableDescriptor.java
===================================================================
--- src/java/org/apache/hadoop/hbase/HTableDescriptor.java	(revision 693663)
+++ src/java/org/apache/hadoop/hbase/HTableDescriptor.java	(working copy)
@@ -79,6 +79,8 @@
   public static final boolean DEFAULT_READONLY = false;
 
   public static final int DEFAULT_MEMCACHE_FLUSH_SIZE = 1024*1024*64;
+
+  private transient Boolean meta = null;
 
   // Key is hash of the family name.
   private final Map families =
@@ -198,10 +200,15 @@
   /** @return true if this is a meta region (part of the root or meta tables) */
   public boolean isMetaRegion() {
+    if (this.meta == null) {
+      this.meta = calculateIsMetaRegion();
+    }
+    return this.meta.booleanValue();
+  }
+
+  private synchronized Boolean calculateIsMetaRegion() {
     String value = getValue(IS_META);
-    if (value != null)
-      return Boolean.valueOf(value);
-    return false;
+    return (value != null)? new Boolean(value): Boolean.FALSE;
   }
 
   /**