Index: src/main/java/org/apache/jackrabbit/oak/segment/RecordType.java
===================================================================
--- src/main/java/org/apache/jackrabbit/oak/segment/RecordType.java (revision 1765308)
+++ src/main/java/org/apache/jackrabbit/oak/segment/RecordType.java (working copy)
@@ -24,58 +24,6 @@
public enum RecordType {
/**
- * A leaf of a map (which is a HAMT tree). This contains
- *
- * - the size (int)
- * - for each entry, the hash code of the key (4 bytes), then the record id of
- * the key and the record id of the value
- *
- */
- LEAF,
-
- /**
- * A branch of a map (which is a HAMT tree). This contains
- *
- * - level within the HAMT structure (4 most significant bits), plus size
- * of the that branch of the map
- * - bitmap (4 bytes)
- * - record ids of the buckets of the next level of the map
- *
- * There is a special case: if the first int (level/size) is -1, then it's a
- * diff record, to handle the common case of when exactly one existing child
- * node was modified. This is common because whenever one node was changed,
- * we need to propagate that up to the root.
- *
- * - -1 (int)
- * - hash code of the key that was changed (4 bytes)
- * - the record id of the key
- * - the record id of the value
- * - the record id of the (base version of the) modified map
- *
- * There is only ever one single diff record for a map.
- */
- BRANCH,
-
- /**
- * A bucket (a list of references). It always includes at least 2 elements,
- * up to 255 entries (because each entry could in theory point to a
- * different segment, in which case this couldn't be stored in a segment).
- * This contains just the record ids. The size of the list is not stored, as
- * it is stored along with the reference to this record.
- */
- BUCKET,
-
- /**
- * A list including the size (an int). This could be 0, in which case there
- * is no reference. If the size is 1, then reference points to the value of
- * the list. If the size is larger, then a record id follows, which points
- * to a bucket with the actual record ids. If there are more than 255
- * entries in the list, then the list is partitioned into sublists of 255
- * entries each, which are stored kind of recursively.
- */
- LIST,
-
- /**
* A value (for example a string, or a long, or a blob). The format is:
* length (variable length encoding, one byte if shorter than 128, else more
* bytes), then the data as a byte array, or, for large values, a record id
@@ -123,6 +71,61 @@
* pointer to the list record)
*
*/
- NODE
+ NODE,
+
+ /**
+ * Represents the absence of typing information. This (non-)type is
+ * intended for testing purposes only. It should not be used to write
+ * actual data in production code.
+ */
+ UNDEFINED,
+
+ /**
+ * A bucket containing chunks of a binary value. It may point to other
+ * buckets of the same type.
+ */
+ CHUNKS_BUCKET,
+
+ /**
+ * A list record holding the values of a single property. It stores the
+ * number of values for this property and points to a bucket of property
+ * values.
+ */
+ PROPERTY_VALUES_LIST,
+
+ /**
+ * A bucket containing the values of a single property. It may point to
+ * other buckets of the same type.
+ */
+ PROPERTY_VALUES_BUCKET,
+
+ /**
+ * A bucket containing property names. It may point to other buckets of the
+ * same type.
+ */
+ PROPERTY_NAMES_BUCKET,
+
+ /**
+ * A bucket containing the values of a set of properties. It may point to
+ * other buckets of the same type.
+ */
+ PROPERTIES_BUCKET,
+
+ /**
+ * A branch of a map from child names to child nodes. It may point to
+ * other branch records of the same type or to leaf records of type
+ * {@link RecordType#CHILDREN_LEAF}.
+ */
+ CHILDREN_BRANCH,
+
+ /**
+ * A leaf of a map from child names to child nodes.
+ */
+ CHILDREN_LEAF,
+
+ /**
+ * Value record representing the identifier of an external binary object.
+ */
+ BLOB_ID
+
}
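
Purely as an illustration of how the finer-grained constants above relate to the removed LEAF, BRANCH, BUCKET and LIST ones, here is a minimal sketch of a helper that folds them back into the coarse map/list/value accounting used by RecordUsageAnalyser. The class and method names are hypothetical and not part of this patch or of the Oak API.

    import org.apache.jackrabbit.oak.segment.RecordType;

    class RecordCategorySketch {
        // Hypothetical helper: groups the new record types into the coarse
        // categories tracked by RecordUsageAnalyser.getMapSize() and
        // getListSize().
        static String category(RecordType type) {
            switch (type) {
                case CHILDREN_BRANCH:       // formerly BRANCH
                case CHILDREN_LEAF:         // formerly LEAF
                    return "map";
                case CHUNKS_BUCKET:
                case PROPERTY_VALUES_LIST:
                case PROPERTY_VALUES_BUCKET:
                case PROPERTY_NAMES_BUCKET:
                case PROPERTIES_BUCKET:     // formerly LIST / BUCKET
                    return "list";
                case VALUE:
                case BLOB_ID:
                    return "value";
                default:                    // NODE, TEMPLATE, BLOCK, UNDEFINED, ...
                    return type.name().toLowerCase();
            }
        }
    }
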
Index: src/main/java/org/apache/jackrabbit/oak/segment/RecordUsageAnalyser.java
===================================================================
--- src/main/java/org/apache/jackrabbit/oak/segment/RecordUsageAnalyser.java (revision 1765308)
+++ src/main/java/org/apache/jackrabbit/oak/segment/RecordUsageAnalyser.java (working copy)
@@ -63,16 +63,14 @@
}
/**
- * @return number of bytes in {@link RecordType#LEAF leaf} and {@link RecordType#BRANCH branch}
- * records.
+ * @return number of bytes in leaf and branch records.
*/
public long getMapSize() {
return mapSize;
}
/**
- * @return number of bytes in {@link RecordType#LIST list} and {@link RecordType#BUCKET bucket}
- * records.
+ * @return number of bytes in list and bucket records.
*/
public long getListSize() {
return listSize;
Index: src/main/java/org/apache/jackrabbit/oak/segment/RecordWriters.java
===================================================================
--- src/main/java/org/apache/jackrabbit/oak/segment/RecordWriters.java (revision 1765308)
+++ src/main/java/org/apache/jackrabbit/oak/segment/RecordWriters.java (working copy)
@@ -23,11 +23,8 @@
import static java.util.Arrays.sort;
import static java.util.Collections.singleton;
import static org.apache.jackrabbit.oak.segment.MapRecord.SIZE_BITS;
+import static org.apache.jackrabbit.oak.segment.RecordType.BLOB_ID;
import static org.apache.jackrabbit.oak.segment.RecordType.BLOCK;
-import static org.apache.jackrabbit.oak.segment.RecordType.BRANCH;
-import static org.apache.jackrabbit.oak.segment.RecordType.BUCKET;
-import static org.apache.jackrabbit.oak.segment.RecordType.LEAF;
-import static org.apache.jackrabbit.oak.segment.RecordType.LIST;
import static org.apache.jackrabbit.oak.segment.RecordType.NODE;
import static org.apache.jackrabbit.oak.segment.RecordType.TEMPLATE;
import static org.apache.jackrabbit.oak.segment.RecordType.VALUE;
@@ -74,32 +71,32 @@
SegmentBufferWriter writer);
}
- public static RecordWriter newMapLeafWriter(int level, Collection entries) {
- return new MapLeafWriter(level, entries);
+ public static RecordWriter newMapLeafWriter(RecordType type, int level, Collection entries) {
+ return new MapLeafWriter(type, level, entries);
}
- public static RecordWriter newMapLeafWriter() {
- return new MapLeafWriter();
+ public static RecordWriter newMapLeafWriter(RecordType type) {
+ return new MapLeafWriter(type);
}
- public static RecordWriter newMapBranchWriter(int level, int entryCount, int bitmap, List ids) {
- return new MapBranchWriter(level, entryCount, bitmap, ids);
+ public static RecordWriter newMapBranchWriter(RecordType type, int level, int entryCount, int bitmap, List ids) {
+ return new MapBranchWriter(type, level, entryCount, bitmap, ids);
}
- public static RecordWriter newMapBranchWriter(int bitmap, List ids) {
- return new MapBranchWriter(bitmap, ids);
+ public static RecordWriter newMapBranchWriter(RecordType type, int bitmap, List ids) {
+ return new MapBranchWriter(type, bitmap, ids);
}
- public static RecordWriter newListWriter(int count, RecordId lid) {
- return new ListWriter(count, lid);
+ public static RecordWriter newListWriter(RecordType type, int count, RecordId lid) {
+ return new ListWriter(type, count, lid);
}
- public static RecordWriter newListWriter() {
- return new ListWriter();
+ public static RecordWriter newListWriter(RecordType type) {
+ return new ListWriter(type);
}
- public static RecordWriter newListBucketWriter(List ids) {
- return new ListBucketWriter(ids);
+ public static RecordWriter newListBucketWriter(RecordType type, List ids) {
+ return new ListBucketWriter(type, ids);
}
public static RecordWriter newBlockWriter(byte[] bytes, int offset, int length) {
@@ -144,20 +141,21 @@
/**
* Map Leaf record writer.
- * @see RecordType#LEAF
*/
private static class MapLeafWriter extends RecordWriter {
+
private final int level;
+
private final Collection entries;
- private MapLeafWriter() {
- super(LEAF, 4);
+ private MapLeafWriter(RecordType type) {
+ super(type, 4);
this.level = -1;
this.entries = null;
}
- private MapLeafWriter(int level, Collection entries) {
- super(LEAF, 4 + entries.size() * 4, extractIds(entries));
+ private MapLeafWriter(RecordType type, int level, Collection entries) {
+ super(type, 4 + entries.size() * 4, extractIds(entries));
this.level = level;
this.entries = entries;
}
@@ -199,18 +197,20 @@
/**
* Map Branch record writer.
- * @see RecordType#BRANCH
*/
private static class MapBranchWriter extends RecordWriter {
+
private final int level;
+
private final int entryCount;
+
private final int bitmap;
/*
* Write a regular map branch
*/
- private MapBranchWriter(int level, int entryCount, int bitmap, List ids) {
- super(BRANCH, 8, ids);
+ private MapBranchWriter(RecordType type, int level, int entryCount, int bitmap, List ids) {
+ super(type, 8, ids);
this.level = level;
this.entryCount = entryCount;
this.bitmap = bitmap;
@@ -219,9 +219,9 @@
/*
* Write a diff map
*/
- private MapBranchWriter(int bitmap, List ids) {
+ private MapBranchWriter(RecordType type, int bitmap, List ids) {
// level = 0 and entryCount = -1 -> this is a map diff
- this(0, -1, bitmap, ids);
+ this(type, 0, -1, bitmap, ids);
}
@Override
@@ -238,54 +238,53 @@
/**
* List record writer.
- * @see RecordType#LIST
*/
private static class ListWriter extends RecordWriter {
+
private final int count;
+
private final RecordId lid;
- private ListWriter() {
- super(LIST, 4);
+ private ListWriter(RecordType type) {
+ super(type, 4);
count = 0;
lid = null;
}
- private ListWriter(int count, RecordId lid) {
- super(LIST, 4, lid);
+ private ListWriter(RecordType type, int count, RecordId lid) {
+ super(type, 4, lid);
this.count = count;
this.lid = lid;
}
@Override
- protected RecordId writeRecordContent(RecordId id,
- SegmentBufferWriter writer) {
+ protected RecordId writeRecordContent(RecordId id, SegmentBufferWriter writer) {
writer.writeInt(count);
if (lid != null) {
writer.writeRecordId(lid);
}
return id;
}
+
}
/**
* List Bucket record writer.
- *
- * @see RecordType#BUCKET
*/
private static class ListBucketWriter extends RecordWriter {
- private ListBucketWriter(List ids) {
- super(BUCKET, 0, ids);
+ private ListBucketWriter(RecordType type, List ids) {
+ super(type, 0, ids);
}
@Override
- protected RecordId writeRecordContent(RecordId id,
- SegmentBufferWriter writer) {
+ protected RecordId writeRecordContent(RecordId id, SegmentBufferWriter writer) {
for (RecordId bucketId : ids) {
writer.writeRecordId(bucketId);
}
return id;
}
+
}
/**
@@ -381,22 +380,20 @@
* {@code Segment#BLOB_ID_SMALL_LIMIT}.
*
* @see Segment#BLOB_ID_SMALL_LIMIT
- * @see RecordType#VALUE
*/
private static class LargeBlobIdWriter extends RecordWriter {
private final RecordId stringRecord;
private LargeBlobIdWriter(RecordId stringRecord) {
- super(VALUE, 1, stringRecord);
+ super(BLOB_ID, 1, stringRecord);
this.stringRecord = stringRecord;
}
@Override
- protected RecordId writeRecordContent(RecordId id,
- SegmentBufferWriter writer) {
+ protected RecordId writeRecordContent(RecordId id, SegmentBufferWriter writer) {
// The length uses a fake "length" field that is always equal to
- // 0xF0.
- // This allows the code to take apart small from a large blob IDs.
+ // 0xF0. This allows the code to tell small blob IDs apart from
+ // large ones.
writer.writeByte((byte) 0xF0);
writer.writeRecordId(stringRecord);
return id;
@@ -408,20 +405,18 @@
* its binary representation is less than {@code Segment#BLOB_ID_SMALL_LIMIT}.
* @see Segment#BLOB_ID_SMALL_LIMIT
- * @see RecordType#VALUE
*/
private static class SmallBlobIdWriter extends RecordWriter {
private final byte[] blobId;
private SmallBlobIdWriter(byte[] blobId) {
- super(VALUE, 2 + blobId.length);
+ super(BLOB_ID, 2 + blobId.length);
checkArgument(blobId.length < Segment.BLOB_ID_SMALL_LIMIT);
this.blobId = blobId;
}
@Override
- protected RecordId writeRecordContent(RecordId id,
- SegmentBufferWriter writer) {
+ protected RecordId writeRecordContent(RecordId id, SegmentBufferWriter writer) {
int length = blobId.length;
writer.writeShort((short) (length | 0xE000));
writer.writeBytes(blobId, 0, length);
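
For context on the 0xE000 and 0xF0 markers written by the two writers above, here is a minimal reader-side sketch of how the layouts can be told apart. It is illustrative only; the class and method names are hypothetical, and the actual decoding in Oak lives elsewhere.

    class BlobIdLayoutSketch {
        // Large blob IDs start with the single marker byte 0xF0, followed by
        // the record id of the string record holding the ID.
        static boolean isLargeBlobId(byte firstByte) {
            return (firstByte & 0xFF) == 0xF0;
        }

        // Small blob IDs start with writeShort((short) (length | 0xE000)),
        // followed by the raw blob ID bytes.
        static int smallBlobIdLength(short header) {
            return header & 0x1FFF; // strip the 0xE000 marker bits
        }
    }
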
Index: src/main/java/org/apache/jackrabbit/oak/segment/SegmentWriter.java
===================================================================
--- src/main/java/org/apache/jackrabbit/oak/segment/SegmentWriter.java (revision 1765308)
+++ src/main/java/org/apache/jackrabbit/oak/segment/SegmentWriter.java (working copy)
@@ -219,13 +219,12 @@
* @throws IOException
*/
@Nonnull
- public MapRecord writeMap(@Nullable final MapRecord base,
- @Nonnull final Map changes)
+ public MapRecord writeMap(final RecordType branchType, final RecordType leafType, @Nullable final MapRecord base, @Nonnull final Map changes)
throws IOException {
RecordId mapId = writeOperationHandler.execute(new SegmentWriteOperation() {
@Override
public RecordId execute(SegmentBufferWriter writer) throws IOException {
- return with(writer).writeMap(base, changes);
+ return with(writer).writeMap(branchType, leafType, base, changes);
}
});
return new MapRecord(reader, mapId);
@@ -238,11 +237,11 @@
* @throws IOException
*/
@Nonnull
- public RecordId writeList(@Nonnull final List list) throws IOException {
+ public RecordId writeBuckets(final RecordType type, @Nonnull final List list) throws IOException {
return writeOperationHandler.execute(new SegmentWriteOperation() {
@Override
public RecordId execute(SegmentBufferWriter writer) throws IOException {
- return with(writer).writeList(list);
+ return with(writer).writeBuckets(type, list);
}
});
}
@@ -483,9 +482,7 @@
return this;
}
- private RecordId writeMap(@Nullable MapRecord base,
- @Nonnull Map changes)
- throws IOException {
+ private RecordId writeMap(RecordType branchType, RecordType leafType, @Nullable MapRecord base, @Nonnull Map changes) throws IOException {
if (base != null && base.isDiff()) {
Segment segment = base.getSegment();
RecordId key = segment.readRecordId(base.getRecordNumber(), 8);
@@ -506,8 +503,7 @@
if (value.equals(entry.getValue())) {
return base.getRecordId();
} else {
- return RecordWriters.newMapBranchWriter(entry.getHash(), asList(entry.getKey(),
- value, base.getRecordId())).write(writer);
+ return RecordWriters.newMapBranchWriter(branchType, entry.getHash(), asList(entry.getKey(), value, base.getRecordId())).write(writer);
}
}
}
@@ -532,19 +528,19 @@
entries.add(new MapEntry(reader, key, keyId, entry.getValue()));
}
}
- return writeMapBucket(base, entries, 0);
+ return writeMapBucket(branchType, leafType, base, entries, 0);
}
- private RecordId writeMapLeaf(int level, Collection entries) throws IOException {
+ private RecordId writeMapLeaf(RecordType type, int level, Collection entries) throws IOException {
checkNotNull(entries);
int size = entries.size();
checkElementIndex(size, MapRecord.MAX_SIZE);
checkPositionIndex(level, MapRecord.MAX_NUMBER_OF_LEVELS);
checkArgument(size != 0 || level == MapRecord.MAX_NUMBER_OF_LEVELS);
- return RecordWriters.newMapLeafWriter(level, entries).write(writer);
+ return RecordWriters.newMapLeafWriter(type, level, entries).write(writer);
}
- private RecordId writeMapBranch(int level, int size, MapRecord... buckets) throws IOException {
+ private RecordId writeMapBranch(RecordType type, int level, int size, MapRecord... buckets) throws IOException {
int bitmap = 0;
List bucketIds = newArrayListWithCapacity(buckets.length);
for (int i = 0; i < buckets.length; i++) {
@@ -553,17 +549,17 @@
bucketIds.add(buckets[i].getRecordId());
}
}
- return RecordWriters.newMapBranchWriter(level, size, bitmap, bucketIds).write(writer);
+ return RecordWriters.newMapBranchWriter(type, level, size, bitmap, bucketIds).write(writer);
}
- private RecordId writeMapBucket(MapRecord base, Collection entries, int level)
+ private RecordId writeMapBucket(RecordType branchType, RecordType leafType, MapRecord base, Collection entries, int level)
throws IOException {
// when no changed entries, return the base map (if any) as-is
if (entries == null || entries.isEmpty()) {
if (base != null) {
return base.getRecordId();
} else if (level == 0) {
- return RecordWriters.newMapLeafWriter().write(writer);
+ return RecordWriters.newMapLeafWriter(leafType).write(writer);
} else {
return null;
}
@@ -574,18 +570,18 @@
// use leaf records for small maps or the last map level
if (entries.size() <= BUCKETS_PER_LEVEL
|| level == MapRecord.MAX_NUMBER_OF_LEVELS) {
- return writeMapLeaf(level, entries);
+ return writeMapLeaf(leafType, level, entries);
}
// write a large map by dividing the entries into buckets
MapRecord[] buckets = new MapRecord[BUCKETS_PER_LEVEL];
List> changes = splitToBuckets(entries, level);
for (int i = 0; i < BUCKETS_PER_LEVEL; i++) {
- buckets[i] = mapRecordOrNull(writeMapBucket(null, changes.get(i), level + 1));
+ buckets[i] = mapRecordOrNull(writeMapBucket(branchType, leafType, null, changes.get(i), level + 1));
}
// combine the buckets into one big map
- return writeMapBranch(level, entries.size(), buckets);
+ return writeMapBranch(branchType, level, entries.size(), buckets);
}
// if the base map is small, update in memory and write as a new map
@@ -601,7 +597,7 @@
map.remove(entry.getName());
}
}
- return writeMapBucket(null, map.values(), level);
+ return writeMapBucket(branchType, leafType, null, map.values(), level);
}
// finally, the if the base map is large, handle updates per bucket
@@ -610,7 +606,7 @@
MapRecord[] buckets = base.getBuckets();
List> changes = splitToBuckets(entries, level);
for (int i = 0; i < BUCKETS_PER_LEVEL; i++) {
- buckets[i] = mapRecordOrNull(writeMapBucket(buckets[i], changes.get(i), level + 1));
+ buckets[i] = mapRecordOrNull(writeMapBucket(branchType, leafType, buckets[i], changes.get(i), level + 1));
if (buckets[i] != null) {
newSize += buckets[i].size();
newCount++;
@@ -619,7 +615,7 @@
// OAK-654: what if the updated map is smaller?
if (newSize > BUCKETS_PER_LEVEL) {
- return writeMapBranch(level, newSize, buckets);
+ return writeMapBranch(branchType, level, newSize, buckets);
} else if (newCount <= 1) {
// up to one bucket contains entries, so return that as the new map
for (MapRecord bucket : buckets) {
@@ -628,7 +624,7 @@
}
}
// no buckets remaining, return empty map
- return writeMapBucket(null, null, level);
+ return writeMapBucket(branchType, leafType, null, null, level);
} else {
// combine all remaining entries into a leaf record
List list = newArrayList();
@@ -637,7 +633,7 @@
addAll(list, bucket.getEntries());
}
}
- return writeMapLeaf(level, list);
+ return writeMapLeaf(leafType, level, list);
}
}
@@ -645,13 +641,7 @@
return id == null ? null : new MapRecord(reader, id);
}
- /**
- * Writes a list record containing the given list of record identifiers.
- *
- * @param list list of record identifiers
- * @return list record identifier
- */
- private RecordId writeList(@Nonnull List list) throws IOException {
+ private RecordId writeBuckets(RecordType type, List list) throws IOException {
checkNotNull(list);
checkArgument(!list.isEmpty());
List thisLevel = list;
@@ -660,7 +650,7 @@
for (List bucket :
partition(thisLevel, ListRecord.LEVEL_SIZE)) {
if (bucket.size() > 1) {
- nextLevel.add(writeListBucket(bucket));
+ nextLevel.add(RecordWriters.newListBucketWriter(type, bucket).write(writer));
} else {
nextLevel.add(bucket.get(0));
}
@@ -670,11 +660,6 @@
return thisLevel.iterator().next();
}
- private RecordId writeListBucket(List bucket) throws IOException {
- checkArgument(bucket.size() > 1);
- return RecordWriters.newListBucketWriter(bucket).write(writer);
- }
-
private List> splitToBuckets(Collection entries, int level) {
int mask = (1 << MapRecord.BITS_PER_LEVEL) - 1;
int shift = 32 - (level + 1) * MapRecord.BITS_PER_LEVEL;
@@ -745,7 +730,7 @@
pos += len;
}
- return writeValueRecord(data.length, writeList(blockIds));
+ return writeValueRecord(data.length, writeBuckets(RecordType.CHUNKS_BUCKET, blockIds));
}
private boolean sameStore(SegmentId id) {
@@ -837,7 +822,7 @@
if (blockIds == null) {
return internalWriteStream(stream);
} else {
- return writeValueRecord(segmentStream.getLength(), writeList(blockIds));
+ return writeValueRecord(segmentStream.getLength(), writeBuckets(RecordType.CHUNKS_BUCKET, blockIds));
}
}
threw = false;
@@ -882,7 +867,7 @@
length += n;
}
- return writeValueRecord(length, writeList(blockIds));
+ return writeValueRecord(length, writeBuckets(RecordType.CHUNKS_BUCKET, blockIds));
}
private RecordId writeProperty(@Nonnull PropertyState state) throws IOException {
@@ -917,9 +902,10 @@
if (!type.isArray()) {
return valueIds.iterator().next();
} else if (count == 0) {
- return RecordWriters.newListWriter().write(writer);
+ return RecordWriters.newListWriter(RecordType.PROPERTY_VALUES_LIST).write(writer);
} else {
- return RecordWriters.newListWriter(count, writeList(valueIds)).write(writer);
+ RecordId buckets = writeBuckets(RecordType.PROPERTY_VALUES_BUCKET, valueIds);
+ return RecordWriters.newListWriter(RecordType.PROPERTY_VALUES_LIST, count, buckets).write(writer);
}
}
@@ -983,7 +969,7 @@
RecordId propNamesId = null;
if (propertyNames.length > 0) {
- propNamesId = writeList(asList(propertyNames));
+ propNamesId = writeBuckets(RecordType.PROPERTY_NAMES_BUCKET, asList(propertyNames));
ids.add(propNamesId);
}
@@ -1093,7 +1079,7 @@
writeNode(entry.getNodeState(), depth + 1));
}
}
- ids.add(writeMap(base, childNodes));
+ ids.add(writeMap(RecordType.CHILDREN_BRANCH, RecordType.CHILDREN_LEAF, base, childNodes));
} else if (childName != Template.ZERO_CHILD_NODES) {
ids.add(writeNode(state.getChildNode(template.getChildName()), depth + 1));
}
@@ -1144,7 +1130,7 @@
}
if (!pIds.isEmpty()) {
- ids.add(writeList(pIds));
+ ids.add(writeBuckets(RecordType.PROPERTIES_BUCKET, pIds));
}
RecordId stableId = null;
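
To clarify the bucketing performed by writeBuckets (the renamed writeList shown earlier in this file), here is a small self-contained sketch of the same loop shape without the segment I/O. It assumes a bucket capacity of 255, matching ListRecord.LEVEL_SIZE; the class and helper names are hypothetical.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Function;

    class ListBucketingSketch {
        // Illustrative only: each round replaces a run of up to 255 ids with
        // one bucket id; singleton runs pass through unchanged, mirroring the
        // optimisation in writeBuckets. Assumes a non-empty input list, as
        // writeBuckets does (checkArgument(!list.isEmpty())).
        static <T> T collapse(List<T> ids, Function<List<T>, T> writeBucket) {
            List<T> level = ids;
            while (level.size() > 1) {
                List<T> next = new ArrayList<>();
                for (int i = 0; i < level.size(); i += 255) { // 255 = ListRecord.LEVEL_SIZE
                    List<T> bucket = level.subList(i, Math.min(i + 255, level.size()));
                    next.add(bucket.size() > 1 ? writeBucket.apply(bucket) : bucket.get(0));
                }
                level = next;
            }
            return level.get(0);
        }
    }
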
Index: src/test/java/org/apache/jackrabbit/oak/segment/RecordTest.java
===================================================================
--- src/test/java/org/apache/jackrabbit/oak/segment/RecordTest.java (revision 1765308)
+++ src/test/java/org/apache/jackrabbit/oak/segment/RecordTest.java (working copy)
@@ -29,6 +29,7 @@
import static org.apache.jackrabbit.oak.api.Type.STRINGS;
import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
import static org.apache.jackrabbit.oak.segment.ListRecord.LEVEL_SIZE;
+import static org.apache.jackrabbit.oak.segment.RecordType.UNDEFINED;
import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -113,11 +114,11 @@
public void testListRecord() throws IOException {
RecordId blockId = writer.writeBlock(bytes, 0, bytes.length);
- ListRecord one = writeList(1, blockId);
- ListRecord level1 = writeList(LEVEL_SIZE, blockId);
- ListRecord level1p = writeList(LEVEL_SIZE + 1, blockId);
- ListRecord level2 = writeList(LEVEL_SIZE * LEVEL_SIZE, blockId);
- ListRecord level2p = writeList(LEVEL_SIZE * LEVEL_SIZE + 1, blockId);
+ ListRecord one = writeBuckets(1, blockId);
+ ListRecord level1 = writeBuckets(LEVEL_SIZE, blockId);
+ ListRecord level1p = writeBuckets(LEVEL_SIZE + 1, blockId);
+ ListRecord level2 = writeBuckets(LEVEL_SIZE * LEVEL_SIZE, blockId);
+ ListRecord level2p = writeBuckets(LEVEL_SIZE * LEVEL_SIZE + 1, blockId);
assertEquals(1, one.size());
assertEquals(blockId, one.getEntry(0));
@@ -143,9 +144,9 @@
assertEquals(LEVEL_SIZE * LEVEL_SIZE + 1, count);
}
- private ListRecord writeList(int size, RecordId id) throws IOException {
+ private ListRecord writeBuckets(int size, RecordId id) throws IOException {
List list = Collections.nCopies(size, id);
- return new ListRecord(writer.writeList(list), size);
+ return new ListRecord(writer.writeBuckets(UNDEFINED, list), size);
}
@Test
@@ -154,7 +155,7 @@
for (int i = 0; i < 1000; i++) {
list.add(new RecordId(store.newBulkSegmentId(), 0));
}
- writer.writeList(list);
+ writer.writeBuckets(UNDEFINED, list);
}
@Test
@@ -219,17 +220,14 @@
public void testMapRecord() throws IOException {
RecordId blockId = writer.writeBlock(bytes, 0, bytes.length);
- MapRecord zero = writer.writeMap(
- null, ImmutableMap.of());
- MapRecord one = writer.writeMap(
- null, ImmutableMap.of("one", blockId));
- MapRecord two = writer.writeMap(
- null, ImmutableMap.of("one", blockId, "two", blockId));
+ MapRecord zero = writer.writeMap(UNDEFINED, UNDEFINED, null, ImmutableMap.of());
+ MapRecord one = writer.writeMap(UNDEFINED, UNDEFINED, null, ImmutableMap.of("one", blockId));
+ MapRecord two = writer.writeMap(UNDEFINED, UNDEFINED, null, ImmutableMap.of("one", blockId, "two", blockId));
Map map = newHashMap();
for (int i = 0; i < 1000; i++) {
map.put("key" + i, blockId);
}
- MapRecord many = writer.writeMap(null, map);
+ MapRecord many = writer.writeMap(UNDEFINED, UNDEFINED, null, map);
Iterator iterator;
@@ -270,7 +268,7 @@
Map changes = newHashMap();
changes.put("key0", null);
changes.put("key1000", blockId);
- MapRecord modified = writer.writeMap(many, changes);
+ MapRecord modified = writer.writeMap(UNDEFINED, UNDEFINED, many, changes);
assertEquals(1000, modified.size());
iterator = modified.getEntries().iterator();
for (int i = 1; i <= 1000; i++) {
@@ -288,7 +286,7 @@
Map changes = newHashMap();
changes.put("one", null);
- MapRecord zero = writer.writeMap(null, changes);
+ MapRecord zero = writer.writeMap(UNDEFINED, UNDEFINED, null, changes);
assertEquals(0, zero.size());
}
@@ -303,7 +301,7 @@
map.put(new String(key), blockId);
}
- MapRecord bad = writer.writeMap(null, map);
+ MapRecord bad = writer.writeMap(UNDEFINED, UNDEFINED, null, map);
assertEquals(map.size(), bad.size());
Iterator iterator = bad.getEntries().iterator();
Index: src/test/java/org/apache/jackrabbit/oak/segment/SegmentParserTest.java
===================================================================
--- src/test/java/org/apache/jackrabbit/oak/segment/SegmentParserTest.java (revision 1765308)
+++ src/test/java/org/apache/jackrabbit/oak/segment/SegmentParserTest.java (working copy)
@@ -28,6 +28,7 @@
import static org.apache.jackrabbit.oak.api.Type.NAME;
import static org.apache.jackrabbit.oak.api.Type.NAMES;
import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
+import static org.apache.jackrabbit.oak.segment.RecordType.UNDEFINED;
import static org.apache.jackrabbit.oak.segment.Segment.MEDIUM_LIMIT;
import static org.apache.jackrabbit.oak.segment.Segment.SMALL_LIMIT;
import static org.apache.jackrabbit.oak.segment.SegmentParser.BlobType.LONG;
@@ -42,7 +43,6 @@
import java.util.List;
import java.util.Map;
import java.util.Random;
-import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.collect.ImmutableList;
import org.apache.jackrabbit.oak.api.Blob;
@@ -56,7 +56,6 @@
import org.apache.jackrabbit.oak.segment.memory.MemoryStore;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Test;
public class SegmentParserTest {
@@ -219,7 +218,7 @@
@Test
public void emptyMap() throws IOException {
Map empty = newHashMap();
- MapRecord map = writer.writeMap(null, empty);
+ MapRecord map = writer.writeMap(UNDEFINED, UNDEFINED, null, empty);
MapInfo mapInfo = new TestParser(store.getReader(), "emptyMap") {
@Override protected void onMapLeaf(RecordId parentId, RecordId mapId, MapRecord map) { }
}.parseMap(null, map.getRecordId(), map);
@@ -229,8 +228,8 @@
@Test
public void nonEmptyMap() throws IOException {
Random rnd = new Random();
- MapRecord base = writer.writeMap(null, createMap(33, rnd));
- MapRecord map = writer.writeMap(base, createMap(1, rnd));
+ MapRecord base = writer.writeMap(UNDEFINED, UNDEFINED, null, createMap(33, rnd));
+ MapRecord map = writer.writeMap(UNDEFINED, UNDEFINED, base, createMap(1, rnd));
MapInfo mapInfo = new TestParser(store.getReader(), "nonEmptyMap") {
@Override
protected void onMapDiff(RecordId parentId, RecordId mapId, MapRecord map) {
@@ -395,7 +394,7 @@
for (int k = 0; k < count; k++) {
list.add(writer.writeString("string " + rnd.nextLong()));
}
- RecordId listId = writer.writeList(list);
+ RecordId listId = writer.writeBuckets(UNDEFINED, list);
ListInfo listInfo = new TestParser(store.getReader(), "nonEmptyList"){
@Override
protected void onListBucket(RecordId parentId, RecordId listId, int index, int count, int capacity) {
Index: src/test/java/org/apache/jackrabbit/oak/segment/SegmentReferencesTest.java
===================================================================
--- src/test/java/org/apache/jackrabbit/oak/segment/SegmentReferencesTest.java (revision 1765308)
+++ src/test/java/org/apache/jackrabbit/oak/segment/SegmentReferencesTest.java (working copy)
@@ -46,7 +46,7 @@
SegmentWriter writer = SegmentWriterBuilder.segmentWriterBuilder("test").build(store);
RecordId stringId = writer.writeString("test");
- RecordId listId = writer.writeList(Arrays.asList(stringId, stringId));
+ RecordId listId = writer.writeBuckets(RecordType.UNDEFINED, Arrays.asList(stringId, stringId));
writer.flush();
// The two records should be living in the same segment.
@@ -71,7 +71,7 @@
RecordId stringId = writer.writeString("test");
writer.flush();
- RecordId listId = writer.writeList(Arrays.asList(stringId, stringId));
+ RecordId listId = writer.writeBuckets(RecordType.UNDEFINED, Arrays.asList(stringId, stringId));
writer.flush();
// The two records should be living in two different segments.
Index: src/test/java/org/apache/jackrabbit/oak/segment/file/FileStoreIT.java
===================================================================
--- src/test/java/org/apache/jackrabbit/oak/segment/file/FileStoreIT.java (revision 1765308)
+++ src/test/java/org/apache/jackrabbit/oak/segment/file/FileStoreIT.java (working copy)
@@ -20,6 +20,7 @@
import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Sets.newTreeSet;
+import static org.apache.jackrabbit.oak.segment.RecordType.UNDEFINED;
import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -183,7 +184,7 @@
// Adding 765 bytes (255 recordIds)
// This should cause the current segment to flush
List list = Collections.nCopies(n, x);
- writer.writeList(list);
+ writer.writeBuckets(UNDEFINED, list);
writer.flush();
Index: src/test/java/org/apache/jackrabbit/oak/segment/file/TarWriterTest.java
===================================================================
--- src/test/java/org/apache/jackrabbit/oak/segment/file/TarWriterTest.java (revision 1765308)
+++ src/test/java/org/apache/jackrabbit/oak/segment/file/TarWriterTest.java (working copy)
@@ -36,8 +36,8 @@
import java.util.UUID;
import com.google.common.collect.ImmutableList;
-
import org.apache.jackrabbit.oak.segment.RecordId;
+import org.apache.jackrabbit.oak.segment.RecordType;
import org.apache.jackrabbit.oak.segment.Segment;
import org.apache.jackrabbit.oak.segment.SegmentId;
import org.apache.jackrabbit.oak.segment.SegmentWriter;
@@ -215,7 +215,7 @@
void addReference(SegmentWriter writer) throws IOException {
// Need to write a proper list as singleton lists are optimised
// to just returning the recordId of its single element
- writer.writeList(ImmutableList.of(selfId, selfId));
+ writer.writeBuckets(RecordType.UNDEFINED, ImmutableList.of(selfId, selfId));
}
}