Index: oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriter.java
===================================================================
--- oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriter.java	(revision 1839649)
+++ oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriter.java	(working copy)
@@ -405,16 +405,25 @@
         }
 
         if (segmentSize > buffer.length) {
-            LOG.debug("Flushing full segment {} (headerSize={}, recordSize={}, length={}, segmentSize={})",
+            if (dirty) {
+                LOG.debug("Flushing full segment {} (headerSize={}, recordSize={}, length={}, segmentSize={})",
                     segment.getSegmentId(), headerSize, recordSize, length, segmentSize);
-            flush(store);
+                flush(store);
+                return prepare(type, size, ids, store);
+            }
+            throw new IllegalArgumentException(String.format(
+                "Record too big: type=%s, size=%s, recordIds=%s, total=%s",
+                type,
+                size,
+                ids.size(),
+                recordSize
+            ));
         }
 
         statistics.recordCount++;
 
         length += recordSize;
         position = buffer.length - length;
-        checkState(position >= 0);
 
         int recordNumber = recordNumbers.addRecord(type, position);
         return new RecordId(segment.getSegmentId(), recordNumber);
Index: oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterTest.java
===================================================================
--- oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterTest.java	(revision 1839649)
+++ oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentBufferWriterTest.java	(working copy)
@@ -24,6 +24,8 @@
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.fail;
 
 import java.io.File;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState;
@@ -96,4 +97,41 @@
         assertNotEquals(before, after);
     }
 
+    @Test
+    public void tooBigRecord() throws Exception {
+
+        // See OAK-7721 to understand why this test exists.
+
+        try (FileStore store = openFileStore()) {
+
+            // Please don't change anything from the following statement yet.
+            // Read the next comment to understand why.
+
+            SegmentBufferWriter writer = new SegmentBufferWriter(
+                store.getSegmentIdProvider(),
+                store.getReader(),
+                "t",
+                store.getRevisions().getHead().getSegment().getGcGeneration()
+            );
+
+            // The size of the record is chosen with the precise intention to
+            // fool `writer` into having enough space to write the record. In
+            // particular, at the end of `prepare()`, `writer` will have
+            // `this.length = 262144`, which is `MAX_SEGMENT_SIZE`, and
+            // `this.position = 0`. This result is particularly sensitive to the
+            // initial content of the segment, which in turn is influenced by
+            // the segment info. Try to change the writer ID in the constructor
+            // of `SegmentBufferWriter` to a longer string, and you will have
+            // `prepare()` throw ISEs because the writer ID is embedded in the
+            // segment info.
+
+            try {
+                writer.prepare(RecordType.BLOCK, 262101, Collections.emptyList(), store);
+                fail("prepare() should have thrown IllegalArgumentException for a too-big record");
+            } catch (IllegalArgumentException e) {
+                assertEquals("Record too big: type=BLOCK, size=262101, recordIds=0, total=262104", e.getMessage());
+            }
+        }
+    }
+
 }