Index: core/src/main/java/org/apache/hama/bsp/BSPMessageBundle.java
===================================================================
--- core/src/main/java/org/apache/hama/bsp/BSPMessageBundle.java	(Revision 1385087)
+++ core/src/main/java/org/apache/hama/bsp/BSPMessageBundle.java	(Arbeitskopie)
@@ -22,7 +22,6 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map.Entry;
 
@@ -40,7 +39,7 @@
 
   public static final Log LOG = LogFactory.getLog(BSPMessageBundle.class);
 
-  private HashMap<String, LinkedList<M>> messages = new HashMap<String, LinkedList<M>>();
+  private HashMap<String, ArrayList<M>> messages = new HashMap<String, ArrayList<M>>();
   private HashMap<String, Class<M>> classCache = new HashMap<String, Class<M>>();
 
   public BSPMessageBundle() {
@@ -54,8 +53,7 @@
   public void addMessage(M message) {
     String className = message.getClass().getName();
     if (!messages.containsKey(className)) {
-      // use linked list because we're just iterating over them
-      LinkedList<M> list = new LinkedList<M>();
+      ArrayList<M> list = new ArrayList<M>();
       list.add(message);
       messages.put(className, list);
     } else {
@@ -67,7 +65,7 @@
     // here we use an arraylist, because we know the size and outside may need
     // random access
     List<M> mergeList = new ArrayList<M>(messages.size());
-    for (LinkedList<M> c : messages.values()) {
+    for (ArrayList<M> c : messages.values()) {
       mergeList.addAll(c);
     }
     return mergeList;
@@ -78,9 +76,9 @@
     // writes the k/v mapping size
     out.writeInt(messages.size());
     if (messages.size() > 0) {
-      for (Entry<String, LinkedList<M>> entry : messages.entrySet()) {
+      for (Entry<String, ArrayList<M>> entry : messages.entrySet()) {
         out.writeUTF(entry.getKey());
-        LinkedList<M> messageList = entry.getValue();
+        ArrayList<M> messageList = entry.getValue();
         out.writeInt(messageList.size());
         for (M msg : messageList) {
           msg.write(out);
@@ -93,14 +91,14 @@
   @SuppressWarnings("unchecked")
   public void readFields(DataInput in) throws IOException {
     if (messages == null) {
-      messages = new HashMap<String, LinkedList<M>>();
+      messages = new HashMap<String, ArrayList<M>>();
     }
     int numMessages = in.readInt();
     if (numMessages > 0) {
       for (int entries = 0; entries < numMessages; entries++) {
         String className = in.readUTF();
         int size = in.readInt();
-        LinkedList<M> msgList = new LinkedList<M>();
+        ArrayList<M> msgList = new ArrayList<M>();
         messages.put(className, msgList);
         Class<M> clazz = null;
Index: graph/src/test/java/org/apache/jdbm/DBMakerTest.java
===================================================================
--- graph/src/test/java/org/apache/jdbm/DBMakerTest.java	(Revision 0)
+++ graph/src/test/java/org/apache/jdbm/DBMakerTest.java	(Arbeitskopie)
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.IOException;
+import java.util.Set;
+
+public class DBMakerTest extends TestCaseWithTestFile {
+
+  public void testMemory() throws IOException {
+    DBStore db = (DBStore) DBMaker.openMemory().disableCache().make();
+
+    long recid = db.insert("aaa");
+    db.commit();
+    db.update(recid, "bbb");
+    db.rollback();
+    assertEquals("aaa", db.fetch(recid));
+
+    db.close();
+
+    db = (DBStore) DBMaker.openMemory().disableCache().make();
+
+    // the re-opened in-memory store is empty, so the old recid must be gone
+    try {
+      db.fetch(recid);
+      fail("record should not exist");
+    } catch (Throwable e) {
+      // ignore
+    }
+
+  }
+
+  public void testDisk() throws IOException {
+    DBStore db = (DBStore) DBMaker.openFile(newTestFile()).disableCache()
+        .make();
+
+    long recid = db.insert("aaa");
+    db.commit();
+    db.update(recid, "bbb");
+    db.rollback();
+    assertEquals("aaa", db.fetch(recid));
+
+    db.close();
+  }
+
+  public void testEncrypt() {
+    String file = newTestFile();
+    DB db = DBMaker.openFile(file).enableEncryption("password", false).make();
+
+    Set<String> l = db.createHashSet("test");
+    for (int i = 0; i < 10000; i++) {
+      l.add("aa" + i);
+    }
+    db.commit();
+    db.close();
+    db = DBMaker.openFile(file).enableEncryption("password", false).make();
+    l = db.getHashSet("test");
+    for (int i = 0; i < 10000; i++) {
+      assertTrue(l.contains("aa" + i));
+    }
+    db.close();
+
+  }
+
+}
Index: graph/src/test/java/org/apache/jdbm/TestInsertUpdate.java
===================================================================
--- graph/src/test/java/org/apache/jdbm/TestInsertUpdate.java	(Revision 0)
+++ graph/src/test/java/org/apache/jdbm/TestInsertUpdate.java	(Arbeitskopie)
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.junit.Test;
+
+public class TestInsertUpdate extends TestCaseWithTestFile {
+
+  /**
+   * Test that the object is not modified by serialization.
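+   * (The entries pass through a custom serializer on each commit, so a
+   * serializer that mutated its input would corrupt the stored map.)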
+ * + * @throws IOException + */ + @Test + public void testInsertUpdateWithCustomSerializer() throws IOException { + DB db = newDBCache(); + Serializer serializer = new HTreeBucketTest.LongSerializer(); + + Map map = db.createHashMap("custom", serializer, serializer); + + map.put(new Long(1), new Long(1)); + map.put(new Long(2), new Long(2)); + db.commit(); + map.put(new Long(2), new Long(3)); + db.commit(); + db.close(); + } + +} Index: graph/src/test/java/org/apache/jdbm/BTreeMapNavigableTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/BTreeMapNavigableTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/BTreeMapNavigableTest.java (Arbeitskopie) @@ -0,0 +1,309 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; +import java.util.Map.Entry; +import java.util.NavigableMap; +import java.util.NavigableSet; +import java.util.SortedMap; + +/** + * to test {@link NavigableMap} implementation + * + */ +public class BTreeMapNavigableTest extends TestCaseWithTestFile { + + private static final String MUST_NOT_CONTAINS_KD = "must not contains 'kd'"; + + private static final String MUST_NOT_CONTAINS_KA = "must not contains 'ka'"; + + private static final String BAD_FIRST_ENTRY_KEY = "bad first entry key"; + + private static final String MUST_NOT_BE_EMPTY = "must not be empty"; + + private static final String BAD_SIZE = "bad size"; + + private static final String MUST_CONTAINS_KC = "must contains 'kc'"; + + private static final String MUST_CONTAINS_KB = "must contains 'kb'"; + + private static final String MUST_CONTAINS_KA = "must contains 'ka'"; + + private NavigableMap navigableMap; + + public void setUp() throws IOException { + navigableMap = newDBCache().createTreeMap("test"); + } + + public void testLowerEntry() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + final Entry lowerEntry = navigableMap.lowerEntry("kb"); + assertEquals("bad lower entry value", "xx", lowerEntry.getValue()); + assertEquals("bad lower entry key", "ka", lowerEntry.getKey()); + } + + public void testLowerKey() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + assertEquals("bad lower key", "ka", navigableMap.lowerKey("kb")); + } + + public void testFloorEntry() { + navigableMap.put("ka", "xx"); + navigableMap.put("kc", "aa"); + navigableMap.put("kd", "zz"); + Entry floorEntry = navigableMap.floorEntry("ka"); + assertEquals("bad floor entry value", "xx", floorEntry.getValue()); + assertEquals("bad floor entry key", "ka", floorEntry.getKey()); + floorEntry = navigableMap.floorEntry("kb"); + assertEquals("bad floor entry value", "xx", 
floorEntry.getValue()); + assertEquals("bad floor entry key", "ka", floorEntry.getKey()); + } + + public void testFloorKey() { + navigableMap.put("ka", "xx"); + navigableMap.put("kc", "aa"); + navigableMap.put("kd", "zz"); + assertEquals("bad floor key", "ka", navigableMap.floorKey("ka")); + assertEquals("bad floor key", "ka", navigableMap.floorKey("kb")); + } + + public void testCeilingEntry() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kd", "zz"); + Entry ceilingEntry = navigableMap.ceilingEntry("kd"); + assertEquals("bad ceiling entry value", "zz", ceilingEntry.getValue()); + assertEquals("bad ceiling entry key", "kd", ceilingEntry.getKey()); + ceilingEntry = navigableMap.ceilingEntry("kc"); + assertEquals("bad ceiling entry value", "zz", ceilingEntry.getValue()); + assertEquals("bad ceiling entry key", "kd", ceilingEntry.getKey()); + } + + public void testCeilingKey() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kd", "zz"); + assertEquals("bad ceiling key", "kd", navigableMap.ceilingKey("kd")); + assertEquals("bad ceiling key", "kd", navigableMap.ceilingKey("kc")); + } + + public void testHigherEntry() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + final Entry higherEntry = navigableMap.higherEntry("kb"); + assertEquals("bad higher entry value", "zz", higherEntry.getValue()); + assertEquals("bad higher entry key", "kc", higherEntry.getKey()); + } + + public void testHigherKey() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + assertEquals("bad higher key", "kc", navigableMap.higherKey("kb")); + } + + public void testFirstEntry() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + final Entry firstEntry = navigableMap.firstEntry(); + assertEquals("bad first entry value", "xx", firstEntry.getValue()); + assertEquals(BAD_FIRST_ENTRY_KEY, "ka", firstEntry.getKey()); + } + + public void testLastEntry() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + final Entry lastEntry = navigableMap.lastEntry(); + assertEquals("bad last entry value", "zz", lastEntry.getValue()); + assertEquals("bad last entry key", "kc", lastEntry.getKey()); + } + + public void testPollFirstEntry() { + assertNull("must not have first entry", navigableMap.pollFirstEntry()); + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + assertEquals("must have 3 entries", 3, navigableMap.size()); + final Entry firstEntry = navigableMap.pollFirstEntry(); + assertNotNull("must have first entry", firstEntry); + assertEquals("bad first entry value", "xx", firstEntry.getValue()); + assertEquals(BAD_FIRST_ENTRY_KEY, "ka", firstEntry.getKey()); + assertEquals("must have 2 entries", 2, navigableMap.size()); + } + + public void testPollLastEntry() { + assertNull("must not have last entry", navigableMap.pollLastEntry()); + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + assertEquals("must have 3 entries", 3, navigableMap.size()); + final Entry lastEntry = navigableMap.pollLastEntry(); + assertNotNull("must have last entry", lastEntry); + assertEquals("bad last entry value", "zz", lastEntry.getValue()); + assertEquals("bad last entry key", "kc", lastEntry.getKey()); + assertEquals("must have 2 entries", 2, navigableMap.size()); + } + + // TODO implement this + // + // 
public void testDescendingMap() { + // navigableMap.put("ka", "xx"); + // navigableMap.put("kb", "aa"); + // navigableMap.put("kc", "zz"); + // final NavigableMap descendingMap = navigableMap + // .descendingMap(); + // + // assertEquals(BAD_SIZE, 3, descendingMap.size()); + // assertFalse(MUST_NOT_BE_EMPTY, descendingMap.isEmpty()); + // + // final Entry firstEntry = descendingMap.firstEntry(); + // assertEquals("bad first entry value", "zz", firstEntry.getValue()); + // assertEquals(BAD_FIRST_ENTRY_KEY, "kc", firstEntry.getKey()); + // + // final Entry lastEntry = descendingMap.lastEntry(); + // assertEquals("bad last entry value", "xx", lastEntry.getValue()); + // assertEquals("bad last entry key", "ka", lastEntry.getKey()); + // + // final Set> entrySet = descendingMap.entrySet(); + // final Iterator> iterator = entrySet.iterator(); + // assertTrue("must have first entry", iterator.hasNext()); + // assertEquals(BAD_FIRST_ENTRY_KEY, "kc", iterator.next().getKey()); + // assertTrue("must have second entry", iterator.hasNext()); + // assertEquals("bad second entry key", "kb", iterator.next().getKey()); + // assertTrue("must have third entry", iterator.hasNext()); + // assertEquals("bad third entry key", "ka", iterator.next().getKey()); + // assertFalse("must not have fourth entry", iterator.hasNext()); + // + // descendingMap.remove("kb"); + // assertEquals(BAD_SIZE, 2, descendingMap.size()); + // assertFalse(MUST_NOT_BE_EMPTY, descendingMap.isEmpty()); + // + // assertEquals(BAD_SIZE, 2, navigableMap.size()); + // assertFalse(MUST_NOT_BE_EMPTY, navigableMap.isEmpty()); + // assertTrue("must contains key 'ka'", navigableMap.containsKey("ka")); + // assertFalse("must not contains key 'kb'", navigableMap + // .containsKey("kb")); + // assertTrue("must contains key 'kc'", navigableMap.containsKey("kc")); + // } + + public void testNavigableKeySet() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + final NavigableSet navigableSet = navigableMap.navigableKeySet(); + assertEquals("bad first element", "ka", navigableSet.first()); + assertEquals("bad last element", "kc", navigableSet.last()); + assertTrue(MUST_CONTAINS_KA, navigableSet.contains("ka")); + assertTrue(MUST_CONTAINS_KB, navigableSet.contains("kb")); + assertTrue(MUST_CONTAINS_KC, navigableSet.contains("kc")); + + navigableSet.remove("kb"); + assertEquals(BAD_SIZE, 2, navigableMap.size()); + assertFalse(MUST_NOT_BE_EMPTY, navigableMap.isEmpty()); + assertTrue("must contains key 'ka'", navigableMap.containsKey("ka")); + assertFalse("must not contains key 'kb'", navigableMap.containsKey("kb")); + assertTrue("must contains key 'kc'", navigableMap.containsKey("kc")); + } + + // TODO implement this + // public void testDescendingKeySet() { + // navigableMap.put("ka", "xx"); + // navigableMap.put("kb", "aa"); + // navigableMap.put("kc", "zz"); + // final NavigableSet navigableSet = navigableMap + // .descendingKeySet(); + // assertEquals("bad first element", "kc", navigableSet.first()); + // assertEquals("bad last element", "ka", navigableSet.last()); + // assertTrue(MUST_CONTAINS_KA, navigableSet.contains("ka")); + // assertTrue(MUST_CONTAINS_KB, navigableSet.contains("kb")); + // assertTrue(MUST_CONTAINS_KC, navigableSet.contains("kc")); + // + // navigableSet.remove("kb"); + // assertEquals(BAD_SIZE, 2, navigableMap.size()); + // assertFalse(MUST_NOT_BE_EMPTY, navigableMap.isEmpty()); + // assertTrue("must contains key 'ka'", navigableMap.containsKey("ka")); + // assertFalse("must not 
contains key 'kb'", navigableMap + // .containsKey("kb")); + // assertTrue("must contains key 'kc'", navigableMap.containsKey("kc")); + // } + + public void testSubMap() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + navigableMap.put("kd", "uu"); + + SortedMap sortedMap = navigableMap.subMap("kb", "kd"); + assertFalse(MUST_NOT_CONTAINS_KA, sortedMap.containsKey("ka")); + assertTrue(MUST_CONTAINS_KB, sortedMap.containsKey("kb")); + assertTrue(MUST_CONTAINS_KC, sortedMap.containsKey("kc")); + assertFalse(MUST_NOT_CONTAINS_KD, sortedMap.containsKey("kd")); + + sortedMap = navigableMap.subMap("ka", false, "kc", true); + assertFalse(MUST_NOT_CONTAINS_KA, sortedMap.containsKey("ka")); + assertTrue(MUST_CONTAINS_KB, sortedMap.containsKey("kb")); + assertTrue(MUST_CONTAINS_KC, sortedMap.containsKey("kc")); + assertFalse(MUST_NOT_CONTAINS_KD, sortedMap.containsKey("kd")); + } + + public void testHeadMap() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + navigableMap.put("kd", "uu"); + + SortedMap sortedMap = navigableMap.headMap("kc"); + assertTrue(MUST_CONTAINS_KA, sortedMap.containsKey("ka")); + assertTrue(MUST_CONTAINS_KB, sortedMap.containsKey("kb")); + assertFalse("must not contains 'kc'", sortedMap.containsKey("kc")); + assertFalse(MUST_NOT_CONTAINS_KD, sortedMap.containsKey("kd")); + + sortedMap = navigableMap.headMap("kb", true); + assertTrue(MUST_CONTAINS_KA, sortedMap.containsKey("ka")); + assertTrue(MUST_CONTAINS_KB, sortedMap.containsKey("kb")); + assertFalse("must not contains 'kc'", sortedMap.containsKey("kc")); + assertFalse(MUST_NOT_CONTAINS_KD, sortedMap.containsKey("kd")); + } + + public void testTailMap() { + navigableMap.put("ka", "xx"); + navigableMap.put("kb", "aa"); + navigableMap.put("kc", "zz"); + navigableMap.put("kd", "uu"); + + SortedMap sortedMap = navigableMap.tailMap("kc"); + assertFalse(MUST_NOT_CONTAINS_KA, sortedMap.containsKey("ka")); + assertFalse("must not contains 'kb'", sortedMap.containsKey("kb")); + assertTrue(MUST_CONTAINS_KC, sortedMap.containsKey("kc")); + assertTrue("must contains 'kd'", sortedMap.containsKey("kd")); + + sortedMap = navigableMap.tailMap("kb", false); + assertFalse(MUST_NOT_CONTAINS_KA, sortedMap.containsKey("ka")); + assertFalse("must not contains 'kb'", sortedMap.containsKey("kb")); + assertTrue(MUST_CONTAINS_KC, sortedMap.containsKey("kc")); + assertTrue("must contains 'kd'", sortedMap.containsKey("kd")); + } +} Index: graph/src/test/java/org/apache/jdbm/PageFileTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/PageFileTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/PageFileTest.java (Arbeitskopie) @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.File; + +/** + * This class contains all Unit tests for {@link PageFile}. + */ +final public class PageFileTest extends TestCaseWithTestFile { + + public static void deleteFile(String filename) { + File file = new File(filename); + + if (file.exists()) { + try { + file.delete(); + } catch (Exception except) { + except.printStackTrace(); + } + if (file.exists()) { + System.out.println("WARNING: Cannot delete file: " + file); + } + } + } + + /** + * Test constructor + */ + public void testCtor() throws Exception { + PageFile file = newRecordFile(); + file.close(); + } + + /** + * Test addition of record 0 + */ + public void testAddZero() throws Exception { + String f = newTestFile(); + PageFile file = new PageFile(f); + PageIo data = file.get(0); + data.writeByte(14, (byte) 'b'); + file.release(0, true); + file.close(); + file = new PageFile(f); + data = file.get(0); + assertEquals((byte) 'b', data.readByte(14)); + file.release(0, false); + file.close(); + } + + /** + * Test addition of a number of records, with holes. + */ + public void testWithHoles() throws Exception { + String f = newTestFile(); + PageFile file = new PageFile(f); + + // Write recid 0, byte 0 with 'b' + PageIo data = file.get(0); + data.writeByte(0, (byte) 'b'); + file.release(0, true); + + // Write recid 10, byte 10 with 'c' + data = file.get(10); + data.writeByte(10, (byte) 'c'); + file.release(10, true); + + // Write recid 5, byte 5 with 'e' + data = file.get(5); + data.writeByte(5, (byte) 'e'); + file.release(5, false); + + file.close(); + + file = new PageFile(f); + data = file.get(0); + assertEquals("0 = b", (byte) 'b', data.readByte(0)); + file.release(0, false); + + data = file.get(5); + assertEquals("5 = 0", (byte) 'e', data.readByte(5)); + file.release(5, false); + + data = file.get(10); + assertEquals("10 = c", (byte) 'c', data.readByte(10)); + file.release(10, false); + + file.close(); + } + + /** + * Test wrong release + */ + public void testWrongRelease() throws Exception { + PageFile file = newRecordFile(); + + // Write recid 0, byte 0 with 'b' + PageIo data = file.get(0); + data.writeByte(0, (byte) 'b'); + try { + file.release(1, true); + fail("expected exception"); + } catch (NullPointerException except) { + // ignore + } + file.release(0, false); + + file.close(); + + // @alex retry to open the file + /* + * file = new PageFile( testFileName ); PageManager pm = new PageManager( + * file ); pm.close(); file.close(); + */ + } + +} Index: graph/src/test/java/org/apache/jdbm/SerializationTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/SerializationTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/SerializationTest.java (Arbeitskopie) @@ -0,0 +1,483 @@ +/******************************************************************************* + * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ +package org.apache.jdbm; + +import static java.util.Arrays.asList; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectInputStream; +import java.io.ObjectOutput; +import java.io.ObjectOutputStream; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.AbstractMap.SimpleEntry; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Hashtable; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Properties; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.UUID; +import java.util.Vector; + +import junit.framework.TestCase; + +@SuppressWarnings("unchecked") +public class SerializationTest extends TestCase { + + Serialization ser; + + public SerializationTest() throws IOException { + ser = new Serialization(); + } + + public void testInt() throws IOException, ClassNotFoundException { + int[] vals = { Integer.MIN_VALUE, -Short.MIN_VALUE * 2, + -Short.MIN_VALUE + 1, -Short.MIN_VALUE, -10, -9, -8, -7, -6, -5, -4, + -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 127, 254, 255, 256, + Short.MAX_VALUE, Short.MAX_VALUE + 1, Short.MAX_VALUE * 2, + Integer.MAX_VALUE }; + for (int i : vals) { + byte[] buf = ser.serialize(i); + Object l2 = ser.deserialize(buf); + assertTrue(l2.getClass() == Integer.class); + assertEquals(l2, i); + } + } + + public void testShort() throws IOException, ClassNotFoundException { + short[] vals = { (short) (-Short.MIN_VALUE + 1), (short) -Short.MIN_VALUE, + -10, -9, -8, -7, -6, -5, -4, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 127, + 254, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE - 1, Short.MAX_VALUE }; + for (short i : vals) { + byte[] buf = ser.serialize(i); + Object l2 = ser.deserialize(buf); + assertTrue(l2.getClass() == Short.class); + assertEquals(l2, i); + } + } + + public void testDouble() throws IOException, ClassNotFoundException { + double[] vals = { 1f, 0f, -1f, Math.PI, 255, 256, Short.MAX_VALUE, + Short.MAX_VALUE + 1, -100 }; + for (double i : vals) { + byte[] buf = ser.serialize(i); + Object l2 = ser.deserialize(buf); + assertTrue(l2.getClass() == Double.class); + assertEquals(l2, i); + } + } + + public void testFloat() throws IOException, ClassNotFoundException { + float[] vals = { 1f, 0f, -1f, (float) Math.PI, 255, 256, Short.MAX_VALUE, + Short.MAX_VALUE + 1, -100 }; + for (float i : vals) { + byte[] buf = ser.serialize(i); + Object l2 = ser.deserialize(buf); + assertTrue(l2.getClass() == Float.class); + assertEquals(l2, i); + } + } + + public void testChar() throws IOException, ClassNotFoundException { + char[] vals = { 'a', ' ' }; + for (char i : vals) { + byte[] buf = ser.serialize(i); + Object l2 = ser.deserialize(buf); + assertTrue(l2.getClass() == Character.class); + assertEquals(l2, i); + } + } + + public void testLong() throws IOException, ClassNotFoundException { + long[] vals = { Long.MIN_VALUE, Integer.MIN_VALUE, Integer.MIN_VALUE - 1, + Integer.MIN_VALUE + 1, -Short.MIN_VALUE * 2, -Short.MIN_VALUE + 1, + -Short.MIN_VALUE, -10, -9, -8, -7, -6, -5, -4, -1, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 
127, 254, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE + 1, + Short.MAX_VALUE * 2, Integer.MAX_VALUE, Integer.MAX_VALUE + 1, + Long.MAX_VALUE }; + for (long i : vals) { + byte[] buf = ser.serialize(i); + Object l2 = ser.deserialize(buf); + assertTrue(l2.getClass() == Long.class); + assertEquals(l2, i); + } + } + + public void testBoolean1() throws IOException, ClassNotFoundException { + byte[] buf = ser.serialize(true); + Object l2 = ser.deserialize(buf); + assertTrue(l2.getClass() == Boolean.class); + assertEquals(l2, true); + + byte[] buf2 = ser.serialize(false); + Object l22 = ser.deserialize(buf2); + assertTrue(l22.getClass() == Boolean.class); + assertEquals(l22, false); + + } + + public void testString() throws IOException, ClassNotFoundException { + byte[] buf = ser.serialize("Abcd"); + String l2 = (String) ser.deserialize(buf); + assertEquals(l2, "Abcd"); + } + + public void testBigString() throws IOException, ClassNotFoundException { + String bigString = ""; + for (int i = 0; i < 1e4; i++) + bigString += i % 10; + byte[] buf = ser.serialize(bigString); + String l2 = (String) ser.deserialize(buf); + assertEquals(l2, bigString); + } + + public void testObject() throws ClassNotFoundException, IOException { + SimpleEntry a = new SimpleEntry(1, "11"); + byte[] buf = ser.serialize(a); + SimpleEntry l2 = (SimpleEntry) ser.deserialize(buf); + assertEquals(l2, a); + } + + public void testNoArgumentConstructorInJavaSerialization() + throws ClassNotFoundException, IOException { + SimpleEntry a = new SimpleEntry(1, "11"); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + new ObjectOutputStream(out).writeObject(a); + ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream( + out.toByteArray())); + SimpleEntry a2 = (SimpleEntry) in.readObject(); + assertEquals(a, a2); + } + + public void testArrayList() throws ClassNotFoundException, IOException { + Collection c = new ArrayList(); + for (int i = 0; i < 200; i++) + c.add(i); + assertEquals(c, ser.deserialize(ser.serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, ser.deserialize(ser.serialize(c))); + } + + public void testLinkedList() throws ClassNotFoundException, IOException { + Collection c = new java.util.LinkedList(); + for (int i = 0; i < 200; i++) + c.add(i); + assertEquals(c, ser.deserialize(ser.serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, ser.deserialize(ser.serialize(c))); + } + + public void testVector() throws ClassNotFoundException, IOException { + Collection c = new Vector(); + for (int i = 0; i < 200; i++) + c.add(i); + assertEquals(c, ser.deserialize(ser.serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, ser.deserialize(ser.serialize(c))); + } + + public void testTreeSet() throws ClassNotFoundException, IOException { + Collection c = new TreeSet(); + for (int i = 0; i < 200; i++) + c.add(i); + assertEquals(c, ser.deserialize(ser.serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, ser.deserialize(ser.serialize(c))); + } + + public void testHashSet() throws ClassNotFoundException, IOException { + Collection c = new HashSet(); + for (int i = 0; i < 200; i++) + c.add(i); + assertEquals(c, ser.deserialize(ser.serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, ser.deserialize(ser.serialize(c))); + } + + public void testLinkedHashSet() throws ClassNotFoundException, IOException { + Collection c = new LinkedHashSet(); + for (int i = 0; i < 200; i++) + c.add(i); + 
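// round-trip through the serializer and check equality with the original +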
assertEquals(c, ser.deserialize(ser.serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, ser.deserialize(ser.serialize(c))); + } + + public void testHashMap() throws ClassNotFoundException, IOException { + Map c = new HashMap(); + for (int i = 0; i < 200; i++) + c.put(i, i + 10000); + assertEquals(c, ser.deserialize(ser.serialize(c))); + for (int i = 0; i < 2000; i++) + c.put(i, i + 10000); + assertEquals(c, ser.deserialize(ser.serialize(c))); + } + + public void testTreeMap() throws ClassNotFoundException, IOException { + Map c = new TreeMap(); + for (int i = 0; i < 200; i++) + c.put(i, i + 10000); + assertEquals(c, ser.deserialize(ser.serialize(c))); + for (int i = 0; i < 2000; i++) + c.put(i, i + 10000); + assertEquals(c, ser.deserialize(ser.serialize(c))); + } + + public void testLinkedHashMap() throws ClassNotFoundException, IOException { + Map c = new LinkedHashMap(); + for (int i = 0; i < 200; i++) + c.put(i, i + 10000); + assertEquals(c, ser.deserialize(ser.serialize(c))); + for (int i = 0; i < 2000; i++) + c.put(i, i + 10000); + assertEquals(c, ser.deserialize(ser.serialize(c))); + } + + public void testHashtable() throws ClassNotFoundException, IOException { + Map c = new Hashtable(); + for (int i = 0; i < 200; i++) + c.put(i, i + 10000); + assertEquals(c, ser.deserialize(ser.serialize(c))); + for (int i = 0; i < 2000; i++) + c.put(i, i + 10000); + assertEquals(c, ser.deserialize(ser.serialize(c))); + } + + public void testProperties() throws ClassNotFoundException, IOException { + Properties c = new Properties(); + for (int i = 0; i < 200; i++) + c.put(i, i + 10000); + assertEquals(c, ser.deserialize(ser.serialize(c))); + for (int i = 0; i < 2000; i++) + c.put(i, i + 10000); + assertEquals(c, ser.deserialize(ser.serialize(c))); + } + + public void testClass() throws IOException, ClassNotFoundException { + byte[] buf = ser.serialize(String.class); + Class l2 = (Class) ser.deserialize(buf); + assertEquals(l2, String.class); + } + + public void testClass2() throws IOException, ClassNotFoundException { + byte[] buf = ser.serialize(long[].class); + Class l2 = (Class) ser.deserialize(buf); + assertEquals(l2, long[].class); + } + + public void testUnicodeString() throws ClassNotFoundException, IOException { + String s = "Ciudad BolĂ­va"; + byte[] buf = ser.serialize(s); + assertTrue("text is not unicode", buf.length != s.length()); + Object l2 = ser.deserialize(buf); + assertEquals(l2, s); + } + + public void testSerializationHeader() throws IOException { + ByteArrayOutputStream b = new ByteArrayOutputStream(); + new java.io.ObjectOutputStream(b).writeObject("lalala"); + ByteArrayInputStream i = new ByteArrayInputStream(b.toByteArray()); + final int header1 = i.read(); + + ByteArrayOutputStream b2 = new ByteArrayOutputStream(); + new java.io.ObjectOutputStream(b2).writeObject(new Integer(1)); + ByteArrayInputStream i2 = new ByteArrayInputStream(b2.toByteArray()); + final int header2 = i2.read(); + + assertEquals(header1, header2); + assertEquals(header1, SerializationHeader.JAVA_SERIALIZATION); + } + + public void testPackedLongCollection() throws ClassNotFoundException, + IOException { + ArrayList l1 = new ArrayList(); + l1.add(0L); + l1.add(1L); + l1.add(0L); + assertEquals(l1, ser.deserialize(ser.serialize(l1))); + l1.add(-1L); + assertEquals(l1, ser.deserialize(ser.serialize(l1))); + } + + public void testNegativeLongsArray() throws ClassNotFoundException, + IOException { + long[] l = new long[] { -12 }; + Object deserialize = 
ser.deserialize(ser.serialize(l)); + assertTrue(Arrays.equals(l, (long[]) deserialize)); + } + + public void testNegativeIntArray() throws ClassNotFoundException, IOException { + int[] l = new int[] { -12 }; + Object deserialize = ser.deserialize(ser.serialize(l)); + assertTrue(Arrays.equals(l, (int[]) deserialize)); + } + + public void testNegativeShortArray() throws ClassNotFoundException, + IOException { + short[] l = new short[] { -12 }; + Object deserialize = ser.deserialize(ser.serialize(l)); + assertTrue(Arrays.equals(l, (short[]) deserialize)); + } + + public void testBooleanArray() throws ClassNotFoundException, IOException { + boolean[] l = new boolean[] { true, false }; + Object deserialize = ser.deserialize(ser.serialize(l)); + assertTrue(Arrays.equals(l, (boolean[]) deserialize)); + } + + public void testDoubleArray() throws ClassNotFoundException, IOException { + double[] l = new double[] { Math.PI, 1D }; + Object deserialize = ser.deserialize(ser.serialize(l)); + assertTrue(Arrays.equals(l, (double[]) deserialize)); + } + + public void testFloatArray() throws ClassNotFoundException, IOException { + float[] l = new float[] { 1F, 1.234235F }; + Object deserialize = ser.deserialize(ser.serialize(l)); + assertTrue(Arrays.equals(l, (float[]) deserialize)); + } + + public void testByteArray() throws ClassNotFoundException, IOException { + byte[] l = new byte[] { 1, 34, -5 }; + Object deserialize = ser.deserialize(ser.serialize(l)); + assertTrue(Arrays.equals(l, (byte[]) deserialize)); + } + + public void testCharArray() throws ClassNotFoundException, IOException { + char[] l = new char[] { '1', 'a', '&' }; + Object deserialize = ser.deserialize(ser.serialize(l)); + assertTrue(Arrays.equals(l, (char[]) deserialize)); + } + + public void testDate() throws IOException, ClassNotFoundException { + Date d = new Date(6546565565656L); + assertEquals(d, ser.deserialize(ser.serialize(d))); + d = new Date(System.currentTimeMillis()); + assertEquals(d, ser.deserialize(ser.serialize(d))); + } + + public void testBigDecimal() throws IOException, ClassNotFoundException { + BigDecimal d = new BigDecimal("445656.7889889895165654423236"); + assertEquals(d, ser.deserialize(ser.serialize(d))); + d = new BigDecimal("-53534534534534445656.7889889895165654423236"); + assertEquals(d, ser.deserialize(ser.serialize(d))); + } + + public void testBigInteger() throws IOException, ClassNotFoundException { + BigInteger d = new BigInteger("4456567889889895165654423236"); + assertEquals(d, ser.deserialize(ser.serialize(d))); + d = new BigInteger("-535345345345344456567889889895165654423236"); + assertEquals(d, ser.deserialize(ser.serialize(d))); + } + + public void testUUID() throws IOException, ClassNotFoundException { + // try a bunch of UUIDs. 
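+    // (each one is wrapped in a SimpleEntry, so nested-object handling is
+    // exercised as well)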
+ for (int i = 0; i < 1000; i++) { + UUID uuid = UUID.randomUUID(); + SimpleEntry a = new SimpleEntry(uuid, "11"); + byte[] buf = ser.serialize(a); + SimpleEntry b = (SimpleEntry) ser.deserialize(buf); + assertEquals(b, a); + } + } + + public void testLocale() throws Exception { + assertEquals(Locale.FRANCE, ser.deserialize(ser.serialize(Locale.FRANCE))); + assertEquals(Locale.CANADA_FRENCH, + ser.deserialize(ser.serialize(Locale.CANADA_FRENCH))); + assertEquals(Locale.SIMPLIFIED_CHINESE, + ser.deserialize(ser.serialize(Locale.SIMPLIFIED_CHINESE))); + + } + + enum Order { + ASCENDING, DESCENDING + } + + public void testEnum() throws Exception { + Order o = Order.ASCENDING; + o = (Order) ser.deserialize(ser.serialize(o)); + assertEquals(o, Order.ASCENDING); + assertEquals(o.ordinal(), Order.ASCENDING.ordinal()); + assertEquals(o.name(), Order.ASCENDING.name()); + + o = Order.DESCENDING; + o = (Order) ser.deserialize(ser.serialize(o)); + assertEquals(o, Order.DESCENDING); + assertEquals(o.ordinal(), Order.DESCENDING.ordinal()); + assertEquals(o.name(), Order.DESCENDING.name()); + + } + + static class Extr implements Externalizable { + + int aaa = 11; + String l = "agfa"; + + public void writeExternal(ObjectOutput out) throws IOException { + out.writeObject(l); + out.writeInt(aaa); + + } + + public void readExternal(ObjectInput in) throws IOException, + ClassNotFoundException { + l = (String) in.readObject(); + aaa = in.readInt() + 1; + + } + } + + public void testExternalizable() throws Exception { + Extr e = new Extr(); + e.aaa = 15; + e.l = "pakla"; + + e = (Extr) ser.deserialize(ser.serialize(e)); + assertEquals(e.aaa, 16); // was incremented during serialization + assertEquals(e.l, "pakla"); + + } + + public void testObjectArrayArray() throws IOException, ClassNotFoundException { + Object[][] arr = new Object[][] { { (int) 25, (short) 20, (short) 32, + (short) 16, (short) 20 }, }; + Object[][] arr2 = (Object[][]) ser.deserialize(ser.serialize(arr)); + + for (int i = 0; i < arr.length; i++) + assertEquals(asList(arr[i]), asList(arr2[i])); + } + +} Index: graph/src/test/java/org/apache/jdbm/PageTransactionManagerTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/PageTransactionManagerTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/PageTransactionManagerTest.java (Arbeitskopie) @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.File; + +/** + * This class contains all Unit tests for {@link PageTransactionManager}. TODO + * sort out this testcase + */ +public class PageTransactionManagerTest extends TestCaseWithTestFile { + + String file = newTestFile(); + + /** + * Test constructor. 
Oops - can only be done indirectly :-) + */ + public void testCtor() throws Exception { + PageFile file2 = new PageFile(file); + + file2.forceClose(); + } + + /** + * Test recovery + */ + public void XtestRecovery() throws Exception { + PageFile file1 = new PageFile(file); + + // Do three transactions. + for (int i = 0; i < 3; i++) { + PageIo node = file1.get(i); + node.setDirty(); + file1.release(node); + file1.commit(); + } + assertDataSizeEquals("len1", 0); + assertLogSizeNotZero("len1"); + + file1.forceClose(); + + // Leave the old record file in flux, and open it again. + // The second instance should start recovery. + PageFile file2 = new PageFile(file); + + assertDataSizeEquals("len2", 3 * Storage.PAGE_SIZE); + assertLogSizeEquals("len2", 8); + + file2.forceClose(); + + // assure we can recover this log file + PageFile file3 = new PageFile(file); + + file3.forceClose(); + } + + /** + * Test background synching + */ + public void XtestSynching() throws Exception { + PageFile file1 = new PageFile(file); + + // Do enough transactions to fill the first slot + int txnCount = 1; + for (int i = 0; i < txnCount; i++) { + PageIo node = file1.get(i); + node.setDirty(); + file1.release(node); + file1.commit(); + } + file1.forceClose(); + + // The data file now has the first slotfull + assertDataSizeEquals("len1", 1 * Storage.PAGE_SIZE + 6); + assertLogSizeNotZero("len1"); + + // Leave the old record file in flux, and open it again. + // The second instance should start recovery. + PageFile file2 = new PageFile(file); + + assertDataSizeEquals("len2", txnCount * Storage.PAGE_SIZE); + assertLogSizeEquals("len2", 8); + + file2.forceClose(); + } + + // Helpers + + void assertDataSizeEquals(String msg, long size) { + assertEquals(msg + " data size", size, new File(file + ".t").length()); + } + + void assertLogSizeEquals(String msg, long size) { + assertEquals(msg + " log size", size, new File(file + + StorageDisk.transaction_log_file_extension).length()); + } + + void assertLogSizeNotZero(String msg) { + assertTrue(msg + " log size", new File(file + + StorageDisk.transaction_log_file_extension).length() != 0); + } + +} Index: graph/src/test/java/org/apache/jdbm/LongTreeMap.java =================================================================== --- graph/src/test/java/org/apache/jdbm/LongTreeMap.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/LongTreeMap.java (Arbeitskopie) @@ -0,0 +1,510 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jdbm; + +import java.util.ConcurrentModificationException; +import java.util.Iterator; +import java.util.Map; +import java.util.NoSuchElementException; + +/** + * B-Tree Map which uses primitive long as key. 
Main advantage is that a new
+ * instance of Long does not have to be created for each lookup.
+ * <p>
+ * This code comes from Android, which in turn comes from Apache Harmony. This
+ * class was modified to use primitive longs and stripped down to consume less
+ * space.
+ * <p>
+ * Author of JDBM modifications: Jan Kotek
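+ * <p>
+ * A minimal usage sketch (assuming String values):
+ * <pre>
+ *   LongTreeMap&lt;String&gt; map = new LongTreeMap&lt;String&gt;();
+ *   map.put(42L, "answer");               // primitive long key, no boxing
+ *   String v = map.get(42L);
+ *   LongIterator it = map.keyIterator();
+ *   while (it.hasNext())
+ *     System.out.println(it.nextLong());  // keys come out in ascending order
+ *   map.remove(42L);
+ * </pre>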
+ * <p>
+ * It is much slower than LongKeyChainedHashMap, but may be useful in the
+ * future because of its better license.
+ *
+ * @param <V>
+ */
+public class LongTreeMap<V> {
+
+  private Entry<V> root;
+
+  private int size;
+
+  /**
+   * counts modifications so that iterators can fail fast with a
+   * ConcurrentModificationException
+   */
+  private transient int modCount;
+
+  /**
+   * Returns the value of the mapping with the specified key.
+   *
+   * @param key the key.
+   * @return the value of the mapping with the specified key.
+   * @throws ClassCastException if the key cannot be compared with the keys in
+   *           this map.
+   * @throws NullPointerException if the key is {@code null} and the comparator
+   *           cannot handle {@code null}.
+   * @since Android 1.0
+   */
+  public V get(long key) {
+    Entry<V> node = find(key);
+    if (node != null) {
+      return node.value;
+    }
+    return null;
+  }
+
+  /**
+   * Maps the specified key to the specified value.
+   *
+   * @param key the key.
+   * @param value the value.
+   * @return the value of any previous mapping with the specified key or
+   *         {@code null} if there was no mapping.
+   * @throws ClassCastException if the specified key cannot be compared with
+   *           the keys in this map.
+   * @throws NullPointerException if the specified key is {@code null} and the
+   *           comparator cannot handle {@code null} keys.
+   * @since Android 1.0
+   */
+  public V put(long key, V value) {
+    Entry<V> entry = rbInsert(key);
+    V result = entry.value;
+    entry.value = value;
+    return result;
+  }
+
+  /**
+   * Removes the mapping with the specified key from this map.
+   *
+   * @param key the key of the mapping to remove.
+   * @return the value of the removed mapping or {@code null} if no mapping
+   *         for the specified key was found.
+   * @throws ClassCastException if the specified key cannot be compared with
+   *           the keys in this map.
+   * @throws NullPointerException if the specified key is {@code null} and the
+   *           comparator cannot handle {@code null} keys.
+   * @since Android 1.0
+   */
+  public V remove(long key) {
+    if (size == 0) {
+      return null;
+    }
+    Entry<V> node = find(key);
+    if (node == null) {
+      return null;
+    }
+    V result = node.value;
+    rbDelete(node);
+    return result;
+  }
+
+  /**
+   * Removes all mappings from this TreeMap, leaving it empty.
+   *
+   * @see Map#isEmpty()
+   * @see #size()
+   * @since Android 1.0
+   */
+  public void clear() {
+    root = null;
+    size = 0;
+    modCount++;
+  }
+
+  /**
+   * Entry is an internal class which is used to hold the entries of a TreeMap.
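+   * Each node stores the primitive key, the value, links to its parent and
+   * children, and its red-black color bit.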
+ */ + private static class Entry { + Entry parent, left, right; + + long key; + V value; + + boolean color; + + Entry(long key, V value) { + this.key = key; + this.value = value; + } + + public String toString() { + return super.toString() + " - " + key + " - " + value; + } + } + + /** + * @return iterator over values in map + */ + public Iterator valuesIterator() { + return new ValueIterator(); + } + + /** + * @return iterator over keys in map + */ + public LongIterator keyIterator() { + return new LongIterator(); + } + + private class MapIterator { + + int expectedModCount; + Entry node; + Entry lastNode; + + MapIterator() { + expectedModCount = modCount; + if (root != null) + node = minimum(root); + } + + public boolean hasNext() { + return node != null; + } + + final public void remove() { + if (expectedModCount == modCount) { + if (lastNode != null) { + rbDelete(lastNode); + lastNode = null; + expectedModCount++; + } else { + throw new IllegalStateException(); + } + } else { + throw new ConcurrentModificationException(); + } + } + + final void makeNext() { + if (expectedModCount != modCount) { + throw new ConcurrentModificationException(); + } else if (node == null) { + throw new NoSuchElementException(); + } + lastNode = node; + node = successor(node); + } + } + + private class ValueIterator extends MapIterator implements Iterator { + public V next() { + makeNext(); + return lastNode.value; + } + } + + public class LongIterator extends MapIterator implements Iterator { + public Long next() { + makeNext(); + return lastNode.key; + } + + public long nextLong() { + makeNext(); + return lastNode.key; + } + + } + + public boolean isEmpty() { + return size == 0; + } + + public int size() { + return size; + } + + public String toString() { + String s = this.getClass().getName(); + s += "["; + LongIterator iter = keyIterator(); + boolean first = true; + + while (iter.hasNext()) { + if (!first) { + s += ", "; + } + first = false; + long k = iter.nextLong(); + s += k + "=" + get(k); + } + s += "]"; + return s; + } + + private Entry find(long object) { + Entry x = root; + while (x != null) { + // result = object != null ? object.compareTo(x.key) : comparator + // .compare(key, x.key); + // if (result == 0) { + // return x; + // } + // x = result < 0 ? x.left : x.right; + if (object == x.key) + return x; + x = object < x.key ? x.left : x.right; + } + return null; + } + + private Entry minimum(Entry x) { + while (x.left != null) { + x = x.left; + } + return x; + } + + Entry successor(Entry x) { + if (x.right != null) { + return minimum(x.right); + } + Entry y = x.parent; + while (y != null && x == y.right) { + x = y; + y = y.parent; + } + return y; + } + + void rbDelete(Entry z) { + Entry y = z.left == null || z.right == null ? z : successor(z); + Entry x = y.left != null ? 
y.left : y.right; + if (x != null) { + x.parent = y.parent; + } + if (y.parent == null) { + root = x; + } else if (y == y.parent.left) { + y.parent.left = x; + } else { + y.parent.right = x; + } + modCount++; + if (y != z) { + z.key = y.key; + z.value = y.value; + } + if (!y.color && root != null) { + if (x == null) { + fixup(y.parent); + } else { + fixup(x); + } + } + size--; + } + + private void fixup(Entry x) { + Entry w; + while (x != root && !x.color) { + if (x == x.parent.left) { + w = x.parent.right; + if (w == null) { + x = x.parent; + continue; + } + if (w.color) { + w.color = false; + x.parent.color = true; + leftRotate(x.parent); + w = x.parent.right; + if (w == null) { + x = x.parent; + continue; + } + } + if ((w.left == null || !w.left.color) + && (w.right == null || !w.right.color)) { + w.color = true; + x = x.parent; + } else { + if (w.right == null || !w.right.color) { + w.left.color = false; + w.color = true; + rightRotate(w); + w = x.parent.right; + } + w.color = x.parent.color; + x.parent.color = false; + w.right.color = false; + leftRotate(x.parent); + x = root; + } + } else { + w = x.parent.left; + if (w == null) { + x = x.parent; + continue; + } + if (w.color) { + w.color = false; + x.parent.color = true; + rightRotate(x.parent); + w = x.parent.left; + if (w == null) { + x = x.parent; + continue; + } + } + if ((w.left == null || !w.left.color) + && (w.right == null || !w.right.color)) { + w.color = true; + x = x.parent; + } else { + if (w.left == null || !w.left.color) { + w.right.color = false; + w.color = true; + leftRotate(w); + w = x.parent.left; + } + w.color = x.parent.color; + x.parent.color = false; + w.left.color = false; + rightRotate(x.parent); + x = root; + } + } + } + x.color = false; + } + + private void leftRotate(Entry x) { + Entry y = x.right; + x.right = y.left; + if (y.left != null) { + y.left.parent = x; + } + y.parent = x.parent; + if (x.parent == null) { + root = y; + } else { + if (x == x.parent.left) { + x.parent.left = y; + } else { + x.parent.right = y; + } + } + y.left = x; + x.parent = y; + } + + private void rightRotate(Entry x) { + Entry y = x.left; + x.left = y.right; + if (y.right != null) { + y.right.parent = x; + } + y.parent = x.parent; + if (x.parent == null) { + root = y; + } else { + if (x == x.parent.right) { + x.parent.right = y; + } else { + x.parent.left = y; + } + } + y.right = x; + x.parent = y; + } + + private Entry rbInsert(long object) { + boolean smaller = false; + Entry y = null; + if (size != 0) { + Entry x = root; + while (x != null) { + y = x; + // result = key != null ? key.compareTo(x.key) : comparator + // .compare(object, x.key); + // if (result == 0) { + // return x; + // } + // x = result < 0 ? 
x.left : x.right; + if (object == x.key) + return x; + if (object < x.key) { + x = x.left; + smaller = true; + } else { + x = x.right; + smaller = false; + } + } + } + + size++; + modCount++; + Entry z = new Entry(object, null); + if (y == null) { + return root = z; + } + z.parent = y; + if (smaller) { + y.left = z; + } else { + y.right = z; + } + balance(z); + return z; + } + + void balance(Entry x) { + Entry y; + x.color = true; + while (x != root && x.parent.color) { + if (x.parent == x.parent.parent.left) { + y = x.parent.parent.right; + if (y != null && y.color) { + x.parent.color = false; + y.color = false; + x.parent.parent.color = true; + x = x.parent.parent; + } else { + if (x == x.parent.right) { + x = x.parent; + leftRotate(x); + } + x.parent.color = false; + x.parent.parent.color = true; + rightRotate(x.parent.parent); + } + } else { + y = x.parent.parent.left; + if (y != null && y.color) { + x.parent.color = false; + y.color = false; + x.parent.parent.color = true; + x = x.parent.parent; + } else { + if (x == x.parent.left) { + x = x.parent; + rightRotate(x); + } + x.parent.color = false; + x.parent.parent.color = true; + leftRotate(x.parent.parent); + } + } + } + root.color = false; + } + +} Index: graph/src/test/java/org/apache/jdbm/TestStress.java =================================================================== --- graph/src/test/java/org/apache/jdbm/TestStress.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/TestStress.java (Arbeitskopie) @@ -0,0 +1,215 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.util.Random; + +/** + * This class contains stress tests for this package. 
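+ * A DBStore is driven with a random mix of inserts, updates, deletes,
+ * fetches, and root get/set operations, and the store is closed and
+ * re-opened several times along the way to exercise flushing and recovery.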
+ */ +public class TestStress extends TestCaseWithTestFile { + + // test parameters + final int RECORDS = 10000; + final int MAXSIZE = 500; + final int ROUNDS = 1 * 1000 * 1000; + + final int RPPROMILLE = ROUNDS / 1000; + + Random rnd = new Random(42); + + // holder for record data so we can compare + class RecordData { + long rowid; + int size; + byte b; + + RecordData(long rowid, int size, byte b) { + this.rowid = rowid; + this.size = size; + this.b = b; + } + + public String toString() { + return "slot(" + rowid + ",sz=" + size + ",b=" + b + ")"; + } + } + + private int getRandomAllocatedSlot(RecordData[] d) { + int slot = rnd.nextInt(RECORDS); + while (d[slot] == null) { + slot++; + if (slot == RECORDS) + slot = 0; // wrap + } + return slot; + } + + // holder for root records + long[] roots = new long[Magic.FILE_HEADER_NROOTS]; + + private int getRandomAllocatedRoot() { + int slot = rnd.nextInt(Magic.FILE_HEADER_NROOTS); + while (roots[slot] == 0) { + slot++; + if (slot == Magic.FILE_HEADER_NROOTS) + slot = 0; // wrap + } + return slot; + } + + /** + * Test basics + */ + public void testBasics() throws Exception { + + String file = newTestFile(); + DBStore db = new DBStore(file, false, false, false); + + // as this code is meant to test data structure calculcations + // and stuff like that, we may want to disable transactions + // that just slow us down. + // mgr.disableTransactions(); + + RecordData[] d = new RecordData[RECORDS]; + int recordCount = 0, rootCount = 0; + int inserts = 0, updates = 0, deletes = 0, fetches = 0; + int rootgets = 0, rootsets = 0; + int slot = -1; + + try { + + for (int i = 0; i < ROUNDS; i++) { + if ((i % RPPROMILLE) == 0) + System.out.print("\rComplete: " + i / RPPROMILLE + "/1000th"); + + // close and re-open a couple of times during the + // test, in order to check flushing etcetera. + if ((i % (ROUNDS / 5)) == 0) { + System.out.print(" (reopened at round " + i / RPPROMILLE + ")"); + db.close(); + db = new DBStore(file, false, false, false); + // db.disableTransactions(); + } + + // generate a random number and assign ranges to operations: + // 0-10 = insert, 20 = delete, 30-50 = update, 51 = set root, + // 52 = get root, rest = fetch. 
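+        // (note: as written, any op in 11..50 other than 20 also lands in
+        // the update branch) +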
+        int op = rnd.nextInt(100);
+        if (op <= 10) {
+          // INSERT RECORD
+          if (recordCount == RECORDS) {
+            i -= 1;
+            continue;
+          }
+
+          slot = 0;
+          while (d[slot] != null)
+            slot++;
+
+          d[slot] = new RecordData(0, rnd.nextInt(MAXSIZE),
+              (byte) rnd.nextInt());
+          d[slot].rowid = db.insert(UtilTT.makeRecord(d[slot].size, d[slot].b));
+          recordCount++;
+          inserts++;
+        } else if (op == 20) {
+          // DELETE RECORD
+          if (recordCount == 0) {
+            i -= 1;
+            continue;
+          }
+
+          slot = getRandomAllocatedSlot(d);
+          db.delete(d[slot].rowid);
+          d[slot] = null;
+          recordCount--;
+          deletes++;
+        } else if (op <= 50) {
+          // UPDATE RECORD
+          if (recordCount == 0) {
+            i -= 1;
+            continue;
+          }
+
+          slot = getRandomAllocatedSlot(d);
+          d[slot].size = rnd.nextInt(MAXSIZE);
+          d[slot].b = (byte) rnd.nextInt();
+          db.update(d[slot].rowid, UtilTT.makeRecord(d[slot].size, d[slot].b));
+          updates++;
+        } else if (op == 51) {
+
+          // SET ROOT
+          int root = rnd.nextInt(Magic.FILE_HEADER_NROOTS);
+          if (root > 10) { // don't do this for reserved roots
+            if (roots[root] == 0)
+              rootCount++; // track allocated roots so GET ROOT below can run
+            roots[root] = rnd.nextLong();
+            db.setRoot((byte) root, roots[root]);
+            rootsets++;
+          }
+        } else if (op == 52) {
+          // GET ROOT
+          if (rootCount == 0) {
+            i -= 1;
+            continue;
+          }
+
+          int root = getRandomAllocatedRoot();
+          if (root > 10) { // don't do this for reserved roots
+            assertEquals("root", roots[root], db.getRoot((byte) root));
+            rootgets++;
+          }
+        } else {
+          // FETCH RECORD
+          if (recordCount == 0) {
+            i -= 1;
+            continue;
+          }
+
+          slot = getRandomAllocatedSlot(d);
+          byte[] data = (byte[]) db.fetch(d[slot].rowid);
+          assertTrue("fetch round=" + i + ", slot=" + slot + ", " + d[slot],
+              UtilTT.checkRecord(data, d[slot].size, d[slot].b));
+          fetches++;
+        }
+      }
+      db.close();
+    } catch (Throwable e) {
+      e.printStackTrace();
+      throw new RuntimeException("aborting test at slot " + slot + ": ", e);
+    } finally {
+      System.out.println("records : " + recordCount);
+      System.out.println("deletes : " + deletes);
+      System.out.println("inserts : " + inserts);
+      System.out.println("updates : " + updates);
+      System.out.println("fetches : " + fetches);
+      System.out.println("rootget : " + rootgets);
+      System.out.println("rootset : " + rootsets);
+      int totalSize = 0;
+      for (int i = 0; i < RECORDS; i++)
+        if (d[i] != null)
+          totalSize += d[i].size;
+      System.out.println("total outstanding size: " + totalSize);
+
+      // System.out.println("---");
+      // for (int i = 0; i < RECORDS; i++)
+      // if (d[i] != null)
+      // System.out.println("slot " + i + ": " + d[i]);
+    }
+
+  }
+
+}
Index: graph/src/test/java/org/apache/jdbm/UtilTT.java
===================================================================
--- graph/src/test/java/org/apache/jdbm/UtilTT.java (Revision 0)
+++ graph/src/test/java/org/apache/jdbm/UtilTT.java (Arbeitskopie)
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import junit.framework.Assert;
+
+/**
+ * This class contains some test utilities.
+ */
+public class UtilTT {
+  /**
+   * Creates a "record" containing "length" repetitions of the indicated byte.
+   */
+  public static byte[] makeRecord(int length, byte b) {
+    byte[] retval = new byte[length];
+    for (int i = 0; i < length; i++)
+      retval[i] = b;
+    return retval;
+  }
+
+  /**
+   * Checks whether the record has the indicated length and data
+   */
+  public static boolean checkRecord(byte[] data, int length, byte b) {
+    Assert.assertEquals("length does not match", length, data.length);
+    for (int i = 0; i < length; i++)
+      Assert.assertEquals("byte " + i, b, data[i]);
+
+    return true;
+  }
+
+}
Index: graph/src/test/java/org/apache/jdbm/DBTest.java
===================================================================
--- graph/src/test/java/org/apache/jdbm/DBTest.java (Revision 0)
+++ graph/src/test/java/org/apache/jdbm/DBTest.java (Arbeitskopie)
@@ -0,0 +1,462 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.File;
+import java.io.IOError;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * This class contains all Unit tests for {@link DBAbstract}.
+ */
+public class DBTest extends TestCaseWithTestFile {
+
+  /**
+   * Test constructor
+   */
+  public void testCtor() throws Exception {
+    DB db;
+
+    db = newDBCache();
+    db.close();
+  }
+
+  /**
+   * Test basics
+   */
+  public void testBasics() throws Exception {
+
+    DBAbstract db = newDBCache();
+
+    // insert a 10,000 byte record.
+    byte[] data = UtilTT.makeRecord(10000, (byte) 1);
+    long rowid = db.insert(data);
+    assertTrue("check data1",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid), 10000, (byte) 1));
+
+    // update it as a 20,000 byte record.
+    data = UtilTT.makeRecord(20000, (byte) 2);
+    db.update(rowid, data);
+    assertTrue("check data2",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid), 20000, (byte) 2));
+
+    // insert a second record.
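+    // (the 20,000 byte record from above stays live, so two records of
+    // very different sizes are exercised side by side)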
+    data = UtilTT.makeRecord(20, (byte) 3);
+    long rowid2 = db.insert(data);
+    assertTrue("check data3",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid2), 20, (byte) 3));
+
+    // now, grow the first record again
+    data = UtilTT.makeRecord(30000, (byte) 4);
+    db.update(rowid, data);
+    assertTrue("check data4",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid), 30000, (byte) 4));
+
+    // delete the record
+    db.delete(rowid);
+
+    // close the file
+    db.close();
+  }
+
+  /**
+   * Test delete and immediate reuse. This attempts to reproduce a bug in the
+   * stress test involving 0 record lengths.
+   */
+  public void testDeleteAndReuse() throws Exception {
+
+    DBAbstract db = newDBCache();
+
+    // insert a 1500 byte record.
+    byte[] data = UtilTT.makeRecord(1500, (byte) 1);
+    long rowid = db.insert(data);
+    assertTrue("check data1",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid), 1500, (byte) 1));
+
+    // delete the record
+    db.delete(rowid);
+
+    // insert a 0 byte record. Should have the same rowid.
+    data = UtilTT.makeRecord(0, (byte) 2);
+    long rowid2 = db.insert(data);
+    assertEquals("old and new rowid", rowid, rowid2);
+    assertTrue("check data2",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid2), 0, (byte) 2));
+
+    // now make the record a bit bigger
+    data = UtilTT.makeRecord(10000, (byte) 3);
+    db.update(rowid, data);
+    assertTrue("check data3",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid), 10000, (byte) 3));
+
+    // .. and again
+    data = UtilTT.makeRecord(30000, (byte) 4);
+    db.update(rowid, data);
+    assertTrue("check data4",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid), 30000, (byte) 4));
+
+    // close the file
+    db.close();
+  }
+
+  /**
+   * Test rollback sanity. Attempts to add a new record, rollback and add the
+   * same record. We should obtain the same record id for both operations.
+   */
+  public void testRollback() throws Exception {
+
+    // Note: We start out with an empty file
+    DBAbstract db = newDBCache();
+
+    db.insert(""); // first insert an empty record, to make sure serializer is
+    // initialized
+    db.commit();
+    // insert a 150000 byte record.
+    byte[] data1 = UtilTT.makeRecord(150000, (byte) 1);
+    long rowid1 = db.insert(data1);
+    assertTrue("check data1",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid1), 150000, (byte) 1));
+
+    // rollback transaction, should revert to previous state
+    db.rollback();
+
+    // insert same 150000 byte record.
+    byte[] data2 = UtilTT.makeRecord(150000, (byte) 1);
+    long rowid2 = db.insert(data2);
+    assertTrue("check data2",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid2), 150000, (byte) 1));
+
+    assertEquals("old and new rowid", rowid1, rowid2);
+
+    db.commit();
+
+    // insert a 150000 byte record.
+    data1 = UtilTT.makeRecord(150000, (byte) 2);
+    rowid1 = db.insert(data1);
+    assertTrue("check data1",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid1), 150000, (byte) 2));
+
+    // rollback transaction, should revert to previous state
+    db.rollback();
+
+    // insert same 150000 byte record.
+    data2 = UtilTT.makeRecord(150000, (byte) 2);
+    rowid2 = db.insert(data2);
+    assertTrue("check data2",
+        UtilTT.checkRecord((byte[]) db.fetch(rowid2), 150000, (byte) 2));
+
+    assertEquals("old and new rowid", rowid1, rowid2);
+
+    // close the file
+    db.close();
+  }
+
+  public void testNonExistingRecid() throws IOException {
+    DBAbstract db = newDBCache();
+
+    Object obj = db.fetch(6666666);
+    assertTrue(obj == null);
+
+    try {
+      db.update(6666666, obj);
+      db.commit();
+      fail();
+    } catch (IOError expected) {
+
+    } catch (IOException expected) {
+
+    }
+
+  }
+
+  final static AtomicInteger i = new AtomicInteger(0);
+
+  public static class Serial implements Serializer, Serializable {
+
+    public String deserialize(DataInput in) throws IOException,
+        ClassNotFoundException {
+      i.incrementAndGet();
+      return in.readUTF();
+    }
+
+    public void serialize(DataOutput out, String obj) throws IOException {
+      i.incrementAndGet();
+      out.writeUTF(obj);
+    }
+  }
+
+  public void testTreeMapValueSerializer() throws Exception {
+    i.set(0);
+    Serializer ser = new Serial();
+
+    DB db = newDBCache();
+    Map t = db.createTreeMap("test", null, null,
+        ser);
+    t.put(1l, "hopsa hejsa1");
+    t.put(2l, "hopsa hejsa2");
+    db.commit();
+    assertEquals(t.get(2l), "hopsa hejsa2");
+    assertTrue(i.intValue() > 0);
+  }
+
+  public void testCountRecid() throws Exception {
+    DBStore db = newDBNoCache();
+    db.insert(""); // first insert an empty record, to make sure serializer is
+    // initialized
+    long baseCount = db.countRecords();
+    for (int i = 1; i < 3000; i++) {
+      Object val = "qjiodjqwoidjqwiodoi";
+
+      db.insert(val);
+      if (i % 1000 == 0)
+        db.commit();
+
+      assertEquals(db.countRecords(), i + baseCount);
+    }
+
+  }
+
+  public void testGetCollections() throws IOException {
+    DB db = newDBCache();
+    db.createTreeMap("treemap");
+    db.createHashMap("hashmap");
+    db.createTreeSet("treeset");
+    db.createHashSet("hashset");
+
+    db.createLinkedList("linkedlist");
+    Map cols = db.getCollections();
+    assertTrue(cols.get("treemap") instanceof SortedMap);
+    assertTrue(cols.get("hashmap") instanceof Map);
+
+    assertTrue(cols.get("treeset") instanceof SortedSet);
+    assertTrue(cols.get("hashset") instanceof Set);
+    assertTrue(cols.get("linkedlist") instanceof List);
+  }
+
+  public void testRegisterShutdown() {
+    DB d = DBMaker.openFile(newTestFile()).closeOnExit().make();
+    // do nothing
+  }
+
+  public void testDeleteAfterExit() {
+    String f = newTestFile();
+    File f1 = new File(StorageDiskMapped.makeFileName(f, 1, 0));
+    File f2 = new File(StorageDiskMapped.makeFileName(f, -1, 0));
+
+    assertFalse(f1.exists());
+    assertFalse(f2.exists());
+
+    DB d = DBMaker.openFile(f).deleteFilesAfterClose().make();
+    d.createHashSet("test");
+    assertTrue(f1.exists());
+    assertTrue(f2.exists());
+    d.close();
+    assertFalse(f1.exists());
+    assertFalse(f2.exists());
+
+  }
+
+  public void testDeleteAfterExitRAF() {
+    String f = newTestFile();
+    File f1 = new File(StorageDiskMapped.makeFileName(f, 1, 0));
+    File f2 = new File(StorageDiskMapped.makeFileName(f, -1, 0));
+
+    assertFalse(f1.exists());
+    assertFalse(f2.exists());
+
+    DB d = DBMaker.openFile(f).deleteFilesAfterClose().useRandomAccessFile()
+        .make();
+    d.createHashSet("test");
+    assertTrue(f1.exists());
+    assertTrue(f2.exists());
+    d.close();
+    assertFalse(f1.exists());
+    assertFalse(f2.exists());
+
+  }
+
+  public void testDeleteLinkedList() throws IOException {
+    DBStore d = newDBNoCache();
+    d.createHashMap("testXX").put("aa", "bb"); // make sure serializer and name
+    // map are initialized
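+    // (those bookkeeping records are created lazily on first use; forcing
+    // them into existence keeps the countRecords() baseline below stable,
+    // so only the collection's own records are counted)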
+    d.commit();
+    long recCount = d.countRecords();
+    List l = d.createLinkedList("test");
+    l.add("1");
+    l.add("2");
+    d.commit();
+    assertFalse(recCount == d.countRecords());
+    d.deleteCollection("test");
+    assertEquals(recCount, d.countRecords());
+
+  }
+
+  public void testDeleteTreeMap() throws IOException {
+    DBStore d = newDBNoCache();
+    d.createHashMap("testXX").put("aa", "bb"); // make sure serializer and name
+    // map are initialized
+    d.commit();
+    long recCount = d.countRecords();
+    Map l = d.createTreeMap("test");
+    l.put("1", "b");
+    l.put("2", "b");
+    d.commit();
+    assertFalse(recCount == d.countRecords());
+    d.deleteCollection("test");
+    assertEquals(recCount, d.countRecords());
+
+  }
+
+  public void testDeleteHashMap() throws IOException {
+    DBStore d = newDBNoCache();
+    d.createHashMap("testXX").put("aa", "bb"); // make sure serializer and name
+    // map are initialized
+    d.commit();
+    long recCount = d.countRecords();
+    Map l = d.createHashMap("test");
+    l.put("1", "b");
+    l.put("2", "b");
+    d.commit();
+    assertFalse(recCount == d.countRecords());
+    d.deleteCollection("test");
+    assertEquals(recCount, d.countRecords());
+
+  }
+
+  public void testDeleteEmptyLinkedList() throws IOException {
+    DBStore d = newDBNoCache();
+    d.createHashMap("testXX").put("aa", "bb"); // make sure serializer and name
+    // map are initialized
+    d.commit();
+    long recCount = d.countRecords();
+    List l = d.createLinkedList("test");
+    d.commit();
+    assertFalse(recCount == d.countRecords());
+    d.deleteCollection("test");
+    assertEquals(recCount, d.countRecords());
+
+  }
+
+  public void testDeleteEmptyTreeMap() throws IOException {
+    DBStore d = newDBNoCache();
+    d.createHashMap("testXX").put("aa", "bb"); // make sure serializer and name
+    // map are initialized
+    d.commit();
+    long recCount = d.countRecords();
+    Map l = d.createTreeMap("test");
+    d.commit();
+    assertFalse(recCount == d.countRecords());
+    d.deleteCollection("test");
+    assertEquals(recCount, d.countRecords());
+
+  }
+
+  public void testDeleteEmptyHashMap() throws IOException {
+    DBStore d = newDBNoCache();
+    d.createHashMap("testXX").put("aa", "bb"); // make sure serializer and name
+    // map are initialized
+    d.commit();
+    long recCount = d.countRecords();
+    Map l = d.createHashMap("test");
+    d.commit();
+    assertFalse(recCount == d.countRecords());
+    d.deleteCollection("test");
+    assertEquals(recCount, d.countRecords());
+
+  }
+
+  public void testHugeRecord() throws IOException {
+    DBStore s = newDBNoCache();
+    try {
+      s.insert(new byte[50 * 1000 * 1000]);
+      s.commit();
+      fail();
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+
+  }
+
+  public void testCompressRecid() {
+    for (long l = Magic.PAGE_HEADER_SIZE; l < Storage.PAGE_SIZE; l += 6) {
+      assertEquals(l, DBStore.decompressRecid(DBStore.compressRecid(l)));
+    }
+
+    for (long l = Magic.PAGE_HEADER_SIZE + Storage.PAGE_SIZE * 5; l < Storage.PAGE_SIZE * 6; l += 6) {
+      assertEquals(l, DBStore.decompressRecid(DBStore.compressRecid(l)));
+    }
+
+  }
+
+  public void testCollectionSize() throws IOException {
+    DB d = newDBNoCache();
+
+    Map tm = d.createTreeMap("t1");
+    tm.put(1, 1);
+    tm.put(2, 2);
+    assertEquals(d.collectionSize(tm), 2);
+
+    tm = d.createHashMap("t2");
+    tm.put(1, 1);
+    tm.put(2, 2);
+    assertEquals(d.collectionSize(tm), 2);
+
+    Collection c = d.createLinkedList("t3");
+    c.add(1);
+    c.add(2);
+    assertEquals(d.collectionSize(c), 2);
+
+    c = d.createTreeSet("t4");
+    c.add(1);
+    c.add(2);
+    assertEquals(d.collectionSize(c), 2);
+
+    c = d.createHashSet("t5");
+    c.add(1);
+ c.add(2); + assertEquals(d.collectionSize(c), 2); + + } + + public void testDeleteAndPutCollection() throws IOException { + DB db = newDBNoCache(); + db = DBMakerTest.newDBCache(); + Map toAdd = new HashMap(); + toAdd.put("description", "test"); + toAdd.put("descriptio1", "test"); + Map map = db.createHashMap("test"); + map.putAll(toAdd); + db.commit(); + db.deleteCollection("test"); + map = db.getHashMap("test"); + assertNull(map); + + } + +} Index: graph/src/test/java/org/apache/jdbm/RecordHeaderTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/RecordHeaderTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/RecordHeaderTest.java (Arbeitskopie) @@ -0,0 +1,152 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.util.Random; + +import junit.framework.TestCase; + +/** + * This class contains all Unit tests for {@link RecordHeader}. + */ +public class RecordHeaderTest extends TestCase { + + /** + * Test basics - read and write at an offset + */ + public void testReadWrite() throws Exception { + byte[] data = new byte[Storage.PAGE_SIZE]; + PageIo test = new PageIo(0, data); + // RecordHeader hdr = new RecordHeader(test, (short) 6); + RecordHeader.setAvailableSize(test, (short) 6, 2345); + RecordHeader.setCurrentSize(test, (short) 6, 2300); + + assertEquals("current size", 2300, + RecordHeader.getCurrentSize(test, (short) 6)); + assertEquals("available size", 2345, + RecordHeader.getAvailableSize(test, (short) 6)); + } + + public void testRecordSize() { + + System.out.println("MAX_RECORD_SIZE " + RecordHeader.MAX_RECORD_SIZE); + + assertEquals("inconsistent rounding at max rec size", + RecordHeader.MAX_RECORD_SIZE, + RecordHeader.roundAvailableSize(RecordHeader.MAX_RECORD_SIZE)); + + byte[] data = new byte[Storage.PAGE_SIZE]; + PageIo test = new PageIo(0, data); + Random r = new Random(); + // RecordHeader hdr = new RecordHeader(test, (short) 6); + + for (int size = 2; size <= RecordHeader.MAX_RECORD_SIZE; size++) { + // set size + int currSize = size; + int availSize = RecordHeader.roundAvailableSize(currSize); + + assertTrue(availSize - currSize < RecordHeader.MAX_SIZE_SPACE); + assertTrue(currSize <= availSize); + + assertEquals( + "size rounding function does not provide consistent results " + + availSize, availSize, + RecordHeader.roundAvailableSize(availSize)); + + // make sure it writes and reads back correctly + RecordHeader.setAvailableSize(test, (short) 6, availSize); + assertEquals("available size", availSize, + RecordHeader.getAvailableSize(test, (short) 6)); + RecordHeader.setCurrentSize(test, (short) 6, currSize); + + assertEquals("current size", currSize, + RecordHeader.getCurrentSize(test, (short) 6)); + + 
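      // availSize is the rounded-up physical capacity of the slot, while
+      // currSize is the logical payload length; the assertions above pin
+      // down that the slack between the two never reaches MAX_SIZE_SPACE.
+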
// try random size within given offset + int newCurrSize = availSize - r.nextInt(RecordHeader.MAX_SIZE_SPACE); + if (newCurrSize < 0) + newCurrSize = 0; + RecordHeader.setCurrentSize(test, (short) 6, newCurrSize); + assertEquals("current size", newCurrSize, + RecordHeader.getCurrentSize(test, (short) 6)); + + RecordHeader.setCurrentSize(test, (short) 6, 0); + + size++; + + // comment out next line to do full test + if (size > 1e6) + size = (int) (size * 1.01); + } + + } + + public void testMaxRecordSize() { + + long max = 0; + for (int i = 0; i < 1e7; i++) { + int deconverted = RecordHeader.deconvertAvailSize(RecordHeader + .convertAvailSize(i)); + if (i == deconverted) { + max = i; + } + } + assertEquals("Maximal record size does not match the calculated one: " + + max, max, RecordHeader.MAX_RECORD_SIZE); + + } + + public void testRoundingSmall() { + for (int i = 0; i <= Short.MAX_VALUE; i++) { + assertEquals(i, RecordHeader.convertAvailSize(i)); + } + } + + public void testRounding() { + + for (int i = 0; i < RecordHeader.MAX_RECORD_SIZE; i++) { + int deconverted = RecordHeader.deconvertAvailSize(RecordHeader + .convertAvailSize(i)); + assertTrue("deconverted size is smaller than actual: " + i + " versus " + + deconverted, deconverted >= i); + } + + } + + public void testSetCurrentSize() { + PageIo b = new PageIo(4l, new byte[Storage.PAGE_SIZE]); + short pos = 10; + + RecordHeader.setAvailableSize(b, pos, 1000); + assertEquals(1000, RecordHeader.getAvailableSize(b, pos)); + RecordHeader.setCurrentSize(b, pos, 900); + assertEquals(900, RecordHeader.getCurrentSize(b, pos)); + RecordHeader.setCurrentSize(b, pos, 0); + assertEquals(0, RecordHeader.getCurrentSize(b, pos)); + RecordHeader.setCurrentSize(b, pos, 1000 - 254); + assertEquals(1000 - 254, RecordHeader.getCurrentSize(b, pos)); + + short pos2 = 20; + RecordHeader.setAvailableSize(b, pos2, 10000); + assertEquals(10000, RecordHeader.getAvailableSize(b, pos2)); + RecordHeader.setCurrentSize(b, pos2, 10000); + assertEquals(10000, RecordHeader.getCurrentSize(b, pos2)); + + } + +} Index: graph/src/test/java/org/apache/jdbm/DefragTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/DefragTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/DefragTest.java (Arbeitskopie) @@ -0,0 +1,106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +public class DefragTest extends TestCaseWithTestFile { + + public void testDefrag1() throws IOException { + String file = newTestFile(); + DBStore m = new DBStore(file, false, false, false); + long loc = m.insert("123"); + m.defrag(true); + m.close(); + m = new DBStore(file, false, false, false); + assertEquals(m.fetch(loc), "123"); + } + + public void testDefrag2() throws IOException { + String file = newTestFile(); + DBStore m = new DBStore(file, false, false, false); + TreeMap map = new TreeMap(); + for (int i = 0; i < 10000; i++) { + long loc = m.insert("" + i); + map.put(loc, "" + i); + } + + m.defrag(true); + m.close(); + m = new DBStore(file, false, false, false); + for (Long l : map.keySet()) { + String val = map.get(l); + assertEquals(val, m.fetch(l)); + } + } + + public void testDefragBtree() throws IOException { + String file = newTestFile(); + DBStore m = new DBStore(file, false, false, false); + Map t = m.createTreeMap("aa"); + TreeMap t2 = new TreeMap(); + for (int i = 0; i < 10000; i++) { + t.put(i, "" + i); + t2.put(i, "" + i); + } + + m.defrag(true); + m.close(); + m = new DBStore(file, false, false, false); + t = m.getTreeMap("aa"); + assertEquals(t, t2); + } + + public void testDefragLinkedList() throws Exception { + String file = newTestFile(); + DBStore r = new DBStore(file, false, false, false); + List l = r.createLinkedList("test"); + Map junk = new LinkedHashMap(); + + for (int i = 0; i < 1e4; i++) { + // insert some junk + Double d = Math.random(); + l.add(d); + junk.put(r.insert(d), d); + } + r.commit(); + // make copy of linked list + List l2 = new ArrayList(l); + long oldRecCount = r.countRecords(); + r.defrag(true); + + r.close(); + r = new DBStore(file, false, false, false); + assertEquals(oldRecCount, r.countRecords()); + + // compare that list was unchanged + assertEquals(l2, new ArrayList(r.getLinkedList("test"))); + + // and check that random junk still have the same recids + for (Long recid : junk.keySet()) { + assertEquals(junk.get(recid), r.fetch(recid)); + } + + r.close(); + } +} Index: graph/src/test/java/org/apache/jdbm/FileLockTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/FileLockTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/FileLockTest.java (Arbeitskopie) @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import java.io.IOError; +import java.io.IOException; + +public class FileLockTest extends TestCaseWithTestFile { + + public void testLock() throws IOException { + String file = newTestFile(); + + DB db1 = DBMaker.openFile(file).make(); + // now open same file second time, exception should be thrown + try { + DB db2 = DBMaker.openFile(file).make(); + fail("Exception should be thrown if file was locked"); + } catch (IOError e) { + // expected + } + + db1.close(); + + // after close lock should be released, reopen + DB db3 = DBMaker.openFile(file).make(); + db3.close(); + } +} Index: graph/src/test/java/org/apache/jdbm/TestRollback.java =================================================================== --- graph/src/test/java/org/apache/jdbm/TestRollback.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/TestRollback.java (Arbeitskopie) @@ -0,0 +1,130 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +/** + * Test cases for HTree rollback + */ +public class TestRollback extends TestCaseWithTestFile { + + /** + * Test case courtesy of Derek Dick (mailto:ddick users.sourceforge.net) + */ + public void testRollback1() throws Exception { + + // Note: We start out with an empty file + DBAbstract db = newDBCache(); + + HTree tree = (HTree) db.createHashMap("test"); + + tree.put("Foo", "Bar"); + tree.put("Fo", "Fum"); + + db.commit(); + + tree.put("Hello", "World"); + + db.rollback(); + + assertTrue(tree.get("Foo").equals("Bar")); + assertTrue(tree.get("Fo").equals("Fum")); + assertTrue(tree.get("Hello") == null); + } + + /** + * Test case courtesy of Derek Dick (mailto:ddick users.sourceforge.net) + */ + public void testRollback2() throws Exception { + + DBAbstract db = newDBCache(); + + HTree tree = (HTree) db.createHashMap("test"); + + tree.put("hello", "world"); + tree.put("goodnight", "gracie"); + db.commit(); + + tree.put("derek", "dick"); + db.rollback(); + + assertTrue(tree.get("derek") == null); + assertTrue(tree.get("goodnight").equals("gracie")); + assertTrue(tree.get("hello").equals("world")); + } + + /** + * Test case courtesy of Derek Dick (mailto:ddick users.sourceforge.net) + */ + public void testRollback1b() throws Exception { + + // Note: We start out with an empty file + DBAbstract db = newDBCache(); + + HTree tree = (HTree) db + .createHashMap("test"); + + tree.put("Foo", "Bar"); + tree.put("Fo", "Fum"); + + db.commit(); + + tree.put("Hello", "World"); + + db.rollback(); + + assertTrue(tree.get("Foo").equals("Bar")); + assertTrue(tree.get("Fo").equals("Fum")); + assertTrue(tree.get("Hello") == null); + } + + /** + * Test case courtesy of Derek Dick (mailto:ddick users.sourceforge.net) + */ + public void testRollback2b() throws Exception { + DBAbstract db; 
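+    // unlike the HTree-based variants above, this test drives rollback
+    // through a manually registered BTree (getNamedObject / setNamedObject)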
+ long root; + + // Note: We start out with an empty file + db = newDBCache(); + + root = db.getNamedObject("xyz"); + + BTree tree = null; + if (root == 0) { + // create a new one + tree = BTree.createInstance(db); + root = tree.getRecid(); + db.setNamedObject("xyz", root); + db.commit(); + } else { + tree = BTree.load(db, root); + } + + tree.insert("hello", "world", true); + tree.insert("goodnight", "gracie", true); + db.commit(); + + tree.insert("derek", "dick", true); + db.rollback(); + + assertTrue(tree.get("derek") == null); + assertTrue(tree.get("goodnight").equals("gracie")); + assertTrue(tree.get("hello").equals("world")); + } + +} Index: graph/src/test/java/org/apache/jdbm/RollbackTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/RollbackTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/RollbackTest.java (Arbeitskopie) @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class RollbackTest extends TestCaseWithTestFile { + + public void test_treemap() throws IOException { + DB db = newDBCache(); + Map map = db.createTreeMap("collectionName"); + + map.put(1, "one"); + map.put(2, "two"); + + assertEquals(2, map.size()); + db.commit(); // persist changes into disk + + map.put(3, "three"); + assertEquals(3, map.size()); + db.rollback(); // revert recent changes + assertEquals(2, map.size()); + } + + public void test_hashmap() throws IOException { + DB db = newDBCache(); + Map map = db.createHashMap("collectionName"); + + map.put(1, "one"); + map.put(2, "two"); + + assertEquals(2, map.size()); + db.commit(); // persist changes into disk + + map.put(3, "three"); + assertEquals(3, map.size()); + db.rollback(); // revert recent changes + assertEquals(2, map.size()); + } + + public void test_treeset() throws IOException { + DB db = newDBCache(); + Set c = db.createTreeSet("collectionName"); + + c.add(1); + c.add(2); + + assertEquals(2, c.size()); + db.commit(); // persist changes into disk + + c.add(3); + assertEquals(3, c.size()); + db.rollback(); // revert recent changes + assertEquals(2, c.size()); + } + + public void test_hashset() throws IOException { + DB db = newDBCache(); + Set c = db.createHashSet("collectionName"); + + c.add(1); + c.add(2); + + assertEquals(2, c.size()); + db.commit(); // persist changes into disk + + c.add(3); + assertEquals(3, c.size()); + db.rollback(); // revert recent changes + assertEquals(2, c.size()); + } + + public void test_linkedlist() throws IOException { + DB db = newDBCache(); + List c = db.createLinkedList("collectionName"); + + c.add(1); + c.add(2); + + 
assertEquals(2, c.size()); + db.commit(); // persist changes into disk + + c.add(3); + assertEquals(3, c.size()); + db.rollback(); // revert recent changes + assertEquals(2, c.size()); + } + +} Index: graph/src/test/java/org/apache/jdbm/BTreeMapNavigable2Test.java =================================================================== --- graph/src/test/java/org/apache/jdbm/BTreeMapNavigable2Test.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/BTreeMapNavigable2Test.java (Arbeitskopie) @@ -0,0 +1,421 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.util.AbstractMap; +import java.util.ConcurrentModificationException; +import java.util.NavigableMap; +import java.util.NavigableSet; +import java.util.Set; +import java.util.SortedMap; + +public class BTreeMapNavigable2Test extends TestCaseWithTestFile { + static NavigableMap map; + + @Override + public void setUp() throws Exception { + map = newDBNoCache().createTreeMap("test"); + + map.put(1, "one"); + map.put(2, "two"); + map.put(3, "three"); + map.put(4, "four"); + + map.put(7, "seven"); + map.put(8, "eight"); + map.put(9, "nine"); + map.put(10, "ten"); + } + + public void testSize() { + int i = 8; + assertEquals(map.size(), i); + while (!map.isEmpty()) { + map.remove(map.firstKey()); + assertEquals(map.size(), --i); + } + } + + public void testContainsKey() { + assertTrue(map.containsKey(1)); + assertTrue(map.containsKey(2)); + assertTrue(map.containsKey(3)); + assertTrue(map.containsKey(4)); + assertFalse(map.containsKey(5)); + assertFalse(map.containsKey(6)); + assertTrue(map.containsKey(7)); + assertTrue(map.containsKey(8)); + assertTrue(map.containsKey(9)); + assertTrue(map.containsKey(10)); + + assertFalse(map.containsKey(999)); + assertFalse(map.containsKey(-1)); + } + + public void testContainsValue() { + assertTrue(map.containsValue("one")); + assertTrue(map.containsValue("two")); + assertTrue(map.containsValue("three")); + assertTrue(map.containsValue("four")); + assertFalse(map.containsValue("five")); + assertFalse(map.containsValue("six")); + assertTrue(map.containsValue("seven")); + assertTrue(map.containsValue("eight")); + assertTrue(map.containsValue("nine")); + assertTrue(map.containsValue("ten")); + + assertFalse(map.containsValue("aaaa")); + } + + public void testPut() { + assertFalse(map.containsKey(40)); + assertFalse(map.containsValue("forty")); + map.put(40, "forty"); + assertTrue(map.containsKey(40)); + assertTrue(map.containsValue("forty")); + } + + public void testLowerEntry() { + AbstractMap.Entry e = map.lowerEntry(4); + assertEquals(e.getKey(), (Integer) 3); + } + + public void testLowerKey() { + Integer key = map.lowerKey(4); + assertEquals(key, (Integer) 3); + } + + public void testFloorEntry() { + 
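    // keys 5 and 6 are absent from the fixture, so the greatest key <= 6
+    // (its "floor") is 4, while an existing key such as 7 is its own floor
+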
AbstractMap.Entry e = map.floorEntry(6); + assertEquals(e.getKey(), (Integer) 4); + + e = map.floorEntry(7); + assertEquals(e.getKey(), (Integer) 7); + } + + public void testFloorKey() { + Integer key = map.floorKey(6); + assertEquals(key, (Integer) 4); + + key = map.floorKey(7); + assertEquals(key, (Integer) 7); + } + + public void testCeilingEntry() { + AbstractMap.Entry e = map.ceilingEntry(6); + assertEquals(e.getKey(), (Integer) 7); + + e = map.ceilingEntry(7); + assertEquals(e.getKey(), (Integer) 7); + } + + public void testCeilingKey() { + Integer key = map.ceilingKey(6); + assertEquals(key, (Integer) 7); + + key = map.ceilingKey(7); + assertEquals(key, (Integer) 7); + } + + public void testHigherEntry() { + AbstractMap.Entry e = map.higherEntry(4); + assertEquals(e.getKey(), (Integer) 7); + + e = map.higherEntry(7); + assertEquals(e.getKey(), (Integer) 8); + } + + public void testHigherKey() { + Integer key = map.higherKey(4); + assertEquals(key, (Integer) 7); + + key = map.higherKey(7); + assertEquals(key, (Integer) 8); + } + + public void testFirstEntry() { + assertEquals(map.firstEntry().getKey(), (Integer) 1); + } + + public void testLastEntry() { + assertEquals(map.lastEntry().getKey(), (Integer) 10); + } + + public void testPollFirstEntry() { + int size0 = map.size(); + AbstractMap.Entry e = map.pollFirstEntry(); + int size1 = map.size(); + assertEquals(size0 - 1, size1); + + assertNull(map.get(1)); + assertEquals(e.getKey(), (Integer) 1); + assertEquals(e.getValue(), "one"); + } + + public void testPollLastEntry() { + int size0 = map.size(); + AbstractMap.Entry e = map.pollLastEntry(); + int size1 = map.size(); + assertEquals(size0 - 1, size1); + + assertNull(map.get(10)); + assertEquals(e.getKey(), (Integer) 10); + assertEquals(e.getValue(), "ten"); + } + + // + // TODO implement this + // public void testDescendingMap() + // { + // NavigableMap desMap = map.descendingMap(); + // Set> entrySet1 = map.entrySet(); + // Set> entrySet2 = desMap.entrySet(); + // AbstractMap.Entry[] arr1 = entrySet1.toArray(new + // AbstractMap.Entry[0]); + // AbstractMap.Entry[] arr2 = entrySet2.toArray(new + // AbstractMap.Entry[0]); + // + // int size = arr1.length; + // assertEquals(arr1.length, arr2.length); + // for (int i = 0; i < arr1.length; i++) + // { + // assertEquals(arr1[i], arr2[size-1-i]); + // } + // } + + public void testNavigableKeySet() { + int size0 = map.size(); + NavigableSet keySet = map.navigableKeySet(); + int size1 = keySet.size(); + assertEquals(size0, size1); + + keySet.remove(2); + size0 = map.size(); + size1 = keySet.size(); + assertEquals(size0, size1); + assertNull(map.get(2)); + } + + // TODO implement this + // + // public void testDescendingKeySet() + // { + // Set keySet1 = map.keySet(); + // Set keySet2 = map.descendingKeySet(); + // + // Integer[] arr1 = keySet1.toArray(new Integer[0]); + // Integer[] arr2 = keySet2.toArray(new Integer[0]); + // int size = arr1.length; + // assertEquals(arr1.length, arr2.length); + // for (int i = 0; i < size; i++) + // { + // assertEquals(arr1[i],arr2[size-1-i]); + // } + // } + + public void testSubMap() { + SortedMap subMap = map.subMap(3, 8); + assertNotNull(subMap.get(3)); + assertEquals(subMap.get(3), "three"); + assertEquals(subMap.get(4), "four"); + assertNull(subMap.get(5)); + assertNull(subMap.get(6)); + assertEquals(subMap.get(7), "seven"); + + assertNull(subMap.get(8)); + assertNull(subMap.get(2)); + assertNull(subMap.get(9)); + try { + subMap.put(11, "eleven"); + fail("Inserted entry outside of submap range"); + 
} catch (IllegalArgumentException e) {
+      assertNull(subMap.get(11));
+    }
+  }
+
+  public void testSubMap2() {
+    NavigableMap subMap = map.subMap(3, true, 8, false);
+    assertNotNull(subMap.get(3));
+    assertEquals(subMap.get(3), "three");
+    assertEquals(subMap.get(4), "four");
+    assertNull(subMap.get(5));
+    assertNull(subMap.get(6));
+    assertEquals(subMap.get(7), "seven");
+
+    assertNull(subMap.get(8));
+    assertNull(subMap.get(2));
+    assertNull(subMap.get(9));
+    try {
+      subMap.put(11, "eleven");
+      fail("Inserted entry outside of submap range");
+    } catch (IllegalArgumentException e) {
+      assertNull(subMap.get(11));
+    }
+  }
+
+  public void testSubMap3() {
+    NavigableMap subMap = map.subMap(2, false, 8, false);
+    assertNotNull(subMap.get(3));
+    assertEquals(subMap.get(3), "three");
+    assertEquals(subMap.get(4), "four");
+    assertNull(subMap.get(5));
+    assertNull(subMap.get(6));
+    assertEquals(subMap.get(7), "seven");
+
+    assertNull(subMap.get(8));
+    assertNull(subMap.get(2));
+    assertNull(subMap.get(9));
+    try {
+      subMap.put(11, "eleven");
+      fail("Inserted entry outside of submap range");
+    } catch (IllegalArgumentException e) {
+      assertNull(subMap.get(11));
+    }
+  }
+
+  public void testSubMap4() {
+    NavigableMap subMap = map.subMap(3, true, 7, true);
+    assertNotNull(subMap.get(3));
+    assertEquals(subMap.get(3), "three");
+    assertEquals(subMap.get(4), "four");
+    assertNull(subMap.get(5));
+    assertNull(subMap.get(6));
+    assertEquals(subMap.get(7), "seven");
+
+    assertNull(subMap.get(8));
+    assertNull(subMap.get(2));
+    assertNull(subMap.get(9));
+    try {
+      subMap.put(11, "eleven");
+      fail("Inserted entry outside of submap range");
+    } catch (IllegalArgumentException e) {
+      assertNull(subMap.get(11));
+    }
+  }
+
+  public void testHeadMap() {
+    SortedMap subMap = map.headMap(5);
+    assertEquals(subMap.size(), 4);
+    assertNull(subMap.get(5));
+    assertEquals(subMap.get(1), "one");
+    try {
+      subMap.put(5, "five");
+      fail("Inserted data out of bounds of submap.");
+    } catch (IllegalArgumentException e) {
+      assertNull(subMap.get(5));
+    }
+  }
+
+  public void testHeadMap2() {
+    NavigableMap subMap = map.headMap(5, false);
+    assertEquals(subMap.size(), 4);
+    assertNull(subMap.get(5));
+    assertEquals(subMap.get(1), "one");
+    try {
+      subMap.put(5, "five");
+      fail("Inserted data out of bounds of submap.");
+    } catch (IllegalArgumentException e) {
+      assertNull(subMap.get(5));
+    }
+  }
+
+  public void testHeadMap3() {
+    NavigableMap subMap = map.headMap(5, true);
+    assertEquals(subMap.size(), 4);
+    assertNull(subMap.get(5));
+    assertEquals(subMap.get(1), "one");
+    try {
+      subMap.put(5, "five");
+      assertEquals(subMap.get(5), "five");
+    } catch (IllegalArgumentException e) {
+      fail("It was not possible to insert a legal value in a submap.");
+    }
+  }
+
+  public void testHeadMap4() {
+    NavigableMap subMap = map.headMap(8, true);
+    assertEquals(subMap.size(), 6);
+    assertEquals(subMap.get(8), "eight");
+    assertEquals(subMap.get(1), "one");
+    try {
+      subMap.put(5, "five");
+      assertEquals(subMap.get(5), "five");
+    } catch (IllegalArgumentException e) {
+      fail("It was not possible to insert a legal value in a submap.");
+    }
+  }
+
+  public void testTailMap() {
+    SortedMap subMap = map.tailMap(5);
+    assertEquals(subMap.size(), 4);
+    assertEquals(subMap.firstKey(), (Integer) 7);
+    assertEquals(subMap.lastKey(), (Integer) 10);
+  }
+
+  public void testTailMap2() {
+    SortedMap subMap = map.tailMap(7);
+    assertEquals(subMap.size(), 4);
+    assertEquals(subMap.firstKey(), (Integer) 7);
+    assertEquals(subMap.lastKey(), (Integer) 10);
+  }
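+
+  // the two-argument overloads below make bound inclusivity explicit:
+  //   tailMap(7, false) starts at 8, tailMap(7, true) starts at 7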
+ + public void testTailMap3() { + NavigableMap subMap = map.tailMap(7, false); + assertEquals(subMap.size(), 3); + assertEquals(subMap.firstKey(), (Integer) 8); + assertEquals(subMap.lastKey(), (Integer) 10); + } + + public void testTailMap4() { + NavigableMap subMap = map.tailMap(7, true); + assertEquals(subMap.size(), 4); + assertEquals(subMap.firstKey(), (Integer) 7); + assertEquals(subMap.lastKey(), (Integer) 10); + } + + public void testIsEmpty() { + assertFalse(map.isEmpty()); + map.clear(); + assertTrue(map.isEmpty()); + } + + public void testClearSubmap() { + NavigableMap subMap = map.subMap(7, true, 9, true); + subMap.clear(); + assertEquals(subMap.size(), 0); + assertTrue(map.size() == 5); + assertNull(map.get(7)); + assertNull(map.get(8)); + assertNull(map.get(9)); + } + + public void testConcurrentModification() { + Set> entrySet = map.entrySet(); + assertTrue(entrySet.size() > 0); + try { + + for (AbstractMap.Entry e : entrySet) + entrySet.remove(e); + + fail("No concurrentModificationException was thrown"); + } catch (ConcurrentModificationException ex) { + } + + } + +} Index: graph/src/test/java/org/apache/jdbm/HTreeDirectoryTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/HTreeDirectoryTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/HTreeDirectoryTest.java (Arbeitskopie) @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; +import java.util.Hashtable; +import java.util.Iterator; + +/** + * This class contains all Unit tests for {@link HTreeDirectory}. 
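+ * It drives the tree's root directory node directly: basic put/get, bulk
+ * insert/replace/remove cycles, and key/value iteration counts.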
+ */ +public class HTreeDirectoryTest extends TestCaseWithTestFile { + + /** + * Basic tests + */ + public void testBasics() throws IOException { + System.out.println("testBasics"); + + DBAbstract db = newDBCache(); + + HTree tree = (HTree) db.createHashMap("test"); + HTreeDirectory dir = tree.getRoot(); + + dir.put("key", "value"); + String s = (String) dir.get("key"); + assertEquals("value", s); + + db.close(); + } + + /** + * Mixed tests + */ + public void testMixed() throws IOException { + System.out.println("testMixed"); + + DBAbstract db = newDBCache(); + + HTree tree = (HTree) db.createHashMap("test"); + HTreeDirectory dir = tree.getRoot(); + + Hashtable hash = new Hashtable(); // use to compare results + + int max = 30; // must be even + + // insert & check values + for (int i = 0; i < max; i++) { + dir.put("key" + i, "value" + i); + hash.put("key" + i, "value" + i); + } + db.commit(); + + for (int i = 0; i < max; i++) { + String s = (String) dir.get("key" + i); + assertEquals("value" + i, s); + } + db.commit(); + + // replace only even values + for (int i = 0; i < max; i += 2) { + dir.put("key" + i, "value" + (i * 2 + 1)); + hash.put("key" + i, "value" + (i * 2 + 1)); + } + db.commit(); + + for (int i = 0; i < max; i++) { + if ((i % 2) == 1) { + // odd + String s = (String) dir.get("key" + i); + assertEquals("value" + i, s); + } else { + // even + String s = (String) dir.get("key" + i); + assertEquals("value" + (i * 2 + 1), s); + } + } + db.commit(); + + // remove odd numbers + for (int i = 1; i < max; i += 2) { + dir.remove("key" + i); + hash.remove("key" + i); + } + db.commit(); + + for (int i = 0; i < max; i++) { + if ((i % 2) == 1) { + // odd + String s = (String) dir.get("key" + i); + assertEquals(null, s); + } else { + // even + String s = (String) dir.get("key" + i); + assertEquals("value" + (i * 2 + 1), s); + } + } + db.commit(); + + db.close(); + db = null; + } + + void checkEnumerations(Hashtable hash, HTreeDirectory dir) throws IOException { + + // test keys + Hashtable clone = (Hashtable) hash.clone(); + int count = 0; + Iterator iter = dir.keys(); + + while (iter.hasNext()) { + String s = iter.next(); + count++; + clone.remove(s); + } + assertEquals(hash.size(), count); + + // test values + clone = (Hashtable) hash.clone(); + count = 0; + iter = dir.values(); + while (iter.hasNext()) { + String s = iter.next(); + count++; + clone.remove(s); + } + assertEquals(hash.size(), count); + } + +} Index: graph/src/test/java/org/apache/jdbm/BTreeBench.java =================================================================== --- graph/src/test/java/org/apache/jdbm/BTreeBench.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/BTreeBench.java (Arbeitskopie) @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import java.io.IOException; +import java.util.Enumeration; +import java.util.Hashtable; + +/** + * Random insertion/removal test for B+Tree data structure. + */ +public class BTreeBench extends TestCaseWithTestFile { + + DBAbstract db; + + /** + * Test w/o compression or specialized key or value serializers. + * + * @throws IOException + */ + public void test_001() throws IOException { + db = newDBCache(); + BTree tree = BTree.createInstance(db); + doTest(db, tree, 5001); + db.close(); + } + + public static void doTest(DB db, BTree tree, int ITERATIONS) + throws IOException { + + long beginTime = System.currentTimeMillis(); + Hashtable hash = new Hashtable(); + + for (int i = 0; i < ITERATIONS; i++) { + Long random = new Long(random(0, 64000)); + + if ((i % 5000) == 0) { + long elapsed = System.currentTimeMillis() - beginTime; + System.out.println("Iterations=" + i + " Objects=" + tree._entries + + ", elapsed=" + elapsed + "ms"); + db.commit(); + } + if (hash.get(random) == null) { + // System.out.println( "Insert " + random ); + hash.put(random, random); + tree.insert(random, random, false); + } else { + // System.out.println( "Remove " + random ); + hash.remove(random); + Object removed = tree.remove(random); + if ((removed == null) || (!removed.equals(random))) { + throw new IllegalStateException("Remove expected " + random + " got " + + removed); + } + } + // tree.assertOrdering(); + compare(tree, hash); + } + + } + + static long random(int min, int max) { + return Math.round(Math.random() * (max - min)) + min; + } + + static void compare(BTree tree, Hashtable hash) + throws IOException { + boolean failed = false; + Enumeration enumeration; + + if (tree._entries != hash.size()) { + throw new IllegalStateException("Tree size " + tree._entries + + " Hash size " + hash.size()); + } + + enumeration = hash.keys(); + while (enumeration.hasMoreElements()) { + Long key = enumeration.nextElement(); + Long hashValue = hash.get(key); + Long treeValue = tree.get(key); + if (!hashValue.equals(treeValue)) { + System.out.println("Compare expected " + hashValue + " got " + + treeValue); + failed = true; + } + } + if (failed) { + throw new IllegalStateException("Compare failed"); + } + } + +} Index: graph/src/test/java/org/apache/jdbm/ObjectOutputStreamTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/ObjectOutputStreamTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/ObjectOutputStreamTest.java (Arbeitskopie) @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; + +import junit.framework.TestCase; + +import org.apache.jdbm.SerialClassInfoTest.Bean1; + +public class ObjectOutputStreamTest extends TestCase { + + @SuppressWarnings("unchecked") + E neser(E e) throws IOException, ClassNotFoundException { + ByteArrayOutputStream i = new ByteArrayOutputStream(); + new AdvancedObjectOutputStream(i).writeObject(e); + return (E) new AdvancedObjectInputStream(new ByteArrayInputStream( + i.toByteArray())).readObject(); + } + + public void testSimple() throws ClassNotFoundException, IOException { + + Bean1 b = new Bean1("qwe", "rty"); + Bean1 b2 = neser(b); + + assertEquals(b, b2); + + } +} Index: graph/src/test/java/org/apache/jdbm/PageIoTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/PageIoTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/PageIoTest.java (Arbeitskopie) @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.nio.ByteBuffer; + +import junit.framework.TestCase; + +/** + * This class contains all Unit tests for {@link PageIo}. 
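+ * Covers offset-based reads and writes of primitives, six-byte longs
+ * (including negative values), and the page header next/prev fields.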
+ */ +public class PageIoTest extends TestCase { + + private static final short SHORT_VALUE = 0x1234; + private static final int INT_VALUE = 0xe7b3c8a1; + private static final long LONG_VALUE = 0xfdebca9876543210L; + private static final long LONG_VALUE2 = 1231290495545446485L; + + /** + * Test writing + */ + public void testWrite() throws Exception { + byte[] data = new byte[100]; + PageIo test = new PageIo(0, data); + test.writeShort(0, SHORT_VALUE); + test.writeLong(2, LONG_VALUE); + test.writeInt(10, INT_VALUE); + test.writeLong(14, LONG_VALUE2); + + DataInputStream is = new DataInputStream(new ByteArrayInputStream(data)); + assertEquals("short", SHORT_VALUE, is.readShort()); + assertEquals("long", LONG_VALUE, is.readLong()); + assertEquals("int", INT_VALUE, is.readInt()); + assertEquals("long", LONG_VALUE2, is.readLong()); + + assertEquals("short", SHORT_VALUE, test.readShort(0)); + assertEquals("long", LONG_VALUE, test.readLong(2)); + assertEquals("int", INT_VALUE, test.readInt(10)); + assertEquals("long", LONG_VALUE2, test.readLong(14)); + + } + + /** + * Test reading + */ + public void testRead() throws Exception { + ByteArrayOutputStream bos = new ByteArrayOutputStream(100); + DataOutputStream os = new DataOutputStream(bos); + os.writeShort(SHORT_VALUE); + os.writeLong(LONG_VALUE); + os.writeInt(INT_VALUE); + os.writeLong(LONG_VALUE2); + + byte[] data = bos.toByteArray(); + PageIo test = new PageIo(0, data); + assertEquals("short", SHORT_VALUE, test.readShort(0)); + assertEquals("long", LONG_VALUE, test.readLong(2)); + assertEquals("int", INT_VALUE, test.readInt(10)); + assertEquals("long", LONG_VALUE2, test.readLong(14)); + } + + public void testNegativeSixByte() { + + PageIo t = new PageIo(0, ByteBuffer.allocate(Storage.PAGE_SIZE)); + + t.writeSixByteLong(0, -11111); + assertEquals(-11111, t.readSixByteLong(0)); + + t.writeSixByteLong(0, 11111); + assertEquals(11111, t.readSixByteLong(0)); + + } + + public void testPageHeaderSetWriteRead() throws Exception { + PageIo data = new PageIo(0, new byte[Storage.PAGE_SIZE]); + data.writeShort(0, Magic.PAGE_MAGIC); + + data.pageHeaderSetNext(10); + data.pageHeaderSetPrev(33); + + assertEquals("next", 10, data.pageHeaderGetNext()); + assertEquals("prev", 33, data.pageHeaderGetPrev()); + } + +} Index: graph/src/test/java/org/apache/jdbm/HTreeBucketTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/HTreeBucketTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/HTreeBucketTest.java (Arbeitskopie) @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.jdbm; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.Serializable; +import java.util.Map; + +/** + * This class contains all Unit tests for {@link HTreeBucket}. + */ +public class HTreeBucketTest extends TestCaseWithTestFile { + + /** + * Basic tests + */ + public void testBasics() throws IOException { + + DB db = newDBCache(); + + HTree tree = (HTree) db.createHashMap("test"); + + HTreeBucket bucket = new HTreeBucket(tree, (byte) 0); + + // add + bucket.addElement("key", "value"); + String s = (String) bucket.getValue("key"); + assertEquals("value", s); + + // replace + bucket.addElement("key", "value2"); + s = (String) bucket.getValue("key"); + assertEquals("value2", s); + + // add + bucket.addElement("key2", "value3"); + s = (String) bucket.getValue("key2"); + assertEquals("value3", s); + + // remove + bucket.removeElement("key2"); + s = (String) bucket.getValue("key2"); + assertEquals(null, s); + bucket.removeElement("key"); + s = (String) bucket.getValue("key"); + assertEquals(null, s); + + db.close(); + } + + public static class LongSerializer implements Serializer, Serializable { + + public LongSerializer() { + + } + + public void serialize(DataOutput out, Long obj) throws IOException { + out.writeLong(obj); + } + + public Long deserialize(DataInput in) throws IOException, + ClassNotFoundException { + return in.readLong(); + } + } + + public void testCustomSerializer() throws IOException { + Serializer ser = new LongSerializer(); + + DB db = newDBCache(); + Map s = db.createHashMap("test", ser, ser); + + s.put(new Long(1), new Long(2)); + s.put(new Long(4), new Long(5)); + db.commit(); + db.clearCache(); + assertTrue(s.size() == 2); + assertEquals(s.get(new Long(1)), new Long(2)); + assertEquals(s.get(new Long(4)), new Long(5)); + + } +} Index: graph/src/test/java/org/apache/jdbm/PageManagerTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/PageManagerTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/PageManagerTest.java (Arbeitskopie) @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jdbm; + +/** + * This class contains all Unit tests for {@link PageManager}. + */ +public class PageManagerTest extends TestCaseWithTestFile { + + /** + * Test constructor + */ + public void testCtor() throws Exception { + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + + f.forceClose(); + } + + /** + * Test allocations on a single list. 
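+ * Allocates 100 pages on the USED_PAGE list, reopens the file, then walks
+ * the list to verify that the allocated chain survived the close/open cycle.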
+ */ + public void testAllocSingleList() throws Exception { + String file = newTestFile(); + PageFile f = new PageFile(file); + PageManager pm = new PageManager(f); + for (int i = 0; i < 100; i++) { + assertEquals("allocate ", (long) i + 1, pm.allocate(Magic.USED_PAGE)); + } + pm.close(); + f.close(); + + f = new PageFile(file); + pm = new PageManager(f); + + long i = 1; + for (long cur = pm.getFirst(Magic.USED_PAGE); cur != 0; cur = pm + .getNext(cur)) { + assertEquals("next", i++, cur); + if (i > 120) + fail("list structure not ok"); + } + assertEquals("total", 101, i); + pm.close(); + f.close(); + } + +} Index: graph/src/test/java/org/apache/jdbm/TestLazyRecordsInTree.java =================================================================== --- graph/src/test/java/org/apache/jdbm/TestLazyRecordsInTree.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/TestLazyRecordsInTree.java (Arbeitskopie) @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; +import java.util.Map; + +public class TestLazyRecordsInTree extends TestCaseWithTestFile { + + String makeString(int size) { + StringBuilder s = new StringBuilder(size); + for (int i = 0; i < size; i++) { + s.append('a'); + } + return s.toString(); + } + + void doIt(DBStore r, Map m) throws IOException { + m.put(1, ""); + long counter = r.countRecords(); + // number of records should increase after inserting big record + m.put(1, makeString(1000)); + assertEquals(counter + 1, r.countRecords()); + assertEquals(m.get(1), makeString(1000)); + + // old record should be disposed when replaced with big record + m.put(1, makeString(1001)); + assertEquals(counter + 1, r.countRecords()); + assertEquals(m.get(1), makeString(1001)); + + // old record should be disposed when replaced with small record + m.put(1, "aa"); + assertEquals(counter, r.countRecords()); + assertEquals(m.get(1), "aa"); + + // old record should be disposed after deleting + m.put(1, makeString(1001)); + assertEquals(counter + 1, r.countRecords()); + assertEquals(m.get(1), makeString(1001)); + m.remove(1); + assertTrue(counter >= r.countRecords()); + assertEquals(m.get(1), null); + + } + + public void testBTree() throws IOException { + DBStore r = newDBNoCache(); + Map m = r.createTreeMap("test"); + doIt(r, m); + } + + public void testHTree() throws IOException { + DBStore r = newDBNoCache(); + Map m = r.createHashMap("test"); + doIt(r, m); + } + +} Index: graph/src/test/java/org/apache/jdbm/DataInputOutputTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/DataInputOutputTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/DataInputOutputTest.java (Arbeitskopie) @@ -0,0 +1,83 @@ +/** 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; + +import junit.framework.TestCase; + +public class DataInputOutputTest extends TestCase { + + final DataInputOutput d = new DataInputOutput(); + + public void testInt() throws IOException { + int i = 123129049; + d.writeInt(i); + d.reset(); + assertEquals(i, d.readInt()); + } + + public void testLong() throws IOException { + long i = 1231290495545446485L; + d.writeLong(i); + d.reset(); + assertEquals(i, d.readLong()); + } + + public void testBooelean() throws IOException { + d.writeBoolean(true); + d.reset(); + assertEquals(true, d.readBoolean()); + d.reset(); + d.writeBoolean(false); + d.reset(); + assertEquals(false, d.readBoolean()); + + } + + public void testByte() throws IOException { + + for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; i++) { + d.writeByte(i); + d.reset(); + assertEquals(i, d.readByte()); + d.reset(); + } + } + + public void testUnsignedByte() throws IOException { + + for (int i = 0; i <= 255; i++) { + d.write(i); + d.reset(); + assertEquals(i, d.readUnsignedByte()); + d.reset(); + } + } + + public void testLongPacker() throws IOException { + + for (int i = 0; i < 1e7; i++) { + LongPacker.packInt(d, i); + d.reset(); + assertEquals(i, LongPacker.unpackInt(d)); + d.reset(); + } + } + +} Index: graph/src/test/java/org/apache/jdbm/LogicalRowIdManagerTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/LogicalRowIdManagerTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/LogicalRowIdManagerTest.java (Arbeitskopie) @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +/** + * This class contains all Unit tests for {@link LogicalRowIdManager}. 
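+ * A logical row id is a stable record handle that the manager translates to
+ * a physical location, so a record can move on disk without changing its id.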
+ */
+public class LogicalRowIdManagerTest extends TestCaseWithTestFile {
+
+  /**
+   * Test constructor
+   */
+  public void testCtor() throws Exception {
+    PageFile f = newRecordFile();
+    PageManager pm = new PageManager(f);
+    PageFile free = newRecordFile();
+    PageManager pmfree = new PageManager(free);
+
+    LogicalRowIdManager logMgr = new LogicalRowIdManager(f, pm);
+
+    f.forceClose();
+  }
+
+  /**
+   * Test basics
+   */
+  public void testBasics() throws Exception {
+    PageFile f = newRecordFile();
+    PageManager pm = new PageManager(f);
+    PageFile free = newRecordFile();
+    PageManager pmfree = new PageManager(free);
+    LogicalRowIdManager logMgr = new LogicalRowIdManager(f, pm);
+    // note: '+' binds tighter than '<<' in Java, so the intended
+    // (page << shift) + offset split needs explicit parentheses
+    long physid = (20 << Storage.PAGE_SIZE_SHIFT) + 234;
+
+    long logid = logMgr.insert(physid);
+    assertEquals("check one", physid, logMgr.fetch(logid));
+
+    physid = (10 << Storage.PAGE_SIZE_SHIFT) + 567;
+    logMgr.update(logid, physid);
+    assertEquals("check two", physid, logMgr.fetch(logid));
+
+    logMgr.delete(logid);
+
+    f.forceClose();
+  }
+
+  public void testFreeBasics() throws Exception {
+    PageFile f = newRecordFile();
+    PageManager pm = new PageManager(f);
+    LogicalRowIdManager freeMgr = new LogicalRowIdManager(f, pm);
+
+    // allocate a rowid - should fail on an empty file
+    long loc = freeMgr.getFreeSlot();
+    assertTrue("loc is not null?", loc == 0);
+
+    pm.close();
+    f.close();
+  }
+
+}
Index: graph/src/test/java/org/apache/jdbm/PhysicalRowIdManagerTest.java
===================================================================
--- graph/src/test/java/org/apache/jdbm/PhysicalRowIdManagerTest.java	(Revision 0)
+++ graph/src/test/java/org/apache/jdbm/PhysicalRowIdManagerTest.java	(Arbeitskopie)
@@ -0,0 +1,231 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * This class contains all Unit tests for {@link PhysicalRowIdManager}.
+ */
+public class PhysicalRowIdManagerTest extends TestCaseWithTestFile {
+
+  private byte[] data = new byte[100000];
+
+  /**
+   * Test constructor
+   */
+  public void testCtor() throws Exception {
+    PageFile f = newRecordFile();
+    PageManager pm = new PageManager(f);
+    PageFile free = newRecordFile();
+    PageManager pmfree = new PageManager(free);
+
+    PhysicalRowIdManager physMgr = new PhysicalRowIdManager(f, pm);
+
+    f.forceClose();
+  }
+
+  /**
+   * Test basics
+   */
+  public void testBasics() throws Exception {
+
+    PageFile f = newRecordFile();
+    PageManager pm = new PageManager(f);
+    PageFile free = newRecordFile();
+    PageManager pmfree = new PageManager(free);
+    PhysicalRowIdManager physMgr = new PhysicalRowIdManager(f, pm);
+
+    // insert a 10,000 byte record.
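+    // (10,000 bytes is larger than a single page, so insert, fetch and
+    // update below also exercise records that span page boundaries)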
+ byte[] data = UtilTT.makeRecord(10000, (byte) 1); + long loc = physMgr.insert(data, 0, data.length); + DataInputOutput a1 = new DataInputOutput(); + physMgr.fetch(a1, loc); + assertTrue("check data1", + UtilTT.checkRecord(a1.toByteArray(), 10000, (byte) 1)); + + // update it as a 20,000 byte record. + data = UtilTT.makeRecord(20000, (byte) 2); + long loc2 = physMgr.update(loc, data, 0, data.length); + DataInputOutput a2 = new DataInputOutput(); + physMgr.fetch(a2, loc2); + assertTrue("check data2", + UtilTT.checkRecord(a2.toByteArray(), 20000, (byte) 2)); + + // insert a third record. This'll effectively page the first one + // from growing + data = UtilTT.makeRecord(20, (byte) 3); + long loc3 = physMgr.insert(data, 0, data.length); + DataInputOutput a3 = new DataInputOutput(); + physMgr.fetch(a3, loc3); + assertTrue("check data3", + UtilTT.checkRecord(a3.toByteArray(), 20, (byte) 3)); + + // now, grow the first record again + data = UtilTT.makeRecord(30000, (byte) 4); + loc2 = physMgr.update(loc2, data, 0, data.length); + DataInputOutput a4 = new DataInputOutput(); + physMgr.fetch(a4, loc2); + assertTrue("check data4", + UtilTT.checkRecord(a4.toByteArray(), 30000, (byte) 4)); + + // delete the record + physMgr.free(loc2); + + f.forceClose(); + } + + public void testTwoRecords() throws IOException { + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + PhysicalRowIdManager physmgr = new PhysicalRowIdManager(f, pm); + + physmgr.insert(new byte[1024], 0, 1024); + physmgr.insert(new byte[100], 0, 100); + + assertEquals(listRecords(pm), arrayList(1024, 100)); + + } + + public void testDeleteRecord() throws IOException { + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + PhysicalRowIdManager physmgr = new PhysicalRowIdManager(f, pm); + + physmgr.insert(data, 0, 1024); + long recid = physmgr.insert(data, 0, 100); + physmgr.insert(data, 0, 700); + physmgr.free(recid); + + assertEquals(listRecords(pm), arrayList(1024, -100, 700)); + + } + + public void testTwoLargeRecord() throws IOException { + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + PhysicalRowIdManager physmgr = new PhysicalRowIdManager(f, pm); + + physmgr.insert(data, 0, 5000); + physmgr.insert(data, 0, 5000); + + assertEquals(listRecords(pm), arrayList(5000, 5000)); + + } + + public void testManyLargeRecord() throws IOException { + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + PhysicalRowIdManager physmgr = new PhysicalRowIdManager(f, pm); + + physmgr.insert(data, 0, 5002); + long id1 = physmgr.insert(data, 0, 5003); + physmgr.insert(data, 0, 5005); + long id2 = physmgr.insert(data, 0, 5006); + physmgr.insert(data, 0, 5007); + physmgr.insert(data, 0, 5008); + physmgr.free(id1); + physmgr.free(id2); + + assertEquals(listRecords(pm), + arrayList(5002, -5003, 5005, -5006, 5007, 5008)); + + } + + public void testSplitRecordAcrossPage() throws IOException { + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + PhysicalRowIdManager physmgr = new PhysicalRowIdManager(f, pm); + + physmgr.insert(data, 0, 3000); + long id = physmgr.insert(data, 0, 3000); + physmgr.insert(data, 0, 1000); + physmgr.free(id); + + // record which crosses page should be sliced to two, so it does not cross + // the page + int firstSize = Storage.PAGE_SIZE - Magic.DATA_PAGE_O_DATA + - RecordHeader.SIZE - 3000 - RecordHeader.SIZE; + int secondSize = 3000 - firstSize - RecordHeader.SIZE; + + // TODO decide about this + // assertEquals(listRecords(pm), 
arrayList(3000,-firstSize,-secondSize, + // 1000)); + + } + + public void testFreeMidPages() throws IOException { + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + PhysicalRowIdManager physmgr = new PhysicalRowIdManager(f, pm); + + physmgr.insert(data, 0, 3000); + long id = physmgr.insert(data, 0, 30000); + physmgr.insert(data, 0, 1000); + physmgr.free(id); + + // if record occupies multiple pages, mid pages should be freed and record + // trimmed. + int newSize = 30000; + + while (newSize > Storage.PAGE_SIZE - Magic.DATA_PAGE_O_DATA) + newSize = newSize - (Storage.PAGE_SIZE - Magic.DATA_PAGE_O_DATA); + + assertEquals(listRecords(pm), arrayList(3000, -newSize, 1000)); + + } + + /** return list of records in pageman, negative numbers are free records */ + List listRecords(PageManager pageman) throws IOException { + int pos = Magic.DATA_PAGE_O_DATA; + List ret = new ArrayList(); + for (long pageid = pageman.getFirst(Magic.USED_PAGE); pageid != 0; pageid = pageman + .getNext(pageid)) { + + PageIo page = pageman.file.get(pageid); + + while (pos < Storage.PAGE_SIZE - RecordHeader.SIZE) { + + int size = RecordHeader.getAvailableSize(page, (short) pos); + if (size == 0) + break; + int currSize = RecordHeader.getCurrentSize(page, (short) pos); + pos += size + RecordHeader.SIZE; + if (currSize == 0) + size = -size; + ret.add(size); + } + + pos = pos + Magic.DATA_PAGE_O_DATA - Storage.PAGE_SIZE; + + pageman.file.release(page); + } + + return ret; + } + + List arrayList(Integer... args) { + ArrayList ret = new ArrayList(); + for (Integer i : args) + ret.add(i); + return ret; + } + +} Index: graph/src/test/java/org/apache/jdbm/SerialClassInfoTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/SerialClassInfoTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/SerialClassInfoTest.java (Arbeitskopie) @@ -0,0 +1,276 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.Serializable; +import java.util.AbstractMap; +import java.util.ArrayList; + +public class SerialClassInfoTest extends TestCaseWithTestFile { + + static class Bean1 implements Serializable { + + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + Bean1 bean1 = (Bean1) o; + + if (Double.compare(bean1.doubleField, doubleField) != 0) + return false; + if (Float.compare(bean1.floatField, floatField) != 0) + return false; + if (intField != bean1.intField) + return false; + if (longField != bean1.longField) + return false; + if (field1 != null ? !field1.equals(bean1.field1) : bean1.field1 != null) + return false; + if (field2 != null ? !field2.equals(bean1.field2) : bean1.field2 != null) + return false; + + return true; + } + + protected String field1 = null; + protected String field2 = null; + + protected int intField = Integer.MAX_VALUE; + protected long longField = Long.MAX_VALUE; + protected double doubleField = Double.MAX_VALUE; + protected float floatField = Float.MAX_VALUE; + + transient int getCalled = 0; + transient int setCalled = 0; + + public String getField2() { + getCalled++; + return field2; + } + + public void setField2(String field2) { + setCalled++; + this.field2 = field2; + } + + Bean1(String field1, String field2) { + this.field1 = field1; + this.field2 = field2; + } + + Bean1() { + } + } + + static class Bean2 extends Bean1 { + + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + if (!super.equals(o)) + return false; + + Bean2 bean2 = (Bean2) o; + + if (field3 != null ? !field3.equals(bean2.field3) : bean2.field3 != null) + return false; + + return true; + } + + @Override + public int hashCode() { + return field3 != null ? 
field3.hashCode() : 0; + } + + private String field3 = null; + + Bean2(String field1, String field2, String field3) { + super(field1, field2); + this.field3 = field3; + } + + Bean2() { + } + } + + SerialClassInfo s; + + public void setUp() throws IOException { + s = new Serialization(); + } + + Bean1 b = new Bean1("aa", "bb"); + Bean2 b2 = new Bean2("aa", "bb", "cc"); + + public void testGetFieldValue1() throws Exception { + assertEquals("aa", s.getFieldValue("field1", b)); + } + + public void testGetFieldValue2() throws Exception { + assertEquals("bb", s.getFieldValue("field2", b)); + assertEquals(1, b.getCalled); + } + + public void testGetFieldValue3() throws Exception { + assertEquals("aa", s.getFieldValue("field1", b2)); + } + + public void testGetFieldValue4() throws Exception { + assertEquals("bb", s.getFieldValue("field2", b2)); + assertEquals(1, b2.getCalled); + } + + public void testGetFieldValue5() throws Exception { + assertEquals("cc", s.getFieldValue("field3", b2)); + } + + public void testSetFieldValue1() { + s.setFieldValue("field1", b, "zz"); + assertEquals("zz", b.field1); + } + + public void testSetFieldValue2() { + s.setFieldValue("field2", b, "zz"); + assertEquals("zz", b.field2); + assertEquals(1, b.setCalled); + } + + public void testSetFieldValue3() { + s.setFieldValue("field1", b2, "zz"); + assertEquals("zz", b2.field1); + } + + public void testSetFieldValue4() { + s.setFieldValue("field2", b2, "zz"); + assertEquals("zz", b2.field2); + assertEquals(1, b2.setCalled); + } + + public void testSetFieldValue5() { + s.setFieldValue("field3", b2, "zz"); + assertEquals("zz", b2.field3); + } + + public void testGetPrimitiveField() { + assertEquals(Integer.MAX_VALUE, s.getFieldValue("intField", b2)); + assertEquals(Long.MAX_VALUE, s.getFieldValue("longField", b2)); + assertEquals(Double.MAX_VALUE, s.getFieldValue("doubleField", b2)); + assertEquals(Float.MAX_VALUE, s.getFieldValue("floatField", b2)); + } + + public void testSetPrimitiveField() { + s.setFieldValue("intField", b2, -1); + assertEquals(-1, s.getFieldValue("intField", b2)); + s.setFieldValue("longField", b2, -1L); + assertEquals(-1L, s.getFieldValue("longField", b2)); + s.setFieldValue("doubleField", b2, -1D); + assertEquals(-1D, s.getFieldValue("doubleField", b2)); + s.setFieldValue("floatField", b2, -1F); + assertEquals(-1F, s.getFieldValue("floatField", b2)); + } + + E serialize(E e) throws ClassNotFoundException, IOException { + Serialization s2 = new Serialization(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + s2.serialize(new DataOutputStream(out), e); + + ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray()); + return (E) s2.deserialize(new DataInputStream(in)); + + } + + public void testSerializable() throws Exception { + + assertEquals(serialize(b), b); + } + + public void testRecursion() throws Exception { + AbstractMap.SimpleEntry b = new AbstractMap.SimpleEntry("abcd", null); + b.setValue(b.getKey()); + + AbstractMap.SimpleEntry bx = serialize(b); + assertEquals(bx, b); + assert (bx.getKey() == bx.getValue()); + + } + + public void testRecursion2() throws Exception { + AbstractMap.SimpleEntry b = new AbstractMap.SimpleEntry("abcd", null); + b.setValue(b); + + AbstractMap.SimpleEntry bx = serialize(b); + assertTrue(bx == bx.getValue()); + assertEquals(bx.getKey(), "abcd"); + + } + + public void testRecursion3() throws Exception { + ArrayList l = new ArrayList(); + l.add("123"); + l.add(l); + + ArrayList l2 = serialize(l); + + assertTrue(l.size() == 2); + 
// assert against the deserialized copy, not the original list
+    assertEquals(l2.get(0), "123");
+    assertTrue(l2.get(1) == l2);
+  }
+
+  public void testPersistedSimple() throws Exception {
+
+    String f = newTestFile();
+    DBAbstract r1 = (DBAbstract) DBMaker.openFile(f).make();
+    long recid = r1.insert("AA");
+    r1.commit();
+    r1.close();
+
+    DBAbstract r2 = (DBAbstract) DBMaker.openFile(f).make();
+
+    String a2 = r2.fetch(recid);
+    r2.close();
+    assertEquals("AA", a2);
+
+  }
+
+  public void testPersisted() throws Exception {
+    Bean1 b1 = new Bean1("abc", "dcd");
+    String f = newTestFile();
+    DBAbstract r1 = (DBAbstract) DBMaker.openFile(f).make();
+    long recid = r1.insert(b1);
+    r1.commit();
+    r1.close();
+
+    DBAbstract r2 = (DBAbstract) DBMaker.openFile(f).make();
+
+    Bean1 b2 = (Bean1) r2.fetch(recid);
+    r2.close();
+    assertEquals(b1, b2);
+
+  }
+
+}
Index: graph/src/test/java/org/apache/jdbm/StreamCorrupted.java
===================================================================
--- graph/src/test/java/org/apache/jdbm/StreamCorrupted.java	(Revision 0)
+++ graph/src/test/java/org/apache/jdbm/StreamCorrupted.java	(Arbeitskopie)
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.IOException;
+
+/**
+ * Contributed test case for BTree by Christof Dallermassl (cdaller@iicm.edu):
+ * <p/>
+ * -= quote from original message posted on jdbm-general =-
+ * <p/>
+ * <pre>
+ * I tried to insert a couple of elements into a BTree and then remove
+ * them one by one. After a number or removals, there is always (if more
+ * than 20 elements in btree) a java.io.StreamCorruptedException thrown.
+ *
+ * The strange thing is, that on 50 elements, the exception is thrown
+ * after removing 22, on 200 it is thrown after 36, on 1000 it is thrown
+ * after 104, on 10000 it is thrown after 1003....
+ *
+ * The full stackTrace is here:
+ * ---------------------- snip ------- snap -------------------------
+ * java.io.StreamCorruptedException: Caught EOFException while reading the
+ * stream header
+ *   at java.io.ObjectInputStream.readStreamHeader(ObjectInputStream.java:845)
+ *   at java.io.ObjectInputStream.&lt;init&gt;(ObjectInputStream.java:168)
+ *   at jdbm.db.DB.byteArrayToObject(DB.java:296)
+ *   at jdbm.db.DB.fetchObject(DB.java:239)
+ *   at jdbm.helper.ObjectCache.fetchObject(ObjectCache.java:104)
+ *   at jdbm.btree.BPage.loadBPage(BPage.java:670)
+ *   at jdbm.btree.BPage.remove(BPage.java:492)
+ *   at jdbm.btree.BPage.remove(BPage.java:437)
+ *   at jdbm.btree.BTree.remove(BTree.java:313)
+ *   at JDBMTest.main(JDBMTest.java:41)
+ * </pre>
+ */ +public class StreamCorrupted extends TestCaseWithTestFile { + + /** + * Basic tests + */ + public void testStreamCorrupted() throws IOException { + DBAbstract db; + BTree btree; + int iterations; + + iterations = 100; // 23 works :-((((( + + // open database + db = newDBCache(); + + // create a new B+Tree data structure + btree = BTree.createInstance(db); + db.setNamedObject("testbtree", btree.getRecid()); + + // action: + + // insert data + for (int count = 0; count < iterations; count++) { + btree.insert("num" + count, new Integer(count), true); + } + + // delete data + for (int count = 0; count < iterations; count++) { + btree.remove("num" + count); + } + + // close database + db.close(); + db = null; + } + +} Index: graph/src/test/java/org/apache/jdbm/CompactTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/CompactTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/CompactTest.java (Arbeitskopie) @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import java.io.IOException; +import java.util.Map; + +public class CompactTest extends TestCaseWithTestFile { + + final int MAX = 1000 * 1000; + + public void testHashCompaction() throws IOException { + + String f = newTestFile(); + + DB db0 = DBMaker.openFile(f).disableTransactions().make(); + Map db = db0.createHashMap("db"); + + System.out.println("Adding"); + for (int i = 0; i < MAX; i++) { + db.put("key" + i, "value" + i); + } + + db0.close(); + db0 = DBMaker.openFile(f).disableTransactions().make(); + db = db0.getHashMap("db"); + + System.out.println("Deleting"); + for (int i = 0; i < MAX; i++) { + db.remove("key" + i); + } + + db0.close(); + db0 = DBMaker.openFile(f).disableTransactions().make(); + db = db0.getHashMap("db"); + + System.out.println("Adding"); + for (int i = 0; i < MAX; i++) { + db.put("key" + i, "value" + i); + } + System.out.println("Closing"); + db0.close(); + } + + public void testBTreeCompaction() throws IOException { + + String f = newTestFile(); + + DB db0 = DBMaker.openFile(f).disableTransactions().make(); + Map db = db0.createTreeMap("db"); + + System.out.println("Adding"); + for (int i = 0; i < MAX; i++) { + db.put("key" + i, "value" + i); + } + + db0.close(); + db0 = DBMaker.openFile(f).disableTransactions().make(); + db = db0.getTreeMap("db"); + + System.out.println("Deleting"); + for (int i = 0; i < MAX; i++) { + db.remove("key" + i); + } + + db0.close(); + db0 = DBMaker.openFile(f).disableTransactions().make(); + db = db0.getTreeMap("db"); + + System.out.println("Adding"); + for (int i = 0; i < MAX; i++) { + db.put("key" + i, "value" + i); + } + + System.out.println("Closing"); + db0.close(); + } + +} Index: graph/src/test/java/org/apache/jdbm/FileHeaderTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/FileHeaderTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/FileHeaderTest.java (Arbeitskopie) @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import junit.framework.TestCase; + +public class FileHeaderTest extends TestCase { + + /** + * Test set, write, read + */ + public void testSetWriteRead() throws Exception { + PageIo b = new PageIo(0, new byte[1000]); + b.fileHeaderCheckHead(true); + for (int i = 0; i < Magic.NLISTS; i++) { + b.fileHeaderSetFirstOf(i, 100 * i); + b.fileHeaderSetLastOf(i, 200 * i); + } + + b.fileHeaderCheckHead(false); + for (int i = 0; i < Magic.NLISTS; i++) { + assertEquals("first " + i, i * 100, b.fileHeaderGetFirstOf(i)); + assertEquals("last " + i, i * 200, b.fileHeaderGetLastOf(i)); + } + } + + /** + * Test root rowids + */ + public void testRootRowids() throws Exception { + PageIo b = new PageIo(0, new byte[Storage.PAGE_SIZE]); + b.fileHeaderCheckHead(true); + for (int i = 0; i < Magic.FILE_HEADER_NROOTS; i++) { + b.fileHeaderSetRoot(i, 100 * i); + } + + b.fileHeaderCheckHead(false); + for (int i = 0; i < Magic.FILE_HEADER_NROOTS; i++) { + assertEquals("root " + i, i * 100, b.fileHeaderGetRoot(i)); + } + } + +} Index: graph/src/test/java/org/apache/jdbm/MapInterfaceTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/MapInterfaceTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/MapInterfaceTest.java (Arbeitskopie) @@ -0,0 +1,1512 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jdbm; + +import static java.util.Collections.singleton; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import junit.framework.TestCase; + +/** + * Tests representing the contract of {@link Map}. Concrete subclasses of this + * base class test conformance of concrete {@link Map} subclasses to that + * contract. + *
+ * + * @param the type of keys used by the maps under test + * @param the type of mapped values used the maps under test + */ +public abstract class MapInterfaceTest extends TestCase { + protected final boolean supportsPut; + protected final boolean supportsRemove; + protected final boolean supportsClear; + protected final boolean allowsNullKeys; + protected final boolean allowsNullValues; + protected final boolean supportsIteratorRemove; + + /** + * Creates a new, empty instance of the class under test. + * + * @return a new, empty map instance. + * @throws UnsupportedOperationException if it's not possible to make an empty + * instance of the class under test. + */ + protected abstract Map makeEmptyMap() + throws UnsupportedOperationException; + + /** + * Creates a new, non-empty instance of the class under test. + * + * @return a new, non-empty map instance. + * @throws UnsupportedOperationException if it's not possible to make a + * non-empty instance of the class under test. + */ + protected abstract Map makePopulatedMap() + throws UnsupportedOperationException; + + /** + * Creates a new key that is not expected to be found in + * {@link #makePopulatedMap()}. + * + * @return a key. + * @throws UnsupportedOperationException if it's not possible to make a key + * that will not be found in the map. + */ + protected abstract K getKeyNotInPopulatedMap() + throws UnsupportedOperationException; + + /** + * Creates a new value that is not expected to be found in + * {@link #makePopulatedMap()}. + * + * @return a value. + * @throws UnsupportedOperationException if it's not possible to make a value + * that will not be found in the map. + */ + protected abstract V getValueNotInPopulatedMap() + throws UnsupportedOperationException; + + /** + * Constructor with an explicit {@code supportsIteratorRemove} parameter. + */ + protected MapInterfaceTest(boolean allowsNullKeys, boolean allowsNullValues, + boolean supportsPut, boolean supportsRemove, boolean supportsClear, + boolean supportsIteratorRemove) { + this.supportsPut = supportsPut; + this.supportsRemove = supportsRemove; + this.supportsClear = supportsClear; + this.allowsNullKeys = allowsNullKeys; + this.allowsNullValues = allowsNullValues; + this.supportsIteratorRemove = supportsIteratorRemove; + } + + /** + * Used by tests that require a map, but don't care whether it's populated or + * not. + * + * @return a new map instance. + */ + protected Map makeEitherMap() { + try { + return makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return makeEmptyMap(); + } + } + + protected final boolean supportsValuesHashCode(Map map) { + // get the first non-null value + Collection values = map.values(); + for (V value : values) { + if (value != null) { + try { + value.hashCode(); + } catch (Exception e) { + return false; + } + return true; + } + } + return true; + } + + /** + * Checks all the properties that should always hold of a map. Also calls + * {@link #assertMoreInvariants} to check invariants that are peculiar to + * specific implementations. + * + * @param map the map to check. 
+ * @see #assertMoreInvariants + */ + protected final void assertInvariants(Map map) { + Set keySet = map.keySet(); + Collection valueCollection = map.values(); + Set> entrySet = map.entrySet(); + + assertEquals(map.size() == 0, map.isEmpty()); + assertEquals(map.size(), keySet.size()); + assertEquals(keySet.size() == 0, keySet.isEmpty()); + assertEquals(!keySet.isEmpty(), keySet.iterator().hasNext()); + + int expectedKeySetHash = 0; + for (K key : keySet) { + V value = map.get(key); + expectedKeySetHash += key != null ? key.hashCode() : 0; + assertTrue(map.containsKey(key)); + assertTrue(map.containsValue(value)); + assertTrue(valueCollection.contains(value)); + assertTrue(valueCollection.containsAll(Collections.singleton(value))); + assertTrue(entrySet.contains(mapEntry(key, value))); + assertTrue(allowsNullKeys || (key != null)); + } + assertEquals(expectedKeySetHash, keySet.hashCode()); + + assertEquals(map.size(), valueCollection.size()); + assertEquals(valueCollection.size() == 0, valueCollection.isEmpty()); + assertEquals(!valueCollection.isEmpty(), valueCollection.iterator() + .hasNext()); + for (V value : valueCollection) { + assertTrue(map.containsValue(value)); + assertTrue(allowsNullValues || (value != null)); + } + + assertEquals(map.size(), entrySet.size()); + assertEquals(entrySet.size() == 0, entrySet.isEmpty()); + assertEquals(!entrySet.isEmpty(), entrySet.iterator().hasNext()); + assertTrue(!entrySet.contains("foo")); + + boolean supportsValuesHashCode = supportsValuesHashCode(map); + if (supportsValuesHashCode) { + int expectedEntrySetHash = 0; + for (Entry entry : entrySet) { + assertTrue(map.containsKey(entry.getKey())); + assertTrue(map.containsValue(entry.getValue())); + int expectedHash = (entry.getKey() == null ? 0 : entry.getKey() + .hashCode()) + ^ (entry.getValue() == null ? 0 : entry.getValue().hashCode()); + assertEquals(expectedHash, entry.hashCode()); + expectedEntrySetHash += expectedHash; + } + assertEquals(expectedEntrySetHash, entrySet.hashCode()); + assertTrue(entrySet.containsAll(new HashSet>(entrySet))); + assertTrue(entrySet.equals(new HashSet>(entrySet))); + } + + Object[] entrySetToArray1 = entrySet.toArray(); + assertEquals(map.size(), entrySetToArray1.length); + assertTrue(Arrays.asList(entrySetToArray1).containsAll(entrySet)); + + Entry[] entrySetToArray2 = new Entry[map.size() + 2]; + entrySetToArray2[map.size()] = mapEntry("foo", 1); + assertSame(entrySetToArray2, entrySet.toArray(entrySetToArray2)); + assertNull(entrySetToArray2[map.size()]); + assertTrue(Arrays.asList(entrySetToArray2).containsAll(entrySet)); + + Object[] valuesToArray1 = valueCollection.toArray(); + assertEquals(map.size(), valuesToArray1.length); + assertTrue(Arrays.asList(valuesToArray1).containsAll(valueCollection)); + + Object[] valuesToArray2 = new Object[map.size() + 2]; + valuesToArray2[map.size()] = "foo"; + assertSame(valuesToArray2, valueCollection.toArray(valuesToArray2)); + assertNull(valuesToArray2[map.size()]); + assertTrue(Arrays.asList(valuesToArray2).containsAll(valueCollection)); + + if (supportsValuesHashCode) { + int expectedHash = 0; + for (Entry entry : entrySet) { + expectedHash += entry.hashCode(); + } + assertEquals(expectedHash, map.hashCode()); + } + + assertMoreInvariants(map); + } + + /** + * Override this to check invariants which should hold true for a particular + * implementation, but which are not generally applicable to every instance of + * Map. + * + * @param map the map whose additional invariants to check. 
+ */ + protected void assertMoreInvariants(Map map) { + } + + public void testClear() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + if (supportsClear) { + map.clear(); + assertTrue(map.isEmpty()); + } else { + try { + map.clear(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testContainsKey() { + final Map map; + final K unmappedKey; + try { + map = makePopulatedMap(); + unmappedKey = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertTrue(!map.containsKey(unmappedKey)); + assertTrue(map.containsKey(map.keySet().iterator().next())); + if (allowsNullKeys) { + map.containsKey(null); + } else { + try { + map.containsKey(null); + } catch (NullPointerException optional) { + } + } + assertInvariants(map); + } + + public void testContainsValue() { + final Map map; + final V unmappedValue; + try { + map = makePopulatedMap(); + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertTrue(!map.containsValue(unmappedValue)); + assertTrue(map.containsValue(map.values().iterator().next())); + if (allowsNullValues) { + map.containsValue(null); + } else { + try { + map.containsKey(null); + } catch (NullPointerException optional) { + } + } + assertInvariants(map); + } + + public void testEntrySet() { + final Map map; + final Set> entrySet; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + + entrySet = map.entrySet(); + final K unmappedKey; + final V unmappedValue; + try { + unmappedKey = getKeyNotInPopulatedMap(); + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + for (Entry entry : entrySet) { + assertTrue(!unmappedKey.equals(entry.getKey())); + assertTrue(!unmappedValue.equals(entry.getValue())); + } + } + + public void testEntrySetForEmptyMap() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + } + + public void testEntrySetContainsEntryNullKeyPresent() { + if (!allowsNullKeys || !supportsPut) { + return; + } + final Map map; + final Set> entrySet; + try { + map = makeEitherMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + + entrySet = map.entrySet(); + final V unmappedValue; + try { + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + map.put(null, unmappedValue); + Entry entry = mapEntry(null, unmappedValue); + assertTrue(entrySet.contains(entry)); + assertTrue(!entrySet.contains(mapEntry(null, null))); + } + + public void testEntrySetContainsEntryNullKeyMissing() { + final Map map; + final Set> entrySet; + try { + map = makeEitherMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + + entrySet = map.entrySet(); + final V unmappedValue; + try { + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + Entry entry = mapEntry(null, unmappedValue); + assertTrue(!entrySet.contains(entry)); + assertTrue(!entrySet.contains(mapEntry(null, null))); + } + + public void testEntrySetIteratorRemove() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + 
Set> entrySet = map.entrySet(); + Iterator> iterator = entrySet.iterator(); + if (supportsIteratorRemove) { + int initialSize = map.size(); + Entry entry = iterator.next(); + iterator.remove(); + assertEquals(initialSize - 1, map.size()); + assertTrue(!entrySet.contains(entry)); + assertInvariants(map); + try { + iterator.remove(); + fail("Expected IllegalStateException."); + } catch (IllegalStateException e) { + // Expected. + } + } else { + try { + iterator.next(); + iterator.remove(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetRemove() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + if (supportsRemove) { + int initialSize = map.size(); + boolean didRemove = entrySet.remove(entrySet.iterator().next()); + assertTrue(didRemove); + assertEquals(initialSize - 1, map.size()); + } else { + try { + entrySet.remove(entrySet.iterator().next()); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetRemoveMissingKey() { + final Map map; + final K key; + try { + map = makeEitherMap(); + key = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + Entry entry = mapEntry(key, getValueNotInPopulatedMap()); + int initialSize = map.size(); + if (supportsRemove) { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } else { + try { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } catch (UnsupportedOperationException optional) { + } + } + assertEquals(initialSize, map.size()); + assertTrue(!map.containsKey(key)); + assertInvariants(map); + } + + public void testEntrySetRemoveDifferentValue() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + K key = map.keySet().iterator().next(); + Entry entry = mapEntry(key, getValueNotInPopulatedMap()); + int initialSize = map.size(); + if (supportsRemove) { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } else { + try { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } catch (UnsupportedOperationException optional) { + } + } + assertEquals(initialSize, map.size()); + assertTrue(map.containsKey(key)); + assertInvariants(map); + } + + public void testEntrySetRemoveNullKeyPresent() { + if (!allowsNullKeys || !supportsPut || !supportsRemove) { + return; + } + final Map map; + final Set> entrySet; + try { + map = makeEitherMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + + entrySet = map.entrySet(); + final V unmappedValue; + try { + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + map.put(null, unmappedValue); + assertEquals(unmappedValue, map.get(null)); + assertTrue(map.containsKey(null)); + Entry entry = mapEntry(null, unmappedValue); + assertTrue(entrySet.remove(entry)); + assertNull(map.get(null)); + assertTrue(!map.containsKey(null)); + } + + public void testEntrySetRemoveNullKeyMissing() { + final Map map; + try { + map = makeEitherMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + 
Entry entry = mapEntry(null, getValueNotInPopulatedMap()); + int initialSize = map.size(); + if (supportsRemove) { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } else { + try { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } catch (UnsupportedOperationException optional) { + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testEntrySetRemoveAll() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + Set> entriesToRemove = singleton(entrySet.iterator().next()); + if (supportsRemove) { + int initialSize = map.size(); + boolean didRemove = entrySet.removeAll(entriesToRemove); + assertTrue(didRemove); + assertEquals(initialSize - entriesToRemove.size(), map.size()); + for (Entry entry : entriesToRemove) { + assertTrue(!entrySet.contains(entry)); + } + } else { + try { + entrySet.removeAll(entriesToRemove); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetRemoveAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + if (supportsRemove) { + try { + entrySet.removeAll(null); + fail("Expected NullPointerException."); + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + entrySet.removeAll(null); + fail("Expected UnsupportedOperationException or NullPointerException."); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetRetainAll() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + Set> entriesToRetain = singleton(entrySet.iterator().next()); + if (supportsRemove) { + boolean shouldRemove = (entrySet.size() > entriesToRetain.size()); + boolean didRemove = entrySet.retainAll(entriesToRetain); + assertEquals(shouldRemove, didRemove); + assertEquals(entriesToRetain.size(), map.size()); + for (Entry entry : entriesToRetain) { + assertTrue(entrySet.contains(entry)); + } + } else { + try { + entrySet.retainAll(entriesToRetain); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetRetainAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + if (supportsRemove) { + try { + entrySet.retainAll(null); + // Returning successfully is not ideal, but tolerated. + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + entrySet.retainAll(null); + // We have to tolerate a successful return (Sun bug 4802647) + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. 
+ } + } + assertInvariants(map); + } + + public void testEntrySetClear() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + if (supportsClear) { + entrySet.clear(); + assertTrue(entrySet.isEmpty()); + } else { + try { + entrySet.clear(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetAddAndAddAll() { + final Map map = makeEitherMap(); + + Set> entrySet = map.entrySet(); + final Entry entryToAdd = mapEntry(null, null); + try { + entrySet.add(entryToAdd); + fail("Expected UnsupportedOperationException or NullPointerException."); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + assertInvariants(map); + + try { + entrySet.addAll(singleton(entryToAdd)); + fail("Expected UnsupportedOperationException or NullPointerException."); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + assertInvariants(map); + } + + public void testEntrySetSetValue() { + // TODO: Investigate the extent to which, in practice, maps that support + // put() also support Entry.setValue(). + if (!supportsPut) { + return; + } + + final Map map; + final V valueToSet; + try { + map = makePopulatedMap(); + valueToSet = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + Entry entry = entrySet.iterator().next(); + final V oldValue = entry.getValue(); + final V returnedValue = entry.setValue(valueToSet); + assertEquals(oldValue, returnedValue); + assertTrue(entrySet.contains(mapEntry(entry.getKey(), valueToSet))); + assertEquals(valueToSet, map.get(entry.getKey())); + assertInvariants(map); + } + + public void testEntrySetSetValueSameValue() { + // TODO: Investigate the extent to which, in practice, maps that support + // put() also support Entry.setValue(). 
+ if (!supportsPut) { + return; + } + + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + Entry entry = entrySet.iterator().next(); + final V oldValue = entry.getValue(); + final V returnedValue = entry.setValue(oldValue); + assertEquals(oldValue, returnedValue); + assertTrue(entrySet.contains(mapEntry(entry.getKey(), oldValue))); + assertEquals(oldValue, map.get(entry.getKey())); + assertInvariants(map); + } + + public void testEqualsForEqualMap() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + assertEquals(map, map); + assertEquals(makePopulatedMap(), map); + assertTrue(!map.equals(Collections.emptyMap())); + // no-inspection ObjectEqualsNull + assertTrue(!map.equals(null)); + } + + public void testEqualsForLargerMap() { + if (!supportsPut) { + return; + } + + final Map map; + final Map largerMap; + try { + map = makePopulatedMap(); + largerMap = makePopulatedMap(); + largerMap.put(getKeyNotInPopulatedMap(), getValueNotInPopulatedMap()); + } catch (UnsupportedOperationException e) { + return; + } + + assertTrue(!map.equals(largerMap)); + } + + public void testEqualsForSmallerMap() { + if (!supportsRemove) { + return; + } + + final Map map; + final Map smallerMap; + try { + map = makePopulatedMap(); + smallerMap = new LinkedHashMap(map); + // smallerMap = makePopulatedMap(); + smallerMap.remove(smallerMap.keySet().iterator().next()); + } catch (UnsupportedOperationException e) { + return; + } + + assertTrue(!map.equals(smallerMap)); + } + + public void testEqualsForEmptyMap() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + assertEquals(map, map); + assertEquals(makeEmptyMap(), map); + assertEquals(Collections.emptyMap(), map); + assertTrue(!map.equals(Collections.emptySet())); + // noinspection ObjectEqualsNull + assertTrue(!map.equals(null)); + } + + public void testGet() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + for (Entry entry : map.entrySet()) { + assertEquals(entry.getValue(), map.get(entry.getKey())); + } + + K unmappedKey = null; + try { + unmappedKey = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertNull(map.get(unmappedKey)); + } + + public void testGetForEmptyMap() { + final Map map; + K unmappedKey = null; + try { + map = makeEmptyMap(); + unmappedKey = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertNull(map.get(unmappedKey)); + } + + public void testGetNull() { + Map map = makeEitherMap(); + if (allowsNullKeys) { + if (allowsNullValues) { + // TODO: decide what to test here. 
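+ // (when null values are allowed, map.get(null) == null cannot
+ // distinguish "no mapping for null" from "null mapped to null",
+ // so there is no unambiguous assertion to make here)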
+ } else { + assertEquals(map.containsKey(null), map.get(null) != null); + } + } else { + try { + map.get(null); + } catch (NullPointerException optional) { + } + } + assertInvariants(map); + } + + public void testHashCode() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + } + + public void testHashCodeForEmptyMap() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + } + + public void testPutNewKey() { + final Map map = makeEitherMap(); + final K keyToPut; + final V valueToPut; + try { + keyToPut = getKeyNotInPopulatedMap(); + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + if (supportsPut) { + int initialSize = map.size(); + V oldValue = map.put(keyToPut, valueToPut); + assertEquals(valueToPut, map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(valueToPut)); + assertEquals(initialSize + 1, map.size()); + assertNull(oldValue); + } else { + try { + map.put(keyToPut, valueToPut); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testPutExistingKey() { + final Map map; + final K keyToPut; + final V valueToPut; + try { + map = makePopulatedMap(); + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToPut = map.keySet().iterator().next(); + if (supportsPut) { + int initialSize = map.size(); + map.put(keyToPut, valueToPut); + assertEquals(valueToPut, map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(valueToPut)); + assertEquals(initialSize, map.size()); + } else { + try { + map.put(keyToPut, valueToPut); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testPutNullKey() { + if (!supportsPut) { + return; + } + final Map map = makeEitherMap(); + final V valueToPut; + try { + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + if (allowsNullKeys) { + final V oldValue = map.get(null); + final V returnedValue = map.put(null, valueToPut); + assertEquals(oldValue, returnedValue); + assertEquals(valueToPut, map.get(null)); + assertTrue(map.containsKey(null)); + assertTrue(map.containsValue(valueToPut)); + } else { + try { + map.put(null, valueToPut); + fail("Expected RuntimeException"); + } catch (RuntimeException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testPutNullValue() { + if (!supportsPut) { + return; + } + final Map map = makeEitherMap(); + final K keyToPut; + try { + keyToPut = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + if (allowsNullValues) { + int initialSize = map.size(); + final V oldValue = map.get(keyToPut); + final V returnedValue = map.put(keyToPut, null); + assertEquals(oldValue, returnedValue); + assertNull(map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(null)); + assertEquals(initialSize + 1, map.size()); + } else { + try { + map.put(keyToPut, null); + fail("Expected RuntimeException"); + } catch (RuntimeException e) { + // Expected. 
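+        // (Catching RuntimeException rather than NullPointerException is
+        // presumably deliberate: it tolerates implementations that reject
+        // null values with a different unchecked exception type.)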
+ } + } + assertInvariants(map); + } + + public void testPutNullValueForExistingKey() { + if (!supportsPut) { + return; + } + final Map map; + final K keyToPut; + try { + map = makePopulatedMap(); + keyToPut = map.keySet().iterator().next(); + } catch (UnsupportedOperationException e) { + return; + } + if (allowsNullValues) { + int initialSize = map.size(); + final V oldValue = map.get(keyToPut); + final V returnedValue = map.put(keyToPut, null); + assertEquals(oldValue, returnedValue); + assertNull(map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(null)); + assertEquals(initialSize, map.size()); + } else { + try { + map.put(keyToPut, null); + fail("Expected RuntimeException"); + } catch (RuntimeException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testPutAllNewKey() { + final Map map = makeEitherMap(); + final K keyToPut; + final V valueToPut; + try { + keyToPut = getKeyNotInPopulatedMap(); + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + final Map mapToPut = Collections.singletonMap(keyToPut, valueToPut); + if (supportsPut) { + int initialSize = map.size(); + map.putAll(mapToPut); + assertEquals(valueToPut, map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(valueToPut)); + assertEquals(initialSize + 1, map.size()); + } else { + try { + map.putAll(mapToPut); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testPutAllExistingKey() { + final Map map; + final K keyToPut; + final V valueToPut; + try { + map = makePopulatedMap(); + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToPut = map.keySet().iterator().next(); + final Map mapToPut = Collections.singletonMap(keyToPut, valueToPut); + int initialSize = map.size(); + if (supportsPut) { + map.putAll(mapToPut); + assertEquals(valueToPut, map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(valueToPut)); + } else { + try { + map.putAll(mapToPut); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testRemove() { + final Map map; + final K keyToRemove; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToRemove = map.keySet().iterator().next(); + if (supportsRemove) { + int initialSize = map.size(); + V expectedValue = map.get(keyToRemove); + V oldValue = map.remove(keyToRemove); + assertEquals(expectedValue, oldValue); + assertTrue(!map.containsKey(keyToRemove)); + assertEquals(initialSize - 1, map.size()); + } else { + try { + map.remove(keyToRemove); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. 
+ } + } + assertInvariants(map); + } + + public void testRemoveMissingKey() { + final Map map; + final K keyToRemove; + try { + map = makePopulatedMap(); + keyToRemove = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + if (supportsRemove) { + int initialSize = map.size(); + assertNull(map.remove(keyToRemove)); + assertEquals(initialSize, map.size()); + } else { + try { + map.remove(keyToRemove); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testSize() { + assertInvariants(makeEitherMap()); + } + + public void testKeySetClear() { + final Map map; + try { + map = makeEitherMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set keySet = map.keySet(); + if (supportsClear) { + keySet.clear(); + assertTrue(keySet.isEmpty()); + } else { + try { + keySet.clear(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testKeySetRemoveAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set keySet = map.keySet(); + if (supportsRemove) { + try { + keySet.removeAll(null); + fail("Expected NullPointerException."); + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + keySet.removeAll(null); + fail("Expected UnsupportedOperationException or NullPointerException."); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testKeySetRetainAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set keySet = map.keySet(); + if (supportsRemove) { + try { + keySet.retainAll(null); + // Returning successfully is not ideal, but tolerated. + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + keySet.retainAll(null); + // We have to tolerate a successful return (Sun bug 4802647) + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValues() { + final Map map; + final Collection valueCollection; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + + valueCollection = map.values(); + final V unmappedValue; + try { + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + for (V value : valueCollection) { + assertTrue(!unmappedValue.equals(value)); + } + } + + public void testValuesIteratorRemove() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + Iterator iterator = valueCollection.iterator(); + if (supportsIteratorRemove) { + int initialSize = map.size(); + iterator.next(); + iterator.remove(); + assertEquals(initialSize - 1, map.size()); + // (We can't assert that the values collection no longer contains the + // removed value, because the underlying map can have multiple mappings + // to the same value.) + assertInvariants(map); + try { + iterator.remove(); + fail("Expected IllegalStateException."); + } catch (IllegalStateException e) { + // Expected. 
+ } + } else { + try { + iterator.next(); + iterator.remove(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValuesRemove() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + if (supportsRemove) { + int initialSize = map.size(); + valueCollection.remove(valueCollection.iterator().next()); + assertEquals(initialSize - 1, map.size()); + // (We can't assert that the values collection no longer contains the + // removed value, because the underlying map can have multiple mappings + // to the same value.) + } else { + try { + valueCollection.remove(valueCollection.iterator().next()); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValuesRemoveMissing() { + final Map map; + final V valueToRemove; + try { + map = makeEitherMap(); + valueToRemove = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + int initialSize = map.size(); + if (supportsRemove) { + assertTrue(!valueCollection.remove(valueToRemove)); + } else { + try { + assertTrue(!valueCollection.remove(valueToRemove)); + } catch (UnsupportedOperationException e) { + // Tolerated. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testValuesRemoveAll() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + Set valuesToRemove = singleton(valueCollection.iterator().next()); + if (supportsRemove) { + valueCollection.removeAll(valuesToRemove); + for (V value : valuesToRemove) { + assertTrue(!valueCollection.contains(value)); + } + for (V value : valueCollection) { + assertTrue(!valuesToRemove.contains(value)); + } + } else { + try { + valueCollection.removeAll(valuesToRemove); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValuesRemoveAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection values = map.values(); + if (supportsRemove) { + try { + values.removeAll(null); + // Returning successfully is not ideal, but tolerated. + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + values.removeAll(null); + // We have to tolerate a successful return (Sun bug 4802647) + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. 
+ } + } + assertInvariants(map); + } + + public void testValuesRetainAll() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + Set valuesToRetain = singleton(valueCollection.iterator().next()); + if (supportsRemove) { + valueCollection.retainAll(valuesToRetain); + for (V value : valuesToRetain) { + assertTrue(valueCollection.contains(value)); + } + for (V value : valueCollection) { + assertTrue(valuesToRetain.contains(value)); + } + } else { + try { + valueCollection.retainAll(valuesToRetain); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValuesRetainAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection values = map.values(); + if (supportsRemove) { + try { + values.retainAll(null); + // Returning successfully is not ideal, but tolerated. + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + values.retainAll(null); + // We have to tolerate a successful return (Sun bug 4802647) + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValuesClear() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + if (supportsClear) { + valueCollection.clear(); + assertTrue(valueCollection.isEmpty()); + } else { + try { + valueCollection.clear(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + private static Entry mapEntry(K key, V value) { + return Collections.singletonMap(key, value).entrySet().iterator().next(); + } +} Index: graph/src/test/java/org/apache/jdbm/Serialization2Test.java =================================================================== --- graph/src/test/java/org/apache/jdbm/Serialization2Test.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/Serialization2Test.java (Arbeitskopie) @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import java.io.IOException; +import java.io.Serializable; +import java.util.Map; + +public class Serialization2Test extends TestCaseWithTestFile { + + public void test2() throws IOException { + DB db = newDBNoCache(); + + Serialization2Bean processView = new Serialization2Bean(); + + Map map = db.createHashMap("test2"); + + map.put("abc", processView); + + db.commit(); + + Serialization2Bean retProcessView = (Serialization2Bean) map.get("abc"); + assertEquals(processView, retProcessView); + + db.close(); + } + + public void test3() throws IOException { + + String file = newTestFile(); + + Serialized2DerivedBean att = new Serialized2DerivedBean(); + DB db = DBMaker.openFile(file).disableCache().make(); + + Map map = db.createHashMap("test"); + + map.put("att", att); + db.commit(); + db.close(); + db = DBMaker.openFile(file).disableCache().make(); + map = db.getHashMap("test"); + + Serialized2DerivedBean retAtt = (Serialized2DerivedBean) map.get("att"); + assertEquals(att, retAtt); + } + + static class AAA implements Serializable { + String test = "aa"; + } + + public void testReopenWithDefrag() { + + String f = newTestFile(); + + DB db = DBMaker.openFile(f).disableTransactions().make(); + + Map map = db.createTreeMap("test"); + map.put(1, new AAA()); + + db.defrag(true); + db.close(); + + db = DBMaker.openFile(f).disableTransactions().make(); + + map = db.getTreeMap("test"); + assertNotNull(map.get(1)); + assertEquals(map.get(1).test, "aa"); + + db.close(); + } + +} Index: graph/src/test/java/org/apache/jdbm/UtilsTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/UtilsTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/UtilsTest.java (Arbeitskopie) @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import junit.framework.TestCase; + +public class UtilsTest extends TestCase { + + public void testFormatSpaceUsage() { + assertEquals("100B", JDBMUtils.formatSpaceUsage(100L)); + assertEquals("1024B", JDBMUtils.formatSpaceUsage(1024L)); + assertEquals("10KB", JDBMUtils.formatSpaceUsage(10024L)); + assertEquals("15MB", JDBMUtils.formatSpaceUsage(15000000)); + } +} Index: graph/src/test/java/org/apache/jdbm/ConcurrentBTreeReadTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/ConcurrentBTreeReadTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/ConcurrentBTreeReadTest.java (Arbeitskopie) @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; +import java.io.Serializable; +import java.util.Collections; +import java.util.Comparator; +import java.util.Random; + +public class ConcurrentBTreeReadTest extends TestCaseWithTestFile { + + public static class Dummy implements Serializable { + + private static final long serialVersionUID = -5567451291089724793L; + private long key; + @SuppressWarnings("unused") + private byte space[] = new byte[1024]; + + public Dummy() { + } + + public Dummy(long key) { + this.key = key; + } + + @Override + public int hashCode() { + return (int) key; + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof Dummy)) + return false; + Dummy other = (Dummy) obj; + if (key != other.key) + return false; + return true; + } + + } + + private DBAbstract db; + + private BTree btree; + + private int entries = 20000; + + private int readers = 5; + + public void setUp() throws Exception { + super.setUp(); + db = newDBCache(); + btree = BTree.createInstance(db, (Comparator) Collections.reverseOrder(), + null, null, true); + System.err.println(db.getClass()); + } + + public void testConcurrent() throws Exception { + Runnable read = new Runnable() { + + public void run() { + read(); + } + + }; + Thread t[] = new Thread[readers]; + int c = 0; + for (int i = 0; i < entries; i++) { + btree.insert((long) i, new Dummy(i), false); + if (i % 1000 == 0) { + System.err.println("count " + i); + commit(); + } + } + System.err.println("done!"); + commit(); + System.gc(); + Thread.sleep(1000); + + for (int i = 0; i < readers; i++) { + t[c++] = new Thread(read); + } + + System.err.println("start readers"); + long start = System.currentTimeMillis(); + for (int i = 0; i < t.length; i++) { + t[i].start(); + } + for (int i = 0; i < t.length; i++) { + t[i].join(); + } + long end = System.currentTimeMillis(); + System.err.println("done " + (end - start) + "ms"); + } + + private Object fetch(Long id) throws IOException { + try { + return btree.get(id); + } catch (IOException e) { + System.out.println("ERR " + id); + e.printStackTrace(); + return null; + } + } + + private void commit() throws IOException { + db.commit(); + } + + private void read() { + Random r = new Random(); + for (int i = 0; i < entries; i++) { + try { + fetch((long) r.nextInt(entries)); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + System.err.println("done read"); + } + +} Index: graph/src/test/java/org/apache/jdbm/LongHashMapTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/LongHashMapTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/LongHashMapTest.java (Arbeitskopie) @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.util.Iterator;
+import java.util.Random;
+import java.util.TreeMap;
+
+import junit.framework.TestCase;
+
+public class LongHashMapTest extends TestCase {
+
+  public void testAll() {
+    LongHashMap<String> t = new LongHashMap<String>();
+    t.put(1, "aa");
+    t.put(2, "bb");
+    t.put(2, "bb");
+    t.put(4, "cc");
+    t.put(9, "FF");
+    assertEquals(4, t.size());
+    t.remove(1);
+    assertEquals(3, t.size());
+    assertEquals(t.get(1), null);
+    assertEquals(t.get(2), "bb");
+    assertEquals(t.get(3), null);
+    assertEquals(t.get(4), "cc");
+    assertEquals(t.get(5), null);
+    assertEquals(t.get(-1), null);
+    assertEquals(t.get(9), "FF");
+
+    Iterator<String> vals = t.valuesIterator();
+    assertTrue(vals.hasNext());
+    assertEquals(vals.next(), "bb");
+    assertTrue(vals.hasNext());
+    assertEquals(vals.next(), "cc");
+    assertTrue(vals.hasNext());
+    assertEquals(vals.next(), "FF");
+
+    assertFalse(vals.hasNext());
+
+    t.clear();
+    assertEquals(0, t.size());
+    t.put(2, "bb");
+    assertEquals(1, t.size());
+    assertEquals(t.get(1), null);
+    assertEquals(t.get(2), "bb");
+    assertEquals(t.get(3), null);
+  }
+
+  public void testRandomCompare() {
+    LongHashMap<String> v1 = new LongHashMap<String>();
+    TreeMap<Long, String> v2 = new TreeMap<Long, String>();
+    Random d = new Random();
+    for (int i = 0; i < 1000; i++) {
+      long key = d.nextInt() % 100;
+      double random = d.nextDouble();
+      if (random < 0.8) {
+        // System.out.println("put " + key);
+        v1.put(key, "" + key);
+        v2.put(key, "" + key);
+      } else {
+        // System.out.println("remove " + key);
+        v1.remove(key);
+        v2.remove(key);
+      }
+      checkEquals(v1, v2);
+    }
+  }
+
+  public void checkEquals(LongHashMap<String> v1, TreeMap<Long, String> v2) {
+    assertEquals(v1.size(), v2.size());
+    for (long k : v2.keySet()) {
+      assertEquals(v1.get(k), v2.get(k));
+    }
+
+    int counter = 0;
+    Iterator<String> it = v1.valuesIterator();
+    while (it.hasNext()) {
+      String v = it.next();
+      long key = Long.valueOf(v);
+      assertEquals(v1.get(key), v);
+      assertEquals("" + key, v);
+      counter++;
+    }
+    assertEquals(counter, v2.size());
+  }
+
+  public void test2() {
+    LongHashMap<String> v1 = new LongHashMap<String>();
+    v1.put(1611, "1611");
+    v1.put(15500, "15500");
+    v1.put(9446, "9446");
+    System.out.println(v1.get(9446));
+    System.out.println(v1.toString());
+    assertEquals(3, v1.size());
+    assertEquals(v1.get(9446), "9446");
+  }
+
+  public void testMemoryConsumption() {
+    System.out.println("Memory available: "
+        + (Runtime.getRuntime().maxMemory() / 1e6) + "MB");
+    System.out.println("Memory used: "
+        + ((Runtime.getRuntime().totalMemory() - Runtime.getRuntime()
+            .freeMemory()) / 1e6) + "MB");
+    long counter = 0;
+    LongHashMap<String> e = new LongHashMap<String>();
+    // LongKeyChainedHashMap e = new LongKeyChainedHashMap();
+    // LongTreeMap e = new LongTreeMap();
+    while (counter < 1e6) {
+      counter++;
+      e.put(counter, "");
+    }
+    System.out.println(counter + " items");
+    System.out.println("Memory 
used: " + + ((Runtime.getRuntime().totalMemory() - Runtime.getRuntime() + .freeMemory()) / 1e6) + "MB"); + + } + +} Index: graph/src/test/java/org/apache/jdbm/BTreeMapNavigableSubMapInclusiveTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/BTreeMapNavigableSubMapInclusiveTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/BTreeMapNavigableSubMapInclusiveTest.java (Arbeitskopie) @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +public class BTreeMapNavigableSubMapInclusiveTest extends + BTreeMapNavigable2Test { + + @Override + public void setUp() throws Exception { + super.setUp(); + map.put(0, "zero"); + map.put(11, "eleven"); + map = map.subMap(1, true, 10, true); + } + + @Override + public void testPut() { + // this test is not run on submaps + } +} Index: graph/src/test/java/org/apache/jdbm/TestCaseWithTestFile.java =================================================================== --- graph/src/test/java/org/apache/jdbm/TestCaseWithTestFile.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/TestCaseWithTestFile.java (Arbeitskopie) @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.File; +import java.io.IOException; + +import junit.framework.TestCase; + +/** + * Subclass from this class if you have any test cases that need to do file I/O. + * The setUp() and tearDown() methods here will take care of cleanup on disk. 
+ */ +abstract class TestCaseWithTestFile extends TestCase { + + public static final String testFolder = System.getProperty("java.io.tmpdir", + ".") + "/_testdb"; + + // public static final String testFileName = "test"; + + public void setUp() throws Exception { + File f = new File(testFolder); + if (!f.exists()) + f.mkdirs(); + } + + public void tearDown() throws Exception { + File f = new File(testFolder); + if (f.exists()) { + for (File f2 : f.listFiles()) { + f2.deleteOnExit(); + f2.delete(); + } + } + } + + static public String newTestFile() { + return testFolder + File.separator + "test" + System.nanoTime(); + } + + static public PageFile newRecordFile() throws IOException { + return new PageFile(newTestFile()); + } + + static public DBAbstract newDBCache() throws IOException { + return (DBAbstract) DBMaker.openFile(newTestFile()).make(); + } + + static public DBStore newDBNoCache() throws IOException { + return (DBStore) DBMaker.openFile(newTestFile()).disableCache().make(); + } + +} Index: graph/src/test/java/org/apache/jdbm/BTreeNodeTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/BTreeNodeTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/BTreeNodeTest.java (Arbeitskopie) @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; + +/** + * This class contains all Unit tests for {@link BTreeNode}. 
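+ * Covers node construction, key insertion, and lookup via find().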
+ */ +public class BTreeNodeTest extends TestCaseWithTestFile { + + /** + * Basic tests + */ + public void testBasics() throws IOException { + DBAbstract db; + String test, test1, test2, test3; + + test = "test"; + test1 = "test1"; + test2 = "test2"; + test3 = "test3"; + + db = newDBCache(); + + BTree tree = BTree.createInstance(db); + + BTreeNode node = new BTreeNode(tree, test, test); + + BTree.BTreeTupleBrowser browser; + BTree.BTreeTuple tuple = new BTree.BTreeTuple(); + + // test insertion + node.insert(1, test2, test2, false); + node.insert(1, test3, test3, false); + node.insert(1, test1, test1, false); + + // test binary search + browser = node.find(1, test2, true); + if (browser.getNext(tuple) == false) { + throw new IllegalStateException("Browser didn't have 'test2'"); + } + if (!tuple.key.equals(test2)) { + throw new IllegalStateException("Tuple key is not 'test2'"); + } + if (!tuple.value.equals(test2)) { + throw new IllegalStateException("Tuple value is not 'test2'"); + } + + db.close(); + db = null; + } + +} Index: graph/src/test/java/org/apache/jdbm/HTreeTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/HTreeTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/HTreeTest.java (Arbeitskopie) @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; +import java.util.AbstractMap.SimpleEntry; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * This class contains all Unit tests for {@link HTree}. 
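+ * Covers value iteration, record listeners, and bulk insert/commit behaviour.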
+ */ +public class HTreeTest extends TestCaseWithTestFile { + + /** + * Basic tests + */ + public void testIterator() throws IOException { + + DBAbstract db = newDBCache(); + + HTree testTree = (HTree) db.createHashMap("tree"); + + int total = 10; + for (int i = 0; i < total; i++) { + testTree.put(Long.valueOf("" + i), Long.valueOf("" + i)); + } + db.commit(); + + Iterator fi = testTree.values().iterator(); + Object item; + int count = 0; + while (fi.hasNext()) { + fi.next(); + count++; + } + assertEquals(count, total); + + db.close(); + } + + public void testRecordListener() throws IOException { + DBAbstract db = newDBCache(); + HTree tree = (HTree) db.createHashMap("test"); + final List> dels = new ArrayList(); + final List> ins = new ArrayList(); + final List> updNew = new ArrayList(); + final List> updOld = new ArrayList(); + + tree.addRecordListener(new RecordListener() { + + public void recordUpdated(Integer key, String oldValue, String newValue) + throws IOException { + updOld.add(new SimpleEntry(key, oldValue)); + updNew.add(new SimpleEntry(key, newValue)); + } + + public void recordRemoved(Integer key, String value) throws IOException { + dels.add(new SimpleEntry(key, value)); + } + + public void recordInserted(Integer key, String value) throws IOException { + ins.add(new SimpleEntry(key, value)); + } + }); + + // test insert + tree.put(11, "aa11"); + tree.put(12, "aa12"); + assertTrue(ins.contains(new SimpleEntry(11, "aa11"))); + assertTrue(ins.contains(new SimpleEntry(12, "aa12"))); + assertTrue(ins.size() == 2); + ins.clear(); + assertTrue(dels.isEmpty()); + assertTrue(updNew.isEmpty()); + assertTrue(updOld.isEmpty()); + + // test update + tree.put(12, "aa123"); + assertTrue(ins.isEmpty()); + assertTrue(dels.isEmpty()); + assertTrue(updOld.contains(new SimpleEntry(12, "aa12"))); + assertTrue(updOld.size() == 1); + updOld.clear(); + assertTrue(updNew.contains(new SimpleEntry(12, "aa123"))); + assertTrue(updNew.size() == 1); + updNew.clear(); + + // test remove + tree.remove(11); + assertTrue(dels.contains(new SimpleEntry(11, "aa11"))); + assertTrue(dels.size() == 1); + dels.clear(); + assertTrue(ins.isEmpty()); + assertTrue(updOld.isEmpty()); + assertTrue(updNew.isEmpty()); + + } + + public void testIssue() { + int size = 100000; + int commitSize = 100000; + DB build = DBMaker.openFile(newTestFile()).setMRUCacheSize(100).make(); + Map hashMap = build.createHashMap("hashMap"); + for (int i = 0; i < size; i++) { + hashMap.put(i + "asdddfdgf" + i + "sddfdfsf" + i, "dsfgfg.dfcdfsgfg"); + if (i % commitSize == 0) { + build.commit(); + } + } + build.commit(); + build.calculateStatistics(); + build.close(); + } + +} Index: graph/src/test/java/org/apache/jdbm/BTreeMapNavigableSubMapExclusiveTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/BTreeMapNavigableSubMapExclusiveTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/BTreeMapNavigableSubMapExclusiveTest.java (Arbeitskopie) @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +public class BTreeMapNavigableSubMapExclusiveTest extends + BTreeMapNavigable2Test { + + @Override + public void setUp() throws Exception { + super.setUp(); + map.put(-1, "-one"); + map.put(0, "zero"); + map.put(11, "eleven"); + map.put(12, "twelve"); + map = map.subMap(0, false, 11, false); + } + + @Override + public void testPut() { + // this test is not run on submaps + } +} Index: graph/src/test/java/org/apache/jdbm/DBCacheTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/DBCacheTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/DBCacheTest.java (Arbeitskopie) @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.jdbm;
+
+import java.util.Map;
+import java.util.Set;
+
+public class DBCacheTest extends TestCaseWithTestFile {
+
+  // https://github.com/jankotek/JDBM3/issues/11
+  public void test_Issue_11_soft_cache_record_disappear() {
+    long MAX = (long) 1e6;
+
+    String file = newTestFile();
+    DB d = DBMaker.openFile(file).disableTransactions().enableSoftCache()
+        .make();
+
+    Set<Integer> set = d.createHashSet("1");
+
+    for (Integer i = 0; i < MAX; i++) {
+      set.add(i);
+    }
+
+    d.close();
+
+    d = DBMaker.openFile(file).disableTransactions().enableSoftCache().make();
+
+    set = d.getHashSet("1");
+    for (Integer i = 0; i < MAX; i++) {
+      assertTrue(set.contains(i));
+    }
+    d.close();
+  }
+
+  public void test_issue_xyz() {
+    org.apache.jdbm.DB db = DBMaker.openFile(newTestFile()).enableSoftCache()
+        .make();
+    Map<String, String> m = db.createTreeMap("test");
+
+    for (int i = 0; i < 1e5; i++) {
+      m.put("test" + i, "test" + i);
+    }
+    db.close();
+    //
+    // The problem was in the cache layer; before the fix the put() above threw:
+    // java.lang.IllegalArgumentException: Argument 'recid' is invalid: 0
+    // at org.apache.jdbm.DBStore.fetch(DBStore.java:356)
+    // at org.apache.jdbm.DBCache.fetch(DBCache.java:292)
+    // at org.apache.jdbm.BTreeNode.loadNode(BTreeNode.java:833)
+    // at org.apache.jdbm.BTreeNode.insert(BTreeNode.java:391)
+    // at org.apache.jdbm.BTreeNode.insert(BTreeNode.java:392)
+    // at org.apache.jdbm.BTreeNode.insert(BTreeNode.java:392)
+    // at org.apache.jdbm.BTree.insert(BTree.java:281)
+    // at org.apache.jdbm.BTreeMap.put(BTreeMap.java:285)
+    // at org.apache.jdbm.DBCacheTest.test_issue_xyz(DBCacheTest.java:48)
+    //
+  }
+}
Index: graph/src/test/java/org/apache/jdbm/BTreeSetTest.java
===================================================================
--- graph/src/test/java/org/apache/jdbm/BTreeSetTest.java (Revision 0)
+++ graph/src/test/java/org/apache/jdbm/BTreeSetTest.java (Arbeitskopie)
@@ -0,0 +1,316 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.SortedSet;
+
+/**
+ * Tests for TreeSet which comes with JDBM.
Original code comes from Apache + * Harmony, Modified by Jan Kotek for use in JDBM + */ +public class BTreeSetTest extends TestCaseWithTestFile { + + private DB db; + + public static class ReversedIntegerComparator implements Comparator, + Serializable { + public int compare(Object o1, Object o2) { + return -(((Integer) o1).compareTo((Integer) o2)); + } + + public boolean equals(Object o1, Object o2) { + return ((Integer) o1).compareTo((Integer) o2) == 0; + } + + } + + SortedSet ts; + + Object objArray[] = new Object[1000]; + + /** + * @tests java.util.TreeSet#TreeSet() + */ + public void test_Constructor() { + // Test for method java.util.TreeSet() + assertTrue("Did not construct correct TreeSet", db.createTreeSet("test") + .isEmpty()); + } + + /** + * @tests java.util.TreeSet#TreeSet(java.util.Comparator) + */ + public void test_ConstructorLjava_util_Comparator() { + // Test for method java.util.TreeSet(java.util.Comparator) + SortedSet myTreeSet = db.createTreeSet("test", + new ReversedIntegerComparator(), null); + assertTrue("Did not construct correct TreeSet", myTreeSet.isEmpty()); + myTreeSet.add(new Integer(1)); + myTreeSet.add(new Integer(2)); + assertTrue( + "Answered incorrect first element--did not use custom comparator ", + myTreeSet.first().equals(new Integer(2))); + assertTrue( + "Answered incorrect last element--did not use custom comparator ", + myTreeSet.last().equals(new Integer(1))); + } + + /** + * @tests java.util.TreeSet#TreeSet(java.util.SortedSet) + */ + public void test_ConstructorLjava_util_SortedSet() { + // Test for method java.util.TreeSet(java.util.SortedSet) + ReversedIntegerComparator comp = new ReversedIntegerComparator(); + SortedSet myTreeSet = db.createTreeSet("test", comp, null); + for (int i = 0; i < objArray.length; i++) + myTreeSet.add(objArray[i]); + SortedSet anotherTreeSet = db.getTreeSet("test"); + anotherTreeSet.addAll(myTreeSet); + assertTrue("TreeSet is not correct size", + anotherTreeSet.size() == objArray.length); + for (int counter = 0; counter < objArray.length; counter++) + assertTrue("TreeSet does not contain correct elements", + anotherTreeSet.contains(objArray[counter])); + assertEquals("TreeSet does not answer correct comparator", anotherTreeSet + .comparator().getClass(), comp.getClass()); + assertEquals("TreeSet does not use comparator", anotherTreeSet.first(), + objArray[objArray.length - 1]); + } + + /** + * @tests java.util.TreeSet#add(java.lang.Object) + */ + public void test_addLjava_lang_Object() { + // Test for method boolean java.util.TreeSet.add(java.lang.Object) + ts.add(new Integer(-8)); + assertTrue("Failed to add Object", ts.contains(new Integer(-8))); + ts.add(objArray[0]); + assertTrue("Added existing element", ts.size() == objArray.length + 1); + + } + + /** + * @tests java.util.TreeSet#addAll(java.util.Collection) + */ + public void test_addAllLjava_util_Collection() { + // Test for method boolean + // java.util.TreeSet.addAll(java.util.Collection) + SortedSet s = db.createTreeSet("test"); + s.addAll(ts); + assertTrue("Incorrect size after add", s.size() == ts.size()); + Iterator i = ts.iterator(); + while (i.hasNext()) + assertTrue("Returned incorrect set", s.contains(i.next())); + + } + + /** + * @tests java.util.TreeSet#clear() + */ + public void test_clear() { + // Test for method void java.util.TreeSet.clear() + ts.clear(); + assertEquals("Returned non-zero size after clear", 0, ts.size()); + assertTrue("Found element in cleared set", !ts.contains(objArray[0])); + } + + /** + * @tests 
java.util.TreeSet#comparator() + */ + public void test_comparator() { + // Test for method java.util.Comparator java.util.TreeSet.comparator() + ReversedIntegerComparator comp = new ReversedIntegerComparator(); + SortedSet myTreeSet = db.createTreeSet("test", comp, null); + assertTrue("Answered incorrect comparator", myTreeSet.comparator() == comp); + } + + /** + * @tests java.util.TreeSet#contains(java.lang.Object) + */ + public void test_containsLjava_lang_Object() { + // Test for method boolean java.util.TreeSet.contains(java.lang.Object) + assertTrue("Returned false for valid Object", + ts.contains(objArray[objArray.length / 2])); + assertTrue("Returned true for invalid Object", + !ts.contains(new Integer(-9))); + } + + /** + * @tests java.util.TreeSet#first() + */ + public void test_first() { + + // Test for method java.lang.Object java.util.TreeSet.first() + assertEquals("Returned incorrect first element", ts.first(), objArray[0]); + } + + /** + * @tests java.util.TreeSet#headSet(java.lang.Object) + */ + public void test_headSetLjava_lang_Object() { + // Test for method java.util.SortedSet + // java.util.TreeSet.headSet(java.lang.Object) + Set s = ts.headSet(new Integer(100)); + assertEquals("Returned set of incorrect size", 100, s.size()); + for (int i = 0; i < 100; i++) + assertTrue("Returned incorrect set", s.contains(objArray[i])); + } + + /** + * @tests java.util.TreeSet#isEmpty() + */ + public void test_isEmpty() { + // Test for method boolean java.util.TreeSet.isEmpty() + assertTrue("Empty set returned false", db.createTreeSet("test").isEmpty()); + assertTrue("Non-Empty returned true", !ts.isEmpty()); + } + + /** + * @tests java.util.TreeSet#iterator() + */ + public void test_iterator() { + // Test for method java.util.Iterator java.util.TreeSet.iterator() + SortedSet s = db.createTreeSet("test"); + s.addAll(ts); + Iterator i = ts.iterator(); + Set as = new HashSet(Arrays.asList(objArray)); + while (i.hasNext()) + as.remove(i.next()); + assertEquals("Returned incorrect iterator", 0, as.size()); + + } + + /** + * @tests java.util.TreeSet#last() + */ + public void test_last() { + // Test for method java.lang.Object java.util.TreeSet.last() + assertEquals("Returned incorrect last element", ts.last(), + objArray[objArray.length - 1]); + } + + /** + * @tests java.util.TreeSet#remove(java.lang.Object) + */ + public void test_removeLjava_lang_Object() { + // Test for method boolean java.util.TreeSet.remove(java.lang.Object) + ts.remove(objArray[0]); + assertTrue("Failed to remove object", !ts.contains(objArray[0])); + assertTrue("Failed to change size after remove", + ts.size() == objArray.length - 1); + } + + /** + * @tests java.util.TreeSet#size() + */ + public void test_size() { + // Test for method int java.util.TreeSet.size() + assertTrue("Returned incorrect size", ts.size() == objArray.length); + } + + /** + * @tests java.util.TreeSet#subSet(java.lang.Object, java.lang.Object) + */ + public void test_subSetLjava_lang_ObjectLjava_lang_Object() { + // Test for method java.util.SortedSet + // java.util.TreeSet.subSet(java.lang.Object, java.lang.Object) + final int startPos = objArray.length / 4; + final int endPos = 3 * objArray.length / 4; + SortedSet aSubSet = ts.subSet(objArray[startPos], objArray[endPos]); + assertTrue("Subset has wrong number of elements", + aSubSet.size() == (endPos - startPos)); + for (int counter = startPos; counter < endPos; counter++) + assertTrue("Subset does not contain all the elements it should", + aSubSet.contains(objArray[counter])); + + int 
result; + try { + ts.subSet(objArray[3], objArray[0]); + result = 0; + } catch (IllegalArgumentException e) { + result = 1; + } + assertEquals("end less than start should throw", 1, result); + } + + /** + * @tests java.util.TreeSet#tailSet(java.lang.Object) + */ + public void test_tailSetLjava_lang_Object() { + // Test for method java.util.SortedSet + // java.util.TreeSet.tailSet(java.lang.Object) + Set s = ts.tailSet(new Integer(900)); + assertEquals("Returned set of incorrect size", 100, s.size()); + for (int i = 900; i < objArray.length; i++) + assertTrue("Returned incorrect set", s.contains(objArray[i])); + } + + /** + * Tests equals() method. Tests that no ClassCastException will be thrown in + * all cases. Regression test for HARMONY-1639. + */ + public void test_equals() throws Exception { + // comparing TreeSets with different object types + Set s1 = db.createTreeSet("test1"); + Set s2 = db.createTreeSet("test2"); + s1.add("key1"); + s1.add("key2"); + s2.add(new Integer(1)); + s2.add(new Integer(2)); + assertFalse("Sets should not be equal 1", s1.equals(s2)); + assertFalse("Sets should not be equal 2", s2.equals(s1)); + + // comparing TreeSet with HashSet + s1 = db.createTreeSet("test"); + s2 = new HashSet(); + s1.add("key"); + s2.add(new Object()); + assertFalse("Sets should not be equal 3", s1.equals(s2)); + assertFalse("Sets should not be equal 4", s2.equals(s1)); + + } + + /** + * Sets up the fixture, for example, open a network connection. This method is + * called before a test is executed. + */ + public void setUp() throws Exception { + super.setUp(); + db = newDBNoCache(); + ts = db.createTreeSet("testBTreeSet"); + for (int i = 0; i < objArray.length; i++) { + Object x = objArray[i] = new Integer(i); + ts.add(x); + } + } + + /** + * Tears down the fixture, for example, close a network connection. This + * method is called after a test is executed. + */ + public void tearDown() throws Exception { + db.close(); + super.tearDown(); + } +} Index: graph/src/test/java/org/apache/jdbm/PhysicalFreeRowIdManagerTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/PhysicalFreeRowIdManagerTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/PhysicalFreeRowIdManagerTest.java (Arbeitskopie) @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; + +/** + * This class contains all Unit tests for {@link PhysicalFreeRowIdManager}. 
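+ * Covers construction, allocation from an empty store, root-page round trips,
+ * and free-record reallocation.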
+ */ +public class PhysicalFreeRowIdManagerTest extends TestCaseWithTestFile { + + /** + * Test constructor + */ + public void testCtor() throws Exception { + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + PhysicalFreeRowIdManager freeMgr = new PhysicalFreeRowIdManager(f, pm); + + pm.close(); + f.close(); + } + + /** + * Test basics + */ + public void testBasics() throws Exception { + + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + PhysicalFreeRowIdManager freeMgr = new PhysicalFreeRowIdManager(f, pm); + + // allocate 10,000 bytes - should fail on an empty file. + long loc = freeMgr.getFreeRecord(10000); + assertTrue("loc is not null?", loc == 0); + + pm.close(); + f.close(); + } + + public void testPhysRecRootPage() throws IOException { + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + + long pageid = pm.allocate(Magic.FREEPHYSIDS_ROOT_PAGE); + PageIo p = f.get(pageid); + p.writeInt(100, 100); + f.release(p); + pm.commit(); + f.commit(); + + p = f.get(pageid); + assertEquals(p.readInt(100), 100); + + } + + public void test_size_to_root_offset() { + for (int i = 1; i < PhysicalFreeRowIdManager.MAX_REC_SIZE; i++) { + int offset = PhysicalFreeRowIdManager.sizeToRootOffset(i); + + assertTrue(offset <= Storage.PAGE_SIZE); + } + } + + public void test_record_reallocation() throws IOException { + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + PhysicalFreeRowIdManager freeMgr = new PhysicalFreeRowIdManager(f, pm); + + freeMgr.putFreeRecord(1000, 100); + freeMgr.commit(); + + assertEquals(1000, + freeMgr.getFreeRecord(100 - PhysicalFreeRowIdManager.ROOT_SLOT_SIZE)); + assertEquals(0, + freeMgr.getFreeRecord(100 - PhysicalFreeRowIdManager.ROOT_SLOT_SIZE)); + + } + + public void test_all_sizes_deallocation() throws IOException { + PageFile f = newRecordFile(); + PageManager pm = new PageManager(f); + PhysicalFreeRowIdManager freeMgr = new PhysicalFreeRowIdManager(f, pm); + + for (int i = 1; i < PhysicalFreeRowIdManager.MAX_REC_SIZE; i++) { + + freeMgr.putFreeRecord(1000, i); + freeMgr.commit(); + + assertEquals(1000, + freeMgr.getFreeRecord(i - PhysicalFreeRowIdManager.ROOT_SLOT_SIZE)); + assertEquals(0, + freeMgr.getFreeRecord(i - PhysicalFreeRowIdManager.ROOT_SLOT_SIZE)); + } + pm.close(); + f.close(); + + } + +} Index: graph/src/test/java/org/apache/jdbm/TestInsertPerf.java =================================================================== --- graph/src/test/java/org/apache/jdbm/TestInsertPerf.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/TestInsertPerf.java (Arbeitskopie) @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; + +/** + * Test BTree insert performance. 
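+ * Inserts _numberOfObjects sequential keys and prints the elapsed wall-clock
+ * time; this is a smoke benchmark rather than an asserting unit test.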
+ */ +public class TestInsertPerf extends TestCaseWithTestFile { + + int _numberOfObjects = 1000; + + public void testInsert() throws IOException { + + long start, finish; + DBAbstract db = newDBCache(); + BTree btree = BTree.createInstance(db); + + // Note: One can use specialized serializers for better performance / + // database size + // btree = BTree.createInstance( db, new LongComparator(), + // LongSerializer.INSTANCE, IntegerSerializer.INSTANCE ); + + start = System.currentTimeMillis(); + for (int i = 0; i < _numberOfObjects; i++) { + btree.insert(new Long(i), new Integer(i), false); + } + db.commit(); + finish = System.currentTimeMillis(); + + System.out.println("It took " + (finish - start) + " ms to insert " + + _numberOfObjects + " objects."); + + } + +} Index: graph/src/test/java/org/apache/jdbm/HTreeSetTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/HTreeSetTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/HTreeSetTest.java (Arbeitskopie) @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.util.Iterator; +import java.util.Set; + +/** + * Tests for HashSet which comes with JDBM. 
Original code comes from Apache + * Harmony, Modified by Jan Kotek for use in JDBM + */ +public class HTreeSetTest extends TestCaseWithTestFile { + + Set hs; + DB db; + + static Object[] objArray; + + { + objArray = new Object[1000]; + for (int i = 0; i < objArray.length; i++) + objArray[i] = new Integer(i); + } + + /** + * @tests java.util.HashSet#HashSet() + */ + public void test_Constructor() { + // Test for method java.util.HashSet() + Set hs2 = db.createHashSet("secondHashSet", null); + assertEquals("Created incorrect HashSet", 0, hs2.size()); + } + + /** + * @tests java.util.HashSet#add(java.lang.Object) + */ + public void test_addLjava_lang_Object() { + // Test for method boolean java.util.HashSet.add(java.lang.Object) + int size = hs.size(); + hs.add(new Integer(8)); + assertTrue("Added element already contained by set", hs.size() == size); + hs.add(new Integer(-9)); + assertTrue("Failed to increment set size after add", hs.size() == size + 1); + assertTrue("Failed to add element to set", hs.contains(new Integer(-9))); + } + + /** + * @tests java.util.HashSet#clear() + */ + public void test_clear() { + // Test for method void java.util.HashSet.clear() + Set orgSet = new java.util.HashSet(hs); + hs.clear(); + Iterator i = orgSet.iterator(); + assertEquals("Returned non-zero size after clear", 0, hs.size()); + while (i.hasNext()) + assertTrue("Failed to clear set", !hs.contains(i.next())); + } + + /** + * @tests java.util.HashSet#contains(java.lang.Object) + */ + public void test_containsLjava_lang_Object() { + // Test for method boolean java.util.HashSet.contains(java.lang.Object) + assertTrue("Returned false for valid object", hs.contains(objArray[90])); + assertTrue("Returned true for invalid Object", !hs.contains(new Object())); + + } + + /** + * @tests java.util.HashSet#isEmpty() + */ + public void test_isEmpty() { + // Test for method boolean java.util.HashSet.isEmpty() + assertTrue("Empty set returned false", + db.createHashSet("secondHashSet", null).isEmpty()); + assertTrue("Non-empty set returned true", !hs.isEmpty()); + } + + /** + * @tests java.util.HashSet#iterator() + */ + public void test_iterator() { + // Test for method java.util.Iterator java.util.HashSet.iterator() + Iterator i = hs.iterator(); + int x = 0; + while (i.hasNext()) { + assertTrue("Failed to iterate over all elements", hs.contains(i.next())); + ++x; + } + assertTrue("Returned iteration of incorrect size", hs.size() == x); + + } + + /** + * @tests java.util.HashSet#remove(java.lang.Object) + */ + public void test_removeLjava_lang_Object() { + // Test for method boolean java.util.HashSet.remove(java.lang.Object) + int size = hs.size(); + hs.remove(new Integer(98)); + assertTrue("Failed to remove element", !hs.contains(new Integer(98))); + assertTrue("Failed to decrement set size", hs.size() == size - 1); + + } + + /** + * @tests java.util.HashSet#size() + */ + public void test_size() { + // Test for method int java.util.HashSet.size() + assertTrue("Returned incorrect size", hs.size() == (objArray.length)); + hs.clear(); + assertEquals("Cleared set returned non-zero size", 0, hs.size()); + } + + /** + * Sets up the fixture, for example, open a network connection. This method is + * called before a test is executed. + */ + public void setUp() throws Exception { + super.setUp(); + db = newDBNoCache(); + hs = db.createHashSet("testHashSet", null); + for (int i = 0; i < objArray.length; i++) + hs.add(objArray[i]); + } + + /** + * Tears down the fixture, for example, close a network connection. 
This + * method is called after a test is executed. + */ + public void tearDown() throws Exception { + db.close(); + super.tearDown(); + } + + public void testContains() { + + } + +} Index: graph/src/test/java/org/apache/jdbm/TestLargeData.java =================================================================== --- graph/src/test/java/org/apache/jdbm/TestLargeData.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/TestLargeData.java (Arbeitskopie) @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; +import java.util.Arrays; + +public class TestLargeData extends TestCaseWithTestFile { + + public void testLargeData() throws IOException { + + DBAbstract db = new DBStore(newTestFile(), false, false, false); + + byte[] data = UtilTT.makeRecord(1000000, (byte) 12); + final long id = db.insert(data); + data = (byte[]) db.fetch(id); + UtilTT.checkRecord(data, 1000000, (byte) 12); + db.commit(); + + data = UtilTT.makeRecord(2000000, (byte) 13); + db.update(id, data); + db.commit(); + data = (byte[]) db.fetch(id); + UtilTT.checkRecord(data, 2000000, (byte) 13); + db.commit(); + + data = UtilTT.makeRecord(1500000, (byte) 14); + db.update(id, data); + data = (byte[]) db.fetch(id); + UtilTT.checkRecord(data, 1500000, (byte) 14); + db.commit(); + + data = UtilTT.makeRecord(2500000, (byte) 15); + db.update(id, data); + db.rollback(); + data = (byte[]) db.fetch(id); + UtilTT.checkRecord(data, 1500000, (byte) 14); + db.commit(); + + data = UtilTT.makeRecord(1, (byte) 20); + db.update(id, data); + data = (byte[]) db.fetch(id); + UtilTT.checkRecord(data, 1, (byte) 20); + db.commit(); + } + + public void testAllSizes() throws IOException { + // use in memory store to make it faster + DBStore db = (DBStore) DBMaker.openFile(newTestFile()).disableCache() + .disableTransactions().make(); + for (int i = 1; i < RecordHeader.MAX_RECORD_SIZE - 100; i += 111111) { + // System.out.println(i); + byte[] rec = UtilTT.makeRecord(i, (byte) 11); + long recid = db.insert(rec); + byte[] rec2 = db.fetch(recid); + assertTrue("error at size: " + i, Arrays.equals(rec, rec2)); + db.delete(recid); + } + } + +} Index: graph/src/test/java/org/apache/jdbm/Serialization2Bean.java =================================================================== --- graph/src/test/java/org/apache/jdbm/Serialization2Bean.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/Serialization2Bean.java (Arbeitskopie) @@ -0,0 +1,113 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
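TestLargeData above also pins down the transactional contract: an update followed by rollback() leaves the last committed version of the record visible. The same round trip in isolation (store path hypothetical; the four DBStore constructor flags are copied from the test, their meanings are not shown in this patch):

package org.apache.jdbm;

import java.io.IOException;

public class RollbackSketch {

  public static void main(String[] args) throws IOException {
    DBStore db = new DBStore("/tmp/example-store", false, false, false);
    long id = db.insert("committed");
    db.commit();
    db.update(id, "uncommitted");
    db.rollback(); // discards everything since the last commit
    System.out.println(db.fetch(id)); // prints "committed"
    db.close();
  }
}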
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.Serializable; + +public class Serialization2Bean implements Serializable { + // =========================== Constants =============================== + private static final long serialVersionUID = 2757814409580877461L; + + // =========================== Attributes ============================== + private String id = "test"; + private String f1 = ""; + private String f2 = ""; + private String f3 = null; + private String f4 = ""; + private String f5 = null; + private String f6 = ""; + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((f1 == null) ? 0 : f1.hashCode()); + result = prime * result + ((f2 == null) ? 0 : f2.hashCode()); + result = prime * result + ((f3 == null) ? 0 : f3.hashCode()); + result = prime * result + ((f4 == null) ? 0 : f4.hashCode()); + result = prime * result + ((f5 == null) ? 0 : f5.hashCode()); + result = prime * result + ((f6 == null) ? 0 : f6.hashCode()); + result = prime * result + ((id == null) ? 0 : id.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Serialization2Bean other = (Serialization2Bean) obj; + if (f1 == null) { + if (other.f1 != null) { + return false; + } + } else if (!f1.equals(other.f1)) { + return false; + } + if (f2 == null) { + if (other.f2 != null) { + return false; + } + } else if (!f2.equals(other.f2)) { + return false; + } + if (f3 == null) { + if (other.f3 != null) { + return false; + } + } else if (!f3.equals(other.f3)) { + return false; + } + if (f4 == null) { + if (other.f4 != null) { + return false; + } + } else if (!f4.equals(other.f4)) { + return false; + } + if (f5 == null) { + if (other.f5 != null) { + return false; + } + } else if (!f5.equals(other.f5)) { + return false; + } + if (f6 == null) { + if (other.f6 != null) { + return false; + } + } else if (!f6.equals(other.f6)) { + return false; + } + if (id == null) { + if (other.id != null) { + return false; + } + } else if (!id.equals(other.id)) { + return false; + } + return true; + } + +} Index: graph/src/test/java/org/apache/jdbm/ByteArrayComparator.java =================================================================== --- graph/src/test/java/org/apache/jdbm/ByteArrayComparator.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/ByteArrayComparator.java (Arbeitskopie) @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.Serializable; +import java.util.Comparator; + +/** + * Comparator for byte arrays. + * + */ +public final class ByteArrayComparator implements Comparator<byte[]>, + Serializable { + + /** + * Version id for serialization. + */ + final static long serialVersionUID = 1L; + + /** + * Compare two objects. + * + * @param obj1 First object + * @param obj2 Second object + * @return a positive integer if obj1 > obj2, 0 if obj1 == obj2, and a + * negative integer if obj1 < obj2 + */ + public int compare(byte[] obj1, byte[] obj2) { + if (obj1 == null) { + throw new IllegalArgumentException("Argument 'obj1' is null"); + } + + if (obj2 == null) { + throw new IllegalArgumentException("Argument 'obj2' is null"); + } + + return compareByteArray(obj1, obj2); + } + + /** + * Compare two byte arrays. + */ + public static int compareByteArray(byte[] thisKey, byte[] otherKey) { + int len = Math.min(thisKey.length, otherKey.length); + + // compare the byte arrays + for (int i = 0; i < len; i++) { + if (thisKey[i] >= 0) { + if (otherKey[i] >= 0) { + // both positive + if (thisKey[i] < otherKey[i]) { + return -1; + } else if (thisKey[i] > otherKey[i]) { + return 1; + } + } else { + // otherKey is negative => greater (because MSB is 1) + return -1; + } + } else { + if (otherKey[i] >= 0) { + // thisKey is negative => greater (because MSB is 1) + return 1; + } else { + // both negative + if (thisKey[i] < otherKey[i]) { + return -1; + } else if (thisKey[i] > otherKey[i]) { + return 1; + } + } + } + } + if (thisKey.length == otherKey.length) { + return 0; + } + if (thisKey.length < otherKey.length) { + return -1; + } + return 1; + } + +} Index: graph/src/test/java/org/apache/jdbm/BTreeKeyCompressionTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/BTreeKeyCompressionTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/BTreeKeyCompressionTest.java (Arbeitskopie) @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
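The sign-handling branches in compareByteArray above implement plain unsigned lexicographic order; a negative Java byte has its most significant bit set, so it represents the larger unsigned value. An equivalent, more compact sketch (hypothetical helper, not part of the patch) that masks each byte to its unsigned value:

public class UnsignedCompareSketch {

  // equivalent to ByteArrayComparator.compareByteArray: (b & 0xFF) yields the
  // unsigned value 0..255, so one comparison replaces the four sign branches
  public static int compareUnsigned(byte[] a, byte[] b) {
    int len = Math.min(a.length, b.length);
    for (int i = 0; i < len; i++) {
      int x = a[i] & 0xFF;
      int y = b[i] & 0xFF;
      if (x != y) {
        return x < y ? -1 : 1;
      }
    }
    // equal prefix: the shorter array sorts first
    return Integer.signum(a.length - b.length);
  }
}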
+ */ +package org.apache.jdbm; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.SortedMap; + +public class BTreeKeyCompressionTest extends TestCaseWithTestFile { + + static final long size = (long) 1e5; + + public void testExpand() throws IOException { + long init = Long.MAX_VALUE - size * 2; + String file = newTestFile(); + DB db = new DBStore(file, false, false, false); + SortedMap map = db.createTreeMap("aa"); + for (long i = init; i < init + size; i++) { + map.put(i, ""); + } + db.commit(); + db.defrag(true); + db.close(); + long fileSize = new File(file + ".dbr.0").length() / 1024; + System.out.println("file size: " + fileSize); + assertTrue("file is too big, compression failed", fileSize < 1000); + } + + public void testCornersLimitsLong() throws IOException { + DB db = newDBCache(); + SortedMap map = db.createTreeMap("aa"); + ArrayList ll = new ArrayList(); + for (Long i = Long.MIN_VALUE; i < Long.MIN_VALUE + 1000; i++) { + map.put(i, ""); + ll.add(i); + } + for (Long i = -1000l; i < 1000; i++) { + map.put(i, ""); + ll.add(i); + } + for (Long i = Long.MAX_VALUE - 1000; i <= Long.MAX_VALUE && i > 0; i++) { + map.put(i, ""); + ll.add(i); + } + + db.commit(); + + db.clearCache(); + for (Long i : ll) { + assertTrue("failed for " + i, map.containsKey(i)); + } + + assertTrue(!map.containsKey(Long.valueOf(Long.MIN_VALUE + 1000))); + assertTrue(!map.containsKey(Long.valueOf(Long.MIN_VALUE + 1001))); + assertTrue(!map.containsKey(Long.valueOf(-1001L))); + assertTrue(!map.containsKey(Long.valueOf(-1002L))); + assertTrue(!map.containsKey(Long.valueOf(1001L))); + assertTrue(!map.containsKey(Long.valueOf(1002L))); + assertTrue(!map.containsKey(Long.valueOf(Long.MAX_VALUE - 1001))); + assertTrue(!map.containsKey(Long.valueOf(Long.MAX_VALUE - 1002))); + + db.close(); + } + + public void testCornersLimitsInt() throws IOException { + DB db = newDBCache(); + SortedMap map = db.createTreeMap("aa"); + ArrayList ll = new ArrayList(); + for (Integer i = Integer.MIN_VALUE; i < Integer.MIN_VALUE + 1000; i++) { + map.put(new Integer(i), ""); + ll.add(new Integer(i)); + } + for (Integer i = -1000; i < 1000; i++) { + map.put(i, ""); + ll.add(i); + } + for (Integer i = Integer.MAX_VALUE - 1000; i <= Integer.MAX_VALUE && i > 0; i++) { + map.put(i, ""); + ll.add(i); + } + + db.commit(); + + db.clearCache(); + for (Integer i : ll) { + assertTrue("failed for " + i, map.containsKey(i)); + } + + assertTrue(!map.containsKey(Integer.valueOf(Integer.MIN_VALUE + 1000))); + assertTrue(!map.containsKey(Integer.valueOf(Integer.MIN_VALUE + 1001))); + assertTrue(!map.containsKey(Integer.valueOf(-1001))); + assertTrue(!map.containsKey(Integer.valueOf(-1002))); + assertTrue(!map.containsKey(Integer.valueOf(1001))); + assertTrue(!map.containsKey(Integer.valueOf(1002))); + assertTrue(!map.containsKey(Integer.valueOf(Integer.MAX_VALUE - 1001))); + assertTrue(!map.containsKey(Integer.valueOf(Integer.MAX_VALUE - 1002))); + + db.close(); + } + + public void testStrings() throws IOException { + long init = Long.MAX_VALUE - size * 2; + String file = newTestFile(); + DB db = new DBStore(file, false, false, false); + SortedMap map = db.createTreeMap("aa"); + for (long i = init; i < init + size / 10; i++) { + map.put("aaaaa" + i, ""); + } + db.commit(); + db.defrag(true); + db.close(); + db = new DBStore(file, false, false, false); + map = db.getTreeMap("aa"); + for (long i = init; i < init + size / 10; i++) { + assertTrue(map.containsKey("aaaaa" + i)); + } + + long fileSize = new 
File(file + ".dbr.0").length() / 1024; + System.out.println("file size with Strings: " + fileSize); + assertTrue("file is too big, compression failed", fileSize < 120); + } + +} Index: graph/src/test/java/org/apache/jdbm/LinkedListTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/LinkedListTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/LinkedListTest.java (Arbeitskopie) @@ -0,0 +1,455 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; + +/** + * Tests for LinkedList2 which comes with JDBM. Original code comes from Apache + * Harmony, Modified by Jan Kotek for use in JDBM + */ +public class LinkedListTest extends TestCaseWithTestFile { + + DB db; + + LinkedList ll; + + LinkedList testList; + + private Object testObjOne; + + private Object testObjTwo; + + private Object testObjThree; + + private Object testObjFour; + + private Object testObjLast; + + static Object[] objArray; + + { + objArray = new Object[100]; + for (int i = 0; i < objArray.length; i++) + objArray[i] = new Integer(i); + } + + /** + * @tests java.util.LinkedList2#add(int, java.lang.Object) + */ + public void test_addILjava_lang_Object() { + // Test for method void java.util.LinkedList2.add(int, java.lang.Object) + Object o = "Test"; + ll.add(50, o); + assertEquals("Failed to add Object>: " + ll.get(50).toString(), ll.get(50), + o); + assertEquals("Failed to fix up list after insert", ll.get(51), objArray[50]); + assertEquals(ll.get(52), objArray[51]); + ll.add(50, null); + assertNull("Did not add null correctly", ll.get(50)); + + try { + ll.add(-1, "Test"); + fail("Should throw IndexOutOfBoundsException"); + } catch (IndexOutOfBoundsException e) { + // Excepted + } + + try { + ll.add(-1, null); + fail("Should throw IndexOutOfBoundsException"); + } catch (IndexOutOfBoundsException e) { + // Excepted + } + + try { + ll.add(ll.size() + 1, "Test"); + fail("Should throw IndexOutOfBoundsException"); + } catch (IndexOutOfBoundsException e) { + // Excepted + } + + try { + ll.add(ll.size() + 1, null); + fail("Should throw IndexOutOfBoundsException"); + } catch (IndexOutOfBoundsException e) { + // Excepted + } + } + + /** + * @tests java.util.LinkedList2#addAll(int, java.util.Collection) + */ + public void test_addAllILjava_util_Collection() { + // Test for method boolean java.util.LinkedList2.addAll(int, + // java.util.Collection) + ll.addAll(50, new ArrayList(ll)); + assertEquals("Returned incorrect size after adding to existing list", 200, + ll.size()); + for (int i = 0; i < 50; i++) + 
assertEquals("Manipulated elements < index", ll.get(i), objArray[i]); + for (int i = 0; i >= 50 && (i < 150); i++) + assertTrue("Failed to ad elements properly", + ll.get(i) == objArray[i - 50]); + for (int i = 0; i >= 150 && (i < 200); i++) + assertTrue("Failed to ad elements properly", + ll.get(i) == objArray[i - 100]); + List myList = db.createLinkedList("testXX"); + myList.add(null); + myList.add("Blah"); + myList.add(null); + myList.add("Booga"); + myList.add(null); + ll.addAll(50, myList); + assertNull("a) List w/nulls not added correctly", ll.get(50)); + assertEquals("b) List w/nulls not added correctly", "Blah", ll.get(51)); + assertNull("c) List w/nulls not added correctly", ll.get(52)); + assertEquals("d) List w/nulls not added correctly", "Booga", ll.get(53)); + assertNull("e) List w/nulls not added correctly", ll.get(54)); + + try { + ll.addAll(50, null); + fail("Should throw NullPointerException"); + } catch (NullPointerException e) { + // Excepted + } + } + + /** + * @tests java.util.LinkedList2#addAll(int, java.util.Collection) + */ + public void test_addAllILjava_util_Collection_2() { + // Regression for HARMONY-467 + LinkedList obj = (LinkedList) db.createLinkedList("testXX"); + try { + obj.addAll(-1, (Collection) null); + fail("IndexOutOfBoundsException expected"); + } catch (IndexOutOfBoundsException e) { + } + } + + /** + * @tests java.util.LinkedList2#addAll(java.util.Collection) + */ + public void test_addAllLjava_util_Collection() { + + // Test for method boolean + // java.util.LinkedList2.addAll(java.util.Collection) + List l = new ArrayList(); + l.addAll(new ArrayList(ll)); + + for (int i = 0; i < ll.size(); i++) + assertTrue("Failed to add elements properly", l.get(i).equals(ll.get(i))); + ll.addAll(new ArrayList(ll)); + assertEquals("Returned incorrect siZe after adding to existing list", 200, + ll.size()); + for (int i = 0; i < 100; i++) { + assertTrue("Added to list in incorrect order", ll.get(i).equals(l.get(i))); + assertTrue("Failed to add to existing list", + ll.get(i + 100).equals(l.get(i))); + } + List myList = db.createLinkedList("testXX"); + myList.add(null); + myList.add("Blah"); + myList.add(null); + myList.add("Booga"); + myList.add(null); + ll.addAll(myList); + assertNull("a) List w/nulls not added correctly", ll.get(200)); + assertEquals("b) List w/nulls not added correctly", "Blah", ll.get(201)); + assertNull("c) List w/nulls not added correctly", ll.get(202)); + assertEquals("d) List w/nulls not added correctly", "Booga", ll.get(203)); + assertNull("e) List w/nulls not added correctly", ll.get(204)); + + try { + ll.addAll(null); + fail("Should throw NullPointerException"); + } catch (NullPointerException e) { + // Excepted + } + } + + /** + * @tests java.util.LinkedList2#clear() + */ + public void test_clear() { + // Test for method void java.util.LinkedList2.clear() + ll.clear(); + for (int i = 0; i < ll.size(); i++) + assertNull("Failed to clear list", ll.get(i)); + } + + /** + * @tests java.util.LinkedList2#contains(java.lang.Object) + */ + public void test_containsLjava_lang_Object() { + // Test for method boolean + // java.util.LinkedList2.contains(java.lang.Object) + assertTrue("Returned false for valid element", ll.contains(objArray[99])); + assertTrue("Returned false for equal element", ll.contains(new Integer(8))); + assertTrue("Returned true for invalid element", !ll.contains(new Object())); + assertTrue("Should not contain null", !ll.contains(null)); + ll.add(25, null); + assertTrue("Should contain null", ll.contains(null)); + } + + 
/** + * @tests java.util.LinkedList2#get(int) + */ + public void test_getI() { + // Test for method java.lang.Object java.util.LinkedList2.get(int) + assertEquals("Returned incorrect element", ll.get(22), objArray[22]); + try { + ll.get(8765); + fail("Failed to throw expected exception for index > size"); + } catch (IndexOutOfBoundsException e) { + } + } + + /** + * @tests java.util.LinkedList2#indexOf(java.lang.Object) + */ + public void test_indexOfLjava_lang_Object() { + // Test for method int java.util.LinkedList2.indexOf(java.lang.Object) + assertEquals("Returned incorrect index", 87, ll.indexOf(objArray[87])); + assertEquals("Returned index for invalid Object", -1, + ll.indexOf(new Object())); + ll.add(20, null); + ll.add(24, null); + assertTrue("Index of null should be 20, but got: " + ll.indexOf(null), + ll.indexOf(null) == 20); + } + + /** + * @tests java.util.LinkedList2#lastIndexOf(java.lang.Object) + */ + public void test_lastIndexOfLjava_lang_Object() { + // Test for method int + // java.util.LinkedList2.lastIndexOf(java.lang.Object) + ll.add(new Integer(99)); + assertEquals("Returned incorrect index", 100, ll.lastIndexOf(objArray[99])); + assertEquals("Returned index for invalid Object", -1, + ll.lastIndexOf(new Object())); + ll.add(20, null); + ll.add(24, null); + assertTrue( + "Last index of null should be 24, but got: " + ll.lastIndexOf(null), + ll.lastIndexOf(null) == 24); + } + + /** + * @tests java.util.LinkedList2#listIterator(int) + */ + public void test_listIteratorI() { + // Test for method java.util.ListIterator + // java.util.LinkedList2.listIterator(int) + ListIterator i = ll.listIterator(); + Object elm; + int n = 0; + while (i.hasNext()) { + if (n == 0 || n == objArray.length - 1) { + if (n == 0) + assertTrue("First element claimed to have a previous", + !i.hasPrevious()); + if (n == objArray.length) + assertTrue("Last element claimed to have next", !i.hasNext()); + } + elm = i.next(); + assertEquals("Iterator returned elements in wrong order", elm, + objArray[n]); + if (n > 0 && n < objArray.length - 1) { + assertEquals("Next index returned incorrect value", i.nextIndex(), + n + 1); + assertEquals( + "previousIndex returned incorrect value : " + i.previousIndex() + + ", n val: " + n, i.previousIndex(), n); + } + ++n; + } + List myList = db.createLinkedList("testXX"); + myList.add(null); + myList.add("Blah"); + myList.add(null); + myList.add("Booga"); + myList.add(null); + ListIterator li = myList.listIterator(); + assertTrue("li.hasPrevious() should be false", !li.hasPrevious()); + assertNull("li.next() should be null", li.next()); + assertTrue("li.hasPrevious() should be true", li.hasPrevious()); + assertNull("li.prev() should be null", li.previous()); + assertNull("li.next() should be null", li.next()); + assertEquals("li.next() should be Blah", "Blah", li.next()); + assertNull("li.next() should be null", li.next()); + assertEquals("li.next() should be Booga", "Booga", li.next()); + assertTrue("li.hasNext() should be true", li.hasNext()); + assertNull("li.next() should be null", li.next()); + assertTrue("li.hasNext() should be false", !li.hasNext()); + } + + /** + * @tests java.util.LinkedList2#remove(int) + */ + public void test_removeI() { + // Test for method java.lang.Object java.util.LinkedList2.remove(int) + ll.remove(10); + assertEquals("Failed to remove element", -1, ll.indexOf(objArray[10])); + try { + ll.remove(999); + fail("Failed to throw expected exception when index out of range"); + } catch (IndexOutOfBoundsException e) { + // Correct + } + +
ll.add(20, null); + ll.remove(20); + assertNotNull("Should have removed null", ll.get(20)); + } + + /** + * @tests java.util.LinkedList2#remove(java.lang.Object) + */ + public void test_removeLjava_lang_Object() { + // Test for method boolean java.util.LinkedList2.remove(java.lang.Object) + assertTrue("Failed to remove valid Object", ll.remove(objArray[87])); + assertTrue("Removed invalid object", !ll.remove(new Object())); + assertEquals("Found Object after removal", -1, ll.indexOf(objArray[87])); + ll.add(null); + ll.remove(null); + assertTrue("Should not contain null after removal", !ll.contains(null)); + } + + /** + * @tests java.util.LinkedList2#set(int, java.lang.Object) + */ + public void test_setILjava_lang_Object() { + // Test for method java.lang.Object java.util.LinkedList2.set(int, + // java.lang.Object) + ll.set(65, "aa"); + assertEquals("Failed to set object", ll.get(65), "aa"); + } + + /** + * @tests java.util.LinkedList2#size() + */ + public void test_size() { + // Test for method int java.util.LinkedList2.size() + assertEquals("Returned incorrect size", ll.size(), objArray.length); + + int counter = 0; + Iterator iter = ll.iterator(); + while (iter.hasNext()) { + counter++; + iter.next(); + } + assertEquals("Returned incorrect size", counter, objArray.length); + + ll.remove(0); + assertEquals("Returned incorrect size", ll.size(), objArray.length - 1); + } + + /** + * @tests java.util.LinkedList2#toArray() + */ + public void test_toArray() { + // Test for method java.lang.Object [] java.util.LinkedList2.toArray() + ll.add(null); + Object[] obj = ll.toArray(); + assertEquals("Returned array of incorrect size", objArray.length + 1, + obj.length); + + for (int i = 0; i < obj.length - 1; i++) + assertEquals("Returned incorrect array: " + i, obj[i], objArray[i]); + assertNull("Returned incorrect array--end isn't null", obj[obj.length - 1]); + } + + /** + * @tests java.util.LinkedList2#toArray(java.lang.Object[]) + */ + public void test_toArray$Ljava_lang_Object() { + // Test for method java.lang.Object [] + // java.util.LinkedList2.toArray(java.lang.Object []) + Integer[] argArray = new Integer[100]; + Object[] retArray; + retArray = ll.toArray(argArray); + assertTrue("Returned different array than passed", retArray == argArray); + List retList = db.createLinkedList("testXX1"); + retList.addAll(Arrays.asList(retArray)); + Iterator li = ll.iterator(); + Iterator ri = retList.iterator(); + while (li.hasNext()) + assertEquals(li.next(), ri.next()); + argArray = new Integer[1000]; + retArray = ll.toArray(argArray); + assertNull("Failed to set first extra element to null", argArray[ll.size()]); + for (int i = 0; i < ll.size(); i++) + assertEquals("Returned incorrect array: " + i, retArray[i], objArray[i]); + ll.add(50, null); + argArray = new Integer[101]; + retArray = ll.toArray(argArray); + assertTrue("Returned different array than passed", retArray == argArray); + retArray = ll.toArray(argArray); + assertTrue("Returned different array than passed", retArray == argArray); + retList = db.createLinkedList("testXX2"); + retList.addAll(Arrays.asList(retArray)); + li = ll.iterator(); + ri = retList.iterator(); + while (li.hasNext()) + assertTrue("Lists are not equal", li.next() == ri.next()); + } + + /** + * @tests {@link java.util.LinkedList#remove()} + */ + public void test_remove() { + for (int i = 0; i < objArray.length; i++) { + assertEquals("should remove the head", objArray[i], ll.remove(0)); + } + assertEquals("should be empty", 0, ll.size()); + try { + ll.remove(0); +
fail("IndexOutOfBoundsException is expected when removing from the empty list"); + } catch (IndexOutOfBoundsException e) { + // -- expected + } + } + + /** + * Sets up the fixture, for example, open a network connection. This method is + * called before a test is executed. + */ + @Override + public void setUp() throws Exception { + super.setUp(); + this.db = newDBCache(); + ll = (LinkedList) db.createLinkedList("ll"); + for (int i = 0; i < objArray.length; i++) { + ll.add(objArray[i]); + } + testList = (LinkedList) db.createLinkedList("testList"); + testObjOne = new Object(); + testObjTwo = new Object(); + testObjThree = new Object(); + testObjFour = new Object(); + testObjLast = new Object(); + } +} Index: graph/src/test/java/org/apache/jdbm/StorageDiskMappedTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/StorageDiskMappedTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/StorageDiskMappedTest.java (Arbeitskopie) @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import junit.framework.TestCase; + +public class StorageDiskMappedTest extends TestCase { + + public void testNumberOfPages() { + + assertTrue(StorageDiskMapped.PAGES_PER_FILE * Storage.PAGE_SIZE < Integer.MAX_VALUE); + + } +} Index: graph/src/test/java/org/apache/jdbm/Serialized2DerivedBean.java =================================================================== --- graph/src/test/java/org/apache/jdbm/Serialized2DerivedBean.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/Serialized2DerivedBean.java (Arbeitskopie) @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +public class Serialized2DerivedBean extends Serialization2Bean { + private static final long serialVersionUID = 2071817382135925585L; + + private String d1 = "1"; + private String d2 = "2"; + private String d3 = null; + private String d4 = "4"; + private String d5 = null; + private String d6 = "6"; + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((d1 == null) ? 0 : d1.hashCode()); + result = prime * result + ((d2 == null) ? 0 : d2.hashCode()); + result = prime * result + ((d3 == null) ? 0 : d3.hashCode()); + result = prime * result + ((d4 == null) ? 0 : d4.hashCode()); + result = prime * result + ((d5 == null) ? 0 : d5.hashCode()); + result = prime * result + ((d6 == null) ? 0 : d6.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (!super.equals(obj)) + return false; + if (getClass() != obj.getClass()) + return false; + Serialized2DerivedBean other = (Serialized2DerivedBean) obj; + if (d1 == null) { + if (other.d1 != null) + return false; + } else if (!d1.equals(other.d1)) + return false; + if (d2 == null) { + if (other.d2 != null) + return false; + } else if (!d2.equals(other.d2)) + return false; + if (d3 == null) { + if (other.d3 != null) + return false; + } else if (!d3.equals(other.d3)) + return false; + if (d4 == null) { + if (other.d4 != null) + return false; + } else if (!d4.equals(other.d4)) + return false; + if (d5 == null) { + if (other.d5 != null) + return false; + } else if (!d5.equals(other.d5)) + return false; + if (d6 == null) { + if (other.d6 != null) + return false; + } else if (!d6.equals(other.d6)) + return false; + return true; + } + +} Index: graph/src/test/java/org/apache/jdbm/TestIssues.java =================================================================== --- graph/src/test/java/org/apache/jdbm/TestIssues.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/TestIssues.java (Arbeitskopie) @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import java.io.IOException; +import java.util.Map; + +public class TestIssues extends TestCaseWithTestFile { + + /* + * test this issue http://code.google.com/p/jdbm2/issues/detail?id=2 + */ + public void testHTreeClear() throws IOException { + final DBAbstract db = newDBCache(); + final HTree tree = (HTree) db.createHashMap("name"); + + for (int i = 0; i < 1001; i++) { + tree.put(String.valueOf(i), String.valueOf(i)); + } + db.commit(); + System.out.println("finished adding"); + + tree.clear(); + db.commit(); + System.out.println("finished clearing"); + assertTrue(tree.isEmpty()); + } + + public void testBTreeClear() throws IOException { + final DB db = newDBCache(); + final Map treeMap = db.createTreeMap("test"); + + for (int i = 0; i < 1001; i++) { + treeMap.put(String.valueOf(i), String.valueOf(i)); + } + db.commit(); + System.out.println("finished adding"); + + treeMap.clear(); + db.commit(); + System.out.println("finished clearing"); + assertTrue(treeMap.isEmpty()); + } + + public void test_issue_84_reopen_after_close() { + String f = newTestFile(); + DB db = DBMaker.openFile(f).make(); + db.close(); + + db = DBMaker.openFile(f).readonly().make(); + db.close(); + } +} Index: graph/src/test/java/org/apache/jdbm/BTreeLeadingValuePackTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/BTreeLeadingValuePackTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/BTreeLeadingValuePackTest.java (Arbeitskopie) @@ -0,0 +1,181 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.Arrays; +import java.util.Random; + +import junit.framework.TestCase; + +public class BTreeLeadingValuePackTest extends TestCase { + + public static class ByteArraySource { + byte[] last = new byte[0]; + Random r; + + public ByteArraySource(long seed) { + r = new Random(seed); + r.nextBytes(last); + } + + public byte[] getBytesWithCommonPrefix(int len, int common) { + if (common > last.length) + common = last.length; + if (common > len) + common = len; + + byte[] out = new byte[len]; + System.arraycopy(last, 0, out, 0, common); + byte[] xtra = new byte[len - common]; + r.nextBytes(xtra); + System.arraycopy(xtra, 0, out, common, xtra.length); + + last = out; + return out; + } + + } + + private void doCompressUncompressTestFor(byte[][] groups) throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream dos = new DataOutputStream(baos); + + // compress + for (int i = 0; i < groups.length; i++) { + BTreeNode.leadingValuePackWrite(dos, groups[i], i > 0 ? groups[i - 1] + : null, 0); + } + + byte[] results = baos.toByteArray(); + + ByteArrayInputStream bais = new ByteArrayInputStream(results); + DataInputStream dis = new DataInputStream(bais); + + byte[] previous = null; + for (int i = 0; i < groups.length; i++) { + previous = BTreeNode.leadingValuePackRead(dis, previous, 0); + assertTrue(Arrays.equals(groups[i], previous)); + } + + } + + private byte[][] getIncrementingGroups(int groupCount, long seed, + int lenInit, int comInit, int lenIncr, int comIncr) { + ByteArraySource bap = new ByteArraySource(seed); + byte[][] groups = new byte[groupCount][]; + for (int i = 0; i < groupCount; i++) { + groups[i] = bap.getBytesWithCommonPrefix(lenInit, comInit); + lenInit += lenIncr; + comInit += comIncr; + } + return groups; + } + + public void testCompDecompEqualLenEqualCommon() throws IOException { + byte[][] groups = getIncrementingGroups(5, // number of groups + 1000, // seed + 50, // starting byte array length + 5, // starting common bytes + 0, // length increment + 0 // common bytes increment + ); + + doCompressUncompressTestFor(groups); + } + + public void testCompDecompEqualLenIncrCommon() throws IOException { + byte[][] groups = getIncrementingGroups(5, // number of groups + 1000, // seed + 50, // starting byte array length + 5, // starting common bytes + 0, // length increment + 2 // common bytes increment + ); + + doCompressUncompressTestFor(groups); + } + + public void testCompDecompEqualLenDecrCommon() throws IOException { + byte[][] groups = getIncrementingGroups(5, // number of groups + 1000, // seed + 50, // starting byte array length + 40, // starting common bytes + 0, // length increment + -2 // common bytes increment + ); + + doCompressUncompressTestFor(groups); + } + + public void testCompDecompIncrLenEqualCommon() throws IOException { + byte[][] groups = getIncrementingGroups(5, // number of groups + 1000, // seed + 30, // starting byte array length + 25, // starting common bytes + 1, // length increment + 0 // common bytes increment + ); + + doCompressUncompressTestFor(groups); + } + + public void testCompDecompDecrLenEqualCommon() throws IOException { + byte[][] groups = getIncrementingGroups(5, // number of groups + 1000, // seed + 50, // starting byte array length + 25, // starting common bytes + -1, // length increment + 
0 // common bytes increment + ); + + doCompressUncompressTestFor(groups); + } + + public void testCompDecompNoCommon() throws IOException { + byte[][] groups = getIncrementingGroups(5, // number of groups + 1000, // seed + 50, // starting byte array length + 0, // starting common bytes + -1, // length increment + 0 // common bytes increment + ); + + doCompressUncompressTestFor(groups); + } + + public void testCompDecompNullGroups() throws IOException { + byte[][] groups = getIncrementingGroups(5, // number of groups + 1000, // seed + 50, // starting byte array length + 25, // starting common bytes + -1, // length increment + 0 // common bytes increment + ); + + groups[2] = null; + groups[4] = null; + + doCompressUncompressTestFor(groups); + } + +} Index: graph/src/test/java/org/apache/jdbm/SerializationHeaderTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/SerializationHeaderTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/SerializationHeaderTest.java (Arbeitskopie) @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.lang.reflect.Field; +import java.util.Set; +import java.util.TreeSet; + +import junit.framework.TestCase; + +public class SerializationHeaderTest extends TestCase { + + public void testUnique() throws IllegalAccessException { + Class c = SerializationHeader.class; + Set s = new TreeSet(); + for (Field f : c.getDeclaredFields()) { + f.setAccessible(true); + int value = f.getInt(null); + + assertTrue("Value already used: " + value, !s.contains(value)); + s.add(value); + } + assertTrue(!s.isEmpty()); + } +} Index: graph/src/test/java/org/apache/jdbm/StorageZipTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/StorageZipTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/StorageZipTest.java (Arbeitskopie) @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
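BTreeLeadingValuePackTest above drives BTreeNode.leadingValuePackWrite/Read, which store for each value only the length of the prefix shared with the previous value plus the differing suffix. A conceptual sketch of that idea (simplified; not BTreeNode's actual wire format):

import java.util.Arrays;

public class PrefixPackSketch {

  // number of leading bytes cur shares with prev
  static int commonPrefix(byte[] prev, byte[] cur) {
    int n = Math.min(prev.length, cur.length);
    int i = 0;
    while (i < n && prev[i] == cur[i]) {
      i++;
    }
    return i;
  }

  public static void main(String[] args) {
    byte[] prev = "user:1001".getBytes();
    byte[] cur = "user:1002".getBytes();
    int common = commonPrefix(prev, cur);
    byte[] suffix = Arrays.copyOfRange(cur, common, cur.length);
    // a writer would emit (common, suffix) instead of the full value; the
    // reader rebuilds cur from the previously decoded value plus the suffix
    System.out.println(common + " shared bytes, suffix: " + new String(suffix));
  }
}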
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.File; +import java.io.IOException; +import java.util.Set; + +public class StorageZipTest extends TestCaseWithTestFile { + + public void test_archive_creation() throws IOException { + + File tmp = File.createTempFile("JDBM_TEST_ZIP", ".zip"); + String dbpath = tmp.getPath(); + tmp.deleteOnExit(); + + // first create an archive and put it in a zip file + DBStore r = new DBStore(newTestFile(), false, true, false); + Set h = r.createHashSet("hash"); + for (Long l = 0L; l < 1e3; l++) { + h.add(l); + } + r.commit(); + r.copyToZip(dbpath); + r.close(); + + System.out.println("Zip file created, size: " + tmp.length()); + + // open zip file and check it contains all data + DB r2 = DBMaker.openZip(dbpath).readonly().make(); + + Set h2 = r2.getHashSet("hash"); + for (Long l = 0L; l < 1e3; l++) { + assertTrue(h2.contains(l)); + } + + } + +} Index: graph/src/test/java/org/apache/jdbm/DBCacheMRUTest.java =================================================================== --- graph/src/test/java/org/apache/jdbm/DBCacheMRUTest.java (Revision 0) +++ graph/src/test/java/org/apache/jdbm/DBCacheMRUTest.java (Arbeitskopie) @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; +import java.util.ArrayList; + +public class DBCacheMRUTest extends TestCaseWithTestFile { + + public void testPurgeEntryClearsCache() throws IOException { + DBCacheMRU d = (DBCacheMRU) newDBCache(); + + for (long i = 0; i < 1e3; i++) + d.addEntry(newEntry(i)); + + for (long i = 0; i < 1e3; i++) + d.purgeEntry(); + + assertEquals(0, d._hash.size()); + } + + DBCacheMRU.CacheEntry newEntry(long i) { + return new DBCacheMRU.CacheEntry(i, i); + } + + public void testCacheMaxSize() throws IOException { + + DBCacheMRU d = (DBCacheMRU) DBMaker.openFile(newTestFile()) + .setMRUCacheSize(100).make(); + + ArrayList recids = new ArrayList(); + for (int i = 0; i < 1e5; i++) { + recids.add(d.insert("aa" + i)); + } + d.commit(); + for (int i = 0; i < 1e5; i++) { + d.fetch(recids.get(i)); + } + + assertTrue(d._hash.size() <= 100); + + } +} Index: graph/src/main/java/org/apache/hama/graph/VertexWritableSerialization.java =================================================================== --- graph/src/main/java/org/apache/hama/graph/VertexWritableSerialization.java (Revision 0) +++ graph/src/main/java/org/apache/hama/graph/VertexWritableSerialization.java (Arbeitskopie) @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
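DBCacheMRUTest above checks two properties of the MRU record cache: purgeEntry() drains the internal _hash map, and the cache never grows past the size configured via setMRUCacheSize. The bounded, access-ordered behavior can be sketched with java.util.LinkedHashMap (a stand-in for illustration, not JDBM's actual implementation):

import java.util.LinkedHashMap;
import java.util.Map;

public class MruCacheSketch extends LinkedHashMap<Long, Object> {

  private final int maxSize;

  public MruCacheSketch(int maxSize) {
    super(16, 0.75f, true); // true = access order: recently used entries stay
    this.maxSize = maxSize;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<Long, Object> eldest) {
    return size() > maxSize; // evict the least recently used entry
  }

  public static void main(String[] args) {
    MruCacheSketch cache = new MruCacheSketch(100);
    for (long i = 0; i < 100000; i++) {
      cache.put(i, "aa" + i);
    }
    System.out.println(cache.size()); // 100, never more
  }
}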
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hama.graph; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.util.ReflectionUtils; + +import com.google.common.base.Preconditions; + +/** + * Writable serialization for Hadoop objects with a class cache: instead of + * the full class name, only a single integer is written (VInt compression, + * so most of the time it just takes a single byte).<br/>
+ * Enhanced with a graph job runner instance that can be passed in. + */ +public final class VertexWritableSerialization<K extends Writable> extends + WritableSerialization<K> { + + private static final long serialVersionUID = 1L; + @SuppressWarnings("rawtypes") + private GraphJobRunner runner; + + public VertexWritableSerialization() { + } + + public VertexWritableSerialization(Class<? extends Writable> writableClazz, + @SuppressWarnings("rawtypes") GraphJobRunner runner) { + super(writableClazz); + Preconditions + .checkArgument( + Vertex.class.isAssignableFrom(writableClazz), + "Class " + + writableClazz + + " is not a subclass of Vertex! This class only serializes vertices!"); + this.runner = runner; + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Override + public Writable newInstance() { + Writable newInstance = (Writable) ReflectionUtils.newInstance( + LOOKUP_LIST.get(writableClassIndex), null); + ((Vertex) newInstance).runner = this.runner; + return newInstance; + } +} Index: graph/src/main/java/org/apache/hama/graph/GraphJob.java =================================================================== --- graph/src/main/java/org/apache/hama/graph/GraphJob.java (Revision 1385087) +++ graph/src/main/java/org/apache/hama/graph/GraphJob.java (Arbeitskopie) @@ -22,6 +22,7 @@ import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; import org.apache.hama.HamaConfiguration; import org.apache.hama.bsp.BSPJob; import org.apache.hama.bsp.Combiner; @@ -72,7 +73,8 @@ /** * Set the Vertex ID class for the job. */ - public void setVertexIDClass(Class cls) + public void setVertexIDClass( + @SuppressWarnings("rawtypes") Class cls) throws IllegalStateException { conf.setClass(VERTEX_ID_CLASS_ATTR, cls, Writable.class); } @@ -129,8 +131,8 @@ } @Override - public void setPartitioner(@SuppressWarnings("rawtypes") - Class theClass) { + public void setPartitioner( + @SuppressWarnings("rawtypes") Class theClass) { super.setPartitioner(theClass); conf.setBoolean(VERTEX_GRAPH_RUNTIME_PARTIONING, true); } Index: graph/src/main/java/org/apache/hama/graph/WritableComparator.java =================================================================== --- graph/src/main/java/org/apache/hama/graph/WritableComparator.java (Revision 0) +++ graph/src/main/java/org/apache/hama/graph/WritableComparator.java (Arbeitskopie) @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hama.graph; + +import java.io.Serializable; +import java.util.Comparator; + +/** + * Comparator that uses the WritableComparable instance's natural ordering to compare.
+ */ +public final class WritableComparator<T extends WritableComparable<T>> implements + Comparator<T>, Serializable { + + private static final long serialVersionUID = 1L; + + @Override + public int compare(T o1, T o2) { + return o1.compareTo(o2); + } + +} Index: graph/src/main/java/org/apache/hama/graph/AbstractAggregator.java =================================================================== --- graph/src/main/java/org/apache/hama/graph/AbstractAggregator.java (Revision 1385087) +++ graph/src/main/java/org/apache/hama/graph/AbstractAggregator.java (Arbeitskopie) @@ -87,7 +87,7 @@ public IntWritable getTimesAggregated() { return new IntWritable(timesAggregated); } - + @Override public String toString() { return "VAL=" + getValue(); Index: graph/src/main/java/org/apache/hama/graph/WritableSerialization.java =================================================================== --- graph/src/main/java/org/apache/hama/graph/WritableSerialization.java (Revision 0) +++ graph/src/main/java/org/apache/hama/graph/WritableSerialization.java (Arbeitskopie) @@ -0,0 +1,92 @@ +package org.apache.hama.graph; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; + +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.jdbm.Serializer; + +/** + * Writable serialization for Hadoop objects with a class cache: instead of + * the full class name, only a single integer is written (VInt compression, + * so most of the time it just takes a single byte). + */ +public class WritableSerialization<K extends Writable> implements + Serializer<K>, Serializable { + + private static final long serialVersionUID = 1L; + + // clazzname as string -> index in the lookuplist + protected transient final HashMap<String, Integer> CLAZZ_CACHE = new HashMap<String, Integer>(); + protected transient final ArrayList<Class<? extends Writable>> LOOKUP_LIST = new ArrayList<Class<? extends Writable>>(); + private transient int lastAssigned = 0; + + protected transient Writable instance; + protected transient int writableClassIndex; + + public WritableSerialization() { + } + + public WritableSerialization(Class<? extends Writable> writableClazz) { + Integer integer = CLAZZ_CACHE.get(writableClazz.getName()); + if (integer == null) { + integer = lastAssigned++; + CLAZZ_CACHE.put(writableClazz.getName(), integer); + LOOKUP_LIST.add(writableClazz); + } + this.writableClassIndex = integer; + } + + @Override + public void serialize(DataOutput out, K obj) throws IOException { + WritableUtils.writeVInt(out, writableClassIndex); + obj.write(out); + } + + @SuppressWarnings("unchecked") + @Override + public K deserialize(DataInput in) throws IOException, ClassNotFoundException { + writableClassIndex = WritableUtils.readVInt(in); + instance = newInstance(); + instance.readFields(in); + return (K) instance; + } + + public Writable newInstance() { + return (Writable) ReflectionUtils.newInstance( + LOOKUP_LIST.get(writableClassIndex), null); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((instance == null) ?
0 : instance.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + @SuppressWarnings("rawtypes") + WritableSerialization other = (WritableSerialization) obj; + if (instance == null) { + if (other.instance != null) + return false; + } else if (!instance.equals(other.instance)) + return false; + return true; + } + +} Index: graph/src/main/java/org/apache/hama/graph/Vertex.java =================================================================== --- graph/src/main/java/org/apache/hama/graph/Vertex.java (Revision 1385087) +++ graph/src/main/java/org/apache/hama/graph/Vertex.java (Arbeitskopie) @@ -17,6 +17,8 @@ */ package org.apache.hama.graph; +import java.io.DataInput; +import java.io.DataOutput; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -24,13 +26,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.hama.bsp.BSPPeer; import org.apache.hama.bsp.Partitioner; public abstract class Vertex<V extends Writable, E extends Writable, M extends Writable> - implements VertexInterface<V, E, M> { + implements VertexInterface<V, E, M>, Writable { - GraphJobRunner<V, E, M> runner; + transient GraphJobRunner<V, E, M> runner; private V vertexID; private M value; @@ -218,7 +222,82 @@ return true; } + @SuppressWarnings("unchecked") @Override + public void readFields(DataInput in) throws IOException { + votedToHalt = in.readBoolean(); + vertexID = (V) ReflectionUtils.newInstance(runner.vertexIdClass, null); + vertexID.readFields(in); + if (in.readBoolean()) { + value = (M) ReflectionUtils.newInstance(runner.vertexValueClass, null); + value.readFields(in); + } + + int edges = WritableUtils.readVInt(in); + ArrayList<Edge<V, E>> list = new ArrayList<Edge<V, E>>(edges); + for (int i = 0; i < edges; i++) { + V adjacentId = (V) ReflectionUtils + .newInstance(runner.vertexIdClass, null); + adjacentId.readFields(in); + E edgeValue = null; + if (in.readBoolean()) { + edgeValue = (E) ReflectionUtils + .newInstance(runner.edgeValueClass, null); + edgeValue.readFields(in); + } + list.add(new Edge<V, E>(adjacentId, edgeValue)); + } + + this.setEdges(list); + readInternal(in); + } + + @Override + public void write(DataOutput out) throws IOException { + out.writeBoolean(votedToHalt); + V vId = getVertexID(); + vId.write(out); + M val = getValue(); + serializeNull(out, val); + + List<Edge<V, E>> edges = getEdges(); + int length = edges == null ? 0 : edges.size(); + WritableUtils.writeVInt(out, length); + if (edges != null) for (Edge<V, E> edge : edges) { + edge.getDestinationVertexID().write(out); + serializeNull(out, edge.getValue()); + } + + writeInternal(out); + } + + /** + * A write method to let the user save its own state in the vertex class. + */ + protected void writeInternal(DataOutput out) throws IOException { + } + + /** + * A read method to let the user restore its own state in the vertex class. + */ + protected void readInternal(DataInput in) throws IOException { + } + + /** + * Serializes data null-safe by writing a boolean that is only true when the + * given writable is not null. + */ + protected static void serializeNull(DataOutput out, Writable writable) + throws IOException { + if (writable == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + writable.write(out); + } + } + + @Override public String toString() { return getVertexID() + (getValue() != null ?
" = " + getValue() : "") + " // " + edges; Index: graph/src/main/java/org/apache/hama/graph/GraphJobRunner.java =================================================================== --- graph/src/main/java/org/apache/hama/graph/GraphJobRunner.java (Revision 1385087) +++ graph/src/main/java/org/apache/hama/graph/GraphJobRunner.java (Arbeitskopie) @@ -18,8 +18,10 @@ package org.apache.hama.graph; import java.io.IOException; +import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -29,11 +31,15 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.MapWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hama.bsp.BSP; import org.apache.hama.bsp.BSPPeer; @@ -42,6 +48,8 @@ import org.apache.hama.bsp.Partitioner; import org.apache.hama.bsp.sync.SyncException; import org.apache.hama.util.KeyValuePair; +import org.apache.jdbm.DB; +import org.apache.jdbm.DBMaker; /** * Fully generic graph job runner. @@ -50,9 +58,12 @@ * @param the value type of an edge. * @param the value type of a vertex. */ -public final class GraphJobRunner - extends BSP { +public final class GraphJobRunner, E extends Writable, M extends Writable> + extends BSP + implements Serializable { + private static final long serialVersionUID = 1L; + private static final Log LOG = LogFactory.getLog(GraphJobRunner.class); // make sure that these values don't collide with the vertex names @@ -64,11 +75,12 @@ public static final String MESSAGE_COMBINER_CLASS = "hama.vertex.message.combiner.class"; public static final String GRAPH_REPAIR = "hama.graph.repair"; - private Configuration conf; - private Combiner combiner; - private Partitioner partitioner; + private transient Configuration conf; + private transient Combiner combiner; + private transient Partitioner partitioner; - private Map> vertices = new HashMap>(); + private transient Map> vertices; + private transient DB db; private boolean updated = true; private int globalUpdateCounts = 0; @@ -78,14 +90,14 @@ private int maxIteration = -1; private long iteration; - private Class vertexIdClass; - private Class vertexValueClass; - private Class edgeValueClass; - private Class> vertexClass; + Class vertexIdClass; + Class vertexValueClass; + Class edgeValueClass; + Class> vertexClass; - private AggregationRunner aggregationRunner; + private transient AggregationRunner aggregationRunner; - private BSPPeer peer; + private transient BSPPeer peer; @Override public final void setup( @@ -125,18 +137,32 @@ // loop over vertices and do their computation doSuperstep(messages, peer); } + + write(peer); } /** * Just write pair as a result. Note that * this will also be executed when failure happened. 
*/ + private void write( + BSPPeer peer) + throws IOException { + + Set keySet = vertices.keySet(); + for (V value : keySet) { + Vertex e = vertices.get(value); + peer.write(e.getVertexID(), e.getValue()); + } + } + @Override public final void cleanup( BSPPeer peer) throws IOException { - for (Entry> e : vertices.entrySet()) { - peer.write(e.getValue().getVertexID(), e.getValue().getValue()); + // remove the DB files if they exist + if (db != null) { + db.close(); } } @@ -171,7 +197,9 @@ BSPPeer peer) throws IOException { int activeVertices = 0; - for (Vertex vertex : vertices.values()) { + Set keySet = vertices.keySet(); + for (V key : keySet) { + Vertex vertex = vertices.get(key); List msgs = messages.get(vertex.getVertexID()); // If there are newly received messages, restart. if (vertex.isHalted() && msgs != null) { @@ -207,7 +235,9 @@ private void doInitialSuperstep( BSPPeer peer) throws IOException { - for (Vertex vertex : vertices.values()) { + Set keySet = vertices.keySet(); + for (V key : keySet) { + Vertex vertex = vertices.get(key); List singletonList = Collections.singletonList(vertex.getValue()); M lastValue = vertex.getValue(); vertex.compute(singletonList.iterator()); @@ -254,6 +284,35 @@ conf); } + if (conf.getBoolean("hama.graph.in.memory", false)) { + + String storagePath = conf.get("hama.graph.storage.path"); + if (storagePath == null) { + storagePath = "/tmp/graph_storage/"; + } + + storagePath += peer.getTaskId().toString(); + + try { + LocalFileSystem local = FileSystem.getLocal(conf); + local.mkdirs(new Path(storagePath)); + } catch (IOException e) { + throw new RuntimeException("Could not create \"" + storagePath + + "\", nested exception was: ", e); + } + + db = DBMaker.openFile(storagePath + "/graph.db").disableLocking() + .disableTransactions().deleteFilesAfterClose().useRandomAccessFile() + .make(); + + Comparator writableComparator = new WritableComparator(); + vertices = db.createTreeMap("graph-db", writableComparator, + new WritableSerialization(vertexIdClass), + new VertexWritableSerialization>(vertexClass, this)); + } else { + vertices = new HashMap>(); + } + aggregationRunner = new AggregationRunner(); aggregationRunner.setupAggregators(peer); } @@ -351,6 +410,9 @@ GraphJobMessage msg = null; while ((msg = peer.getCurrentMessage()) != null) { Vertex messagedVertex = (Vertex) msg.getVertex(); + if (messagedVertex == null) { + System.out.println(msg + "\n" + messagedVertex); + } messagedVertex.runner = this; messagedVertex.setup(conf); vertices.put(messagedVertex.getVertexID(), messagedVertex); Index: graph/src/main/java/org/apache/jdbm/BTreeLazyRecord.java =================================================================== --- graph/src/main/java/org/apache/jdbm/BTreeLazyRecord.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/BTreeLazyRecord.java (Arbeitskopie) @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOError;
+import java.io.IOException;
+
+/**
+ * A record lazily loaded from the store. This is used in BTree/HTree to store
+ * big records outside of the index tree.
+ */
+public final class BTreeLazyRecord<E> {
+
+  /**
+   * If a value in the tree serializes to more bytes than this, it is stored
+   * as a separate record outside of the tree. This value must always be
+   * smaller than 250.
+   */
+  static final int MAX_INTREE_RECORD_SIZE = 32;
+  static final int NULL = 255;
+  static final int LAZY_RECORD = 254;
+
+  private E value = null;
+  private DBAbstract db;
+  private Serializer<E> serializer;
+  final long recid;
+
+  BTreeLazyRecord(DBAbstract db, long recid, Serializer<E> serializer) {
+    this.db = db;
+    this.recid = recid;
+    this.serializer = serializer;
+  }
+
+  E get() {
+    if (value != null)
+      return value;
+    try {
+      value = db.fetch(recid, serializer);
+    } catch (IOException e) {
+      throw new IOError(e);
+    }
+    return value;
+  }
+
+  void delete() {
+    try {
+      db.delete(recid);
+    } catch (IOException e) {
+      throw new IOError(e);
+    }
+    value = null;
+    serializer = null;
+    db = null;
+  }
+
+  /**
+   * Serializer used to insert already serialized data into the store.
+   */
+  @SuppressWarnings("rawtypes")
+  static final Serializer FAKE_SERIALIZER = new Serializer() {
+
+    @Override
+    public void serialize(DataOutput out, Object obj) throws IOException {
+      byte[] data = (byte[]) obj;
+      out.write(data);
+    }
+
+    @Override
+    public Object deserialize(DataInput in) throws IOException,
+        ClassNotFoundException {
+      throw new UnsupportedOperationException();
+    }
+  };
+
+  static Object fastDeser(DataInputOutput in, Serializer serializer,
+      int expectedSize) throws IOException, ClassNotFoundException {
+    // we should probably copy the data into a separate buffer and hand that to
+    // the Serializer, but to make it faster the Serializer operates directly
+    // on top of this buffer; afterwards we check that it read the correct
+    // number of bytes
+    int origAvail = in.available();
+    if (origAvail == 0)
+      throw new InternalError(); // is backed by a byte[] buffer, so there
+                                 // should always be bytes available
+    Object ret = serializer.deserialize(in);
+    // check that the serializer did not read more bytes; if it did, it read
+    // bytes belonging to the next record
+    int read = origAvail - in.available();
+    if (read > expectedSize)
+      throw new IOException("Serializer read more bytes than the record size.");
+    else if (read != expectedSize) {
+      // the deserializer did not read all bytes; unusual but valid.
+      // Skip forward to get into the correct position.
+      for (int ii = 0; ii < expectedSize - read; ii++)
+        in.readUnsignedByte();
+    }
+    return ret;
+  }
+
+}
Index: graph/src/main/java/org/apache/jdbm/LongHashMap.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/LongHashMap.java	(Revision 0)
+++ graph/src/main/java/org/apache/jdbm/LongHashMap.java	(Arbeitskopie)
@@ -0,0 +1,418 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jdbm;
+
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * Hash Map which uses a primitive long as its key. The main advantage is that
+ * a new Long instance does not have to be created for each lookup.
+ * <p/>
+ * This code comes from Android, which in turn comes from Apache Harmony. This
+ * class was modified to use primitive longs and stripped down to consume less
+ * space.
+ * <p/>
+ * Author of JDBM modifications: Jan Kotek + */ +public final class LongHashMap implements Serializable { + private static final long serialVersionUID = 362499999763181265L; + + private int elementCount; + + private Entry[] elementData; + + private final float loadFactor; + + private int threshold; + + private int defaultSize = 16; + + private transient Entry reuseAfterDelete = null; + + static final class Entry implements Serializable { + private static final long serialVersionUID = 362445231113181265L; + + Entry next; + + V value; + + long key; + + Entry(long theKey) { + this.key = theKey; + this.value = null; + } + + } + + static class HashMapIterator implements Iterator { + private int position = 0; + + boolean canRemove = false; + + Entry entry; + + Entry lastEntry; + + final LongHashMap associatedMap; + + HashMapIterator(LongHashMap hm) { + associatedMap = hm; + } + + public boolean hasNext() { + if (entry != null) { + return true; + } + + Entry[] elementData = associatedMap.elementData; + int length = elementData.length; + int newPosition = position; + boolean result = false; + + while (newPosition < length) { + if (elementData[newPosition] == null) { + newPosition++; + } else { + result = true; + break; + } + } + + position = newPosition; + return result; + } + + public V next() { + + if (!hasNext()) { + throw new NoSuchElementException(); + } + + Entry result; + Entry _entry = entry; + if (_entry == null) { + result = lastEntry = associatedMap.elementData[position++]; + entry = lastEntry.next; + } else { + if (lastEntry.next != _entry) { + lastEntry = lastEntry.next; + } + result = _entry; + entry = _entry.next; + } + canRemove = true; + return result.value; + } + + public void remove() { + if (!canRemove) { + throw new IllegalStateException(); + } + + canRemove = false; + + if (lastEntry.next == entry) { + while (associatedMap.elementData[--position] == null) { + // Do nothing + } + associatedMap.elementData[position] = associatedMap.elementData[position].next; + entry = null; + } else { + lastEntry.next = entry; + } + if (lastEntry != null) { + Entry reuse = lastEntry; + lastEntry = null; + reuse.key = Long.MIN_VALUE; + reuse.value = null; + associatedMap.reuseAfterDelete = reuse; + } + + associatedMap.elementCount--; + } + } + + @SuppressWarnings("unchecked") + private Entry[] newElementArray(int s) { + return new Entry[s]; + } + + /** + * Constructs a new empty {@code HashMap} instance. + * + * @since Android 1.0 + */ + public LongHashMap() { + this(16); + } + + /** + * Constructs a new {@code HashMap} instance with the specified capacity. + * + * @param capacity the initial capacity of this hash map. + * @throws IllegalArgumentException when the capacity is less than zero. + * @since Android 1.0 + */ + public LongHashMap(int capacity) { + defaultSize = capacity; + if (capacity >= 0) { + elementCount = 0; + elementData = newElementArray(capacity == 0 ? 1 : capacity); + loadFactor = 0.75f; // Default load factor of 0.75 + computeMaxSize(); + } else { + throw new IllegalArgumentException(); + } + } + + // BEGIN android-changed + + /** + * Removes all mappings from this hash map, leaving it empty. 
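+ * <p/>
+ * If the backing array has grown beyond 1024 slots (and beyond the configured
+ * default size), it is replaced by a fresh default-sized array; otherwise the
+ * existing array is simply nulled out in place.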
+ * + * @see #isEmpty + * @see #size + * @since Android 1.0 + */ + + public void clear() { + if (elementCount > 0) { + elementCount = 0; + } + if (elementData.length > 1024 && elementData.length > defaultSize) + elementData = new Entry[defaultSize]; + else + Arrays.fill(elementData, null); + computeMaxSize(); + } + + // END android-changed + + /** + * Returns a shallow copy of this map. + * + * @return a shallow copy of this map. + * @since Android 1.0 + */ + + private void computeMaxSize() { + threshold = (int) (elementData.length * loadFactor); + } + + /** + * Returns the value of the mapping with the specified key. + * + * @param key the key. + * @return the value of the mapping with the specified key, or {@code null} if + * no mapping for the specified key is found. + * @since Android 1.0 + */ + + public V get(final long key) { + + final int hash = powerHash(key); + final int index = (hash & 0x7FFFFFFF) % elementData.length; + + // find non null entry + Entry m = elementData[index]; + while (m != null) { + if (key == m.key) + return m.value; + m = m.next; + } + + return null; + + } + + /** + * Returns whether this map is empty. + * + * @return {@code true} if this map has no elements, {@code false} otherwise. + * @see #size() + * @since Android 1.0 + */ + + public boolean isEmpty() { + return elementCount == 0; + } + + /** + * @return iterator over keys + */ + + // public Iterator keyIterator(){ + // return new HashMapIterator( + // new MapEntry.Type() { + // public K get(Entry entry) { + // return entry.key; + // } + // }, HashMap.this); + // + // } + + /** + * Maps the specified key to the specified value. + * + * @param key the key. + * @param value the value. + * @return the value of any previous mapping with the specified key or + * {@code null} if there was no such mapping. + * @since Android 1.0 + */ + + public V put(final long key, final V value) { + + int hash = powerHash(key); + int index = (hash & 0x7FFFFFFF) % elementData.length; + + // find non null entry + Entry entry = elementData[index]; + while (entry != null && key != entry.key) { + entry = entry.next; + } + + if (entry == null) { + if (++elementCount > threshold) { + rehash(); + index = (hash & 0x7FFFFFFF) % elementData.length; + } + entry = createHashedEntry(key, index); + } + + V result = entry.value; + entry.value = value; + return result; + } + + Entry createHashedEntry(final long key, final int index) { + Entry entry = reuseAfterDelete; + if (entry == null) { + entry = new Entry(key); + } else { + reuseAfterDelete = null; + entry.key = key; + entry.value = null; + } + + entry.next = elementData[index]; + elementData[index] = entry; + return entry; + } + + void rehash(final int capacity) { + int length = (capacity == 0 ? 1 : capacity << 1); + + Entry[] newData = newElementArray(length); + for (int i = 0; i < elementData.length; i++) { + Entry entry = elementData[i]; + while (entry != null) { + int index = ((int) powerHash(entry.key) & 0x7FFFFFFF) % length; + Entry next = entry.next; + entry.next = newData[index]; + newData[index] = entry; + entry = next; + } + } + elementData = newData; + computeMaxSize(); + } + + void rehash() { + rehash(elementData.length); + } + + /** + * Removes the mapping with the specified key from this map. + * + * @param key the key of the mapping to remove. + * @return the value of the removed mapping or {@code null} if no mapping for + * the specified key was found. 
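+ * <p/>
+ * A minimal usage sketch of this map as a whole:
+ * <pre>
+ * LongHashMap&lt;String&gt; map = new LongHashMap&lt;String&gt;();
+ * map.put(42L, "answer");
+ * String v = map.get(42L);    // "answer"
+ * String r = map.remove(42L); // "answer"; the mapping is gone afterwards
+ * </pre>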
+ * @since Android 1.0 + */ + + public V remove(final long key) { + Entry entry = removeEntry(key); + if (entry == null) + return null; + V ret = entry.value; + entry.value = null; + entry.key = Long.MIN_VALUE; + reuseAfterDelete = entry; + + return ret; + } + + Entry removeEntry(final long key) { + Entry last = null; + + final int hash = powerHash(key); + final int index = (hash & 0x7FFFFFFF) % elementData.length; + Entry entry = elementData[index]; + + while (true) { + if (entry == null) { + return null; + } + + if (key == entry.key) { + if (last == null) { + elementData[index] = entry.next; + } else { + last.next = entry.next; + } + elementCount--; + return entry; + } + + last = entry; + entry = entry.next; + } + } + + /** + * Returns the number of elements in this map. + * + * @return the number of elements in this map. + * @since Android 1.0 + */ + + public int size() { + return elementCount; + } + + /** + * @returns iterator over values in map + */ + public Iterator valuesIterator() { + return new HashMapIterator(this); + + } + + static final private int powerHash(final long key) { + int h = (int) (key ^ (key >>> 32)); + h ^= (h >>> 20) ^ (h >>> 12); + return h ^ (h >>> 7) ^ (h >>> 4); + } + +} Index: graph/src/main/java/org/apache/jdbm/HTree.java =================================================================== --- graph/src/main/java/org/apache/jdbm/HTree.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/HTree.java (Arbeitskopie) @@ -0,0 +1,483 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jdbm; + +import java.io.ByteArrayInputStream; +import java.io.DataInput; +import java.io.DataInputStream; +import java.io.DataOutput; +import java.io.IOError; +import java.io.IOException; +import java.util.AbstractMap; +import java.util.AbstractSet; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Persistent HashMap implementation for DB. Implemented as an H*Tree structure. + */ +@SuppressWarnings("rawtypes") +public final class HTree extends AbstractMap { + + final Serializer SERIALIZER = new Serializer() { + + @SuppressWarnings("unchecked") + @Override + public Object deserialize(DataInput ds2) throws IOException { + DataInputOutput ds = (DataInputOutput) ds2; + try { + int i = ds.readUnsignedByte(); + if (i == SerializationHeader.HTREE_BUCKET) { // is HashBucket? 
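+          // the header byte written by serialize() below tells us whether
+          // the serialized node is a bucket (a leaf holding key/value pairs)
+          // or a directory (an interior node of the H*Tree)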
+ HTreeBucket ret = new HTreeBucket(HTree.this); + if (loadValues) + ret.readExternal(ds); + + if (loadValues && ds.available() != 0) + throw new InternalError("bytes left: " + ds.available()); + return ret; + } else if (i == SerializationHeader.HTREE_DIRECTORY) { + HTreeDirectory ret = new HTreeDirectory(HTree.this); + ret.readExternal(ds); + if (loadValues && ds.available() != 0) + throw new InternalError("bytes left: " + ds.available()); + return ret; + } else { + throw new InternalError("Wrong HTree header: " + i); + } + } catch (ClassNotFoundException e) { + throw new IOException(e); + } + + } + + @Override + public void serialize(DataOutput out, Object obj) throws IOException { + if (obj instanceof HTreeBucket) { + out.write(SerializationHeader.HTREE_BUCKET); + HTreeBucket b = (HTreeBucket) obj; + b.writeExternal(out); + } else { + out.write(SerializationHeader.HTREE_DIRECTORY); + HTreeDirectory n = (HTreeDirectory) obj; + n.writeExternal(out); + } + } + }; + + /** + * Listeners which are notified about changes in records + */ + protected RecordListener[] recordListeners = new RecordListener[0]; + + /** + * Serializer used to serialize index keys (optional) + */ + protected Serializer keySerializer; + + /** + * Serializer used to serialize index values (optional) + */ + protected Serializer valueSerializer; + protected boolean readonly = false; + final long rootRecid; + DBAbstract db; + /** if false map contains only keys, used for set */ + boolean hasValues = true; + + /** + * counts structural changes in tree at runtume. Is here to support fail-fast + * behaviour. + */ + int modCount; + + /** + * indicates if values should be loaded during deserialization, set to true + * during defragmentation + */ + private boolean loadValues = true; + + public Serializer getKeySerializer() { + return keySerializer; + } + + public Serializer getValueSerializer() { + return valueSerializer; + } + + /** + * cache writing buffer, so it does not have to be allocated on each write + */ + AtomicReference writeBufferCache = new AtomicReference(); + + /** + * Create a persistent hashtable. 
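+ * <p/>
+ * Callers normally go through the DB facade rather than this constructor; a
+ * rough sketch ({@code createHashMap} is assumed here as the hash analogue of
+ * the {@code createTreeMap} call used by GraphJobRunner in this patch):
+ * <pre>
+ * DB db = DBMaker.openFile("/tmp/graph.db").make();
+ * Map&lt;String, String&gt; map = db.createHashMap("my-map");
+ * map.put("key", "value");
+ * db.commit();
+ * db.close();
+ * </pre>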
+ */ + @SuppressWarnings("unchecked") + public HTree(DBAbstract db, Serializer keySerializer, + Serializer valueSerializer, boolean hasValues) throws IOException { + this.keySerializer = keySerializer; + this.valueSerializer = valueSerializer; + this.db = db; + this.hasValues = hasValues; + + HTreeDirectory root = new HTreeDirectory(this, (byte) 0); + root.setPersistenceContext(0); + this.rootRecid = db.insert(root, this.SERIALIZER, false); + } + + /** + * Load a persistent hashtable + */ + public HTree(DBAbstract db, long rootRecid, Serializer keySerializer, + Serializer valueSerializer, boolean hasValues) throws IOException { + this.db = db; + this.rootRecid = rootRecid; + this.keySerializer = keySerializer; + this.valueSerializer = valueSerializer; + this.hasValues = hasValues; + } + + void setPersistenceContext(DBAbstract db) { + this.db = db; + } + + @SuppressWarnings("unchecked") + @Override + public V put(K key, V value) { + if (readonly) + throw new UnsupportedOperationException("readonly"); + try { + if (key == null || value == null) + throw new NullPointerException("Null key or value"); + + V oldVal = (V) getRoot().put(key, value); + if (oldVal == null) { + modCount++; + + // increase size + HTreeDirectory root = getRoot(); + root.size++; + db.update(rootRecid, root, SERIALIZER); + + for (RecordListener r : recordListeners) + r.recordInserted(key, value); + } else { + + // notify listeners + for (RecordListener r : recordListeners) + r.recordUpdated(key, oldVal, value); + } + + return oldVal; + } catch (IOException e) { + throw new IOError(e); + } + } + + @SuppressWarnings("unchecked") + @Override + public V get(Object key) { + if (key == null) + return null; + try { + return getRoot().get((K) key); + } catch (ClassCastException e) { + return null; + } catch (IOException e) { + throw new IOError(e); + } + + } + + @SuppressWarnings("unchecked") + @Override + public V remove(Object key) { + if (readonly) + throw new UnsupportedOperationException("readonly"); + + try { + if (key == null) + return null; + + V val = (V) getRoot().remove(key); + modCount++; + + if (val != null) { + // decrease size + HTreeDirectory root = getRoot(); + root.size--; + db.update(rootRecid, root, SERIALIZER); + + for (RecordListener r : recordListeners) + r.recordRemoved(key, val); + } + + return val; + } catch (ClassCastException e) { + return null; + } catch (IOException e) { + throw new IOError(e); + } + + } + + @Override + public boolean containsKey(Object key) { + if (key == null) + return false; + // no need for locking, get is already locked + V v = get(key); + return v != null; + } + + @Override + public void clear() { + try { + Iterator keyIter = keys(); + while (keyIter.hasNext()) { + keyIter.next(); + keyIter.remove(); + } + } catch (IOException e) { + throw new IOError(e); + } + + } + + /** + * Returns an enumeration of the keys contained in this + */ + public Iterator keys() throws IOException { + return getRoot().keys(); + } + + public DBAbstract getRecordManager() { + return db; + } + + /** + * add RecordListener which is notified about record changes + * + * @param listener + */ + public void addRecordListener(RecordListener listener) { + recordListeners = Arrays + .copyOf(recordListeners, recordListeners.length + 1); + recordListeners[recordListeners.length - 1] = listener; + } + + /** + * remove RecordListener which is notified about record changes + * + * @param listener + */ + @SuppressWarnings("unchecked") + public void removeRecordListener(RecordListener listener) { + List l = 
Arrays.asList(recordListeners); + l.remove(listener); + recordListeners = (RecordListener[]) l.toArray(new RecordListener[1]); + } + + @Override + public Set> entrySet() { + return _entrySet; + } + + private Set> _entrySet = new AbstractSet>() { + + protected Entry newEntry(K k, V v) { + return new SimpleEntry(k, v) { + private static final long serialVersionUID = 978651696969194154L; + + @Override + public V setValue(V arg0) { + // put is already locked + HTree.this.put(getKey(), arg0); + return super.setValue(arg0); + } + + }; + } + + @Override + public boolean add(java.util.Map.Entry e) { + if (readonly) + throw new UnsupportedOperationException("readonly"); + if (e.getKey() == null) + throw new NullPointerException("Can not add null key"); + if (e.getValue().equals(get(e.getKey()))) + return false; + HTree.this.put(e.getKey(), e.getValue()); + return true; + } + + @Override + @SuppressWarnings("unchecked") + public boolean contains(Object o) { + if (o instanceof Entry) { + Entry e = (java.util.Map.Entry) o; + + // get is already locked + if (e.getKey() != null && HTree.this.get(e.getKey()) != null) + return true; + } + return false; + } + + @Override + public Iterator> iterator() { + try { + final Iterator br = keys(); + return new Iterator>() { + + @Override + public boolean hasNext() { + return br.hasNext(); + } + + @Override + public java.util.Map.Entry next() { + K k = br.next(); + return newEntry(k, get(k)); + } + + @Override + public void remove() { + if (readonly) + throw new UnsupportedOperationException("readonly"); + br.remove(); + } + }; + + } catch (IOException e) { + throw new IOError(e); + } + + } + + @Override + @SuppressWarnings("unchecked") + public boolean remove(Object o) { + if (readonly) + throw new UnsupportedOperationException("readonly"); + + if (o instanceof Entry) { + Entry e = (java.util.Map.Entry) o; + + // check for nulls + if (e.getKey() == null || e.getValue() == null) + return false; + // get old value, must be same as item in entry + V v = get(e.getKey()); + if (v == null || !e.getValue().equals(v)) + return false; + HTree.this.remove(e.getKey()); + return true; + } + return false; + + } + + @Override + public int size() { + try { + int counter = 0; + Iterator it = keys(); + while (it.hasNext()) { + it.next(); + counter++; + } + return counter; + } catch (IOException e) { + throw new IOError(e); + } + + } + + }; + + @SuppressWarnings("unchecked") + HTreeDirectory getRoot() { + // assumes that caller already holds read or write lock + try { + HTreeDirectory root = (HTreeDirectory) db.fetch(rootRecid, + this.SERIALIZER); + root.setPersistenceContext(rootRecid); + return root; + } catch (IOException e) { + throw new IOError(e); + } + } + + @SuppressWarnings("unchecked") + public static HTree deserialize(DataInput is, Serialization ser) + throws IOException, ClassNotFoundException { + long rootRecid = LongPacker.unpackLong(is); + boolean hasValues = is.readBoolean(); + Serializer keySerializer = (Serializer) ser.deserialize(is); + Serializer valueSerializer = (Serializer) ser.deserialize(is); + + return new HTree(ser.db, rootRecid, keySerializer, valueSerializer, + hasValues); + } + + @SuppressWarnings("unchecked") + void serialize(DataOutput out) throws IOException { + LongPacker.packLong(out, rootRecid); + out.writeBoolean(hasValues); + db.defaultSerializer().serialize(out, keySerializer); + db.defaultSerializer().serialize(out, valueSerializer); + } + + @SuppressWarnings("unchecked") + static void defrag(Long recid, DBStore r1, DBStore r2) throws 
IOException { + // TODO should modCount be increased after defrag, revert or commit? + try { + byte[] data = r1.fetchRaw(recid); + r2.forceInsert(recid, data); + DataInput in = new DataInputStream(new ByteArrayInputStream(data)); + HTree t = (HTree) r1.defaultSerializer().deserialize(in); + t.db = r1; + t.loadValues = false; + + HTreeDirectory d = t.getRoot(); + if (d != null) { + r2.forceInsert(t.rootRecid, r1.fetchRaw(t.rootRecid)); + d.defrag(r1, r2); + } + + } catch (ClassNotFoundException e) { + throw new IOError(e); + } + + } + + @Override + public int size() { + return (int) getRoot().size; + } + + public boolean hasValues() { + return hasValues; + } + +} Index: graph/src/main/java/org/apache/jdbm/BTreeNode.java =================================================================== --- graph/src/main/java/org/apache/jdbm/BTreeNode.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/BTreeNode.java (Arbeitskopie) @@ -0,0 +1,1532 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jdbm; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOError; +import java.io.IOException; +import java.util.ConcurrentModificationException; +import java.util.List; + +/** + * Node of a BTree. + *
+ * The node contains a number of key/value pairs. Keys are ordered to allow
+ * binary search. If a value is too big, it is stored in a separate record
+ * and only its recid reference is kept in the node.
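+ * (Concretely: {@code writeValues} below stores any value longer than
+ * {@code BTreeLazyRecord.MAX_INTREE_RECORD_SIZE} bytes as a separate record
+ * and keeps only the recid, wrapped in a {@code BTreeLazyRecord}.)
+ * <p/>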
+ * If the node is a leaf node, the keys and values are user-defined and + * represent entries inserted by the user. + *
+ * If the node is non-leaf, each key represents the greatest key in the + * underlying BTreeNode and the values are recids pointing to the children + * BTreeNodes. The only exception is the rightmost BTreeNode, which is + * considered to have an "infinite" key value, meaning that any insert will be + * to the left of this pseudo-key + */ +public final class BTreeNode implements Serializer> { + + private static final boolean DEBUG = false; + + /** + * Parent B+Tree. + */ + transient BTree _btree; + + /** + * This BTreeNode's record ID in the DB. + */ + protected transient long _recid; + + /** + * Flag indicating if this is a leaf BTreeNode. + */ + protected boolean _isLeaf; + + /** + * Keys of children nodes + */ + protected K[] _keys; + + /** + * Values associated with keys. (Only valid if leaf node) + */ + protected Object[] _values; + + /** + * Children nodes (recids) associated with keys. (Only valid if non-leaf node) + */ + protected long[] _children; + + /** + * Index of first used item at the node + */ + protected byte _first; + + /** + * Previous leaf node (only if this node is a leaf) + */ + protected long _previous; + + /** + * Next leaf node (only if this node is a leaf) + */ + protected long _next; + + /** + * Return the B+Tree that is the owner of this {@link BTreeNode}. + */ + public BTree getBTree() { + return _btree; + } + + /** + * No-argument constructor used by serialization. + */ + public BTreeNode() { + // empty + } + + /** + * Root node overflow constructor + */ + @SuppressWarnings("unchecked") + BTreeNode(BTree btree, BTreeNode root, BTreeNode overflow) + throws IOException { + _btree = btree; + + _isLeaf = false; + + _first = BTree.DEFAULT_SIZE - 2; + + _keys = (K[]) new Object[BTree.DEFAULT_SIZE]; + _keys[BTree.DEFAULT_SIZE - 2] = overflow.getLargestKey(); + _keys[BTree.DEFAULT_SIZE - 1] = root.getLargestKey(); + + _children = new long[BTree.DEFAULT_SIZE]; + _children[BTree.DEFAULT_SIZE - 2] = overflow._recid; + _children[BTree.DEFAULT_SIZE - 1] = root._recid; + + _recid = _btree._db.insert(this, this, false); + } + + /** + * Root node (first insert) constructor. + */ + @SuppressWarnings("unchecked") + BTreeNode(BTree btree, K key, V value) throws IOException { + _btree = btree; + + _isLeaf = true; + + _first = BTree.DEFAULT_SIZE - 2; + + _keys = (K[]) new Object[BTree.DEFAULT_SIZE]; + _keys[BTree.DEFAULT_SIZE - 2] = key; + _keys[BTree.DEFAULT_SIZE - 1] = null; // I am the root BTreeNode for now + + _values = new Object[BTree.DEFAULT_SIZE]; + _values[BTree.DEFAULT_SIZE - 2] = value; + _values[BTree.DEFAULT_SIZE - 1] = null; // I am the root BTreeNode for now + + _recid = _btree._db.insert(this, this, false); + } + + /** + * Overflow node constructor. Creates an empty BTreeNode. + */ + @SuppressWarnings("unchecked") + BTreeNode(BTree btree, boolean isLeaf) { + _btree = btree; + + _isLeaf = isLeaf; + + // node will initially be half-full + _first = BTree.DEFAULT_SIZE / 2; + + _keys = (K[]) new Object[BTree.DEFAULT_SIZE]; + if (isLeaf) { + _values = new Object[BTree.DEFAULT_SIZE]; + } else { + _children = new long[BTree.DEFAULT_SIZE]; + } + + try { + _recid = _btree._db.insert(this, this, false); + } catch (IOException e) { + throw new IOError(e); + } + } + + /** + * Get largest key under this BTreeNode. Null is considered to be the greatest + * possible key. + */ + K getLargestKey() { + return _keys[BTree.DEFAULT_SIZE - 1]; + } + + /** + * Return true if BTreeNode is empty. 
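+ * <p/>
+ * Entries are kept right-aligned in the node arrays, so the node is empty
+ * exactly when {@code _first} points at the last slot.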
+ */ + boolean isEmpty() { + if (_isLeaf) { + return (_first == _values.length - 1); + } else { + return (_first == _children.length - 1); + } + } + + /** + * Return true if BTreeNode is full. + */ + boolean isFull() { + return (_first == 0); + } + + /** + * Find the object associated with the given key. + * + * @param height Height of the current BTreeNode (zero is leaf node) + * @param key The key + * @return TupleBrowser positionned just before the given key, or before next + * greater key if key isn't found. + */ + BTree.BTreeTupleBrowser find(int height, final K key, + final boolean inclusive) throws IOException { + byte index = findChildren(key, inclusive); + + height -= 1; + + if (height == 0) { + // leaf node + return new Browser(this, index); + } else { + // non-leaf node + BTreeNode child = loadNode(_children[index]); + return child.find(height, key, inclusive); + } + } + + /** + * Find value associated with the given key. + * + * @param height Height of the current BTreeNode (zero is leaf node) + * @param key The key + * @return TupleBrowser positionned just before the given key, or before next + * greater key if key isn't found. + */ + V findValue(int height, K key) throws IOException { + byte index = findChildren(key, true); + + height -= 1; + + if (height == 0) { + + K key2 = _keys[index]; + // // get returns the matching key or the next ordered key, so we must + // // check if we have an exact match + if (key2 == null || compare(key, key2) != 0) + return null; + + // leaf node + if (_values[index] instanceof BTreeLazyRecord) + return ((BTreeLazyRecord) _values[index]).get(); + else + return (V) _values[index]; + + } else { + // non-leaf node + BTreeNode child = loadNode(_children[index]); + return child.findValue(height, key); + } + } + + /** + * Find first entry and return a browser positioned before it. + * + * @return TupleBrowser positionned just before the first entry. + */ + BTree.BTreeTupleBrowser findFirst() throws IOException { + if (_isLeaf) { + return new Browser(this, _first); + } else { + BTreeNode child = loadNode(_children[_first]); + return child.findFirst(); + } + } + + /** + * Deletes this BTreeNode and all children nodes from the record manager + */ + void delete() throws IOException { + if (_isLeaf) { + if (_next != 0) { + BTreeNode nextNode = loadNode(_next); + if (nextNode._previous == _recid) { // this consistency check can be + // removed in production code + nextNode._previous = _previous; + _btree._db.update(nextNode._recid, nextNode, nextNode); + } else { + throw new Error("Inconsistent data in BTree"); + } + } + if (_previous != 0) { + BTreeNode previousNode = loadNode(_previous); + if (previousNode._next != _recid) { // this consistency check can be + // removed in production code + previousNode._next = _next; + _btree._db.update(previousNode._recid, previousNode, previousNode); + } else { + throw new Error("Inconsistent data in BTree"); + } + } + } else { + int left = _first; + int right = BTree.DEFAULT_SIZE - 1; + + for (int i = left; i <= right; i++) { + BTreeNode childNode = loadNode(_children[i]); + childNode.delete(); + } + } + + _btree._db.delete(_recid); + } + + /** + * Insert the given key and value. + *
+ * Since the Btree does not support duplicate entries, the caller must specify + * whether to replace the existing value. + * + * @param height Height of the current BTreeNode (zero is leaf node) + * @param key Insert key + * @param value Insert value + * @param replace Set to true to replace the existing value, if one exists. + * @return Insertion result containing existing value OR a BTreeNode if the + * key was inserted and provoked a BTreeNode overflow. + */ + InsertResult insert(int height, K key, final V value, + final boolean replace) throws IOException { + InsertResult result; + long overflow; + + final byte index = findChildren(key, true); + + height -= 1; + if (height == 0) { + + // reuse InsertResult instance to avoid GC trashing on massive inserts + result = _btree.insertResultReuse; + _btree.insertResultReuse = null; + if (result == null) + result = new InsertResult(); + + // inserting on a leaf BTreeNode + overflow = -1; + if (DEBUG) { + System.out.println("BTreeNode.insert() Insert on leaf node key=" + key + + " value=" + value + " index=" + index); + } + if (compare(_keys[index], key) == 0) { + // key already exists + if (DEBUG) { + System.out.println("BTreeNode.insert() Key already exists."); + } + boolean isLazyRecord = _values[index] instanceof BTreeLazyRecord; + if (isLazyRecord) + result._existing = ((BTreeLazyRecord) _values[index]).get(); + else + result._existing = (V) _values[index]; + if (replace) { + // remove old lazy record if necesarry + if (isLazyRecord) + ((BTreeLazyRecord) _values[index]).delete(); + _values[index] = value; + _btree._db.update(_recid, this, this); + } + // return the existing key + return result; + } + } else { + // non-leaf BTreeNode + BTreeNode child = loadNode(_children[index]); + result = child.insert(height, key, value, replace); + + if (result._existing != null) { + // return existing key, if any. 
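+        // (whether the existing value was actually overwritten was decided
+        // at the leaf level, based on the 'replace' flag)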
+ return result; + } + + if (result._overflow == null) { + // no overflow means we're done with insertion + return result; + } + + // there was an overflow, we need to insert the overflow node on this + // BTreeNode + if (DEBUG) { + System.out.println("BTreeNode.insert() Overflow node: " + + result._overflow._recid); + } + key = result._overflow.getLargestKey(); + overflow = result._overflow._recid; + + // update child's largest key + _keys[index] = child.getLargestKey(); + + // clean result so we can reuse it + result._overflow = null; + } + + // if we get here, we need to insert a new entry on the BTreeNode before + // _children[ index ] + if (!isFull()) { + if (height == 0) { + insertEntry(this, index - 1, key, value); + } else { + insertChild(this, index - 1, key, overflow); + } + _btree._db.update(_recid, this, this); + return result; + } + + // node is full, we must divide the node + final byte half = BTree.DEFAULT_SIZE >> 1; + BTreeNode newNode = new BTreeNode(_btree, _isLeaf); + if (index < half) { + // move lower-half of entries to overflow node, including new entry + if (DEBUG) { + System.out + .println("BTreeNode.insert() move lower-half of entries to overflow BTreeNode, including new entry."); + } + if (height == 0) { + copyEntries(this, 0, newNode, half, index); + setEntry(newNode, half + index, key, value); + copyEntries(this, index, newNode, half + index + 1, half - index - 1); + } else { + copyChildren(this, 0, newNode, half, index); + setChild(newNode, half + index, key, overflow); + copyChildren(this, index, newNode, half + index + 1, half - index - 1); + } + } else { + // move lower-half of entries to overflow node, new entry stays on this + // node + if (DEBUG) { + System.out + .println("BTreeNode.insert() move lower-half of entries to overflow BTreeNode. New entry stays"); + } + if (height == 0) { + copyEntries(this, 0, newNode, half, half); + copyEntries(this, half, this, half - 1, index - half); + setEntry(this, index - 1, key, value); + } else { + copyChildren(this, 0, newNode, half, half); + copyChildren(this, half, this, half - 1, index - half); + setChild(this, index - 1, key, overflow); + } + } + + _first = half - 1; + + // nullify lower half of entries + for (int i = 0; i < _first; i++) { + if (height == 0) { + setEntry(this, i, null, null); + } else { + setChild(this, i, null, -1); + } + } + + if (_isLeaf) { + // link newly created node + newNode._previous = _previous; + newNode._next = _recid; + if (_previous != 0) { + BTreeNode previous = loadNode(_previous); + previous._next = newNode._recid; + _btree._db.update(_previous, previous, this); + + } + _previous = newNode._recid; + } + + _btree._db.update(_recid, this, this); + _btree._db.update(newNode._recid, newNode, this); + + result._overflow = newNode; + return result; + } + + /** + * Remove the entry associated with the given key. 
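+ * <p/>
+ * If the removal leaves a child node more than half-empty, entries are either
+ * stolen from a sibling or the two siblings are merged; the underflow is then
+ * propagated upwards via the returned {@code RemoveResult}.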
+ * + * @param height Height of the current BTreeNode (zero is leaf node) + * @param key Removal key + * @return Remove result object + */ + RemoveResult remove(int height, K key) throws IOException { + RemoveResult result; + + int half = BTree.DEFAULT_SIZE / 2; + byte index = findChildren(key, true); + + height -= 1; + if (height == 0) { + // remove leaf entry + if (compare(_keys[index], key) != 0) { + throw new IllegalArgumentException("Key not found: " + key); + } + result = new RemoveResult(); + + if (_values[index] instanceof BTreeLazyRecord) { + BTreeLazyRecord r = (BTreeLazyRecord) _values[index]; + result._value = r.get(); + r.delete(); + } else { + result._value = (V) _values[index]; + } + removeEntry(this, index); + + // update this node + _btree._db.update(_recid, this, this); + + } else { + // recurse into Btree to remove entry on a children node + BTreeNode child = loadNode(_children[index]); + result = child.remove(height, key); + + // update children + _keys[index] = child.getLargestKey(); + _btree._db.update(_recid, this, this); + + if (result._underflow) { + // underflow occured + if (child._first != half + 1) { + throw new IllegalStateException("Error during underflow [1]"); + } + if (index < _children.length - 1) { + // exists greater brother node + BTreeNode brother = loadNode(_children[index + 1]); + int bfirst = brother._first; + if (bfirst < half) { + // steal entries from "brother" node + int steal = (half - bfirst + 1) / 2; + brother._first += steal; + child._first -= steal; + if (child._isLeaf) { + copyEntries(child, half + 1, child, half + 1 - steal, half - 1); + copyEntries(brother, bfirst, child, 2 * half - steal, steal); + } else { + copyChildren(child, half + 1, child, half + 1 - steal, half - 1); + copyChildren(brother, bfirst, child, 2 * half - steal, steal); + } + + for (int i = bfirst; i < bfirst + steal; i++) { + if (brother._isLeaf) { + setEntry(brother, i, null, null); + } else { + setChild(brother, i, null, -1); + } + } + + // update child's largest key + _keys[index] = child.getLargestKey(); + + // no change in previous/next node + + // update nodes + _btree._db.update(_recid, this, this); + _btree._db.update(brother._recid, brother, this); + _btree._db.update(child._recid, child, this); + + } else { + // move all entries from node "child" to "brother" + if (brother._first != half) { + throw new IllegalStateException("Error during underflow [2]"); + } + + brother._first = 1; + if (child._isLeaf) { + copyEntries(child, half + 1, brother, 1, half - 1); + } else { + copyChildren(child, half + 1, brother, 1, half - 1); + } + _btree._db.update(brother._recid, brother, this); + + // remove "child" from current node + if (_isLeaf) { + copyEntries(this, _first, this, _first + 1, index - _first); + setEntry(this, _first, null, null); + } else { + copyChildren(this, _first, this, _first + 1, index - _first); + setChild(this, _first, null, -1); + } + _first += 1; + _btree._db.update(_recid, this, this); + + // re-link previous and next nodes + if (child._previous != 0) { + BTreeNode prev = loadNode(child._previous); + prev._next = child._next; + _btree._db.update(prev._recid, prev, this); + } + if (child._next != 0) { + BTreeNode next = loadNode(child._next); + next._previous = child._previous; + _btree._db.update(next._recid, next, this); + + } + + // delete "child" node + _btree._db.delete(child._recid); + } + } else { + // node "brother" is before "child" + BTreeNode brother = loadNode(_children[index - 1]); + int bfirst = brother._first; + if (bfirst < 
half) { + // steal entries from "brother" node + int steal = (half - bfirst + 1) / 2; + brother._first += steal; + child._first -= steal; + if (child._isLeaf) { + copyEntries(brother, 2 * half - steal, child, half + 1 - steal, + steal); + copyEntries(brother, bfirst, brother, bfirst + steal, 2 * half + - bfirst - steal); + } else { + copyChildren(brother, 2 * half - steal, child, half + 1 - steal, + steal); + copyChildren(brother, bfirst, brother, bfirst + steal, 2 * half + - bfirst - steal); + } + + for (int i = bfirst; i < bfirst + steal; i++) { + if (brother._isLeaf) { + setEntry(brother, i, null, null); + } else { + setChild(brother, i, null, -1); + } + } + + // update brother's largest key + _keys[index - 1] = brother.getLargestKey(); + + // no change in previous/next node + + // update nodes + _btree._db.update(_recid, this, this); + _btree._db.update(brother._recid, brother, this); + _btree._db.update(child._recid, child, this); + + } else { + // move all entries from node "brother" to "child" + if (brother._first != half) { + throw new IllegalStateException("Error during underflow [3]"); + } + + child._first = 1; + if (child._isLeaf) { + copyEntries(brother, half, child, 1, half); + } else { + copyChildren(brother, half, child, 1, half); + } + _btree._db.update(child._recid, child, this); + + // remove "brother" from current node + if (_isLeaf) { + copyEntries(this, _first, this, _first + 1, index - 1 - _first); + setEntry(this, _first, null, null); + } else { + copyChildren(this, _first, this, _first + 1, index - 1 - _first); + setChild(this, _first, null, -1); + } + _first += 1; + _btree._db.update(_recid, this, this); + + // re-link previous and next nodes + if (brother._previous != 0) { + BTreeNode prev = loadNode(brother._previous); + prev._next = brother._next; + _btree._db.update(prev._recid, prev, this); + } + if (brother._next != 0) { + BTreeNode next = loadNode(brother._next); + next._previous = brother._previous; + _btree._db.update(next._recid, next, this); + } + + // delete "brother" node + _btree._db.delete(brother._recid); + } + } + } + } + + // underflow if node is more than half-empty + result._underflow = _first > half; + + return result; + } + + /** + * Find the first children node with a key equal or greater than the given + * key. + * + * @return index of first children with equal or greater key. + */ + private byte findChildren(final K key, final boolean inclusive) { + int left = _first; + int right = BTree.DEFAULT_SIZE - 1; + int middle; + final int D = inclusive ? 0 : 1; + + // binary search + while (true) { + middle = (left + right) / 2; + if (compare(_keys[middle], key) < D) { + left = middle + 1; + } else { + right = middle; + } + if (left >= right) { + return (byte) right; + } + } + } + + /** + * Insert entry at given position. + */ + private static void insertEntry(BTreeNode node, int index, + K key, V value) { + K[] keys = node._keys; + Object[] values = node._values; + int start = node._first; + int count = index - node._first + 1; + + // shift entries to the left + System.arraycopy(keys, start, keys, start - 1, count); + System.arraycopy(values, start, values, start - 1, count); + node._first -= 1; + keys[index] = key; + values[index] = value; + } + + /** + * Insert child at given position. 
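+ * <p/>
+ * As in {@code insertEntry} above, the occupied (right-aligned) region from
+ * {@code _first} up to {@code index} is shifted one slot to the left and the
+ * new key/child pair is written at {@code index}. E.g. for keys
+ * {@code [_, b, d]} with {@code _first == 1}, inserting {@code c} at index 1
+ * copies {@code b} into slot 0 and stores {@code c} at slot 1, giving
+ * {@code [b, c, d]}.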
+ */ + private static void insertChild(BTreeNode node, int index, + K key, long child) { + K[] keys = node._keys; + long[] children = node._children; + int start = node._first; + int count = index - node._first + 1; + + // shift entries to the left + System.arraycopy(keys, start, keys, start - 1, count); + System.arraycopy(children, start, children, start - 1, count); + node._first -= 1; + keys[index] = key; + children[index] = child; + } + + /** + * Remove entry at given position. + */ + private static void removeEntry(BTreeNode node, int index) { + K[] keys = node._keys; + Object[] values = node._values; + int start = node._first; + int count = index - node._first; + + System.arraycopy(keys, start, keys, start + 1, count); + keys[start] = null; + System.arraycopy(values, start, values, start + 1, count); + values[start] = null; + node._first++; + } + + /** + * Set the entry at the given index. + */ + private static void setEntry(BTreeNode node, int index, K key, + V value) { + node._keys[index] = key; + node._values[index] = value; + } + + /** + * Set the child BTreeNode recid at the given index. + */ + private static void setChild(BTreeNode node, int index, K key, + long recid) { + node._keys[index] = key; + node._children[index] = recid; + } + + /** + * Copy entries between two nodes + */ + private static void copyEntries(BTreeNode source, + int indexSource, BTreeNode dest, int indexDest, int count) { + System.arraycopy(source._keys, indexSource, dest._keys, indexDest, count); + System.arraycopy(source._values, indexSource, dest._values, indexDest, + count); + } + + /** + * Copy child node recids between two nodes + */ + private static void copyChildren(BTreeNode source, + int indexSource, BTreeNode dest, int indexDest, int count) { + System.arraycopy(source._keys, indexSource, dest._keys, indexDest, count); + System.arraycopy(source._children, indexSource, dest._children, indexDest, + count); + } + + /** + * Load the node at the given recid. + */ + private BTreeNode loadNode(long recid) throws IOException { + BTreeNode child = _btree._db.fetch(recid, this); + child._recid = recid; + child._btree = _btree; + return child; + } + + private final int compare(final K value1, final K value2) { + if (value1 == null) { + return 1; + } + if (value2 == null) { + return -1; + } + + if (_btree._comparator == null) { + return ((Comparable) value1).compareTo(value2); + } else { + return _btree._comparator.compare(value1, value2); + } + + } + + /** + * Dump the structure of the tree on the screen. This is used for debugging + * purposes only. + */ + private void dump(int height) { + String prefix = ""; + for (int i = 0; i < height; i++) { + prefix += " "; + } + System.out.println(prefix + + "-------------------------------------- BTreeNode recid=" + _recid); + System.out.println(prefix + "first=" + _first); + for (int i = 0; i < BTree.DEFAULT_SIZE; i++) { + if (_isLeaf) { + System.out.println(prefix + "BTreeNode [" + i + "] " + _keys[i] + " " + + _values[i]); + } else { + System.out.println(prefix + "BTreeNode [" + i + "] " + _keys[i] + " " + + _children[i]); + } + } + System.out.println(prefix + "--------------------------------------"); + } + + /** + * Recursively dump the state of the BTree on screen. This is used for + * debugging purposes only. 
+ */ + void dumpRecursive(int height, int level) throws IOException { + height -= 1; + level += 1; + if (height > 0) { + for (byte i = _first; i < BTree.DEFAULT_SIZE; i++) { + if (_keys[i] == null) + break; + BTreeNode child = loadNode(_children[i]); + child.dump(level); + child.dumpRecursive(height, level); + } + } + } + + /** + * Deserialize the content of an object from a byte array. + */ + @SuppressWarnings("unchecked") + public BTreeNode deserialize(DataInput ois2) throws IOException { + DataInputOutput ois = (DataInputOutput) ois2; + + BTreeNode node = new BTreeNode(); + + switch (ois.readUnsignedByte()) { + case SerializationHeader.BTREE_NODE_LEAF: + node._isLeaf = true; + break; + case SerializationHeader.BTREE_NODE_NONLEAF: + node._isLeaf = false; + break; + default: + throw new InternalError("wrong BTreeNode header"); + } + + if (node._isLeaf) { + node._previous = LongPacker.unpackLong(ois); + node._next = LongPacker.unpackLong(ois); + } + + node._first = ois.readByte(); + + if (!node._isLeaf) { + node._children = new long[BTree.DEFAULT_SIZE]; + for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) { + node._children[i] = LongPacker.unpackLong(ois); + } + } + + if (!_btree.loadValues) + return node; + + try { + + node._keys = readKeys(ois, node._first); + + } catch (ClassNotFoundException except) { + throw new IOException(except.getMessage()); + } + + if (node._isLeaf) { + + try { + readValues(ois, node); + } catch (ClassNotFoundException except) { + throw new IOException(except); + } + } + + return node; + + } + + /** + * Serialize the content of an object into a byte array. + * + * @param obj Object to serialize + * @return a byte array representing the object's state + */ + public void serialize(DataOutput oos, BTreeNode obj) throws IOException { + + // note: It is assumed that BTreeNode instance doing the serialization is + // the parent + // of the BTreeNode object being serialized. + + BTreeNode node = obj; + + oos.writeByte(node._isLeaf ? SerializationHeader.BTREE_NODE_LEAF + : SerializationHeader.BTREE_NODE_NONLEAF); + if (node._isLeaf) { + LongPacker.packLong(oos, node._previous); + LongPacker.packLong(oos, node._next); + } + + oos.write(node._first); + + if (!node._isLeaf) { + for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) { + LongPacker.packLong(oos, node._children[i]); + } + } + + writeKeys(oos, node._keys, node._first); + + if (node._isLeaf && _btree.hasValues()) { + writeValues(oos, node); + } + } + + private void readValues(DataInputOutput ois, BTreeNode node) + throws IOException, ClassNotFoundException { + node._values = new Object[BTree.DEFAULT_SIZE]; + if (_btree.hasValues()) { + Serializer serializer = _btree.valueSerializer != null ? 
_btree.valueSerializer + : (Serializer) _btree.getRecordManager().defaultSerializer(); + for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) { + int header = ois.readUnsignedByte(); + if (header == BTreeLazyRecord.NULL) { + node._values[i] = null; + } else if (header == BTreeLazyRecord.LAZY_RECORD) { + long recid = LongPacker.unpackLong(ois); + node._values[i] = new BTreeLazyRecord(_btree._db, recid, serializer); + } else { + node._values[i] = BTreeLazyRecord.fastDeser(ois, serializer, header); + } + } + } else { + // create fake values + for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) { + if (node._keys[i] != null) + node._values[i] = JDBMUtils.EMPTY_STRING; + } + } + } + + private void writeValues(DataOutput oos, BTreeNode node) + throws IOException { + + DataInputOutput output = null; + Serializer serializer = _btree.valueSerializer != null ? _btree.valueSerializer + : _btree.getRecordManager().defaultSerializer(); + for (int i = node._first; i < BTree.DEFAULT_SIZE; i++) { + if (node._values[i] instanceof BTreeLazyRecord) { + oos.write(BTreeLazyRecord.LAZY_RECORD); + LongPacker.packLong(oos, ((BTreeLazyRecord) node._values[i]).recid); + } else if (node._values[i] != null) { + if (output == null) { + output = new DataInputOutput(); + } else { + output.reset(); + } + + serializer.serialize(output, node._values[i]); + + if (output.getPos() > BTreeLazyRecord.MAX_INTREE_RECORD_SIZE) { + // write as separate record + long recid = _btree._db.insert(output.toByteArray(), + BTreeLazyRecord.FAKE_SERIALIZER, true); + oos.write(BTreeLazyRecord.LAZY_RECORD); + LongPacker.packLong(oos, recid); + } else { + // write as part of btree + oos.write(output.getPos()); + oos.write(output.getBuf(), 0, output.getPos()); + } + } else { + oos.write(BTreeLazyRecord.NULL); + } + } + } + + private static final int ALL_NULL = 0; + private static final int ALL_INTEGERS = 1 << 5; + private static final int ALL_INTEGERS_NEGATIVE = 2 << 5; + private static final int ALL_LONGS = 3 << 5; + private static final int ALL_LONGS_NEGATIVE = 4 << 5; + private static final int ALL_STRINGS = 5 << 5; + private static final int ALL_OTHER = 6 << 5; + + private K[] readKeys(DataInput ois, final int firstUse) throws IOException, + ClassNotFoundException { + Object[] ret = new Object[BTree.DEFAULT_SIZE]; + final int type = ois.readUnsignedByte(); + if (type == ALL_NULL) { + return (K[]) ret; + } else if (type == ALL_INTEGERS || type == ALL_INTEGERS_NEGATIVE) { + long first = LongPacker.unpackLong(ois); + if (type == ALL_INTEGERS_NEGATIVE) + first = -first; + ret[firstUse] = Integer.valueOf((int) first); + for (int i = firstUse + 1; i < BTree.DEFAULT_SIZE; i++) { + // ret[i] = Serialization.readObject(ois); + long v = LongPacker.unpackLong(ois); + if (v == 0) + continue; // null + v = v + first; + ret[i] = Integer.valueOf((int) v); + first = v; + } + return (K[]) ret; + } else if (type == ALL_LONGS || type == ALL_LONGS_NEGATIVE) { + long first = LongPacker.unpackLong(ois); + if (type == ALL_LONGS_NEGATIVE) + first = -first; + + ret[firstUse] = Long.valueOf(first); + for (int i = firstUse + 1; i < BTree.DEFAULT_SIZE; i++) { + // ret[i] = Serialization.readObject(ois); + long v = LongPacker.unpackLong(ois); + if (v == 0) + continue; // null + v = v + first; + ret[i] = Long.valueOf(v); + first = v; + } + return (K[]) ret; + } else if (type == ALL_STRINGS) { + byte[] previous = null; + for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { + byte[] b = leadingValuePackRead(ois, previous, 0); + if (b == null) + continue; + ret[i] = 
new String(b, Serialization.UTF8); + previous = b; + } + return (K[]) ret; + + } else if (type == ALL_OTHER) { + + // TODO why this block is here? + if (_btree.keySerializer == null + || _btree.keySerializer == _btree.getRecordManager() + .defaultSerializer()) { + for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { + ret[i] = _btree.getRecordManager().defaultSerializer() + .deserialize(ois); + } + return (K[]) ret; + } + + Serializer ser = _btree.keySerializer != null ? _btree.keySerializer + : _btree.getRecordManager().defaultSerializer(); + DataInputOutput in2 = null; + byte[] previous = null; + for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { + byte[] b = leadingValuePackRead(ois, previous, 0); + if (b == null) + continue; + if (in2 == null) { + in2 = new DataInputOutput(); + } + in2.reset(b); + ret[i] = ser.deserialize(in2); + previous = b; + } + return (K[]) ret; + + } else { + throw new InternalError("unknown BTreeNode header type: " + type); + } + + } + + @SuppressWarnings("unchecked") + private void writeKeys(DataOutput oos, K[] keys, final int firstUse) + throws IOException { + if (keys.length != BTree.DEFAULT_SIZE) + throw new IllegalArgumentException("wrong keys size"); + + // check if all items on key are null + boolean allNull = true; + for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { + if (keys[i] != null) { + allNull = false; + break; + } + } + if (allNull) { + oos.write(ALL_NULL); + return; + } + + /** + * Special compression to compress Long and Integer + */ + if ((_btree._comparator == JDBMUtils.COMPARABLE_COMPARATOR || _btree._comparator == null) + && (_btree.keySerializer == null || _btree.keySerializer == _btree + .getRecordManager().defaultSerializer())) { + boolean allInteger = true; + for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { + if (keys[i] != null && keys[i].getClass() != Integer.class) { + allInteger = false; + break; + } + } + boolean allLong = true; + for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { + if (keys[i] != null && (keys[i].getClass() != Long.class || + // special case to exclude Long.MIN_VALUE from conversion, causes + // problems to LongPacker + ((Long) keys[i]).longValue() == Long.MIN_VALUE)) { + allLong = false; + break; + } + } + + if (allLong) { + // check that diff between MIN and MAX fits into PACKED_LONG + long max = Long.MIN_VALUE; + long min = Long.MAX_VALUE; + for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) { + if (keys[i] == null) + continue; + long v = (Long) keys[i]; + if (v > max) + max = v; + if (v < min) + min = v; + } + // now convert to Double to prevent overflow errors + double max2 = max; + double min2 = min; + double maxDiff = Long.MAX_VALUE; + if (max2 - min2 > maxDiff / 2) // divide by two just to by sure + allLong = false; + + } + + if (allLong && allInteger) + throw new InternalError(); + + if (allLong || allInteger) { + long first = ((Number) keys[firstUse]).longValue(); + // write header + if (allInteger) { + if (first > 0) + oos.write(ALL_INTEGERS); + else + oos.write(ALL_INTEGERS_NEGATIVE); + } else if (allLong) { + if (first > 0) + oos.write(ALL_LONGS); + else + oos.write(ALL_LONGS_NEGATIVE); + } else { + throw new InternalError(); + } + + // write first + LongPacker.packLong(oos, Math.abs(first)); + // write others + for (int i = firstUse + 1; i < BTree.DEFAULT_SIZE; i++) { + // Serialization.writeObject(oos, keys[i]); + if (keys[i] == null) + LongPacker.packLong(oos, 0); + else { + long v = ((Number) keys[i]).longValue(); + if (v <= first) + throw new InternalError("not ordered"); + 
LongPacker.packLong(oos, v - first);
+ first = v;
+ }
+ }
+ return;
+ } else {
+ // another special case for Strings
+ boolean allString = true;
+ for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
+ if (keys[i] != null && (keys[i].getClass() != String.class)) {
+ allString = false;
+ break;
+ }
+ }
+ if (allString) {
+ oos.write(ALL_STRINGS);
+ byte[] previous = null;
+ for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
+ if (keys[i] == null) {
+ leadingValuePackWrite(oos, null, previous, 0);
+ } else {
+ byte[] b = ((String) keys[i]).getBytes(Serialization.UTF8);
+ leadingValuePackWrite(oos, b, previous, 0);
+ previous = b;
+ }
+ }
+ return;
+ }
+ }
+ }
+
+ /**
+ * fallback case: a custom serializer is provided, or the keys are of some
+ * other type
+ */
+ oos.write(ALL_OTHER);
+ if (_btree.keySerializer == null
+ || _btree.keySerializer == _btree.getRecordManager()
+ .defaultSerializer()) {
+ for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
+ _btree.getRecordManager().defaultSerializer().serialize(oos, keys[i]);
+ }
+ return;
+ }
+
+ // custom serializer is provided, use it
+ Serializer ser = _btree.keySerializer;
+ byte[] previous = null;
+
+ DataInputOutput out3 = new DataInputOutput();
+ for (int i = firstUse; i < BTree.DEFAULT_SIZE; i++) {
+ if (keys[i] == null) {
+ leadingValuePackWrite(oos, null, previous, 0);
+ } else {
+ out3.reset();
+ ser.serialize(out3, keys[i]);
+ byte[] b = out3.toByteArray();
+ leadingValuePackWrite(oos, b, previous, 0);
+ previous = b;
+ }
+ }
+
+ }
+
+ public void defrag(DBStore r1, DBStore r2) throws IOException {
+ if (_children != null)
+ for (long child : _children) {
+ if (child == 0)
+ continue;
+ byte[] data = r1.fetchRaw(child);
+ r2.forceInsert(child, data);
+ BTreeNode t = deserialize(new DataInputOutput(data));
+ t._btree = _btree;
+ t.defrag(r1, r2);
+ }
+ }
+
+ /**
+ * STATIC INNER CLASS Result from insert() method call
+ */
+ static final class InsertResult {
+
+ /**
+ * Overflow node.
+ */
+ BTreeNode _overflow;
+
+ /**
+ * Existing value for the insertion key.
+ */
+ V _existing;
+
+ }
+
+ /**
+ * STATIC INNER CLASS Result from remove() method call
+ */
+ static final class RemoveResult {
+
+ /**
+ * Set to true if underlying nodes underflowed
+ */
+ boolean _underflow;
+
+ /**
+ * Removed entry value
+ */
+ V _value;
+ }
+
+ /**
+ * PRIVATE INNER CLASS Browser to traverse leaf nodes.
+ */
+ static final class Browser implements BTree.BTreeTupleBrowser {
+
+ /**
+ * Current node.
+ */
+ private BTreeNode _node;
+
+ /**
+ * Current index in the node. The index is positioned on the next tuple to
+ * return.
+ */
+ private byte _index;
+
+ private int expectedModCount;
+
+ /**
+ * Create a browser.
+ *
+ * @param node Current node
+ * @param index Position of the next tuple to return.
+ */
+ Browser(BTreeNode node, byte index) {
+ _node = node;
+ _index = index;
+ expectedModCount = node._btree.modCount;
+ }
+
+ public boolean getNext(BTree.BTreeTuple tuple) throws IOException {
+ // check _node first: it is null after remove() consumed the last entry,
+ // and dereferencing it for the modCount check would throw NPE
+ if (_node == null) {
+ // last record in iterator was deleted, so iterator is at end of node
+ return false;
+ }
+ if (expectedModCount != _node._btree.modCount)
+ throw new ConcurrentModificationException();
+
+ if (_index < BTree.DEFAULT_SIZE) {
+ if (_node._keys[_index] == null) {
+ // reached end of the tree.
+ return false;
+ }
+ } else if (_node._next != 0) {
+ // move to next node
+ _node = _node.loadNode(_node._next);
+ _index = _node._first;
+ }
+ tuple.key = _node._keys[_index];
+ if (_node._values[_index] instanceof BTreeLazyRecord)
+ tuple.value = ((BTreeLazyRecord) _node._values[_index]).get();
+ else
+ tuple.value = (V) _node._values[_index];
+ _index++;
+ return true;
+ }
+
+ public boolean getPrevious(BTree.BTreeTuple tuple) throws IOException {
+ // check _node first for the same reason as in getNext()
+ if (_node == null) {
+ // deleted last record, but this situation is only supported on getNext
+ throw new InternalError();
+ }
+ if (expectedModCount != _node._btree.modCount)
+ throw new ConcurrentModificationException();
+
+ if (_index == _node._first) {
+
+ if (_node._previous != 0) {
+ _node = _node.loadNode(_node._previous);
+ _index = BTree.DEFAULT_SIZE;
+ } else {
+ // reached beginning of the tree
+ return false;
+ }
+ }
+ _index--;
+ tuple.key = _node._keys[_index];
+ if (_node._values[_index] instanceof BTreeLazyRecord)
+ tuple.value = ((BTreeLazyRecord) _node._values[_index]).get();
+ else
+ tuple.value = (V) _node._values[_index];
+
+ return true;
+
+ }
+
+ public void remove(K key) throws IOException {
+ if (expectedModCount != _node._btree.modCount)
+ throw new ConcurrentModificationException();
+
+ _node._btree.remove(key);
+ expectedModCount++;
+
+ // An entry was removed, which may trigger a tree rebalance. That would
+ // change the current node layout, so find our position again.
+ BTree.BTreeTupleBrowser b = _node._btree.browse(key, true);
+ // the browser is positioned just before the removed value, so check
+ // whether a following value exists
+ if (b.getNext(new BTree.BTreeTuple(null, null))) {
+ // a next value exists, copy its state
+ Browser b2 = (Browser) b;
+ this._node = b2._node;
+ this._index = b2._index;
+ } else {
+ this._node = null;
+ this._index = -1;
+ }
+
+ }
+ }
+
+ /**
+ * Used for debugging and testing only. Recursively obtains the recids of all
+ * child BTreeNodes and adds them to the 'out' list.
+ *
+ * @param out
+ * @param height
+ * @throws IOException
+ */
+ void dumpChildNodeRecIDs(List out, int height) throws IOException {
+ height -= 1;
+ if (height > 0) {
+ for (byte i = _first; i < BTree.DEFAULT_SIZE; i++) {
+ if (_children[i] == 0)
+ continue;
+
+ BTreeNode child = loadNode(_children[i]);
+ out.add(new Long(child._recid));
+ child.dumpChildNodeRecIDs(out, height);
+ }
+ }
+ }
+
+ /**
+ * Read previously written data
+ *
+ * @author Kevin Day
+ */
+ static byte[] leadingValuePackRead(DataInput in, byte[] previous,
+ int ignoreLeadingCount) throws IOException {
+ int len = LongPacker.unpackInt(in) - 1; // 0 indicates null
+ if (len == -1)
+ return null;
+
+ int actualCommon = LongPacker.unpackInt(in);
+
+ byte[] buf = new byte[len];
+
+ if (previous == null) {
+ actualCommon = 0;
+ }
+
+ if (actualCommon > 0) {
+ in.readFully(buf, 0, ignoreLeadingCount);
+ System.arraycopy(previous, ignoreLeadingCount, buf, ignoreLeadingCount,
+ actualCommon - ignoreLeadingCount);
+ }
+ in.readFully(buf, actualCommon, len - actualCommon);
+ return buf;
+ }
+
+ /**
+ * This method is used for delta compression for keys. Writes the contents of
+ * buf to the DataOutput out, with special encoding if there are common
+ * leading bytes in the previous group stored by this compressor.
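+ * <p>
+ * Worked example (added for clarity, not part of the original comment): with
+ * previous = "apple" and buf = "apply" (as UTF-8 bytes) and
+ * ignoreLeadingCount = 0, the common prefix "appl" is 4 bytes, so the output
+ * is the packed length 6 (buf.length + 1, since 0 marks null), the packed
+ * common count 4, and the single differing byte 'y'. leadingValuePackRead()
+ * reverses this by copying the 4 common bytes from the previous value and
+ * reading the remaining byte from the stream.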
+ * + * @author Kevin Day + */ + static void leadingValuePackWrite(DataOutput out, byte[] buf, + byte[] previous, int ignoreLeadingCount) throws IOException { + if (buf == null) { + LongPacker.packInt(out, 0); + return; + } + + int actualCommon = ignoreLeadingCount; + + if (previous != null) { + int maxCommon = buf.length > previous.length ? previous.length + : buf.length; + + if (maxCommon > Short.MAX_VALUE) + maxCommon = Short.MAX_VALUE; + + for (; actualCommon < maxCommon; actualCommon++) { + if (buf[actualCommon] != previous[actualCommon]) + break; + } + } + + // there are enough common bytes to justify compression + LongPacker.packInt(out, buf.length + 1);// store as +1, 0 indicates null + LongPacker.packInt(out, actualCommon); + out.write(buf, 0, ignoreLeadingCount); + out.write(buf, actualCommon, buf.length - actualCommon); + + } + + BTreeNode loadLastChildNode() throws IOException { + return loadNode(_children[BTree.DEFAULT_SIZE - 1]); + } + +} Index: graph/src/main/java/org/apache/jdbm/DBCache.java =================================================================== --- graph/src/main/java/org/apache/jdbm/DBCache.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/DBCache.java (Arbeitskopie) @@ -0,0 +1,173 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOError; +import java.io.IOException; +import java.util.Comparator; +import java.util.Iterator; + +import javax.crypto.Cipher; + +/** + * Abstract class with common cache functionality + */ +public abstract class DBCache extends DBStore { + + static final int NUM_OF_DIRTY_RECORDS_BEFORE_AUTOCOMIT = 1024; + + static final byte NONE = 1; + static final byte MRU = 2; + static final byte WEAK = 3; + static final byte SOFT = 4; + static final byte HARD = 5; + + static final class DirtyCacheEntry { + long _recid; // TODO recid is already part of _hashDirties, so this field + // could be removed to save memory + Object _obj; + Serializer _serializer; + } + + /** + * Dirty status of _hash CacheEntry Values + */ + final protected LongHashMap _hashDirties = new LongHashMap(); + + private Serializer cachedDefaultSerializer = null; + + /** + * Construct a CacheRecordManager wrapping another DB and using a given cache + * policy. 
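+ * <p>
+ * Note (added for clarity): dirty records accumulate in _hashDirties and are
+ * written back by updateCacheEntries(), which commit() invokes; writes are
+ * applied in ascending recid order (see DIRTY_COMPARATOR below).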
+ */ + public DBCache(String filename, boolean readonly, + boolean transactionDisabled, Cipher cipherIn, Cipher cipherOut, + boolean useRandomAccessFile, boolean deleteFilesAfterClose, + boolean lockingDisabled) { + + super(filename, readonly, transactionDisabled, cipherIn, cipherOut, + useRandomAccessFile, deleteFilesAfterClose, lockingDisabled); + + } + + @SuppressWarnings("rawtypes") + @Override + public Serializer defaultSerializer() { + if (cachedDefaultSerializer == null) + cachedDefaultSerializer = super.defaultSerializer(); + return cachedDefaultSerializer; + } + + @Override + boolean needsAutoCommit() { + return super.needsAutoCommit() + || (transactionsDisabled && !commitInProgress && _hashDirties.size() > NUM_OF_DIRTY_RECORDS_BEFORE_AUTOCOMIT); + } + + @Override + public long insert(final A obj, final Serializer serializer, + final boolean disableCache) throws IOException { + checkNotClosed(); + + if (super.needsAutoCommit()) + commit(); + + if (disableCache) + return super.insert(obj, serializer, disableCache); + + // prealocate recid so we have something to return + final long recid = super.insert(PREALOCATE_OBJ, null, disableCache); + + // super.update(recid, obj,serializer); + + // return super.insert(obj,serializer,disableCache); + + // and create new dirty record for future update + final DirtyCacheEntry e = new DirtyCacheEntry(); + e._recid = recid; + e._obj = obj; + e._serializer = serializer; + _hashDirties.put(recid, e); + + return recid; + } + + @Override + public void commit() { + try { + commitInProgress = true; + updateCacheEntries(); + super.commit(); + } finally { + commitInProgress = false; + } + } + + @Override + public void rollback() { + cachedDefaultSerializer = null; + _hashDirties.clear(); + super.rollback(); + } + + private static final Comparator DIRTY_COMPARATOR = new Comparator() { + @Override + final public int compare(DirtyCacheEntry o1, DirtyCacheEntry o2) { + return (int) (o1._recid - o2._recid); + + } + }; + + /** + * Update all dirty cache objects to the underlying DB. + */ + @SuppressWarnings("unchecked") + protected void updateCacheEntries() { + try { + while (!_hashDirties.isEmpty()) { + // make defensive copy of values as _db.update() may trigger changes + // in db + // and this would modify dirties again + DirtyCacheEntry[] vals = new DirtyCacheEntry[_hashDirties.size()]; + Iterator iter = _hashDirties.valuesIterator(); + + for (int i = 0; i < vals.length; i++) { + vals[i] = iter.next(); + } + iter = null; + + java.util.Arrays.sort(vals, DIRTY_COMPARATOR); + + for (int i = 0; i < vals.length; i++) { + final DirtyCacheEntry entry = vals[i]; + vals[i] = null; + super.update(entry._recid, entry._obj, entry._serializer); + _hashDirties.remove(entry._recid); + + } + + // update may have triggered more records to be added into dirties, so + // repeat until all records are written. + } + } catch (IOException e) { + throw new IOError(e); + } + + } + +} Index: graph/src/main/java/org/apache/jdbm/BTreeSet.java =================================================================== --- graph/src/main/java/org/apache/jdbm/BTreeSet.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/BTreeSet.java (Arbeitskopie) @@ -0,0 +1,215 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jdbm;
+
+import java.util.AbstractSet;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.SortedSet;
+
+/**
+ * Wrapper class for SortedMap to implement
+ * NavigableSet
+ *
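+ * <p>
+ * Illustrative usage sketch (added for clarity; it assumes the DB factory
+ * exposes a createTreeSet(String) method alongside its other create*
+ * factories - an assumption, not something this patch shows):
+ *
+ * <pre>
+ * DB db = DBMaker.openFile(fileName).make();
+ * NavigableSet<String> set = db.createTreeSet("names");
+ * set.add("bob");
+ * set.add("alice");
+ * set.first(); // "alice" - keys come back in sorted order
+ * db.commit();
+ * db.close();
+ * </pre>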

+ * This code originally comes from Apache Harmony, was adapted by Jan Kotek for + * JDBM + */ +public final class BTreeSet extends AbstractSet implements + NavigableSet { + + /** + * use keyset from this map + */ + final BTreeMap map; + + BTreeSet(BTreeMap map) { + this.map = map; + } + + @Override + public boolean add(E object) { + return map.put(object, JDBMUtils.EMPTY_STRING) == null; + } + + @Override + public boolean addAll(Collection collection) { + return super.addAll(collection); + } + + @Override + public void clear() { + map.clear(); + } + + @Override + public Comparator comparator() { + return map.comparator(); + } + + @Override + public boolean contains(Object object) { + return map.containsKey(object); + } + + @Override + public boolean isEmpty() { + return map.isEmpty(); + } + + @Override + public E lower(E e) { + return map.lowerKey(e); + } + + @Override + public E floor(E e) { + return map.floorKey(e); + } + + @Override + public E ceiling(E e) { + return map.ceilingKey(e); + } + + @Override + public E higher(E e) { + return map.higherKey(e); + } + + @Override + public E pollFirst() { + Map.Entry e = map.pollFirstEntry(); + return e != null ? e.getKey() : null; + } + + @Override + public E pollLast() { + Map.Entry e = map.pollLastEntry(); + return e != null ? e.getKey() : null; + } + + @Override + public Iterator iterator() { + final Iterator> iter = map.entrySet().iterator(); + return new Iterator() { + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public E next() { + Map.Entry e = iter.next(); + return e != null ? e.getKey() : null; + } + + @Override + public void remove() { + iter.remove(); + } + }; + } + + @Override + public NavigableSet descendingSet() { + return map.descendingKeySet(); + } + + @Override + public Iterator descendingIterator() { + return map.descendingKeySet().iterator(); + } + + @Override + public NavigableSet subSet(E fromElement, boolean fromInclusive, + E toElement, boolean toInclusive) { + return map.subMap(fromElement, fromInclusive, toElement, toInclusive) + .navigableKeySet(); + } + + @Override + public NavigableSet headSet(E toElement, boolean inclusive) { + return map.headMap(toElement, inclusive).navigableKeySet(); + } + + @Override + public NavigableSet tailSet(E fromElement, boolean inclusive) { + return map.tailMap(fromElement, inclusive).navigableKeySet(); + } + + @Override + public boolean remove(Object object) { + return map.remove(object) != null; + } + + @Override + public int size() { + return map.size(); + } + + @Override + public E first() { + return map.firstKey(); + } + + @Override + public E last() { + return map.lastKey(); + } + + @Override + public SortedSet subSet(E start, E end) { + Comparator c = map.comparator(); + int compare = (c == null) ? 
((Comparable) start).compareTo(end) : c
+ .compare(start, end);
+ if (compare <= 0) {
+ return new BTreeSet((BTreeMap) map.subMap(start, true, end,
+ false));
+ }
+ throw new IllegalArgumentException();
+ }
+
+ @Override
+ public SortedSet headSet(E end) {
+ // Check for errors
+ Comparator c = map.comparator();
+ if (c == null) {
+ ((Comparable) end).compareTo(end);
+ } else {
+ c.compare(end, end);
+ }
+ return new BTreeSet((BTreeMap) map.headMap(end, false));
+ }
+
+ @Override
+ public SortedSet tailSet(E start) {
+ // Check for errors
+ Comparator c = map.comparator();
+ if (c == null) {
+ ((Comparable) start).compareTo(start);
+ } else {
+ c.compare(start, start);
+ }
+ return new BTreeSet((BTreeMap) map.tailMap(start, true));
+ }
+
+}
Index: graph/src/main/java/org/apache/jdbm/PhysicalFreeRowIdManager.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/PhysicalFreeRowIdManager.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/PhysicalFreeRowIdManager.java (Arbeitskopie)
@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * This class manages free physical rowid pages and provides methods to free
+ * and allocate physical rowids on a high level.
+ */
+public final class PhysicalFreeRowIdManager {
+
+ /**
+ * maximal record size which can be held. If a record crosses multiple pages,
+ * it is trimmed before being added to the free list
+ */
+ static final int MAX_REC_SIZE = Storage.PAGE_SIZE * 2;
+
+ /** where data on the root page starts; there is no extra data in the page header */
+ static final int ROOT_HEADER_SIZE = Magic.PAGE_HEADER_SIZE;
+
+ /** page header size for slot page */
+ static final int SLOT_PAGE_HEADER_SIZE = Magic.PAGE_HEADER_SIZE
+ + Magic.SZ_SHORT + Magic.SZ_SIX_BYTE_LONG;
+
+ /** number of recids on slot page */
+ static final int OFFSET_SLOT_PAGE_REC_COUNT = Magic.PAGE_HEADER_SIZE;
+
+ static final int SLOT_PAGE_REC_NUM = (Storage.PAGE_SIZE - SLOT_PAGE_HEADER_SIZE) / 6;
+
+ /** pointer to the next slot page in the slot page header */
+ static final int OFFSET_SLOT_PAGE_NEXT = Magic.PAGE_HEADER_SIZE
+ + Magic.SZ_SHORT;
+
+ /** number of size slots held in the root page; 6 is the size of a page pointer */
+ static final int MAX_RECIDS_PER_PAGE = (Storage.PAGE_SIZE - ROOT_HEADER_SIZE - 6) / 6;
+
+ /**
+ * free records are grouped into slots by record size. This is the max
+ * difference in record size per group.
+ */
+ static final int ROOT_SLOT_SIZE = 1 + MAX_REC_SIZE / MAX_RECIDS_PER_PAGE;
+
+ protected final PageFile file;
+
+ protected final PageManager pageman;
+
+ /**
+ * list of free phys slots in the current transaction.
First two bytes are size,
+ * last 6 bytes are recid
+ */
+ private long[] inTrans = new long[8];
+ private int inTransSize = 0;
+
+ /**
+ * Creates a new instance using the indicated record file and page manager.
+ */
+ PhysicalFreeRowIdManager(PageFile file, PageManager pageman)
+ throws IOException {
+ this.file = file;
+ this.pageman = pageman;
+ }
+
+ long getFreeRecord(final int size) throws IOException {
+ if (size >= MAX_REC_SIZE)
+ return 0;
+
+ final PageIo root = getRootPage();
+ // look one slot up, so any record found there is at least 'size' bytes
+ final int rootPageOffset = sizeToRootOffset(size + ROOT_SLOT_SIZE);
+ final long slotPageId = root.readSixByteLong(rootPageOffset);
+
+ if (slotPageId == 0) {
+ file.release(root);
+ return 0;
+ }
+
+ PageIo slotPage = file.get(slotPageId);
+ if (slotPage.readShort(Magic.PAGE_HEADER_O_MAGIC) != Magic.PAGE_MAGIC
+ + Magic.FREEPHYSIDS_PAGE)
+ throw new InternalError();
+
+ short recidCount = slotPage.readShort(OFFSET_SLOT_PAGE_REC_COUNT);
+ if (recidCount <= 0) {
+ throw new InternalError();
+ }
+
+ final int offset = (recidCount - 1) * 6 + SLOT_PAGE_HEADER_SIZE;
+ final long recid = slotPage.readSixByteLong(offset);
+
+ recidCount--;
+ if (recidCount > 0) {
+ // decrease counter and zero out old record
+ slotPage.writeSixByteLong(offset, 0);
+ slotPage.writeShort(OFFSET_SLOT_PAGE_REC_COUNT, recidCount);
+ file.release(root);
+ file.release(slotPage);
+ } else {
+ // release this page
+ long prevSlotPageId = slotPage.readSixByteLong(OFFSET_SLOT_PAGE_NEXT);
+ root.writeSixByteLong(rootPageOffset, prevSlotPageId);
+ file.release(root);
+ file.release(slotPage);
+ pageman.free(Magic.FREEPHYSIDS_PAGE, slotPageId);
+
+ }
+
+ return recid;
+ }
+
+ /** maps a record size to the byte offset of its size slot on the root page */
+ static final int sizeToRootOffset(int size) {
+ return ROOT_HEADER_SIZE + 6 * (size / ROOT_SLOT_SIZE);
+ }
+
+ /**
+ * Puts the indicated rowid on the free list, where it awaits commit.
+ */
+ void putFreeRecord(final long rowid, final int size) throws IOException {
+ // ensure capacity
+ if (inTransSize == inTrans.length) {
+ inTrans = Arrays.copyOf(inTrans, inTrans.length * 2);
+ }
+ inTrans[inTransSize] = rowid + (((long) size) << 48);
+ inTransSize++;
+ }
+
+ public void commit() throws IOException {
+
+ if (inTransSize == 0)
+ return;
+
+ // toIndex is exclusive, so pass inTransSize to sort the whole range
+ Arrays.sort(inTrans, 0, inTransSize);
+
+ // write all uncommitted free records
+ final PageIo root = getRootPage();
+ PageIo slotPage = null;
+ for (int rowIdPos = 0; rowIdPos < inTransSize; rowIdPos++) {
+ final int size = (int) (inTrans[rowIdPos] >>> 48);
+
+ final long rowid = inTrans[rowIdPos] & 0x0000FFFFFFFFFFFFL;
+ final int rootPageOffset = sizeToRootOffset(size);
+
+ long slotPageId = root.readSixByteLong(rootPageOffset);
+ if (slotPageId == 0) {
+ if (slotPage != null)
+ file.release(slotPage);
+ // create new page for this slot
+ slotPageId = pageman.allocate(Magic.FREEPHYSIDS_PAGE);
+ root.writeSixByteLong(rootPageOffset, slotPageId);
+ }
+
+ if (slotPage == null || slotPage.getPageId() != slotPageId) {
+ if (slotPage != null)
+ file.release(slotPage);
+ slotPage = file.get(slotPageId);
+ }
+ if (slotPage.readShort(Magic.PAGE_HEADER_O_MAGIC) != Magic.PAGE_MAGIC
+ + Magic.FREEPHYSIDS_PAGE)
+ throw new InternalError();
+
+ short recidCount = slotPage.readShort(OFFSET_SLOT_PAGE_REC_COUNT);
+ if (recidCount == MAX_RECIDS_PER_PAGE) {
+ file.release(slotPage);
+ // allocate new slot page and update links
+ final long newSlotPageId = pageman.allocate(Magic.FREEPHYSIDS_PAGE);
+ slotPage = file.get(newSlotPageId);
+ slotPage.writeSixByteLong(OFFSET_SLOT_PAGE_NEXT, slotPageId);
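+ // the new page starts empty; the old full page stays reachable through
+ // the OFFSET_SLOT_PAGE_NEXT pointer written above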
slotPage.writeShort(OFFSET_SLOT_PAGE_REC_COUNT, (short) 0);
+ recidCount = 0;
+ slotPageId = newSlotPageId;
+ root.writeSixByteLong(rootPageOffset, newSlotPageId);
+ }
+
+ // write new recid
+ slotPage.writeSixByteLong(recidCount * 6 + SLOT_PAGE_HEADER_SIZE, rowid);
+
+ // and increase count
+ recidCount++;
+ slotPage.writeShort(OFFSET_SLOT_PAGE_REC_COUNT, recidCount);
+
+ }
+ if (slotPage != null)
+ file.release(slotPage);
+
+ file.release(root);
+ clearFreeInTrans();
+ }
+
+ public void rollback() {
+ clearFreeInTrans();
+ }
+
+ private void clearFreeInTrans() {
+ if (inTrans.length > 128)
+ inTrans = new long[8];
+ inTransSize = 0;
+ }
+
+ /** Return the free physical row root page, creating it if it does not exist. */
+ final PageIo getRootPage() throws IOException {
+ long pageId = pageman.getFirst(Magic.FREEPHYSIDS_ROOT_PAGE);
+ if (pageId == 0) {
+ pageId = pageman.allocate(Magic.FREEPHYSIDS_ROOT_PAGE);
+ }
+ return file.get(pageId);
+ }
+}
Index: graph/src/main/java/org/apache/jdbm/HTreeSet.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/HTreeSet.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/HTreeSet.java (Arbeitskopie)
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.util.AbstractSet;
+import java.util.Iterator;
+
+/**
+ * Wrapper for HTree to implement the java.util.Set interface
+ */
+public final class HTreeSet extends AbstractSet {
+
+ final HTree map;
+
+ HTreeSet(HTree map) {
+ this.map = map;
+ }
+
+ @Override
+ public Iterator iterator() {
+ return map.keySet().iterator();
+ }
+
+ @Override
+ public int size() {
+ return map.size();
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return map.isEmpty();
+ }
+
+ @Override
+ public boolean contains(Object o) {
+ return map.containsKey(o);
+ }
+
+ @Override
+ public boolean add(E e) {
+ return map.put(e, JDBMUtils.EMPTY_STRING) == null;
+ }
+
+ @Override
+ public boolean remove(Object o) {
+ return map.remove(o) == JDBMUtils.EMPTY_STRING;
+ }
+
+ @Override
+ public void clear() {
+ map.clear();
+ }
+
+}
Index: graph/src/main/java/org/apache/jdbm/LinkedList.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/LinkedList.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/LinkedList.java (Arbeitskopie)
@@ -0,0 +1,482 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jdbm;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOError;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.AbstractSequentialList;
+import java.util.ConcurrentModificationException;
+import java.util.Iterator;
+import java.util.ListIterator;
+import java.util.NoSuchElementException;
+
+/**
+ * LinkedList which stores its nodes on disk.
+ */
+public final class LinkedList extends AbstractSequentialList {
+
+ private DBAbstract db;
+
+ final long rootRecid;
+ /**
+ * size limit; not currently used, but kept for forward compatibility.
+ * Zero means no limit.
+ */
+ long sizeLimit = 0;
+
+ static final class Root {
+ long first;
+ long last;
+ long size;
+ }
+
+ private static final Serializer ROOT_SERIALIZER = new Serializer() {
+
+ @Override
+ public void serialize(DataOutput out, Root obj) throws IOException {
+ LongPacker.packLong(out, obj.first);
+ LongPacker.packLong(out, obj.last);
+ LongPacker.packLong(out, obj.size);
+ }
+
+ @Override
+ public Root deserialize(DataInput in) throws IOException,
+ ClassNotFoundException {
+ Root r = new Root();
+ r.first = LongPacker.unpackLong(in);
+ r.last = LongPacker.unpackLong(in);
+ r.size = LongPacker.unpackLong(in);
+ return r;
+ }
+ };
+
+ private Serializer valueSerializer;
+
+ /**
+ * indicates that entry values should not be loaded during deserialization,
+ * used during defragmentation
+ */
+ protected boolean loadValues = true;
+
+ /** constructor used for deserialization */
+ LinkedList(DBAbstract db, long rootRecid, Serializer valueSerializer) {
+ this.db = db;
+ this.rootRecid = rootRecid;
+ this.valueSerializer = valueSerializer;
+ }
+
+ /** constructor used to create new empty list */
+ LinkedList(DBAbstract db, Serializer valueSerializer) throws IOException {
+ this.db = db;
+ if (valueSerializer != null && !(valueSerializer instanceof Serializable))
+ throw new IllegalArgumentException(
+ "Serializer does not implement Serializable");
+ this.valueSerializer = valueSerializer;
+ // create root
+ this.rootRecid = db.insert(new Root(), ROOT_SERIALIZER, false);
+ }
+
+ void setPersistenceContext(DBAbstract db) {
+ this.db = db;
+ }
+
+ @Override
+ public ListIterator listIterator(int index) {
+ Root r = getRoot();
+ if (index < 0 || index > r.size)
+ throw new IndexOutOfBoundsException();
+
+ Iter iter = new Iter();
+ iter.next = r.first;
+
+ // scroll to requested position
+ // TODO scroll from end, if beyond half
+ for (int i = 0; i < index; i++) {
+ iter.next();
+ }
+ return iter;
+
+ }
+
+ Root getRoot() {
+ // expect that caller already holds lock
+ try {
+ return db.fetch(rootRecid, ROOT_SERIALIZER);
+ } catch (IOException e) {
+ throw new IOError(e);
+ }
+ }
+
+ @Override
+ public int size() {
+ return (int) getRoot().size;
+ }
+
+ public Iterator descendingIterator() {
+ return null; // not implemented in this version
+ } + + @Override + public boolean add(Object value) { + try { + Root r = getRoot(); + Entry e = new Entry(r.last, 0, value); + long recid = db.insert(e, entrySerializer, false); + + // update old last Entry to point to new record + if (r.last != 0) { + Entry oldLast = db.fetch(r.last, entrySerializer); + if (oldLast.next != 0) + throw new Error(); + oldLast.next = recid; + db.update(r.last, oldLast, entrySerializer); + } + + // update linked list + r.last = recid; + if (r.first == 0) + r.first = recid; + r.size++; + db.update(rootRecid, r, ROOT_SERIALIZER); + modCount++; + return true; + } catch (IOException e) { + throw new IOError(e); + } + + } + + private Entry fetch(long recid) { + try { + return db.fetch(recid, entrySerializer); + } catch (IOException e) { + throw new IOError(e); + } + + } + + /** + * called from Serialization object + */ + static LinkedList deserialize(DataInput is, Serialization ser) + throws IOException, ClassNotFoundException { + long rootrecid = LongPacker.unpackLong(is); + long sizeLimit = LongPacker.unpackLong(is); + if (sizeLimit != 0) + throw new InternalError( + "LinkedList.sizeLimit not supported in this JDBM version"); + Serializer serializer = (Serializer) ser.deserialize(is); + return new LinkedList(ser.db, rootrecid, serializer); + } + + void serialize(DataOutput out) throws IOException { + LongPacker.packLong(out, rootRecid); + LongPacker.packLong(out, sizeLimit); + db.defaultSerializer().serialize(out, valueSerializer); + } + + private final Serializer entrySerializer = new Serializer() { + + @Override + public void serialize(DataOutput out, Entry e) throws IOException { + LongPacker.packLong(out, e.prev); + LongPacker.packLong(out, e.next); + if (valueSerializer != null) + valueSerializer.serialize(out, (E) e.value); + else + db.defaultSerializer().serialize(out, e.value); + } + + @Override + public Entry deserialize(DataInput in) throws IOException, + ClassNotFoundException { + long prev = LongPacker.unpackLong(in); + long next = LongPacker.unpackLong(in); + Object value = null; + if (loadValues) + value = valueSerializer == null ? 
db.defaultSerializer() + .deserialize(in) : valueSerializer.deserialize(in); + return new LinkedList.Entry(prev, next, value); + } + }; + + static class Entry { + long prev = 0; + long next = 0; + + E value; + + public Entry(long prev, long next, E value) { + this.prev = prev; + this.next = next; + this.value = value; + } + } + + private final class Iter implements ListIterator { + + private int expectedModCount = modCount; + private int index = 0; + + private long prev = 0; + private long next = 0; + + private byte lastOper = 0; + + @Override + public boolean hasNext() { + return next != 0; + } + + @Override + public E next() { + if (next == 0) + throw new NoSuchElementException(); + checkForComodification(); + + Entry e = fetch(next); + + prev = next; + next = e.next; + index++; + lastOper = +1; + return e.value; + } + + @Override + public boolean hasPrevious() { + return prev != 0; + } + + @Override + public E previous() { + checkForComodification(); + Entry e = fetch(prev); + next = prev; + prev = e.prev; + index--; + lastOper = -1; + return e.value; + } + + @Override + public int nextIndex() { + return index; + } + + @Override + public int previousIndex() { + return index - 1; + } + + @Override + public void remove() { + checkForComodification(); + try { + if (lastOper == 1) { + // last operation was next() so remove previous element + lastOper = 0; + + Entry p = db.fetch(prev, entrySerializer); + // update entry before previous + if (p.prev != 0) { + Entry pp = db.fetch(p.prev, entrySerializer); + pp.next = p.next; + db.update(p.prev, pp, entrySerializer); + } + // update entry after next + if (p.next != 0) { + Entry pn = db.fetch(p.next, entrySerializer); + pn.prev = p.prev; + db.update(p.next, pn, entrySerializer); + } + // remove old record from db + db.delete(prev); + // update list + Root r = getRoot(); + if (r.first == prev) + r.first = next; + if (r.last == prev) + r.last = next; + r.size--; + db.update(rootRecid, r, ROOT_SERIALIZER); + modCount++; + expectedModCount++; + // update iterator + prev = p.prev; + + } else if (lastOper == -1) { + // last operation was prev() so remove next element + lastOper = 0; + + Entry n = db.fetch(next, entrySerializer); + // update entry before next + if (n.prev != 0) { + Entry pp = db.fetch(n.prev, entrySerializer); + pp.next = n.next; + db.update(n.prev, pp, entrySerializer); + } + // update entry after previous + if (n.next != 0) { + Entry pn = db.fetch(n.next, entrySerializer); + pn.prev = n.prev; + db.update(n.next, pn, entrySerializer); + } + // remove old record from db + db.delete(next); + // update list + Root r = getRoot(); + if (r.last == next) + r.last = prev; + if (r.first == next) + r.first = prev; + r.size--; + db.update(rootRecid, r, ROOT_SERIALIZER); + modCount++; + expectedModCount++; + // update iterator + next = n.next; + + } else + throw new IllegalStateException(); + } catch (IOException e) { + throw new IOError(e); + } + + } + + @Override + public void set(E value) { + checkForComodification(); + try { + if (lastOper == 1) { + // last operation was next(), so update previous item + lastOper = 0; + Entry n = db.fetch(prev, entrySerializer); + n.value = value; + db.update(prev, n, entrySerializer); + } else if (lastOper == -1) { + // last operation was prev() so update next item + lastOper = 0; + Entry n = db.fetch(next, entrySerializer); + n.value = value; + db.update(next, n, entrySerializer); + } else + throw new IllegalStateException(); + } catch (IOException e) { + throw new IOError(e); + } + + } + + @Override + 
public void add(E value) {
+ checkForComodification();
+
+ // use more efficient method if possible
+ if (next == 0) {
+ LinkedList.this.add(value);
+ expectedModCount++;
+ return;
+ }
+ try {
+ // insert new entry
+ Entry e = new Entry(prev, next, value);
+ long recid = db.insert(e, entrySerializer, false);
+
+ // update previous entry
+ if (prev != 0) {
+ Entry p = db.fetch(prev, entrySerializer);
+ if (p.next != next)
+ throw new Error();
+ p.next = recid;
+ db.update(prev, p, entrySerializer);
+ }
+
+ // update next entry
+ Entry n = fetch(next);
+ if (n.prev != prev)
+ throw new Error();
+ n.prev = recid;
+ db.update(next, n, entrySerializer);
+
+ // update List
+ Root r = getRoot();
+ r.size++;
+ db.update(rootRecid, r, ROOT_SERIALIZER);
+
+ // update iterator
+ expectedModCount++;
+ modCount++;
+ prev = recid;
+
+ } catch (IOException e) {
+ throw new IOError(e);
+ }
+
+ }
+
+ final void checkForComodification() {
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ /**
+ * Copies a collection from one db to another, keeping logical recids
+ * unchanged
+ */
+ static void defrag(long recid, DBStore r1, DBStore r2) throws IOException {
+ try {
+ // move linked list itself
+ byte[] data = r1.fetchRaw(recid);
+ r2.forceInsert(recid, data);
+ DataInputOutput in = new DataInputOutput();
+ in.reset(data);
+ LinkedList l = (LinkedList) r1.defaultSerializer().deserialize(in);
+ l.loadValues = false;
+ // move linked list root
+ if (l.rootRecid == 0) // empty list, done
+ return;
+
+ data = r1.fetchRaw(l.rootRecid);
+ r2.forceInsert(l.rootRecid, data);
+ in.reset(data);
+ Root r = ROOT_SERIALIZER.deserialize(in);
+ // move all other nodes in linked list
+ long current = r.first;
+ while (current != 0) {
+ data = r1.fetchRaw(current);
+ in.reset(data);
+ r2.forceInsert(current, data);
+
+ Entry e = (Entry) l.entrySerializer.deserialize(in);
+ current = e.next;
+ }
+ } catch (ClassNotFoundException e) {
+ throw new IOError(e);
+ }
+
+ }
+
+}
Index: graph/src/main/java/org/apache/jdbm/Storage.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/Storage.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/Storage.java (Arbeitskopie)
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public interface Storage {
+
+ /**
+ * Bit shift used to calculate the page size. If you want to modify the page
+ * size, do it here.
+ *
+ * 1<<9 = 512, 1<<10 = 1024, 1<<11 = 2048, 1<<12 = 4096
+ */
+ int PAGE_SIZE_SHIFT = 12;
+
+ /**
+ * the length of a single page.
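+ * (with the default PAGE_SIZE_SHIFT of 12 this works out to 1 << 12 = 4096
+ * bytes, and OFFSET_MASK below keeps exactly the low 12 bits of an offset)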

+ * !!! DO NOT MODIFY THIS DIRECTLY !!!
+ */
+ int PAGE_SIZE = 1 << PAGE_SIZE_SHIFT;
+
+ /**
+ * use 'val & OFFSET_MASK' to quickly get the offset within the page
+ */
+ long OFFSET_MASK = 0xFFFFFFFFFFFFFFFFL >>> (64 - Storage.PAGE_SIZE_SHIFT);
+
+ void write(long pageNumber, ByteBuffer data) throws IOException;
+
+ ByteBuffer read(long pageNumber) throws IOException;
+
+ void forceClose() throws IOException;
+
+ boolean isReadonly();
+
+ DataInputStream readTransactionLog();
+
+ void deleteTransactionLog();
+
+ void sync() throws IOException;
+
+ DataOutputStream openTransactionLog() throws IOException;
+
+ void deleteAllFiles() throws IOException;
+}
Index: graph/src/main/java/org/apache/jdbm/StorageDiskMapped.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/StorageDiskMapped.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/StorageDiskMapped.java (Arbeitskopie)
@@ -0,0 +1,295 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.OverlappingFileLockException;
+import java.util.ArrayList;
+import java.util.IdentityHashMap;
+import java.util.List;
+
+import sun.misc.Cleaner;
+
+/**
+ * Disk storage which uses mapped buffers
+ */
+public final class StorageDiskMapped implements Storage {
+
+ static final String IDR = ".i";
+
+ static final String DBR = ".d";
+
+ /**
+ * Maximal number of pages in a single file.
Calculated so that each file will + * have 1 GB + */ + final static long PAGES_PER_FILE = (1024 * 1024 * 1024) >>> Storage.PAGE_SIZE_SHIFT; + + private ArrayList channels = new ArrayList(); + private ArrayList channelsTranslation = new ArrayList(); + private IdentityHashMap buffers = new IdentityHashMap(); + + private String fileName; + private boolean transactionsDisabled; + private boolean readonly; + + public StorageDiskMapped(String fileName, boolean readonly, + boolean transactionsDisabled, boolean lockingDisabled) throws IOException { + this.fileName = fileName; + this.transactionsDisabled = transactionsDisabled; + this.readonly = readonly; + // make sure first file can be opened + // lock it + try { + if (!lockingDisabled) + getChannel(0).lock(); + } catch (IOException e) { + throw new IOException("Could not lock DB file: " + fileName, e); + } catch (OverlappingFileLockException e) { + throw new IOException("Could not lock DB file: " + fileName, e); + } + + } + + private FileChannel getChannel(long pageNumber) throws IOException { + int fileNumber = (int) (Math.abs(pageNumber) / PAGES_PER_FILE); + + List c = pageNumber >= 0 ? channels : channelsTranslation; + + // increase capacity of array lists if needed + for (int i = c.size(); i <= fileNumber; i++) { + c.add(null); + } + + FileChannel ret = c.get(fileNumber); + if (ret == null) { + String name = makeFileName(fileName, pageNumber, fileNumber); + ret = new RandomAccessFile(name, "rw").getChannel(); + c.set(fileNumber, ret); + buffers.put(ret, ret.map(FileChannel.MapMode.READ_WRITE, 0, ret.size())); + } + return ret; + } + + static String makeFileName(String fileName, long pageNumber, int fileNumber) { + return fileName + (pageNumber >= 0 ? DBR : IDR) + "." + fileNumber; + } + + @Override + public void write(long pageNumber, ByteBuffer data) throws IOException { + if (transactionsDisabled && data.isDirect()) { + // if transactions are disabled and this buffer is direct, + // changes written into buffer are directly reflected in file. 
+ // so there is no need to write buffer second time + return; + } + + FileChannel f = getChannel(pageNumber); + int offsetInFile = (int) ((Math.abs(pageNumber) % PAGES_PER_FILE) * PAGE_SIZE); + MappedByteBuffer b = buffers.get(f); + if (b.limit() <= offsetInFile) { + + // remapping buffer for each newly added page would be slow, + // so allocate new size in chunks + int increment = Math.min(PAGE_SIZE * 1024, offsetInFile / 10); + increment -= increment % PAGE_SIZE; + + long newFileSize = offsetInFile + PAGE_SIZE + increment; + newFileSize = Math.min(PAGES_PER_FILE * PAGE_SIZE, newFileSize); + + // expand file size + f.position(newFileSize - 1); + f.write(ByteBuffer.allocate(1)); + // unmap old buffer + unmapBuffer(b); + // remap buffer + b = f.map(FileChannel.MapMode.READ_WRITE, 0, newFileSize); + buffers.put(f, b); + } + + // write into buffer + b.position(offsetInFile); + data.rewind(); + b.put(data); + } + + private void unmapBuffer(MappedByteBuffer b) { + if (b != null) { + Cleaner cleaner = ((sun.nio.ch.DirectBuffer) b).cleaner(); + if (cleaner != null) + cleaner.clean(); + } + } + + @Override + public ByteBuffer read(long pageNumber) throws IOException { + FileChannel f = getChannel(pageNumber); + int offsetInFile = (int) ((Math.abs(pageNumber) % PAGES_PER_FILE) * PAGE_SIZE); + MappedByteBuffer b = buffers.get(f); + + if (b == null) { // not mapped yet + b = f.map(FileChannel.MapMode.READ_WRITE, 0, f.size()); + } + + // check buffers size + if (b.limit() <= offsetInFile) { + // file is smaller, return empty data + return ByteBuffer.wrap(PageFile.CLEAN_DATA).asReadOnlyBuffer(); + } + + b.position(offsetInFile); + ByteBuffer ret = b.slice(); + ret.limit(PAGE_SIZE); + if (!transactionsDisabled || readonly) { + // changes written into buffer will be directly written into file + // so we need to protect buffer from modifications + ret = ret.asReadOnlyBuffer(); + } + return ret; + } + + @Override + public void forceClose() throws IOException { + for (FileChannel f : channels) { + if (f == null) + continue; + f.close(); + unmapBuffer(buffers.get(f)); + } + for (FileChannel f : channelsTranslation) { + if (f == null) + continue; + f.close(); + unmapBuffer(buffers.get(f)); + } + + channels = null; + channelsTranslation = null; + buffers = null; + } + + @Override + public void sync() throws IOException { + for (MappedByteBuffer b : buffers.values()) { + b.force(); + } + } + + @Override + public DataOutputStream openTransactionLog() throws IOException { + String logName = fileName + StorageDisk.transaction_log_file_extension; + final FileOutputStream fileOut = new FileOutputStream(logName); + return new DataOutputStream(new BufferedOutputStream(fileOut)) { + + // default implementation of flush on FileOutputStream does nothing, + // so we use little workaround to make sure that data were really flushed + @Override + public void flush() throws IOException { + super.flush(); + fileOut.flush(); + fileOut.getFD().sync(); + } + }; + } + + @Override + public void deleteAllFiles() throws IOException { + deleteTransactionLog(); + deleteFiles(fileName); + } + + static void deleteFiles(String fileName) { + for (int i = 0; true; i++) { + String name = makeFileName(fileName, +1, i); + File f = new File(name); + boolean exists = f.exists(); + if (exists && !f.delete()) + f.deleteOnExit(); + if (!exists) + break; + } + for (int i = 0; true; i++) { + String name = makeFileName(fileName, -1, i); + File f = new File(name); + boolean exists = f.exists(); + if (exists && !f.delete()) + f.deleteOnExit(); + if 
(!exists)
+ break;
+ }
+ }
+
+ @Override
+ public DataInputStream readTransactionLog() {
+
+ File logFile = new File(fileName
+ + StorageDisk.transaction_log_file_extension);
+ if (!logFile.exists())
+ return null;
+ if (logFile.length() == 0) {
+ logFile.delete();
+ return null;
+ }
+
+ DataInputStream ois = null;
+ try {
+ ois = new DataInputStream(new BufferedInputStream(new FileInputStream(
+ logFile)));
+ } catch (FileNotFoundException e) {
+ // the file should exist; its presence was checked just milliseconds
+ // earlier, so move on
+ return null;
+ }
+
+ try {
+ if (ois.readShort() != Magic.LOGFILE_HEADER)
+ throw new Error("Bad magic on log file");
+ } catch (IOException e) {
+ // corrupted/empty logfile
+ logFile.delete();
+ return null;
+ }
+ return ois;
+ }
+
+ @Override
+ public void deleteTransactionLog() {
+ File logFile = new File(fileName
+ + StorageDisk.transaction_log_file_extension);
+ if (logFile.exists())
+ logFile.delete();
+ }
+
+ @Override
+ public boolean isReadonly() {
+ return readonly;
+ }
+
+}
Index: graph/src/main/java/org/apache/jdbm/DBCacheRef.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/DBCacheRef.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/DBCacheRef.java (Arbeitskopie)
@@ -0,0 +1,364 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jdbm;
+
+import java.io.IOException;
+import java.lang.ref.ReferenceQueue;
+import java.lang.ref.SoftReference;
+import java.lang.ref.WeakReference;
+import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import javax.crypto.Cipher;
+
+/**
+ * A DB wrapping and caching another DB.
+ *
+ */
+public final class DBCacheRef extends DBCache {
+
+ private static final boolean debug = false;
+
+ /**
+ * If the soft cache is enabled, this contains softly referenced clean
+ * entries. If an entry becomes dirty, it is moved to _hash, which has a
+ * limited size. This map is accessed from the SoftCache disposer thread, so
+ * all access must be synchronized.
+ */
+ protected LongHashMap _softHash;
+
+ /**
+ * Reference queue used to collect soft cache entries.
+ */
+ protected ReferenceQueue _refQueue;
+
+ /**
+ * Thread in which soft cache references are disposed.
+ */
+ protected Thread _softRefThread;
+
+ protected static AtomicInteger threadCounter = new AtomicInteger(0);
+
+ /** counter which counts the number of inserts since the last 'action' */
+ protected int insertCounter = 0;
+
+ private final boolean _autoClearReferenceCacheOnLowMem;
+ private final byte _cacheType;
+
+ /**
+ * Construct a CacheRecordManager wrapping another DB and using a given cache
+ * policy.
+ */ + public DBCacheRef(String filename, boolean readonly, + boolean transactionDisabled, Cipher cipherIn, Cipher cipherOut, + boolean useRandomAccessFile, boolean deleteFilesAfterClose, + byte cacheType, boolean cacheAutoClearOnLowMem, boolean lockingDisabled) { + + super(filename, readonly, transactionDisabled, cipherIn, cipherOut, + useRandomAccessFile, deleteFilesAfterClose, lockingDisabled); + + this._cacheType = cacheType; + _autoClearReferenceCacheOnLowMem = cacheAutoClearOnLowMem; + + _softHash = new LongHashMap(); + _refQueue = new ReferenceQueue(); + _softRefThread = new Thread(new SoftRunnable(this, _refQueue), + "JDBM Soft Cache Disposer " + (threadCounter.incrementAndGet())); + _softRefThread.setDaemon(true); + _softRefThread.start(); + + } + + void clearCacheIfLowOnMem() { + + insertCounter = 0; + + if (!_autoClearReferenceCacheOnLowMem) + return; + + Runtime r = Runtime.getRuntime(); + long max = r.maxMemory(); + if (max == Long.MAX_VALUE) + return; + + double free = r.freeMemory(); + double total = r.totalMemory(); + // We believe that free refers to total not max. + // Increasing heap size to max would increase to max + free = free + (max - total); + + if (debug) + System.err.println("DBCache: freemem = " + free + " = " + (free / max) + + "%"); + + if (free < 1e7 || free * 4 < max) + clearCache(); + + } + + public synchronized A fetch(long recid, Serializer serializer, + boolean disableCache) throws IOException { + + if (disableCache) + return super.fetch(recid, serializer, disableCache); + else + return fetch(recid, serializer); + } + + public synchronized void delete(long recid) throws IOException { + checkNotClosed(); + + super.delete(recid); + synchronized (_hashDirties) { + _hashDirties.remove(recid); + } + synchronized (_softHash) { + Object e = _softHash.remove(recid); + if (e != null && e instanceof ReferenceCacheEntry) { + ((ReferenceCacheEntry) e).clear(); + } + } + + if (needsAutoCommit()) + commit(); + + } + + public synchronized void update(final long recid, A obj, + Serializer serializer) throws IOException { + checkNotClosed(); + + synchronized (_softHash) { + // soft cache can not contain dirty objects + Object e = _softHash.remove(recid); + if (e != null && e instanceof ReferenceCacheEntry) { + ((ReferenceCacheEntry) e).clear(); + } + } + synchronized (_hashDirties) { + // put into dirty cache + final DirtyCacheEntry e = new DirtyCacheEntry(); + e._recid = recid; + e._obj = obj; + e._serializer = serializer; + _hashDirties.put(recid, e); + } + + if (needsAutoCommit()) + commit(); + + } + + public synchronized A fetch(long recid, Serializer serializer) + throws IOException { + checkNotClosed(); + + synchronized (_softHash) { + Object e = _softHash.get(recid); + if (e != null) { + + if (e instanceof ReferenceCacheEntry) + e = ((ReferenceCacheEntry) e).get(); + if (e != null) { + return (A) e; + } + } + } + + synchronized (_hashDirties) { + DirtyCacheEntry e2 = _hashDirties.get(recid); + if (e2 != null) { + return (A) e2._obj; + } + } + + A value = super.fetch(recid, serializer); + + if (needsAutoCommit()) + commit(); + + synchronized (_softHash) { + + if (_cacheType == SOFT) + _softHash.put(recid, new SoftCacheEntry(recid, value, _refQueue)); + else if (_cacheType == WEAK) + _softHash.put(recid, new WeakCacheEntry(recid, value, _refQueue)); + else + _softHash.put(recid, value); + } + + return value; + } + + public synchronized void close() { + checkNotClosed(); + + updateCacheEntries(); + super.close(); + _softHash = null; + _softRefThread.interrupt(); + } 
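The constructor above wires a daemon disposer thread to a ReferenceQueue, which is the standard java.lang.ref eviction pattern: each cached value is held through a soft or weak reference registered with the queue, and the thread evicts the corresponding map entry once the garbage collector clears a referent. A minimal self-contained sketch of the same pattern, using a plain HashMap and hypothetical names instead of jdbm's LongHashMap:

import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.util.HashMap;
import java.util.Map;

// Sketch only; class and field names are illustrative, not jdbm API.
class SoftCacheSketch {
  // each entry remembers its key so the disposer knows what to evict
  static final class Entry extends SoftReference<Object> {
    final long recid;
    Entry(long recid, Object value, ReferenceQueue<Object> q) {
      super(value, q);
      this.recid = recid;
    }
  }

  final Map<Long, Entry> cache = new HashMap<Long, Entry>();
  final ReferenceQueue<Object> queue = new ReferenceQueue<Object>();

  SoftCacheSketch() {
    Thread disposer = new Thread(() -> {
      try {
        while (true) {
          // blocks until the GC clears a referent and enqueues its reference
          Entry e = (Entry) queue.remove();
          synchronized (cache) {
            cache.remove(e.recid);
          }
        }
      } catch (InterruptedException ignored) {
        // exit when interrupted, e.g. on close
      }
    }, "cache-disposer");
    disposer.setDaemon(true);
    disposer.start();
  }

  void put(long recid, Object value) {
    synchronized (cache) {
      cache.put(recid, new Entry(recid, value, queue));
    }
  }

  Object get(long recid) {
    synchronized (cache) {
      Entry e = cache.get(recid);
      return e == null ? null : e.get();
    }
  }
}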
+
+ public synchronized void rollback() {
+ checkNotClosed();
+
+ // discard all cache entries since we don't know which entries
+ // were part of the transaction
+ synchronized (_softHash) {
+ Iterator iter = _softHash.valuesIterator();
+ while (iter.hasNext()) {
+ ReferenceCacheEntry e = iter.next();
+ e.clear();
+ }
+ _softHash.clear();
+ }
+
+ super.rollback();
+ }
+
+ protected boolean isCacheEntryDirty(DirtyCacheEntry entry) {
+ return _hashDirties.get(entry._recid) != null;
+ }
+
+ protected void setCacheEntryDirty(DirtyCacheEntry entry, boolean dirty) {
+ if (dirty) {
+ _hashDirties.put(entry._recid, entry);
+ } else {
+ _hashDirties.remove(entry._recid);
+ }
+ }
+
+ interface ReferenceCacheEntry {
+ long getRecid();
+
+ void clear();
+
+ Object get();
+ }
+
+ @SuppressWarnings("unchecked")
+ static final class SoftCacheEntry extends SoftReference implements
+ ReferenceCacheEntry {
+ protected final long _recid;
+
+ public long getRecid() {
+ return _recid;
+ }
+
+ SoftCacheEntry(long recid, Object obj, ReferenceQueue queue) {
+ super(obj, queue);
+ _recid = recid;
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ static final class WeakCacheEntry extends WeakReference implements
+ ReferenceCacheEntry {
+ protected final long _recid;
+
+ public long getRecid() {
+ return _recid;
+ }
+
+ WeakCacheEntry(long recid, Object obj, ReferenceQueue queue) {
+ super(obj, queue);
+ _recid = recid;
+ }
+
+ }
+
+ /**
+ * Runs in a separate thread and cleans the SoftCache. The runnable exits
+ * automatically once the CacheRecordManager is GCed.
+ *
+ * @author Jan Kotek
+ */
+ static final class SoftRunnable implements Runnable {
+
+ private ReferenceQueue entryQueue;
+ private WeakReference db2;
+
+ public SoftRunnable(DBCacheRef db,
+ ReferenceQueue entryQueue) {
+ this.db2 = new WeakReference(db);
+ this.entryQueue = entryQueue;
+ }
+
+ public void run() {
+ while (true)
+ try {
+
+ // collect next item from cache,
+ // limit 10000 ms is to keep periodically checking if db was GCed
+ ReferenceCacheEntry e = (ReferenceCacheEntry) entryQueue
+ .remove(10000);
+
+ // check if db was GCed, cancel in that case
+ DBCacheRef db = db2.get();
+ if (db == null)
+ return;
+
+ if (e != null) {
+
+ synchronized (db._softHash) {
+ int counter = 0;
+ while (e != null) {
+ db._softHash.remove(e.getRecid());
+ // entries may be soft or weak, so cast to the common interface
+ e = (ReferenceCacheEntry) entryQueue.poll();
+ if (debug)
+ counter++;
+ }
+ if (debug)
+ System.err.println("DBCache: " + counter
+ + " objects released from ref cache.");
+ }
+ } else {
+ // check memory consumption every 10 seconds
+ db.clearCacheIfLowOnMem();
+
+ }
+
+ } catch (InterruptedException e) {
+ return;
+ } catch (Throwable e) {
+ // this thread must keep spinning,
+ // otherwise SoftCacheEntries would not be disposed
+ e.printStackTrace();
+ }
+ }
+
+ }
+
+ public void clearCache() {
+ if (debug)
+ System.err.println("DBCache: Clear cache");
+
+ synchronized (_softHash) {
+ if (_cacheType != HARD) {
+ Iterator iter = _softHash.valuesIterator();
+ while (iter.hasNext()) {
+ ReferenceCacheEntry e = iter.next();
+ e.clear();
+ }
+ }
+ _softHash.clear();
+ }
+
+ }
+
+}
Index: graph/src/main/java/org/apache/jdbm/DBAbstract.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/DBAbstract.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/DBAbstract.java (Arbeitskopie)
@@ -0,0 +1,636 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOError; +import java.io.IOException; +import java.lang.ref.WeakReference; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.NavigableSet; +import java.util.Set; + +/** + * An abstract class implementing most of DB. It also has some JDBM package + * protected stuff (getNamedRecord) + */ +public abstract class DBAbstract implements DB { + + /** + * Reserved slot for name directory recid. + */ + static final byte NAME_DIRECTORY_ROOT = 0; + + /** + * Reserved slot for version number + */ + static final byte STORE_VERSION_NUMBER_ROOT = 1; + + /** + * Reserved slot for recid where Serial class info is stored + * + * NOTE when introducing more roots, do not forget to update defrag + */ + static final byte SERIAL_CLASS_INFO_RECID_ROOT = 2; + + /** + * to prevent double instances of the same collection, we use weak value map + * + * //TODO what to do when there is rollback? //TODO clear on close + */ + final private Map> collections = new HashMap>(); + + /** + * Inserts a new record using a custom serializer. + * + * @param obj the object for the new record. + * @param serializer a custom serializer + * @return the rowid for the new record. + * @throws java.io.IOException when one of the underlying I/O operations + * fails. + */ + abstract long insert(A obj, Serializer serializer, boolean disableCache) + throws IOException; + + /** + * Deletes a record. + * + * @param recid the rowid for the record that should be deleted. + * @throws java.io.IOException when one of the underlying I/O operations + * fails. + */ + abstract void delete(long recid) throws IOException; + + /** + * Updates a record using a custom serializer. If given recid does not exist, + * IOException will be thrown before/during commit (cache). + * + * @param recid the recid for the record that is to be updated. + * @param obj the new object for the record. + * @param serializer a custom serializer + * @throws java.io.IOException when one of the underlying I/O operations fails + */ + abstract void update(long recid, A obj, Serializer serializer) + throws IOException; + + /** + * Fetches a record using a custom serializer. + * + * @param recid the recid for the record that must be fetched. + * @param serializer a custom serializer + * @return the object contained in the record, null if given recid does not + * exist + * @throws java.io.IOException when one of the underlying I/O operations + * fails. 
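These four primitives, insert, delete, update, and fetch, define the whole record lifecycle that every concrete DB implements. A hedged usage sketch of that contract: the file name is a placeholder, the cast assumes DBMaker hands back a DBAbstract subclass (the concrete stores in this patch do), and delete is omitted because it is package-visible.

import java.io.IOException;

// Illustrative sketch of the record lifecycle, not a test from this patch.
class RecordLifecycleSketch {
  void demo() throws IOException {
    // "demo" is a placeholder file name
    DBAbstract db = (DBAbstract) DBMaker.openFile("demo").make();

    long recid = db.insert("first value"); // allocate a record, obtain its recid
    db.update(recid, "second value");      // replace the record contents
    String v = db.fetch(recid);            // read it back: "second value"

    db.commit();                           // make the update durable
    db.close();
  }
}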
+ */ + abstract A fetch(long recid, Serializer serializer) throws IOException; + + /** + * Fetches a record using a custom serializer and optionaly disabled cache + * + * @param recid the recid for the record that must be fetched. + * @param serializer a custom serializer + * @param disableCache true to disable any caching mechanism + * @return the object contained in the record, null if given recid does not + * exist + * @throws java.io.IOException when one of the underlying I/O operations + * fails. + */ + abstract A fetch(long recid, Serializer serializer, + boolean disableCache) throws IOException; + + @SuppressWarnings("unchecked") + public long insert(Object obj) throws IOException { + return insert(obj, defaultSerializer(), false); + } + + @SuppressWarnings("unchecked") + public void update(long recid, Object obj) throws IOException { + update(recid, obj, defaultSerializer()); + } + + @SuppressWarnings("unchecked") + public A fetch(long recid) throws IOException { + return (A) fetch(recid, defaultSerializer()); + } + + @SuppressWarnings("unchecked") + @Override + public Map getHashMap(String name) { + Object o = getCollectionInstance(name); + if (o != null) + return (Map) o; + + try { + long recid = getNamedObject(name); + if (recid == 0) + return null; + + @SuppressWarnings("rawtypes") + HTree tree = fetch(recid); + tree.setPersistenceContext(this); + if (!tree.hasValues()) { + throw new ClassCastException("HashSet is not HashMap"); + } + collections.put(name, new WeakReference(tree)); + return tree; + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public Map createHashMap(String name) { + return createHashMap(name, null, null); + } + + @Override + public Map createHashMap(String name, + Serializer keySerializer, Serializer valueSerializer) { + try { + assertNameNotExist(name); + + @SuppressWarnings({ "unchecked", "rawtypes" }) + HTree tree = new HTree(this, keySerializer, valueSerializer, true); + long recid = insert(tree); + setNamedObject(name, recid); + collections.put(name, new WeakReference(tree)); + return tree; + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public Set getHashSet(String name) { + Object o = getCollectionInstance(name); + if (o != null) + return (Set) o; + + try { + long recid = getNamedObject(name); + if (recid == 0) + return null; + + HTree tree = fetch(recid); + tree.setPersistenceContext(this); + if (tree.hasValues()) { + throw new ClassCastException("HashMap is not HashSet"); + } + Set ret = new HTreeSet(tree); + collections.put(name, new WeakReference(ret)); + return ret; + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public Set createHashSet(String name) { + return createHashSet(name, null); + } + + @Override + public Set createHashSet(String name, Serializer keySerializer) { + try { + assertNameNotExist(name); + + HTree tree = new HTree(this, keySerializer, null, false); + long recid = insert(tree); + setNamedObject(name, recid); + + Set ret = new HTreeSet(tree); + collections.put(name, new WeakReference(ret)); + return ret; + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public NavigableMap getTreeMap(String name) { + Object o = getCollectionInstance(name); + if (o != null) + return (NavigableMap) o; + + try { + long recid = getNamedObject(name); + if (recid == 0) + return null; + + BTree t = BTree. 
load(this, recid); + if (!t.hasValues()) + throw new ClassCastException("TreeSet is not TreeMap"); + NavigableMap ret = new BTreeMap(t, false); // TODO + // put + // readonly + // flag + // here + collections.put(name, new WeakReference(ret)); + return ret; + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public , V> NavigableMap createTreeMap( + String name) { + return createTreeMap(name, null, null, null); + } + + @Override + public NavigableMap createTreeMap(String name, + Comparator keyComparator, Serializer keySerializer, + Serializer valueSerializer) { + try { + assertNameNotExist(name); + BTree tree = BTree.createInstance(this, keyComparator, + keySerializer, valueSerializer, true); + setNamedObject(name, tree.getRecid()); + NavigableMap ret = new BTreeMap(tree, false); // TODO + // put + // readonly + // flag + // here + collections.put(name, new WeakReference(ret)); + return ret; + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public NavigableSet getTreeSet(String name) { + Object o = getCollectionInstance(name); + if (o != null) + return (NavigableSet) o; + + try { + long recid = getNamedObject(name); + if (recid == 0) + return null; + + BTree t = BTree. load(this, recid); + if (t.hasValues()) + throw new ClassCastException("TreeMap is not TreeSet"); + BTreeSet ret = new BTreeSet(new BTreeMap(t, false)); + collections.put(name, new WeakReference(ret)); + return ret; + + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public NavigableSet createTreeSet(String name) { + return createTreeSet(name, null, null); + } + + @Override + public NavigableSet createTreeSet(String name, + Comparator keyComparator, Serializer keySerializer) { + try { + assertNameNotExist(name); + BTree tree = BTree.createInstance(this, keyComparator, + keySerializer, null, false); + setNamedObject(name, tree.getRecid()); + BTreeSet ret = new BTreeSet(new BTreeMap(tree, false)); + collections.put(name, new WeakReference(ret)); + return ret; + + } catch (IOException e) { + throw new IOError(e); + } + + } + + @Override + public List createLinkedList(String name) { + return createLinkedList(name, null); + } + + @Override + public List createLinkedList(String name, Serializer serializer) { + try { + assertNameNotExist(name); + + // allocate record and overwrite it + + LinkedList list = new LinkedList(this, serializer); + long recid = insert(list); + setNamedObject(name, recid); + + collections.put(name, new WeakReference(list)); + + return list; + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public List getLinkedList(String name) { + Object o = getCollectionInstance(name); + if (o != null) + return (List) o; + + try { + long recid = getNamedObject(name); + if (recid == 0) + return null; + LinkedList list = (LinkedList) fetch(recid); + list.setPersistenceContext(this); + collections.put(name, new WeakReference(list)); + return list; + } catch (IOException e) { + throw new IOError(e); + } + } + + private Object getCollectionInstance(String name) { + WeakReference ref = collections.get(name); + if (ref == null) + return null; + Object o = ref.get(); + if (o != null) + return o; + // already GCed + collections.remove(name); + return null; + } + + private void assertNameNotExist(String name) throws IOException { + if (getNamedObject(name) != 0) + throw new IllegalArgumentException("Object with name '" + name + + "' already exists"); + } + + /** + * Obtain the record id of a named object. 
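getCollectionInstance above is a small weak-value cache: the map stores WeakReference wrappers so a named collection instance can be garbage collected once callers drop it, and a stale mapping is pruned on the next lookup. The same idea in isolation, with hypothetical names:

import java.lang.ref.WeakReference;
import java.util.HashMap;
import java.util.Map;

// Isolated sketch of the weak-value instance cache used for named collections.
class WeakValueCache<K, V> {
  private final Map<K, WeakReference<V>> map = new HashMap<K, WeakReference<V>>();

  V get(K key) {
    WeakReference<V> ref = map.get(key);
    if (ref == null)
      return null;
    V value = ref.get();
    if (value == null)
      map.remove(key); // referent was collected; drop the stale entry
    return value;
  }

  void put(K key, V value) {
    map.put(key, new WeakReference<V>(value));
  }
}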
Returns 0 if named object doesn't + * exist. Named objects are used to store Map views and other well known + * objects. + */ + protected long getNamedObject(String name) throws IOException { + long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT); + if (nameDirectory_recid == 0) { + return 0; + } + HTree m = fetch(nameDirectory_recid); + Long res = m.get(name); + if (res == null) + return 0; + return res; + } + + /** + * Set the record id of a named object. Named objects are used to store Map + * views and other well known objects. + */ + protected void setNamedObject(String name, long recid) throws IOException { + long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT); + HTree m = null; + if (nameDirectory_recid == 0) { + // does not exists, create it + m = new HTree(this, null, null, true); + nameDirectory_recid = insert(m); + setRoot(NAME_DIRECTORY_ROOT, nameDirectory_recid); + } else { + // fetch it + m = fetch(nameDirectory_recid); + } + m.put(name, recid); + } + + @Override + public Map getCollections() { + try { + Map ret = new LinkedHashMap(); + long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT); + if (nameDirectory_recid == 0) + return ret; + HTree m = fetch(nameDirectory_recid); + + for (Map.Entry e : m.entrySet()) { + Object o = fetch(e.getValue()); + if (o instanceof BTree) { + if (((BTree) o).hasValues) + o = getTreeMap(e.getKey()); + else + o = getTreeSet(e.getKey()); + } else if (o instanceof HTree) { + if (((HTree) o).hasValues) + o = getHashMap(e.getKey()); + else + o = getHashSet(e.getKey()); + } + + ret.put(e.getKey(), o); + } + return Collections.unmodifiableMap(ret); + } catch (IOException e) { + throw new IOError(e); + } + + } + + @Override + public void deleteCollection(String name) { + try { + long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT); + if (nameDirectory_recid == 0) + throw new IOException("Collection not found"); + HTree dir = fetch(nameDirectory_recid); + + Long recid = dir.get(name); + if (recid == null) + throw new IOException("Collection not found"); + + Object o = fetch(recid); + // we can not use O instance since it is not correctly initialized + if (o instanceof LinkedList) { + LinkedList l = (LinkedList) o; + l.clear(); + delete(l.rootRecid); + } else if (o instanceof BTree) { + ((BTree) o).clear(); + } else if (o instanceof HTree) { + HTree t = (HTree) o; + t.clear(); + HTreeDirectory n = (HTreeDirectory) fetch(t.rootRecid, t.SERIALIZER); + n.deleteAllChildren(); + delete(t.rootRecid); + } else { + throw new InternalError("unknown collection type: " + + (o == null ? 
null : o.getClass())); + } + delete(recid); + collections.remove(name); + + dir.remove(name); + + } catch (IOException e) { + throw new IOError(e); + } + + } + + /** + * we need to set reference to this DB instance, so serializer needs to be + * here + */ + final Serializer defaultSerializationSerializer = new Serializer() { + + @Override + public void serialize(DataOutput out, Serialization obj) throws IOException { + LongPacker.packLong(out, obj.serialClassInfoRecid); + SerialClassInfo.serializer.serialize(out, obj.registered); + } + + @Override + public Serialization deserialize(DataInput in) throws IOException, + ClassNotFoundException { + final long recid = LongPacker.unpackLong(in); + final ArrayList classes = SerialClassInfo.serializer + .deserialize(in); + return new Serialization(DBAbstract.this, recid, classes); + } + }; + + public Serializer defaultSerializer() { + + try { + long serialClassInfoRecid = getRoot(SERIAL_CLASS_INFO_RECID_ROOT); + if (serialClassInfoRecid == 0) { + // allocate new recid + serialClassInfoRecid = insert(null, JDBMUtils.NULL_SERIALIZER, false); + // and insert new serializer + Serialization ser = new Serialization(this, serialClassInfoRecid, + new ArrayList()); + + update(serialClassInfoRecid, ser, defaultSerializationSerializer); + setRoot(SERIAL_CLASS_INFO_RECID_ROOT, serialClassInfoRecid); + return ser; + } else { + return fetch(serialClassInfoRecid, defaultSerializationSerializer); + } + + } catch (IOException e) { + throw new IOError(e); + } + + } + + final protected void checkNotClosed() { + if (isClosed()) + throw new IllegalStateException("db was closed"); + } + + protected abstract void setRoot(byte root, long recid); + + protected abstract long getRoot(byte root); + + @Override + public long collectionSize(Object collection) { + if (collection instanceof BTreeMap) { + BTreeMap t = (BTreeMap) collection; + if (t.fromKey != null || t.toKey != null) + throw new IllegalArgumentException( + "collectionSize does not work on BTree submap"); + return t.tree._entries; + } else if (collection instanceof HTree) { + return ((HTree) collection).getRoot().size; + } else if (collection instanceof HTreeSet) { + return collectionSize(((HTreeSet) collection).map); + } else if (collection instanceof BTreeSet) { + return collectionSize(((BTreeSet) collection).map); + } else if (collection instanceof LinkedList) { + return ((LinkedList) collection).getRoot().size; + } else { + throw new IllegalArgumentException("Not JDBM collection"); + } + } + + void addShutdownHook() { + if (shutdownCloseThread != null) { + shutdownCloseThread = new ShutdownCloseThread(); + Runtime.getRuntime().addShutdownHook(shutdownCloseThread); + } + } + + @Override + public void close() { + if (shutdownCloseThread != null) { + Runtime.getRuntime().removeShutdownHook(shutdownCloseThread); + shutdownCloseThread.dbToClose = null; + shutdownCloseThread = null; + } + } + + ShutdownCloseThread shutdownCloseThread = null; + + private static class ShutdownCloseThread extends Thread { + + DBAbstract dbToClose = null; + + ShutdownCloseThread() { + super("JDBM shutdown"); + } + + @Override + public void run() { + if (dbToClose != null && !dbToClose.isClosed()) { + dbToClose.shutdownCloseThread = null; + dbToClose.close(); + } + } + + } + + @Override + public void rollback() { + try { + for (WeakReference o : collections.values()) { + Object c = o.get(); + if (c != null && c instanceof BTreeMap) { + // reload tree + BTreeMap m = (BTreeMap) c; + m.tree = fetch(m.tree.getRecid()); + } + if (c != null && 
c instanceof BTreeSet) { + // reload tree + BTreeSet m = (BTreeSet) c; + m.map.tree = fetch(m.map.tree.getRecid()); + } + + } + } catch (IOException e) { + throw new IOError(e); + } + + } +} Index: graph/src/main/java/org/apache/jdbm/StorageDisk.java =================================================================== --- graph/src/main/java/org/apache/jdbm/StorageDisk.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/StorageDisk.java (Arbeitskopie) @@ -0,0 +1,224 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import static org.apache.jdbm.StorageDiskMapped.PAGES_PER_FILE; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.OverlappingFileLockException; +import java.util.ArrayList; +import java.util.List; + +/** + * Storage which used files on disk to store data + */ +public final class StorageDisk implements Storage { + + private ArrayList rafs = new ArrayList(); + private ArrayList rafsTranslation = new ArrayList(); + + private String fileName; + + private boolean readonly; + + public StorageDisk(String fileName, boolean readonly, boolean lockingDisabled) + throws IOException { + this.fileName = fileName; + this.readonly = readonly; + // make sure first file can be opened + // lock it + try { + if (!readonly && !lockingDisabled) + getRaf(0).getChannel().tryLock(); + } catch (IOException e) { + throw new IOException("Could not lock DB file: " + fileName, e); + } catch (OverlappingFileLockException e) { + throw new IOException("Could not lock DB file: " + fileName, e); + } + + } + + RandomAccessFile getRaf(long pageNumber) throws IOException { + + int fileNumber = (int) (Math.abs(pageNumber) / PAGES_PER_FILE); + + List c = pageNumber >= 0 ? rafs : rafsTranslation; + + // increase capacity of array lists if needed + for (int i = c.size(); i <= fileNumber; i++) { + c.add(null); + } + + RandomAccessFile ret = c.get(fileNumber); + if (ret == null) { + String name = StorageDiskMapped.makeFileName(fileName, pageNumber, + fileNumber); + ret = new RandomAccessFile(name, readonly ? 
"r" : "rw"); + c.set(fileNumber, ret); + } + return ret; + + } + + @Override + public void write(long pageNumber, ByteBuffer data) throws IOException { + if (data.capacity() != PAGE_SIZE) + throw new IllegalArgumentException(); + + long offset = pageNumber * PAGE_SIZE; + + RandomAccessFile file = getRaf(pageNumber); + + // if (lastPageNumber + 1 != pageNumber) //TODO cache position again, so + // seek is not necessary + file.seek(Math.abs(offset % (PAGES_PER_FILE * PAGE_SIZE))); + + file.write(data.array()); + } + + @Override + public ByteBuffer read(long pageNumber) throws IOException { + + long offset = pageNumber * PAGE_SIZE; + ByteBuffer buffer = ByteBuffer.allocate(PAGE_SIZE); + + RandomAccessFile file = getRaf(pageNumber); + // if (lastPageNumber + 1 != pageNumber) //TODO cache position again, so + // seek is not necessary + file.seek(Math.abs(offset % (PAGES_PER_FILE * PAGE_SIZE))); + int remaining = buffer.limit(); + int pos = 0; + while (remaining > 0) { + int read = file.read(buffer.array(), pos, remaining); + if (read == -1) { + System + .arraycopy(PageFile.CLEAN_DATA, 0, buffer.array(), pos, remaining); + break; + } + remaining -= read; + pos += read; + } + return buffer; + } + + static final String transaction_log_file_extension = ".t"; + + @Override + public DataOutputStream openTransactionLog() throws IOException { + String logName = fileName + transaction_log_file_extension; + final FileOutputStream fileOut = new FileOutputStream(logName); + return new DataOutputStream(new BufferedOutputStream(fileOut)) { + + // default implementation of flush on FileOutputStream does nothing, + // so we use little workaround to make sure that data were really flushed + @Override + public void flush() throws IOException { + super.flush(); + fileOut.flush(); + fileOut.getFD().sync(); + } + }; + } + + @Override + public void deleteAllFiles() { + deleteTransactionLog(); + StorageDiskMapped.deleteFiles(fileName); + } + + /** + * Synchronizes the file. 
+ */
+ @Override
+ public void sync() throws IOException {
+ for (RandomAccessFile file : rafs)
+ if (file != null)
+ file.getFD().sync();
+ for (RandomAccessFile file : rafsTranslation)
+ if (file != null)
+ file.getFD().sync();
+ }
+
+ @Override
+ public void forceClose() throws IOException {
+ for (RandomAccessFile f : rafs) {
+ if (f != null)
+ f.close();
+ }
+ rafs = null;
+ for (RandomAccessFile f : rafsTranslation) {
+ if (f != null)
+ f.close();
+ }
+ rafsTranslation = null;
+ }
+
+ @Override
+ public DataInputStream readTransactionLog() {
+
+ File logFile = new File(fileName + transaction_log_file_extension);
+ if (!logFile.exists())
+ return null;
+ if (logFile.length() == 0) {
+ logFile.delete();
+ return null;
+ }
+
+ DataInputStream ois = null;
+ try {
+ ois = new DataInputStream(new BufferedInputStream(new FileInputStream(
+ logFile)));
+ } catch (FileNotFoundException e) {
+ // the file should exist; its presence was checked just milliseconds
+ // earlier, so move on
+ return null;
+ }
+
+ try {
+ if (ois.readShort() != Magic.LOGFILE_HEADER)
+ throw new Error("Bad magic on log file");
+ } catch (IOException e) {
+ // corrupted/empty logfile
+ logFile.delete();
+ return null;
+ }
+ return ois;
+ }
+
+ @Override
+ public void deleteTransactionLog() {
+ File logFile = new File(fileName + transaction_log_file_extension);
+ if (logFile.exists())
+ logFile.delete();
+ }
+
+ @Override
+ public boolean isReadonly() {
+ return false;
+ }
+}
Index: graph/src/main/java/org/apache/jdbm/LongPacker.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/LongPacker.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/LongPacker.java (Arbeitskopie)
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Packing utility for non-negative long and int values.
+ *

+ * Originally developed for Kryo by Nathan Sweet. Modified for JDBM by Jan Kotek.
+ */
+public final class LongPacker {
+
+ /**
+ * Pack non-negative long into output stream. It will occupy 1-10 bytes
+ * depending on value (lower values occupy smaller space)
+ *
+ * @param os
+ * @param value
+ * @throws IOException
+ */
+ public static void packLong(DataOutput os, long value) throws IOException {
+
+ if (value < 0) {
+ throw new IllegalArgumentException("negative value: v=" + value);
+ }
+
+ while ((value & ~0x7FL) != 0) {
+ os.write((((int) value & 0x7F) | 0x80));
+ value >>>= 7;
+ }
+ os.write((byte) value);
+ }
+
+ /**
+ * Unpack non-negative long value from the input stream.
+ *
+ * @param is The input stream.
+ * @return The long value.
+ * @throws java.io.IOException
+ */
+ public static long unpackLong(DataInput is) throws IOException {
+
+ long result = 0;
+ for (int offset = 0; offset < 64; offset += 7) {
+ long b = is.readUnsignedByte();
+ result |= (b & 0x7F) << offset;
+ if ((b & 0x80) == 0) {
+ return result;
+ }
+ }
+ throw new Error("Malformed long.");
+ }
+
+ /**
+ * Pack non-negative int into output stream. It will occupy 1-5 bytes
+ * depending on value (lower values occupy smaller space)
+ *
+ * @param os
+ * @param value
+ * @throws IOException
+ */
+ public static void packInt(DataOutput os, int value) throws IOException {
+
+ if (value < 0) {
+ throw new IllegalArgumentException("negative value: v=" + value);
+ }
+
+ while ((value & ~0x7F) != 0) {
+ os.write(((value & 0x7F) | 0x80));
+ value >>>= 7;
+ }
+
+ os.write((byte) value);
+ }
+
+ public static int unpackInt(DataInput is) throws IOException {
+ for (int offset = 0, result = 0; offset < 32; offset += 7) {
+ int b = is.readUnsignedByte();
+ result |= (b & 0x7F) << offset;
+ if ((b & 0x80) == 0) {
+ return result;
+ }
+ }
+ throw new Error("Malformed integer.");
+ }
+
+}
Index: graph/src/main/java/org/apache/jdbm/SerializationHeader.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/SerializationHeader.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/SerializationHeader.java (Arbeitskopie)
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+/**
+ * Header byte used at the start of each record to indicate the data type.
+ * WARNING: the values below must be unique!
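The packLong/packInt loops above emit seven payload bits per byte and set the high bit on every byte except the last, so small values occupy less space on disk. A round-trip check (300 = 0b1_0010_1100 encodes as 0xAC 0x02; the class and method names are illustrative, and LongPacker is assumed to be on the classpath):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Round-trip check for the 7-bit packing above: 300 -> bytes 0xAC 0x02.
public class LongPackerDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    LongPacker.packLong(new DataOutputStream(bos), 300L);

    byte[] bytes = bos.toByteArray();
    // first byte: (300 & 0x7F) | 0x80 = 0x2C | 0x80 = 0xAC (continuation bit set)
    // second byte: 300 >>> 7 = 2 = 0x02 (high bit clear terminates the value)
    System.out.printf("%02X %02X%n", bytes[0], bytes[1]); // AC 02

    long back = LongPacker.unpackLong(
        new DataInputStream(new ByteArrayInputStream(bytes)));
    System.out.println(back); // 300
  }
}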
+ */ +public final class SerializationHeader { + + final static int NULL = 0; + final static int NORMAL = 1; + final static int BOOLEAN_TRUE = 2; + final static int BOOLEAN_FALSE = 3; + final static int INTEGER_MINUS_1 = 4; + final static int INTEGER_0 = 5; + final static int INTEGER_1 = 6; + final static int INTEGER_2 = 7; + final static int INTEGER_3 = 8; + final static int INTEGER_4 = 9; + final static int INTEGER_5 = 10; + final static int INTEGER_6 = 11; + final static int INTEGER_7 = 12; + final static int INTEGER_8 = 13; + final static int INTEGER_255 = 14; + final static int INTEGER_PACK_NEG = 15; + final static int INTEGER_PACK = 16; + final static int LONG_MINUS_1 = 17; + final static int LONG_0 = 18; + final static int LONG_1 = 19; + final static int LONG_2 = 20; + final static int LONG_3 = 21; + final static int LONG_4 = 22; + final static int LONG_5 = 23; + final static int LONG_6 = 24; + final static int LONG_7 = 25; + final static int LONG_8 = 26; + final static int LONG_PACK_NEG = 27; + final static int LONG_PACK = 28; + final static int LONG_255 = 29; + final static int LONG_MINUS_MAX = 30; + final static int SHORT_MINUS_1 = 31; + final static int SHORT_0 = 32; + final static int SHORT_1 = 33; + final static int SHORT_255 = 34; + final static int SHORT_FULL = 35; + final static int BYTE_MINUS_1 = 36; + final static int BYTE_0 = 37; + final static int BYTE_1 = 38; + final static int BYTE_FULL = 39; + final static int CHAR = 40; + final static int FLOAT_MINUS_1 = 41; + final static int FLOAT_0 = 42; + final static int FLOAT_1 = 43; + final static int FLOAT_255 = 44; + final static int FLOAT_SHORT = 45; + final static int FLOAT_FULL = 46; + final static int DOUBLE_MINUS_1 = 47; + final static int DOUBLE_0 = 48; + final static int DOUBLE_1 = 49; + final static int DOUBLE_255 = 50; + final static int DOUBLE_SHORT = 51; + final static int DOUBLE_FULL = 52; + final static int DOUBLE_ARRAY = 53; + final static int BIGDECIMAL = 54; + final static int BIGINTEGER = 55; + final static int FLOAT_ARRAY = 56; + final static int INTEGER_MINUS_MAX = 57; + final static int SHORT_ARRAY = 58; + final static int BOOLEAN_ARRAY = 59; + + final static int ARRAY_INT_B_255 = 60; + final static int ARRAY_INT_B_INT = 61; + final static int ARRAY_INT_S = 62; + final static int ARRAY_INT_I = 63; + final static int ARRAY_INT_PACKED = 64; + + final static int ARRAY_LONG_B = 65; + final static int ARRAY_LONG_S = 66; + final static int ARRAY_LONG_I = 67; + final static int ARRAY_LONG_L = 68; + final static int ARRAY_LONG_PACKED = 69; + + final static int CHAR_ARRAY = 70; + final static int ARRAY_BYTE_INT = 71; + + final static int NOTUSED_ARRAY_OBJECT_255 = 72; + final static int ARRAY_OBJECT = 73; + // special cases for BTree values which stores references + final static int ARRAY_OBJECT_PACKED_LONG = 74; + final static int ARRAYLIST_PACKED_LONG = 75; + + final static int STRING_EMPTY = 101; + final static int NOTUSED_STRING_255 = 102; + final static int STRING = 103; + final static int NOTUSED_ARRAYLIST_255 = 104; + final static int ARRAYLIST = 105; + + final static int NOTUSED_TREEMAP_255 = 106; + final static int TREEMAP = 107; + final static int NOTUSED_HASHMAP_255 = 108; + final static int HASHMAP = 109; + final static int NOTUSED_LINKEDHASHMAP_255 = 110; + final static int LINKEDHASHMAP = 111; + + final static int NOTUSED_TREESET_255 = 112; + final static int TREESET = 113; + final static int NOTUSED_HASHSET_255 = 114; + final static int HASHSET = 115; + final static int NOTUSED_LINKEDHASHSET_255 = 
116; + final static int LINKEDHASHSET = 117; + final static int NOTUSED_LINKEDLIST_255 = 118; + final static int LINKEDLIST = 119; + + final static int NOTUSED_VECTOR_255 = 120; + final static int VECTOR = 121; + final static int IDENTITYHASHMAP = 122; + final static int HASHTABLE = 123; + final static int LOCALE = 124; + final static int PROPERTIES = 125; + + final static int CLASS = 126; + final static int DATE = 127; + final static int UUID = 128; + + static final int JDBMLINKEDLIST = 159; + static final int HTREE = 160; + + final static int BTREE = 161; + + static final int BTREE_NODE_LEAF = 162; + static final int BTREE_NODE_NONLEAF = 163; + static final int HTREE_BUCKET = 164; + static final int HTREE_DIRECTORY = 165; + /** + * used for reference to already serialized object in object graph + */ + static final int OBJECT_STACK = 166; + static final int JAVA_SERIALIZATION = 172; + +} Index: graph/src/main/java/org/apache/jdbm/StorageZip.java =================================================================== --- graph/src/main/java/org/apache/jdbm/StorageZip.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/StorageZip.java (Arbeitskopie) @@ -0,0 +1,99 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; + +/** + * A read-only storage which reads data from compressed zip archive. + *

+ * To improve performance with compressed archives each page is stored in + * separate file (zip archive entry). + */ +public final class StorageZip implements Storage { + + private String zip; + private String zip2; + private ZipFile z; + + StorageZip(String zipFile) throws IOException { + zip = zipFile; + z = new ZipFile(zip); + zip2 = "db"; + } + + @Override + public void write(long pageNumber, ByteBuffer data) throws IOException { + throw new UnsupportedOperationException("readonly"); + } + + @Override + public ByteBuffer read(long pageNumber) throws IOException { + ByteBuffer data = ByteBuffer.allocate(PAGE_SIZE); + + ZipEntry e = z.getEntry(zip2 + pageNumber); + if (e == null) + return ByteBuffer.wrap(PageFile.CLEAN_DATA).asReadOnlyBuffer(); + + InputStream i = z.getInputStream(e); + new DataInputStream(i).readFully(data.array()); + i.close(); + return data; + } + + @Override + public void forceClose() throws IOException { + z.close(); + z = null; + } + + @Override + public DataInputStream readTransactionLog() { + throw new UnsupportedOperationException("readonly"); + } + + @Override + public void deleteTransactionLog() { + throw new UnsupportedOperationException("readonly"); + } + + @Override + public void sync() throws IOException { + throw new UnsupportedOperationException("readonly"); + } + + @Override + public DataOutputStream openTransactionLog() throws IOException { + throw new UnsupportedOperationException("readonly"); + } + + @Override + public void deleteAllFiles() throws IOException { + } + + @Override + public boolean isReadonly() { + return true; + } +} Index: graph/src/main/java/org/apache/jdbm/DBCacheMRU.java =================================================================== --- graph/src/main/java/org/apache/jdbm/DBCacheMRU.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/DBCacheMRU.java (Arbeitskopie) @@ -0,0 +1,320 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; + +import javax.crypto.Cipher; + +/** + * A DB wrapping and caching another DB. + */ +public final class DBCacheMRU extends DBCache { + + private static final boolean debug = false; + + /** + * Cached object hashtable + */ + protected LongHashMap _hash; + + /** + * Maximum number of objects in the cache. + */ + protected int _max; + + /** + * Beginning of linked-list of cache elements. First entry is element which + * has been used least recently. + */ + protected CacheEntry _first; + + /** + * End of linked-list of cache elements. Last entry is element which has been + * used most recently. + */ + protected CacheEntry _last; + + /** + * Construct a CacheRecordManager wrapping another DB and using a given cache + * policy. 
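StorageZip above resolves page N to the archive entry "db" + N, so a compatible read-only archive just needs one PAGE_SIZE-sized entry per page under that naming scheme. A hedged sketch of producing such an archive with plain java.util.zip; the helper class and the pages array are assumptions of the sketch, not jdbm API:

import java.io.FileOutputStream;
import java.io.IOException;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

// Hypothetical helper: writes page images as the "db<pageNumber>" entries
// that StorageZip's read() looks up. 'pages' holds one PAGE_SIZE-sized
// byte[] per page (an assumption of the sketch).
class ZipPageArchiveSketch {
  static void writeArchive(String zipFile, byte[][] pages) throws IOException {
    try (ZipOutputStream zos =
        new ZipOutputStream(new FileOutputStream(zipFile))) {
      for (int pageNumber = 0; pageNumber < pages.length; pageNumber++) {
        zos.putNextEntry(new ZipEntry("db" + pageNumber));
        zos.write(pages[pageNumber]);
        zos.closeEntry();
      }
    }
  }
}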
+ */ + public DBCacheMRU(String filename, boolean readonly, + boolean transactionDisabled, Cipher cipherIn, Cipher cipherOut, + boolean useRandomAccessFile, boolean deleteFilesAfterClose, + int cacheMaxRecords, boolean lockingDisabled) { + super(filename, readonly, transactionDisabled, cipherIn, cipherOut, + useRandomAccessFile, deleteFilesAfterClose, lockingDisabled); + + _hash = new LongHashMap(cacheMaxRecords); + _max = cacheMaxRecords; + + } + + public synchronized A fetch(long recid, Serializer serializer, + boolean disableCache) throws IOException { + + if (disableCache) + return super.fetch(recid, serializer, disableCache); + else + return fetch(recid, serializer); + } + + public synchronized void delete(long recid) throws IOException { + checkNotClosed(); + + super.delete(recid); + synchronized (_hash) { + CacheEntry entry = _hash.get(recid); + if (entry != null) { + removeEntry(entry); + _hash.remove(entry._recid); + } + _hashDirties.remove(recid); + } + + if (super.needsAutoCommit()) + commit(); + + } + + public synchronized void update(final long recid, final A obj, + final Serializer serializer) throws IOException { + checkNotClosed(); + + synchronized (_hash) { + + // remove entry if it already exists + CacheEntry entry = cacheGet(recid); + if (entry != null) { + _hash.remove(recid); + removeEntry(entry); + } + + // check if entry is in dirties, in this case just update its object + DirtyCacheEntry e = _hashDirties.get(recid); + if (e != null) { + if (recid != e._recid) + throw new Error(); + e._obj = obj; + e._serializer = serializer; + return; + } + + // create new dirty entry + e = new DirtyCacheEntry(); + e._recid = recid; + e._obj = obj; + e._serializer = serializer; + _hashDirties.put(recid, e); + } + + if (super.needsAutoCommit()) + commit(); + + } + + public synchronized A fetch(long recid, Serializer serializer) + throws IOException { + + checkNotClosed(); + + final CacheEntry entry = cacheGet(recid); + if (entry != null) { + return (A) entry._obj; + } + + // check dirties + final DirtyCacheEntry entry2 = _hashDirties.get(recid); + if (entry2 != null) { + return (A) entry2._obj; + } + + A value = super.fetch(recid, serializer); + + if (super.needsAutoCommit()) + commit(); + + // put record into MRU cache + cachePut(recid, value); + + return value; + } + + public synchronized void close() { + + if (isClosed()) + return; + + updateCacheEntries(); + super.close(); + _hash = null; + } + + public synchronized void rollback() { + + // discard all cache entries since we don't know which entries + // where part of the transaction + synchronized (_hash) { + _hash.clear(); + _first = null; + _last = null; + } + + super.rollback(); + } + + /** + * Obtain an object in the cache + */ + protected CacheEntry cacheGet(long key) { + synchronized (_hash) { + CacheEntry entry = _hash.get(key); + if (entry != null && _last != entry) { + // touch entry + removeEntry(entry); + addEntry(entry); + } + return entry; + } + } + + /** + * Place an object in the cache. 
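cacheGet above, together with the addEntry/removeEntry/purgeEntry list manipulation that follows, implements least-recently-used eviction by hand over a doubly linked list. For comparison, the same policy can be expressed with the JDK's LinkedHashMap in access order; this sketch is illustrative only, since jdbm's own LongHashMap avoids boxing the long recids:

import java.util.LinkedHashMap;
import java.util.Map;

// LRU cache with the same eviction policy as the linked list above,
// expressed via LinkedHashMap's access order (illustrative only).
class LruCacheSketch<K, V> extends LinkedHashMap<K, V> {
  private final int maxEntries;

  LruCacheSketch(int maxEntries) {
    super(16, 0.75f, true); // accessOrder = true: get() touches the entry
    this.maxEntries = maxEntries;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    return size() > maxEntries; // evict the least recently used entry
  }
}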
+ * + * @throws IOException + */ + protected void cachePut(final long recid, final Object value) + throws IOException { + synchronized (_hash) { + CacheEntry entry = _hash.get(recid); + if (entry != null) { + entry._obj = value; + // touch entry + if (_last != entry) { + removeEntry(entry); + addEntry(entry); + } + } else { + + if (_hash.size() >= _max) { + // purge and recycle entry + entry = purgeEntry(); + entry._recid = recid; + entry._obj = value; + } else { + entry = new CacheEntry(recid, value); + } + addEntry(entry); + _hash.put(entry._recid, entry); + } + } + } + + /** + * Add a CacheEntry. Entry goes at the end of the list. + */ + protected void addEntry(CacheEntry entry) { + synchronized (_hash) { + if (_first == null) { + _first = entry; + _last = entry; + } else { + _last._next = entry; + entry._previous = _last; + _last = entry; + } + } + } + + /** + * Remove a CacheEntry from linked list + */ + protected void removeEntry(CacheEntry entry) { + synchronized (_hash) { + if (entry == _first) { + _first = entry._next; + } + if (_last == entry) { + _last = entry._previous; + } + CacheEntry previous = entry._previous; + CacheEntry next = entry._next; + if (previous != null) { + previous._next = next; + } + if (next != null) { + next._previous = previous; + } + entry._previous = null; + entry._next = null; + } + } + + /** + * Purge least recently used object from the cache + * + * @return recyclable CacheEntry + */ + protected CacheEntry purgeEntry() { + synchronized (_hash) { + CacheEntry entry = _first; + if (entry == null) + return new CacheEntry(-1, null); + + removeEntry(entry); + _hash.remove(entry._recid); + entry._obj = null; + return entry; + } + } + + @SuppressWarnings("unchecked") + static final class CacheEntry { + + protected long _recid; + protected Object _obj; + + protected CacheEntry _previous; + protected CacheEntry _next; + + CacheEntry(long recid, Object obj) { + _recid = recid; + _obj = obj; + } + + } + + public void clearCache() { + if (debug) + System.err.println("DBCache: Clear cache"); + + // discard all cache entries since we don't know which entries + // where part of the transaction + synchronized (_hash) { + _hash.clear(); + _first = null; + _last = null; + + // clear dirties + updateCacheEntries(); + + } + } + +} Index: graph/src/main/java/org/apache/jdbm/BTreeMap.java =================================================================== --- graph/src/main/java/org/apache/jdbm/BTreeMap.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/BTreeMap.java (Arbeitskopie) @@ -0,0 +1,609 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import java.io.IOError; +import java.io.IOException; +import java.util.AbstractMap; +import java.util.AbstractSet; +import java.util.Comparator; +import java.util.Iterator; +import java.util.NavigableMap; +import java.util.NavigableSet; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.concurrent.ConcurrentNavigableMap; + +/** + * Wrapper for BTree which implements + * ConcurrentNavigableMap interface + * + * @param key type + * @param value type + */ +public final class BTreeMap extends AbstractMap implements + NavigableMap { + + protected BTree tree; + + protected final K fromKey; + + protected final K toKey; + + protected final boolean readonly; + + protected NavigableSet keySet2; + private final boolean toInclusive; + private final boolean fromInclusive; + + public BTreeMap(BTree tree, boolean readonly) { + this(tree, readonly, null, false, null, false); + } + + protected BTreeMap(BTree tree, boolean readonly, K fromKey, + boolean fromInclusive, K toKey, boolean toInclusive) { + this.tree = tree; + this.fromKey = fromKey; + this.fromInclusive = fromInclusive; + this.toKey = toKey; + this.toInclusive = toInclusive; + this.readonly = readonly; + } + + @Override + public Set> entrySet() { + return _entrySet; + } + + private final Set> _entrySet = new AbstractSet>() { + + protected Entry newEntry(K k, V v) { + return new SimpleEntry(k, v) { + private static final long serialVersionUID = 978651696969194154L; + + @Override + public V setValue(V arg0) { + BTreeMap.this.put(getKey(), arg0); + return super.setValue(arg0); + } + + }; + } + + @Override + public boolean add(java.util.Map.Entry e) { + if (readonly) + throw new UnsupportedOperationException("readonly"); + + try { + if (e.getKey() == null) + throw new NullPointerException("Can not add null key"); + if (!inBounds(e.getKey())) + throw new IllegalArgumentException("key outside of bounds"); + return tree.insert(e.getKey(), e.getValue(), true) == null; + } catch (IOException e1) { + throw new IOError(e1); + } + } + + @Override + @SuppressWarnings("unchecked") + public boolean contains(Object o) { + + if (o instanceof Entry) { + Entry e = (java.util.Map.Entry) o; + try { + if (!inBounds(e.getKey())) + return false; + if (e.getKey() != null && tree.get(e.getKey()) != null) + return true; + } catch (IOException e1) { + throw new IOError(e1); + } + } + return false; + } + + @Override + public Iterator> iterator() { + try { + final BTree.BTreeTupleBrowser br = fromKey == null ? 
tree + .browse() : tree.browse(fromKey, fromInclusive); + return new Iterator>() { + + private Entry next; + private K lastKey; + + void ensureNext() { + try { + BTree.BTreeTuple t = new BTree.BTreeTuple(); + if (br.getNext(t) && inBounds(t.key)) + next = newEntry(t.key, t.value); + else + next = null; + } catch (IOException e1) { + throw new IOError(e1); + } + } + + { + ensureNext(); + } + + @Override + public boolean hasNext() { + return next != null; + } + + @Override + public java.util.Map.Entry next() { + if (next == null) + throw new NoSuchElementException(); + Entry ret = next; + lastKey = ret.getKey(); + // move to next position + ensureNext(); + return ret; + } + + @Override + public void remove() { + if (readonly) + throw new UnsupportedOperationException("readonly"); + + if (lastKey == null) + throw new IllegalStateException(); + try { + br.remove(lastKey); + lastKey = null; + } catch (IOException e1) { + throw new IOError(e1); + } + + } + }; + + } catch (IOException e) { + throw new IOError(e); + } + + } + + @Override + @SuppressWarnings("unchecked") + public boolean remove(Object o) { + if (readonly) + throw new UnsupportedOperationException("readonly"); + + if (o instanceof Entry) { + Entry e = (java.util.Map.Entry) o; + try { + // check for nulls + if (e.getKey() == null || e.getValue() == null) + return false; + if (!inBounds(e.getKey())) + throw new IllegalArgumentException("out of bounds"); + // get old value, must be same as item in entry + V v = get(e.getKey()); + if (v == null || !e.getValue().equals(v)) + return false; + V v2 = tree.remove(e.getKey()); + return v2 != null; + } catch (IOException e1) { + throw new IOError(e1); + } + } + return false; + + } + + @Override + public int size() { + return BTreeMap.this.size(); + } + + @Override + public void clear() { + if (fromKey != null || toKey != null) + super.clear(); + else + try { + tree.clear(); + } catch (IOException e) { + throw new IOError(e); + } + } + + }; + + public boolean inBounds(K e) { + if (fromKey == null && toKey == null) + return true; + + Comparator comp = comparator(); + if (comp == null) + comp = JDBMUtils.COMPARABLE_COMPARATOR; + + if (fromKey != null) { + final int compare = comp.compare(e, fromKey); + if (compare < 0) + return false; + if (!fromInclusive && compare == 0) + return false; + } + if (toKey != null) { + final int compare = comp.compare(e, toKey); + if (compare > 0) + return false; + if (!toInclusive && compare == 0) + return false; + } + return true; + } + + @SuppressWarnings("unchecked") + @Override + public V get(Object key) { + try { + if (key == null) + return null; + if (!inBounds((K) key)) + return null; + return tree.get((K) key); + } catch (ClassCastException e) { + return null; + } catch (IOException e) { + throw new IOError(e); + } + } + + @SuppressWarnings("unchecked") + @Override + public V remove(Object key) { + if (readonly) + throw new UnsupportedOperationException("readonly"); + + try { + if (key == null || tree.get((K) key) == null) + return null; + if (!inBounds((K) key)) + throw new IllegalArgumentException("out of bounds"); + + return tree.remove((K) key); + } catch (ClassCastException e) { + return null; + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public V put(K key, V value) { + if (readonly) + throw new UnsupportedOperationException("readonly"); + + try { + if (key == null || value == null) + throw new NullPointerException("Null key or value"); + if (!inBounds(key)) + throw new IllegalArgumentException("out of bounds"); + return 
tree.insert(key, value, true); + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public void clear() { + entrySet().clear(); + } + + @SuppressWarnings("unchecked") + @Override + public boolean containsKey(Object key) { + if (key == null) + return false; + try { + if (!inBounds((K) key)) + return false; + V v = tree.get((K) key); + return v != null; + } catch (IOException e) { + throw new IOError(e); + } catch (ClassCastException e) { + return false; + } + } + + @Override + public Comparator comparator() { + return tree._comparator; + } + + @Override + public K firstKey() { + if (isEmpty()) + return null; + try { + + BTree.BTreeTupleBrowser b = fromKey == null ? tree.browse() : tree + .browse(fromKey, fromInclusive); + BTree.BTreeTuple t = new BTree.BTreeTuple(); + b.getNext(t); + return t.key; + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public K lastKey() { + if (isEmpty()) + return null; + try { + BTree.BTreeTupleBrowser b = toKey == null ? tree.browse(null, true) + : tree.browse(toKey, false); + BTree.BTreeTuple t = new BTree.BTreeTuple(); + b.getPrevious(t); + if (!toInclusive && toKey != null) { + // make sure we wont return last key + Comparator c = comparator(); + if (c == null) + c = JDBMUtils.COMPARABLE_COMPARATOR; + if (c.compare(t.key, toKey) == 0) + b.getPrevious(t); + } + return t.key; + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public NavigableMap headMap(K toKey2, boolean inclusive) { + K toKey3 = JDBMUtils.min(this.toKey, toKey2, comparator()); + boolean inclusive2 = toKey3 == toKey ? toInclusive : inclusive; + return new BTreeMap(tree, readonly, this.fromKey, this.fromInclusive, + toKey3, inclusive2); + } + + @Override + public NavigableMap headMap(K toKey) { + return headMap(toKey, false); + } + + @Override + public Entry lowerEntry(K key) { + K k = lowerKey(key); + return k == null ? null : new SimpleEntry(k, get(k)); + } + + @Override + public K lowerKey(K key) { + if (isEmpty()) + return null; + K key2 = JDBMUtils.min(key, toKey, comparator()); + try { + BTree.BTreeTupleBrowser b = tree.browse(key2, true); + BTree.BTreeTuple t = new BTree.BTreeTuple(); + b.getPrevious(t); + + return t.key; + + } catch (IOException e) { + throw new IOError(e); + } + + } + + @Override + public Entry floorEntry(K key) { + K k = floorKey(key); + return k == null ? null : new SimpleEntry(k, get(k)); + + } + + @Override + public K floorKey(K key) { + if (isEmpty()) + return null; + + K key2 = JDBMUtils.max(key, fromKey, comparator()); + try { + BTree.BTreeTupleBrowser b = tree.browse(key2, true); + BTree.BTreeTuple t = new BTree.BTreeTuple(); + b.getNext(t); + Comparator comp = comparator(); + if (comp == null) + comp = JDBMUtils.COMPARABLE_COMPARATOR; + if (comp.compare(t.key, key2) == 0) + return t.key; + + b.getPrevious(t); + b.getPrevious(t); + return t.key; + + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public Entry ceilingEntry(K key) { + K k = ceilingKey(key); + return k == null ? null : new SimpleEntry(k, get(k)); + } + + @Override + public K ceilingKey(K key) { + if (isEmpty()) + return null; + K key2 = JDBMUtils.min(key, toKey, comparator()); + + try { + BTree.BTreeTupleBrowser b = tree.browse(key2, true); + BTree.BTreeTuple t = new BTree.BTreeTuple(); + b.getNext(t); + return t.key; + + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public Entry higherEntry(K key) { + K k = higherKey(key); + return k == null ? 
null : new SimpleEntry(k, get(k));
+  }
+
+  @Override
+  public K higherKey(K key) {
+    if (isEmpty())
+      return null;
+
+    K key2 = JDBMUtils.max(key, fromKey, comparator());
+
+    try {
+      BTree.BTreeTupleBrowser b = tree.browse(key2, false);
+      BTree.BTreeTuple t = new BTree.BTreeTuple();
+      b.getNext(t);
+      return t.key;
+
+    } catch (IOException e) {
+      throw new IOError(e);
+    }
+  }
+
+  @Override
+  public Entry firstEntry() {
+    K k = firstKey();
+    return k == null ? null : new SimpleEntry(k, get(k));
+  }
+
+  @Override
+  public Entry lastEntry() {
+    K k = lastKey();
+    return k == null ? null : new SimpleEntry(k, get(k));
+  }
+
+  @Override
+  public Entry pollFirstEntry() {
+    Entry first = firstEntry();
+    if (first != null)
+      remove(first.getKey());
+    return first;
+  }
+
+  @Override
+  public Entry pollLastEntry() {
+    Entry last = lastEntry();
+    if (last != null)
+      remove(last.getKey());
+    return last;
+  }
+
+  @Override
+  public ConcurrentNavigableMap descendingMap() {
+    throw new UnsupportedOperationException("not implemented yet");
+    // TODO implement descending (reverse order) map
+  }
+
+  @Override
+  public NavigableSet keySet() {
+    return navigableKeySet();
+  }
+
+  @Override
+  public NavigableSet navigableKeySet() {
+    if (keySet2 == null)
+      keySet2 = new BTreeSet((BTreeMap) this);
+    return keySet2;
+  }
+
+  @Override
+  public NavigableSet descendingKeySet() {
+    return descendingMap().navigableKeySet();
+  }
+
+  @Override
+  public NavigableMap tailMap(K fromKey) {
+    return tailMap(fromKey, true);
+  }
+
+  @Override
+  public NavigableMap tailMap(K fromKey2, boolean inclusive) {
+    K fromKey3 = JDBMUtils.max(this.fromKey, fromKey2, comparator());
+    // compare against this.fromKey and keep this.fromInclusive when the bound
+    // is unchanged (mirrors the headMap logic above)
+    boolean inclusive2 = fromKey3 == this.fromKey ? this.fromInclusive
+        : inclusive;
+
+    return new BTreeMap(tree, readonly, fromKey3, inclusive2, toKey,
+        toInclusive);
+  }
+
+  @Override
+  public NavigableMap subMap(K fromKey, boolean fromInclusive, K toKey,
+      boolean toInclusive) {
+    Comparator comp = comparator();
+    if (comp == null)
+      comp = JDBMUtils.COMPARABLE_COMPARATOR;
+    if (comp.compare(fromKey, toKey) > 0)
+      throw new IllegalArgumentException("fromKey is bigger than toKey");
+    return new BTreeMap(tree, readonly, fromKey, fromInclusive, toKey,
+        toInclusive);
+  }
+
+  @Override
+  public NavigableMap subMap(K fromKey, K toKey) {
+    return subMap(fromKey, true, toKey, false);
+  }
+
+  public BTree getTree() {
+    return tree;
+  }
+
+  public void addRecordListener(RecordListener listener) {
+    tree.addRecordListener(listener);
+  }
+
+  public DBAbstract getRecordManager() {
+    return tree.getRecordManager();
+  }
+
+  public void removeRecordListener(RecordListener listener) {
+    tree.removeRecordListener(listener);
+  }
+
+  @Override
+  public int size() {
+    if (fromKey == null && toKey == null)
+      return (int) tree._entries; // use fast counter on tree if Map has no
+                                  // bounds
+    else {
+      // have to count items in iterator
+      Iterator iter = keySet().iterator();
+      int counter = 0;
+      while (iter.hasNext()) {
+        iter.next();
+        counter++;
+      }
+      return counter;
+    }
+
+  }
+
+}
Index: graph/src/main/java/org/apache/jdbm/BTree.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/BTree.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/BTree.java (Arbeitskopie)
@@ -0,0 +1,635 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOError;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+
+/**
+ * B+Tree persistent indexing data structure. B+Trees are optimized for
+ * block-based, random I/O storage because they store multiple keys on one tree
+ * node (called BTreeNode). In addition, the leaf nodes directly
+ * contain (inline) small values associated with the keys, allowing a single (or
+ * sequential) disk read of all the values on the node.
+ * <p>
+ * B+Trees are n-ary, yielding log(N) search cost. They are self-balancing,
+ * preventing search performance degradation as the size of the tree grows.
+ * <p>
+ * BTree stores its keys sorted. By default JDBM expects keys to implement the
+ * Comparable interface, but the user may supply a custom
+ * Comparator at BTree creation time. The comparator is serialized and
+ * stored as part of the BTree.
+ * <p>
+ * The B+Tree allows traversing the keys in forward and reverse order using a
+ * TupleBrowser obtained from the browse() methods, but it is usually more
+ * convenient to use the BTreeMap wrapper, which implements the SortedMap
+ * interface.
+ * <p>
+ * This implementation does not directly support duplicate keys. Duplicates can
+ * be handled by grouping the values for a key in an ArrayList. This scenario is
+ * supported by JDBM serialization, so there is no big performance penalty.
+ * <p>
+ * There is no limit on key size or value size, but it is recommended to keep
+ * keys as small as possible to reduce disk I/O. If a serialized value exceeds
+ * 32 bytes, it is stored in a separate record and the tree contains only a
+ * recid reference to it. BTree uses delta compression for its keys.
+ */
+public final class BTree {
+
+  private static final boolean DEBUG = false;
+
+  /**
+   * Default node size (number of entries per node)
+   */
+  public static final int DEFAULT_SIZE = 32; // TODO test optimal size, it has
+                                             // serious impact on sequential
+                                             // write and read
+
+  /**
+   * Record manager used to persist changes in BTreeNodes
+   */
+  protected transient DBAbstract _db;
+
+  /**
+   * This BTree's record ID in the DB.
+   */
+  private transient long _recid;
+
+  /**
+   * Comparator used to index entries (optional)
+   */
+  protected Comparator _comparator;
+
+  /**
+   * Serializer used to serialize index keys (optional)
+   */
+  protected Serializer keySerializer;
+
+  /**
+   * Serializer used to serialize index values (optional)
+   */
+  protected Serializer valueSerializer;
+
+  /**
+   * indicates if values should be loaded during deserialization, set to false
+   * during defragmentation
+   */
+  boolean loadValues = true;
+
+  /** if false map contains only keys, used for set */
+  boolean hasValues = true;
+
+  /**
+   * The number of structural modifications to the tree for fail fast iterators.
+   * This value is just for runtime, it is not persisted
+   */
+  transient int modCount = 0;
+
+  /**
+   * cached instance of an insert result, so we do not have to allocate a new
+   * object on each insert
+   */
+  protected BTreeNode.InsertResult insertResultReuse; // TODO investigate
+                                                      // performance
+                                                      // impact of
+                                                      // removing this
+
+  public Serializer getKeySerializer() {
+    return keySerializer;
+  }
+
+  public Serializer getValueSerializer() {
+    return valueSerializer;
+  }
+
+  /**
+   * Height of the B+Tree. This is the number of BTreeNodes you have to traverse
+   * to get to a leaf BTreeNode, starting from the root.
+   */
+  private int _height;
+
+  /**
+   * Recid of the root BTreeNode
+   */
+  private transient long _root;
+
+  /**
+   * Total number of entries in the BTree
+   */
+  protected volatile long _entries;
+
+  /**
+   * Serializer used for BTreeNodes of this tree
+   */
+  private transient BTreeNode _nodeSerializer = new BTreeNode();
+  {
+    _nodeSerializer._btree = this;
+  }
+
+  /**
+   * Listeners which are notified about changes in records
+   */
+  protected RecordListener[] recordListeners = new RecordListener[0];
+
+  /**
+   * No-argument constructor used by serialization.
+   */
+  public BTree() {
+    // empty
+  }
+
+  /**
+   * Create a new persistent BTree
+   */
+  public static BTree createInstance(
+      DBAbstract db) throws IOException {
+    return createInstance(db, null, null, null, true);
+  }
+
+  /**
+   * Create a new persistent BTree
+   */
+  public static BTree createInstance(DBAbstract db,
+      Comparator comparator, Serializer keySerializer,
+      Serializer valueSerializer, boolean hasValues) throws IOException {
+    BTree btree;
+
+    if (db == null) {
+      throw new IllegalArgumentException("Argument 'db' is null");
+    }
+
+    btree = new BTree();
+    btree._db = db;
+    btree._comparator = comparator;
+    btree.keySerializer = keySerializer;
+    btree.valueSerializer = valueSerializer;
+    btree.hasValues = hasValues;
+    btree._recid = db.insert(btree, btree.getRecordManager()
+        .defaultSerializer(), false);
+
+    return btree;
+  }
+
+  /**
+   * Load a persistent BTree.
+   *
+   * @param db DB used to store the persistent btree
+   * @param recid Record id of the BTree
+   */
+  @SuppressWarnings("unchecked")
+  public static BTree load(DBAbstract db, long recid)
+      throws IOException {
+    BTree btree = (BTree) db.fetch(recid);
+    btree._recid = recid;
+    btree._db = db;
+    btree._nodeSerializer = new BTreeNode();
+    btree._nodeSerializer._btree = btree;
+    return btree;
+  }
+
+  /**
+   * Insert an entry in the BTree.
+   * <p>

+   * The BTree cannot store duplicate entries. An existing entry can be replaced
+   * using the replace flag. If an entry with the same key already
+   * exists in the BTree, its value is returned.
+   *
+   * @param key Insert key
+   * @param value Insert value
+   * @param replace Set to true to replace an existing key-value pair.
+   * @return Existing value, if any.
+   */
+  public V insert(final K key, final V value, final boolean replace)
+      throws IOException {
+    if (key == null) {
+      throw new IllegalArgumentException("Argument 'key' is null");
+    }
+    if (value == null) {
+      throw new IllegalArgumentException("Argument 'value' is null");
+    }
+    BTreeNode rootNode = getRoot();
+
+    if (rootNode == null) {
+      // BTree is currently empty, create a new root BTreeNode
+      if (DEBUG) {
+        System.out.println("BTree.insert() new root BTreeNode");
+      }
+      rootNode = new BTreeNode(this, key, value);
+      _root = rootNode._recid;
+      _height = 1;
+      _entries = 1;
+      _db.update(_recid, this);
+      modCount++;
+      // notify listeners
+      for (RecordListener l : recordListeners) {
+        l.recordInserted(key, value);
+      }
+      return null;
+    } else {
+      BTreeNode.InsertResult insert = rootNode.insert(_height, key,
+          value, replace);
+      boolean dirty = false;
+      if (insert._overflow != null) {
+        // current root node overflowed, we replace with a new root node
+        if (DEBUG) {
+          System.out
+              .println("BTreeNode.insert() replace root BTreeNode due to overflow");
+        }
+        rootNode = new BTreeNode(this, rootNode, insert._overflow);
+        _root = rootNode._recid;
+        _height += 1;
+        dirty = true;
+      }
+      if (insert._existing == null) {
+        _entries++;
+        modCount++;
+        dirty = true;
+      }
+      if (dirty) {
+        _db.update(_recid, this);
+      }
+      // notify listeners
+      for (RecordListener l : recordListeners) {
+        if (insert._existing == null)
+          l.recordInserted(key, value);
+        else
+          l.recordUpdated(key, insert._existing, value);
+      }
+
+      // insert might have returned an existing value
+      V ret = insert._existing;
+      // zero out tuple and put it for reuse
+      insert._existing = null;
+      insert._overflow = null;
+      this.insertResultReuse = insert;
+      return ret;
+    }
+  }
+
+  /**
+   * Remove an entry with the given key from the BTree.
+   *
+   * @param key Removal key
+   * @return Value associated with the key, or null if no entry with given key
+   *         existed in the BTree.
+   */
+  public V remove(K key) throws IOException {
+    if (key == null) {
+      throw new IllegalArgumentException("Argument 'key' is null");
+    }
+    BTreeNode rootNode = getRoot();
+    if (rootNode == null) {
+      return null;
+    }
+    boolean dirty = false;
+    BTreeNode.RemoveResult remove = rootNode.remove(_height, key);
+    if (remove._underflow && rootNode.isEmpty()) {
+      _height -= 1;
+      dirty = true;
+
+      _db.delete(_root);
+      if (_height == 0) {
+        _root = 0;
+      } else {
+        _root = rootNode.loadLastChildNode()._recid;
+      }
+    }
+    if (remove._value != null) {
+      _entries--;
+      modCount++;
+      dirty = true;
+    }
+    if (dirty) {
+      _db.update(_recid, this);
+    }
+    if (remove._value != null)
+      for (RecordListener l : recordListeners)
+        l.recordRemoved(key, remove._value);
+    return remove._value;
+  }
+
+  /**
+   * Find the value associated with the given key.
+   *
+   * @param key Lookup key.
+   * @return Value associated with the key, or null if not found.
+ */ + public V get(K key) throws IOException { + if (key == null) { + throw new IllegalArgumentException("Argument 'key' is null"); + } + BTreeNode rootNode = getRoot(); + if (rootNode == null) { + return null; + } + + return rootNode.findValue(_height, key); + } + + /** + * Find the value associated with the given key, or the entry immediately + * following this key in the ordered BTree. + * + * @param key Lookup key. + * @return Value associated with the key, or a greater entry, or null if no + * greater entry was found. + */ + public BTreeTuple findGreaterOrEqual(K key) throws IOException { + BTreeTuple tuple; + BTreeTupleBrowser browser; + + if (key == null) { + // there can't be a key greater than or equal to "null" + // because null is considered an infinite key. + return null; + } + + tuple = new BTreeTuple(null, null); + browser = browse(key, true); + if (browser.getNext(tuple)) { + return tuple; + } else { + return null; + } + } + + /** + * Get a browser initially positioned at the beginning of the BTree. + *

+   * WARNING: If you make structural modifications to the BTree during
+   * browsing, you will get inconsistent browsing results.
+   *
+   * @return Browser positioned at the beginning of the BTree.
+   */
+  @SuppressWarnings("unchecked")
+  public BTreeTupleBrowser browse() throws IOException {
+    BTreeNode rootNode = getRoot();
+    if (rootNode == null) {
+      return EMPTY_BROWSER;
+    }
+    return rootNode.findFirst();
+  }
+
+  /**
+   * Get a browser initially positioned just before the given key.
+   * <p>

+   * WARNING: If you make structural modifications to the BTree during
+   * browsing, you will get inconsistent browsing results.
+   *
+   * @param key Key used to position the browser. If null, the browser will be
+   *          positioned after the last entry of the BTree. (Null is considered
+   *          to be an "infinite" key)
+   * @return Browser positioned just before the given key.
+   */
+  @SuppressWarnings("unchecked")
+  public BTreeTupleBrowser browse(final K key, final boolean inclusive)
+      throws IOException {
+    BTreeNode rootNode = getRoot();
+    if (rootNode == null) {
+      return EMPTY_BROWSER;
+    }
+    BTreeTupleBrowser browser = rootNode.find(_height, key, inclusive);
+    return browser;
+  }
+
+  /**
+   * Return the persistent record identifier of the BTree.
+   */
+  public long getRecid() {
+    return _recid;
+  }
+
+  /**
+   * Return the root BTreeNode, or null if it doesn't exist.
+   */
+  BTreeNode getRoot() throws IOException {
+    if (_root == 0) {
+      return null;
+    }
+    BTreeNode root = _db.fetch(_root, _nodeSerializer);
+    if (root != null) {
+      root._recid = _root;
+      root._btree = this;
+    }
+    return root;
+  }
+
+  static BTree readExternal(DataInput in, Serialization ser)
+      throws IOException, ClassNotFoundException {
+    BTree tree = new BTree();
+    tree._db = ser.db;
+    tree._height = in.readInt();
+    tree._recid = in.readLong();
+    tree._root = in.readLong();
+    tree._entries = in.readLong();
+    tree.hasValues = in.readBoolean();
+    tree._comparator = (Comparator) ser.deserialize(in);
+    tree.keySerializer = (Serializer) ser.deserialize(in);
+    tree.valueSerializer = (Serializer) ser.deserialize(in);
+    return tree;
+  }
+
+  public void writeExternal(DataOutput out) throws IOException {
+    out.writeInt(_height);
+    out.writeLong(_recid);
+    out.writeLong(_root);
+    out.writeLong(_entries);
+    out.writeBoolean(hasValues);
+    _db.defaultSerializer().serialize(out, _comparator);
+    _db.defaultSerializer().serialize(out, keySerializer);
+    _db.defaultSerializer().serialize(out, valueSerializer);
+  }
+
+  /**
+   * Copies the tree from one db to another, defragmenting it along the way
+   *
+   * @param recid
+   * @param r1
+   * @param r2
+   * @throws IOException
+   */
+  public static void defrag(long recid, DBStore r1, DBStore r2)
+      throws IOException {
+    try {
+      byte[] data = r1.fetchRaw(recid);
+      r2.forceInsert(recid, data);
+      DataInput in = new DataInputOutput(data);
+      BTree t = (BTree) r1.defaultSerializer().deserialize(in);
+      t.loadValues = false;
+      t._db = r1;
+      t._nodeSerializer = new BTreeNode(t, false);
+
+      BTreeNode p = t.getRoot();
+      if (p != null) {
+        r2.forceInsert(t._root, r1.fetchRaw(t._root));
+        p.defrag(r1, r2);
+      }
+
+    } catch (ClassNotFoundException e) {
+      throw new IOError(e);
+    }
+  }
+
+  /**
+   * Browser returning no element.
+   */
+  private static final BTreeTupleBrowser EMPTY_BROWSER = new BTreeTupleBrowser() {
+
+    public boolean getNext(BTreeTuple tuple) {
+      return false;
+    }
+
+    public boolean getPrevious(BTreeTuple tuple) {
+      return false;
+    }
+
+    public void remove(Object key) {
+      throw new IndexOutOfBoundsException();
+    }
+  };
+
+  /**
+   * add RecordListener which is notified about record changes
+   *
+   * @param listener
+   */
+  public void addRecordListener(RecordListener listener) {
+    recordListeners = Arrays
+        .copyOf(recordListeners, recordListeners.length + 1);
+    recordListeners[recordListeners.length - 1] = listener;
+  }
+
+  /**
+   * remove RecordListener which is notified about record changes
+   *
+   * @param listener
+   */
+  public void removeRecordListener(RecordListener listener) {
+    // Arrays.asList returns a fixed-size list, so copy it into an ArrayList
+    // before removing; calling remove() on the fixed-size list would throw
+    // UnsupportedOperationException
+    List l = new ArrayList(Arrays.asList(recordListeners));
+    l.remove(listener);
+    recordListeners = (RecordListener[]) l.toArray(new RecordListener[l.size()]);
+  }
+
+  public DBAbstract getRecordManager() {
+    return _db;
+  }
+
+  public Comparator getComparator() {
+    return _comparator;
+  }
+
+  /**
+   * Deletes all BTreeNodes in this BTree
+   */
+  public void clear() throws IOException {
+    BTreeNode rootNode = getRoot();
+    if (rootNode != null)
+      rootNode.delete();
+    _entries = 0;
+    modCount++;
+  }
+
+  /**
+   * Used for debugging and testing only. Populates the 'out' list with the
+   * recids of all child nodes in the BTree.
+   *
+   * @param out
+   * @throws IOException
+   */
+  void dumpChildNodeRecIDs(List out) throws IOException {
+    BTreeNode root = getRoot();
+    if (root != null) {
+      out.add(root._recid);
+      root.dumpChildNodeRecIDs(out, _height);
+    }
+  }
+
+  public boolean hasValues() {
+    return hasValues;
+  }
+
+  /**
+   * Browser to traverse a collection of tuples. The browser allows for forward
+   * and reverse order traversal.
+   */
+  static interface BTreeTupleBrowser {
+
+    /**
+     * Get the next tuple.
+     *
+     * @param tuple Tuple into which values are copied.
+     * @return True if values have been copied in tuple, or false if there is no
+     *         next tuple.
+     */
+    boolean getNext(BTree.BTreeTuple tuple) throws IOException;
+
+    /**
+     * Get the previous tuple.
+     *
+     * @param tuple Tuple into which values are copied.
+     * @return True if values have been copied in tuple, or false if there is no
+     *         previous tuple.
+     */
+    boolean getPrevious(BTree.BTreeTuple tuple) throws IOException;
+
+    /**
+     * Remove an entry with the given key, and increase the browser's
+     * expectedModCount. This method is here to support
+     * 'ConcurrentModificationException' on the Map interface.
+     *
+     * @param key
+     */
+    void remove(K key) throws IOException;
+
+  }
+
+  /**
+   * Tuple consisting of a key-value pair.
+   */
+  static final class BTreeTuple {
+
+    K key;
+
+    V value;
+
+    BTreeTuple() {
+      // empty
+    }
+
+    BTreeTuple(K key, V value) {
+      this.key = key;
+      this.value = value;
+    }
+
+  }
+
+}
Index: graph/src/main/java/org/apache/jdbm/DBStore.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/DBStore.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/DBStore.java (Arbeitskopie)
@@ -0,0 +1,912 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOError; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.TreeSet; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +import javax.crypto.Cipher; + +/** + * This class manages records, which are uninterpreted blobs of data. The set of + * operations is simple and straightforward: you communicate with the class + * using long "rowids" and byte[] data blocks. Rowids are returned on inserts + * and you can stash them away someplace safe to be able to get back to them. + * Data blocks can be as long as you wish, and may have lengths different from + * the original when updating. + *

+ * Operations are synchronized, so that only one of them will happen
+ * concurrently even if you hammer away from multiple threads. Operations are
+ * made atomic by keeping a transaction log which is recovered after a crash, so
+ * the operations specified by this interface all have ACID properties.
+ * <p>
+ * You identify a file by just the name. The package attaches .db for
+ * the database file, and .lg for the transaction log. The transaction
+ * log is synchronized regularly and then restarted, so don't worry if you see
+ * the size going up and down.
+ */
+public class DBStore extends DBAbstract {
+
+  /**
+   * Version of storage. It should be safe to open lower versions, but engine
+   * should throw exception while opening new versions (as it contains
+   * unsupported features or serialization)
+   */
+  static final long STORE_FORMAT_VERSION = 1L;
+
+  /**
+   * Underlying file for store records.
+   */
+  private PageFile _file;
+
+  /**
+   * Page manager for physical manager.
+   */
+  private PageManager _pageman;
+
+  /**
+   * Physical row identifier manager.
+   */
+  private PhysicalRowIdManager _physMgr;
+
+  /**
+   * Indicates that the store is opened for readonly operations. If true, store
+   * will throw UnsupportedOperationException when update/insert/delete
+   * operation is called
+   */
+  private final boolean readonly;
+  final boolean transactionsDisabled;
+  private final boolean deleteFilesAfterClose;
+
+  private static final int AUTOCOMMIT_AFTER_N_PAGES = 1024 * 5;
+
+  boolean commitInProgress = false;
+
+  /**
+   * cipher used for decryption, may be null
+   */
+  private Cipher cipherOut;
+  /**
+   * cipher used for encryption, may be null
+   */
+  private Cipher cipherIn;
+  private boolean useRandomAccessFile;
+  private boolean lockingDisabled;
+
+  void checkCanWrite() {
+    if (readonly)
+      throw new UnsupportedOperationException(
+          "Could not write, store is opened as read-only");
+  }
+
+  /**
+   * Logical to Physical row identifier manager.
+   */
+  private LogicalRowIdManager _logicMgr;
+
+  /**
+   * Static debugging flag
+   */
+  public static final boolean DEBUG = false;
+
+  static final long PREALOCATE_PHYS_RECID = Short.MIN_VALUE;
+
+  static final Object PREALOCATE_OBJ = new Object();
+
+  private final DataInputOutput buffer = new DataInputOutput();
+  private boolean bufferInUse = false;
+
+  private final String _filename;
+
+  public DBStore(String filename, boolean readonly,
+      boolean transactionDisabled, boolean lockingDisabled) throws IOException {
+    this(filename, readonly, transactionDisabled, null, null, false, false,
+        false);
+  }
+
+  /**
+   * Creates a record manager for the indicated file
+   *
+   * @throws IOException when the file cannot be opened or is not a valid file
+   *           content-wise.
+   */
+  public DBStore(String filename, boolean readonly,
+      boolean transactionDisabled, Cipher cipherIn, Cipher cipherOut,
+      boolean useRandomAccessFile, boolean deleteFilesAfterClose,
+      boolean lockingDisabled) {
+    _filename = filename;
+    this.readonly = readonly;
+    this.transactionsDisabled = transactionDisabled;
+    this.cipherIn = cipherIn;
+    this.cipherOut = cipherOut;
+    this.useRandomAccessFile = useRandomAccessFile;
+    this.deleteFilesAfterClose = deleteFilesAfterClose;
+    this.lockingDisabled = lockingDisabled;
+    reopen();
+  }
+
+  private void reopen() {
+    try {
+      _file = new PageFile(_filename, readonly, transactionsDisabled, cipherIn,
+          cipherOut, useRandomAccessFile, lockingDisabled);
+      _pageman = new PageManager(_file);
+      _physMgr = new PhysicalRowIdManager(_file, _pageman);
+
+      _logicMgr = new LogicalRowIdManager(_file, _pageman);
+
+      long versionNumber = getRoot(STORE_VERSION_NUMBER_ROOT);
+      if (versionNumber > STORE_FORMAT_VERSION)
+        throw new IOException(
+            "Unsupported version of store. Please update JDBM. 
Minimal supported ver:" + + STORE_FORMAT_VERSION + ", store ver:" + versionNumber); + if (!readonly) + setRoot(STORE_VERSION_NUMBER_ROOT, STORE_FORMAT_VERSION); + } catch (IOException e) { + throw new IOError(e); + } + } + + /** + * Closes the record manager. + * + * @throws IOException when one of the underlying I/O operations fails. + */ + @Override + public synchronized void close() { + checkNotClosed(); + try { + super.close(); + _pageman.close(); + _file.close(); + if (deleteFilesAfterClose) + _file.storage.deleteAllFiles(); + + _pageman = null; + + _file = null; + + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public boolean isClosed() { + return _pageman == null; + } + + @Override + public synchronized long insert(final A obj, + final Serializer serializer, final boolean disableCache) + throws IOException { + checkNotClosed(); + checkCanWrite(); + + if (needsAutoCommit()) { + commit(); + } + + if (bufferInUse) { + // current reusable buffer is in use, have to fallback into creating new + // instances + DataInputOutput buffer2 = new DataInputOutput(); + return insert2(obj, serializer, buffer2); + } + + try { + + bufferInUse = true; + return insert2(obj, serializer, buffer); + } finally { + bufferInUse = false; + } + + } + + boolean needsAutoCommit() { + return transactionsDisabled && !commitInProgress + && (_file.getDirtyPageCount() >= AUTOCOMMIT_AFTER_N_PAGES); + } + + private long insert2(A obj, Serializer serializer, DataInputOutput buf) + throws IOException { + buf.reset(); + + long physRowId; + if (obj == PREALOCATE_OBJ) { + // if inserted record is PREALOCATE_OBJ , it gets special handling. + // it is inserted only into _logicMgr with special value to indicate null + // this is used to preallocate recid for lazy inserts in cache + physRowId = PREALOCATE_PHYS_RECID; + } else { + serializer.serialize(buf, obj); + if (buf.getPos() > RecordHeader.MAX_RECORD_SIZE) { + throw new IllegalArgumentException( + "Too big record. JDBM only supports record size up to: " + + RecordHeader.MAX_RECORD_SIZE + " bytes. 
Record size was: " + + buf.getPos()); + } + physRowId = _physMgr.insert(buf.getBuf(), 0, buf.getPos()); + } + final long recid = _logicMgr.insert(physRowId); + + if (DEBUG) { + System.out.println("BaseRecordManager.insert() recid " + recid + + " length " + buf.getPos()); + } + + return compressRecid(recid); + } + + @Override + public synchronized void delete(long logRowId) throws IOException { + + checkNotClosed(); + checkCanWrite(); + if (logRowId <= 0) { + throw new IllegalArgumentException("Argument 'recid' is invalid: " + + logRowId); + } + + if (needsAutoCommit()) { + commit(); + } + + if (DEBUG) { + System.out.println("BaseRecordManager.delete() recid " + logRowId); + } + + logRowId = decompressRecid(logRowId); + + long physRowId = _logicMgr.fetch(logRowId); + _logicMgr.delete(logRowId); + if (physRowId != PREALOCATE_PHYS_RECID) { + _physMgr.free(physRowId); + } + } + + @Override + public synchronized void update(long recid, A obj, + Serializer serializer) throws IOException { + checkNotClosed(); + checkCanWrite(); + if (recid <= 0) { + throw new IllegalArgumentException("Argument 'recid' is invalid: " + + recid); + } + + if (needsAutoCommit()) { + commit(); + } + + if (bufferInUse) { + // current reusable buffer is in use, have to create new instances + DataInputOutput buffer2 = new DataInputOutput(); + update2(recid, obj, serializer, buffer2); + return; + } + + try { + bufferInUse = true; + update2(recid, obj, serializer, buffer); + } finally { + bufferInUse = false; + } + } + + private void update2(long logRecid, final A obj, + final Serializer serializer, final DataInputOutput buf) + throws IOException { + + logRecid = decompressRecid(logRecid); + + long physRecid = _logicMgr.fetch(logRecid); + if (physRecid == 0) + throw new IOException("Can not update, recid does not exist: " + logRecid); + buf.reset(); + serializer.serialize(buf, obj); + + if (DEBUG) { + System.out.println("BaseRecordManager.update() recid " + logRecid + + " length " + buf.getPos()); + } + + long newRecid = physRecid != PREALOCATE_PHYS_RECID ? 
_physMgr.update( + physRecid, buf.getBuf(), 0, buf.getPos()) : + // previous record was only virtual and does not actually exist, so make new + // insert + _physMgr.insert(buf.getBuf(), 0, buf.getPos()); + + _logicMgr.update(logRecid, newRecid); + + } + + @Override + public synchronized A fetch(final long recid, + final Serializer serializer) throws IOException { + + checkNotClosed(); + if (recid <= 0) { + throw new IllegalArgumentException("Argument 'recid' is invalid: " + + recid); + } + + if (bufferInUse) { + // current reusable buffer is in use, have to create new instances + DataInputOutput buffer2 = new DataInputOutput(); + return fetch2(recid, serializer, buffer2); + } + try { + bufferInUse = true; + return fetch2(recid, serializer, buffer); + } finally { + bufferInUse = false; + } + } + + @Override + public synchronized A fetch(long recid, Serializer serializer, + boolean disableCache) throws IOException { + // we dont have any cache, so can ignore disableCache parameter + return fetch(recid, serializer); + } + + private A fetch2(long recid, final Serializer serializer, + final DataInputOutput buf) throws IOException { + + recid = decompressRecid(recid); + + buf.reset(); + long physLocation = _logicMgr.fetch(recid); + if (physLocation == 0) { + // throw new IOException("Record not found, recid: "+recid); + return null; + } + if (physLocation == PREALOCATE_PHYS_RECID) { + throw new InternalError("cache should prevent this!"); + } + + _physMgr.fetch(buf, physLocation); + + if (DEBUG) { + System.out.println("BaseRecordManager.fetch() recid " + recid + + " length " + buf.getPos()); + } + buf.resetForReading(); + try { + return serializer.deserialize(buf); // TODO there should be write limit to + // throw EOFException + } catch (ClassNotFoundException e) { + throw new IOError(e); + } + } + + byte[] fetchRaw(long recid) throws IOException { + recid = decompressRecid(recid); + long physLocation = _logicMgr.fetch(recid); + if (physLocation == 0) { + // throw new IOException("Record not found, recid: "+recid); + return null; + } + DataInputOutput i = new DataInputOutput(); + _physMgr.fetch(i, physLocation); + return i.toByteArray(); + } + + @Override + public synchronized long getRoot(final byte id) { + checkNotClosed(); + + return _pageman.getFileHeader().fileHeaderGetRoot(id); + } + + @Override + public synchronized void setRoot(final byte id, final long rowid) { + checkNotClosed(); + checkCanWrite(); + + _pageman.getFileHeader().fileHeaderSetRoot(id, rowid); + } + + @Override + public synchronized void commit() { + try { + commitInProgress = true; + checkNotClosed(); + checkCanWrite(); + /** flush free phys rows into pages */ + _physMgr.commit(); + _logicMgr.commit(); + + /** commit pages */ + _pageman.commit(); + + } catch (IOException e) { + throw new IOError(e); + } finally { + commitInProgress = false; + } + } + + @Override + public synchronized void rollback() { + if (transactionsDisabled) + throw new IllegalAccessError( + "Transactions are disabled, can not rollback"); + + try { + checkNotClosed(); + _physMgr.rollback(); + _logicMgr.rollback(); + _pageman.rollback(); + + super.rollback(); + } catch (IOException e) { + throw new IOError(e); + } + + } + + @Override + public void copyToZip(String zipFile) { + try { + String zip = zipFile; + String zip2 = "db"; + ZipOutputStream z = new ZipOutputStream(new FileOutputStream(zip)); + + // copy zero pages + { + String file = zip2 + 0; + z.putNextEntry(new ZipEntry(file)); + z.write(JDBMUtils.encrypt(cipherIn, 
_pageman.getHeaderBufData())); + z.closeEntry(); + } + + // iterate over pages and create new file for each + for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE); pageid != 0; pageid = _pageman + .getNext(pageid)) { + PageIo page = _file.get(pageid); + String file = zip2 + pageid; + z.putNextEntry(new ZipEntry(file)); + z.write(JDBMUtils.encrypt(cipherIn, page.getData())); + z.closeEntry(); + _file.release(page); + } + for (long pageid = _pageman.getFirst(Magic.FREELOGIDS_PAGE); pageid != 0; pageid = _pageman + .getNext(pageid)) { + PageIo page = _file.get(pageid); + String file = zip2 + pageid; + z.putNextEntry(new ZipEntry(file)); + z.write(JDBMUtils.encrypt(cipherIn, page.getData())); + z.closeEntry(); + _file.release(page); + } + + for (long pageid = _pageman.getFirst(Magic.USED_PAGE); pageid != 0; pageid = _pageman + .getNext(pageid)) { + PageIo page = _file.get(pageid); + String file = zip2 + pageid; + z.putNextEntry(new ZipEntry(file)); + z.write(JDBMUtils.encrypt(cipherIn, page.getData())); + z.closeEntry(); + _file.release(page); + } + for (long pageid = _pageman.getFirst(Magic.FREEPHYSIDS_PAGE); pageid != 0; pageid = _pageman + .getNext(pageid)) { + PageIo page = _file.get(pageid); + String file = zip2 + pageid; + z.putNextEntry(new ZipEntry(file)); + z.write(JDBMUtils.encrypt(cipherIn, page.getData())); + z.closeEntry(); + _file.release(page); + } + for (long pageid = _pageman.getFirst(Magic.FREEPHYSIDS_ROOT_PAGE); pageid != 0; pageid = _pageman + .getNext(pageid)) { + PageIo page = _file.get(pageid); + String file = zip2 + pageid; + z.putNextEntry(new ZipEntry(file)); + z.write(JDBMUtils.encrypt(cipherIn, page.getData())); + z.closeEntry(); + _file.release(page); + } + + z.close(); + + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public synchronized void clearCache() { + // no cache + } + + private long statisticsCountPages(short pageType) throws IOException { + long pageCounter = 0; + + for (long pageid = _pageman.getFirst(pageType); pageid != 0; pageid = _pageman + .getNext(pageid)) { + pageCounter++; + } + + return pageCounter; + + } + + @Override + public synchronized String calculateStatistics() { + checkNotClosed(); + + try { + + final StringBuilder b = new StringBuilder(); + + // count pages + { + + b.append("PAGES:\n"); + long total = 0; + long pages = statisticsCountPages(Magic.USED_PAGE); + total += pages; + b.append(" " + pages + " used pages with size " + + JDBMUtils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n"); + pages = statisticsCountPages(Magic.TRANSLATION_PAGE); + total += pages; + b.append(" " + pages + " record translation pages with size " + + JDBMUtils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n"); + pages = statisticsCountPages(Magic.FREE_PAGE); + total += pages; + b.append(" " + pages + " free (unused) pages with size " + + JDBMUtils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n"); + pages = statisticsCountPages(Magic.FREEPHYSIDS_PAGE); + total += pages; + b.append(" " + pages + " free (phys) pages with size " + + JDBMUtils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n"); + pages = statisticsCountPages(Magic.FREELOGIDS_PAGE); + total += pages; + b.append(" " + pages + " free (logical) pages with size " + + JDBMUtils.formatSpaceUsage(pages * Storage.PAGE_SIZE) + "\n"); + b.append(" Total number of pages is " + total + " with size " + + JDBMUtils.formatSpaceUsage(total * Storage.PAGE_SIZE) + "\n"); + + } + { + b.append("RECORDS:\n"); + + long recordCount = 0; + long freeRecordCount = 0; + long 
maximalRecordSize = 0; + long maximalAvailSizeDiff = 0; + long totalRecordSize = 0; + long totalAvailDiff = 0; + + // count records + for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE); pageid != 0; pageid = _pageman + .getNext(pageid)) { + PageIo io = _file.get(pageid); + + for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) { + final int pos = Magic.PAGE_HEADER_SIZE + i + * Magic.PhysicalRowId_SIZE; + final long physLoc = io.pageHeaderGetLocation((short) pos); + + if (physLoc == 0) { + freeRecordCount++; + continue; + } + + if (physLoc == PREALOCATE_PHYS_RECID) { + continue; + } + + recordCount++; + + // get size + PageIo page = _file.get(physLoc >>> Storage.PAGE_SIZE_SHIFT); + final short physOffset = (short) (physLoc & Storage.OFFSET_MASK); + int availSize = RecordHeader.getAvailableSize(page, physOffset); + int currentSize = RecordHeader.getCurrentSize(page, physOffset); + _file.release(page); + + maximalAvailSizeDiff = Math.max(maximalAvailSizeDiff, availSize + - currentSize); + maximalRecordSize = Math.max(maximalRecordSize, currentSize); + totalAvailDiff += availSize - currentSize; + totalRecordSize += currentSize; + + } + _file.release(io); + } + + b.append(" Contains " + recordCount + " records and " + + freeRecordCount + " free slots.\n"); + b.append(" Total space occupied by data is " + + JDBMUtils.formatSpaceUsage(totalRecordSize) + "\n"); + b.append(" Average data size in record is " + + JDBMUtils.formatSpaceUsage(Math.round(1D * totalRecordSize + / recordCount)) + "\n"); + b.append(" Maximal data size in record is " + + JDBMUtils.formatSpaceUsage(maximalRecordSize) + "\n"); + b.append(" Space wasted in record fragmentation is " + + JDBMUtils.formatSpaceUsage(totalAvailDiff) + "\n"); + b.append(" Maximal space wasted in single record fragmentation is " + + JDBMUtils.formatSpaceUsage(maximalAvailSizeDiff) + "\n"); + } + + return b.toString(); + } catch (IOException e) { + throw new IOError(e); + } + } + + @Override + public synchronized void defrag(boolean sortCollections) { + + try { + checkNotClosed(); + checkCanWrite(); + commit(); + final String filename2 = _filename + "_defrag" + + System.currentTimeMillis(); + final String filename1 = _filename; + DBStore db2 = new DBStore(filename2, false, true, cipherIn, cipherOut, + false, false, false); + + // recreate logical file with original page layout + { + // find minimal logical pageid (logical pageids are negative) + LongHashMap logicalPages = new LongHashMap(); + long minpageid = 0; + for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE); pageid != 0; pageid = _pageman + .getNext(pageid)) { + minpageid = Math.min(minpageid, pageid); + logicalPages.put(pageid, JDBMUtils.EMPTY_STRING); + } + + // fill second db with logical pages + long pageCounter = 0; + for (long pageid = db2._pageman.allocate(Magic.TRANSLATION_PAGE); pageid >= minpageid; pageid = db2._pageman + .allocate(Magic.TRANSLATION_PAGE)) { + pageCounter++; + if (pageCounter % 1000 == 0) + db2.commit(); + } + + logicalPages = null; + } + + // reinsert collections so physical records are located near each other + // iterate over named object recids, it is sorted with TreeSet + if (sortCollections) { + long nameRecid = getRoot(NAME_DIRECTORY_ROOT); + Collection recids = new TreeSet(); + if (nameRecid != 0) { + HTree m = fetch(nameRecid); + recids.addAll(m.values()); + } + + for (Long namedRecid : recids) { + Object obj = fetch(namedRecid); + if (obj instanceof LinkedList) { + LinkedList.defrag(namedRecid, this, db2); + } else if (obj instanceof 
HTree) { + HTree.defrag(namedRecid, this, db2); + } else if (obj instanceof BTree) { + BTree.defrag(namedRecid, this, db2); + } + } + } + + for (long pageid = _pageman.getFirst(Magic.TRANSLATION_PAGE); pageid != 0; pageid = _pageman + .getNext(pageid)) { + PageIo io = _file.get(pageid); + + for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) { + final int pos = Magic.PAGE_HEADER_SIZE + i * Magic.PhysicalRowId_SIZE; + if (pos > Short.MAX_VALUE) + throw new Error(); + + // write to new file + final long logicalRowId = ((-pageid) << Storage.PAGE_SIZE_SHIFT) + + (long) pos; + + // read from logical location in second db, + // check if record was already inserted as part of collections + if (db2._pageman.getLast(Magic.TRANSLATION_PAGE) <= pageid + && db2._logicMgr.fetch(logicalRowId) != 0) { + // yes, this record already exists in second db + continue; + } + + // get physical location in this db + final long physRowId = io.pageHeaderGetLocation((short) pos); + + if (physRowId == 0) + continue; + + if (physRowId == PREALOCATE_PHYS_RECID) { + db2._logicMgr.forceInsert(logicalRowId, physRowId); + continue; + } + + // read from physical location at this db + DataInputOutput b = new DataInputOutput(); + _physMgr.fetch(b, physRowId); + byte[] bb = b.toByteArray(); + + // force insert into other file, without decompressing logical id to + // external form + long physLoc = db2._physMgr.insert(bb, 0, bb.length); + db2._logicMgr.forceInsert(logicalRowId, physLoc); + + } + _file.release(io); + db2.commit(); + } + for (byte b = 0; b < Magic.FILE_HEADER_NROOTS; b++) { + db2.setRoot(b, getRoot(b)); + } + + db2.close(); + _pageman.close(); + _file.close(); + + List filesToDelete = new ArrayList(); + // now rename old files + String[] exts = { StorageDiskMapped.IDR, StorageDiskMapped.DBR }; + for (String ext : exts) { + String f1 = filename1 + ext; + String f2 = filename2 + "_OLD" + ext; + + // first rename transaction log + File f1t = new File(f1 + StorageDisk.transaction_log_file_extension); + File f2t = new File(f2 + StorageDisk.transaction_log_file_extension); + f1t.renameTo(f2t); + filesToDelete.add(f2t); + + // rename data files, iterate until file exist + for (int i = 0;; i++) { + File f1d = new File(f1 + "." + i); + if (!f1d.exists()) + break; + File f2d = new File(f2 + "." + i); + f1d.renameTo(f2d); + filesToDelete.add(f2d); + } + } + + // rename new files + for (String ext : exts) { + String f1 = filename2 + ext; + String f2 = filename1 + ext; + + // first rename transaction log + File f1t = new File(f1 + StorageDisk.transaction_log_file_extension); + File f2t = new File(f2 + StorageDisk.transaction_log_file_extension); + f1t.renameTo(f2t); + + // rename data files, iterate until file exist + for (int i = 0;; i++) { + File f1d = new File(f1 + "." + i); + if (!f1d.exists()) + break; + File f2d = new File(f2 + "." + i); + f1d.renameTo(f2d); + } + } + + for (File d : filesToDelete) { + d.delete(); + } + + reopen(); + } catch (IOException e) { + throw new IOError(e); + } + + } + + /** + * Insert data at forced logicalRowId, use only for defragmentation !! + * + * @param logicalRowId + * @param data + * @throws IOException + */ + void forceInsert(long logicalRowId, byte[] data) throws IOException { + logicalRowId = decompressRecid(logicalRowId); + + if (needsAutoCommit()) { + commit(); + } + + long physLoc = _physMgr.insert(data, 0, data.length); + _logicMgr.forceInsert(logicalRowId, physLoc); + } + + /** + * Returns number of records stored in database. 
Is used for unit tests + */ + long countRecords() throws IOException { + long counter = 0; + + long page = _pageman.getFirst(Magic.TRANSLATION_PAGE); + while (page != 0) { + PageIo io = _file.get(page); + for (int i = 0; i < _logicMgr.ELEMS_PER_PAGE; i += 1) { + int pos = Magic.PAGE_HEADER_SIZE + i * Magic.PhysicalRowId_SIZE; + if (pos > Short.MAX_VALUE) + throw new Error(); + + // get physical location + long physRowId = io.pageHeaderGetLocation((short) pos); + + if (physRowId != 0) + counter += 1; + } + _file.release(io); + page = _pageman.getNext(page); + } + return counter; + } + + private static int COMPRESS_RECID_PAGE_SHIFT = Integer.MIN_VALUE; + static { + int shift = 1; + while ((1 << shift) < LogicalRowIdManager.ELEMS_PER_PAGE) + shift++; + COMPRESS_RECID_PAGE_SHIFT = shift; + } + + private final static long COMPRESS_RECID_OFFSET_MASK = 0xFFFFFFFFFFFFFFFFL >>> (64 - COMPRESS_RECID_PAGE_SHIFT); + + /** + * Compress recid from physical form (block - offset) to (block - slot). This + * way resulting number is smaller and can be easier packed with LongPacker + */ + static long compressRecid(final long recid) { + final long page = recid >>> Storage.PAGE_SIZE_SHIFT; + short offset = (short) (recid & Storage.OFFSET_MASK); + + offset = (short) (offset - Magic.PAGE_HEADER_SIZE); + if (offset % Magic.PhysicalRowId_SIZE != 0) + throw new InternalError("recid not dividable " + Magic.PhysicalRowId_SIZE); + long slot = offset / Magic.PhysicalRowId_SIZE; + + return (page << COMPRESS_RECID_PAGE_SHIFT) + slot; + + } + + static long decompressRecid(final long recid) { + + final long page = recid >>> COMPRESS_RECID_PAGE_SHIFT; + final short offset = (short) ((recid & COMPRESS_RECID_OFFSET_MASK) + * Magic.PhysicalRowId_SIZE + Magic.PAGE_HEADER_SIZE); + return (page << Storage.PAGE_SIZE_SHIFT) + (long) offset; + } + +} Index: graph/src/main/java/org/apache/jdbm/DBMaker.java =================================================================== --- graph/src/main/java/org/apache/jdbm/DBMaker.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/DBMaker.java (Arbeitskopie) @@ -0,0 +1,365 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOError; +import java.security.spec.KeySpec; + +import javax.crypto.Cipher; +import javax.crypto.SecretKey; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.IvParameterSpec; +import javax.crypto.spec.PBEKeySpec; +import javax.crypto.spec.SecretKeySpec; + +/** + * Class used to configure and create DB. It uses builder pattern. 
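+ * <p>
+ * For example (illustrative sketch; the path is a placeholder):
+ * <pre>
+ *   DB db = DBMaker.openFile("/tmp/testdb")
+ *       .disableTransactions()
+ *       .closeOnExit()
+ *       .make();
+ * </pre>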
+ */
+public final class DBMaker {
+
+  private byte cacheType = DBCache.MRU;
+  private int mruCacheSize = 2048;
+
+  private String location = null;
+
+  private boolean disableTransactions = false;
+  private boolean lockingDisabled = false;
+  private boolean readonly = false;
+  private String password = null;
+  private boolean useAES256Bit = true;
+  private boolean useRandomAccessFile = false;
+  private boolean autoClearRefCacheOnLowMem = true;
+  private boolean closeOnJVMExit = false;
+  private boolean deleteFilesAfterCloseFlag = false;
+
+  private DBMaker() {
+  }
+
+  /**
+   * Creates new DBMaker and sets file to load data from.
+   *
+   * @param file to load data from
+   * @return new DBMaker
+   */
+  public static DBMaker openFile(String file) {
+    DBMaker m = new DBMaker();
+    m.location = file;
+    return m;
+  }
+
+  /**
+   * Creates new DBMaker which uses in memory store. Data will be lost after JVM
+   * exits.
+   *
+   * @return new DBMaker
+   */
+  public static DBMaker openMemory() {
+    return new DBMaker();
+  }
+
+  /**
+   * Open store in zip file
+   *
+   * @param zip file
+   * @return new DBMaker
+   */
+  public static DBMaker openZip(String zip) {
+    DBMaker m = new DBMaker();
+    m.location = "$$ZIP$$://" + zip;
+    return m;
+  }
+
+  static String isZipFileLocation(String location) {
+    String match = "$$ZIP$$://";
+    if (location.startsWith(match)) {
+      return location.substring(match.length());
+    }
+    return null;
+  }
+
+  /**
+   * Use WeakReference for cache. This cache does not improve performance much,
+   * but prevents JDBM from creating multiple instances of the same object.
+   *
+   * @return this builder
+   */
+  public DBMaker enableWeakCache() {
+    cacheType = DBCache.WEAK;
+    return this;
+  }
+
+  /**
+   * Use SoftReference for cache. This cache greatly improves performance if you
+   * have enough memory. Instances in cache are garbage collected when memory
+   * gets low.
+   *
+   * @return this builder
+   */
+  public DBMaker enableSoftCache() {
+    cacheType = DBCache.SOFT;
+    return this;
+  }
+
+  /**
+   * Use hard reference for cache. This greatly improves performance if there is
+   * enough memory. A hard cache has smaller memory overhead than a soft or weak
+   * cache, because reference objects and queues do not have to be maintained.
+   *
+   * @return this builder
+   */
+  public DBMaker enableHardCache() {
+    cacheType = DBCache.HARD;
+    return this;
+  }
+
+  /**
+   * Use 'Most Recently Used' cache with limited size. Oldest instances are
+   * released from cache when new instances are fetched. This cache is not
+   * cleared by GC. It is good for systems with limited memory.
+   * <p>

+ * Default size for MRU cache is 2048 records. + * + * @return this builder + */ + public DBMaker enableMRUCache() { + cacheType = DBCache.MRU; + return this; + } + + /** + * + * Sets 'Most Recently Used' cache size. This cache is activated by default + * with size 2048 + * + * @param cacheSize number of instances which will be kept in cache. + * @return this builder + */ + public DBMaker setMRUCacheSize(int cacheSize) { + if (cacheSize < 0) + throw new IllegalArgumentException("Cache size is smaller than zero"); + cacheType = DBCache.MRU; + mruCacheSize = cacheSize; + return this; + } + + /** + * If reference (soft,weak or hard) cache is enabled, GC may not release + * references fast enough (or not at all in case of hard cache). So JDBM + * periodically checks amount of free heap memory. If free memory is less than + * 25% or 10MB, JDBM completely clears its reference cache to prevent possible + * memory issues. + *

+   * Calling this method disables automatic cache clearing when memory is low,
+   * which of course can cause out of memory exceptions.
+   *
+   * @return this builder
+   */
+  public DBMaker disableCacheAutoClear() {
+    this.autoClearRefCacheOnLowMem = false;
+    return this;
+  }
+
+  /**
+   * Enables storage encryption using an AES cipher. JDBM supports both 128 bit
+   * and 256 bit encryption if the JRE provides it. There are some restrictions
+   * on AES 256 bit and not all JREs have it by default.
+   * <p>

+ * Storage can not be read (decrypted), unless the key is provided next time + * it is opened + * + * @param password used to encrypt store + * @param useAES256Bit if true strong AES 256 bit encryption is used. + * Otherwise more usual AES 128 bit is used. + * @return this builder + */ + public DBMaker enableEncryption(String password, boolean useAES256Bit) { + this.password = password; + this.useAES256Bit = useAES256Bit; + return this; + } + + /** + * Make DB readonly. Update/delete/insert operation will throw + * 'UnsupportedOperationException' + * + * @return this builder + */ + public DBMaker readonly() { + readonly = true; + return this; + } + + /** + * Disable cache completely + * + * @return this builder + */ + public DBMaker disableCache() { + cacheType = DBCache.NONE; + return this; + } + + /** + * Option to disable transaction (to increase performance at the cost of + * potential data loss). Transactions are enabled by default + *

+ * Switches off transactioning for the record manager. This means that a) a + * transaction log is not kept, and b) writes aren't synch'ed after every + * update. Writes are cached in memory and then flushed to disk every N + * writes. You may also flush writes manually by calling commit(). This is + * useful when batch inserting into a new database. + *

+   * When using this, the database must be properly closed before JVM shutdown.
+   * Failing to do so may and WILL corrupt the store.
+   *
+   * @return this builder
+   */
+  public DBMaker disableTransactions() {
+    this.disableTransactions = true;
+    return this;
+  }
+
+  /**
+   * Disable file system based locking (for file systems that do not support
+   * it).
+   *
+   * Locking is not supported by many remote or distributed file systems, such
+   * as Lustre and NFS. Attempts to perform locks will result in an IOException
+   * with the message "Function not implemented".
+   *
+   * Disabling locking will avoid this issue, though of course it comes with all
+   * the issues of uncontrolled file access.
+   *
+   * @return this builder
+   */
+  public DBMaker disableLocking() {
+    this.lockingDisabled = true;
+    return this;
+  }
+
+  /**
+   * By default JDBM uses mapped memory buffers to read from files. But this may
+   * behave strangely on some platforms. A safe alternative is to use the old
+   * RandomAccessFile rather than a mapped ByteBuffer. This is typically slower
+   * (pages need to be copied into memory on every write).
+   *
+   * @return this builder
+   */
+  public DBMaker useRandomAccessFile() {
+    this.useRandomAccessFile = true;
+    return this;
+  }
+
+  /**
+   * Registers a shutdown hook and closes the database on JVM exit, if it was
+   * not already closed.
+   *
+   * @return this builder
+   */
+  public DBMaker closeOnExit() {
+    this.closeOnJVMExit = true;
+    return this;
+  }
+
+  /**
+   * Delete all storage files after DB is closed
+   *
+   * @return this builder
+   */
+  public DBMaker deleteFilesAfterClose() {
+    this.deleteFilesAfterCloseFlag = true;
+    return this;
+  }
+
+  /**
+   * Opens database with settings earlier specified in this builder.
+   *
+   * @return new DB
+   * @throws java.io.IOError if db could not be opened
+   */
+  public DB make() {
+
+    Cipher cipherIn = null;
+    Cipher cipherOut = null;
+    if (password != null)
+      try {
+        // initialize ciphers
+        // this code comes from stack overflow
+        // http://stackoverflow.com/questions/992019/java-256bit-aes-encryption/992413#992413
+        byte[] salt = new byte[] { 3, -34, 123, 53, 78, 121, -12, -1, 45, -12,
+            -48, 89, 11, 100, 99, 8 };
+
+        SecretKeyFactory factory = SecretKeyFactory
+            .getInstance("PBKDF2WithHmacSHA1");
+        KeySpec spec = new PBEKeySpec(password.toCharArray(), salt, 1024,
+            useAES256Bit ? 
256 : 128); + SecretKey tmp = factory.generateSecret(spec); + SecretKey secret = new SecretKeySpec(tmp.getEncoded(), "AES"); + + String transform = "AES/CBC/NoPadding"; + IvParameterSpec params = new IvParameterSpec(salt); + + cipherIn = Cipher.getInstance(transform); + cipherIn.init(Cipher.ENCRYPT_MODE, secret, params); + + cipherOut = Cipher.getInstance(transform); + cipherOut.init(Cipher.DECRYPT_MODE, secret, params); + + // sanity check, try with page size + byte[] data = new byte[Storage.PAGE_SIZE]; + byte[] encData = cipherIn.doFinal(data); + if (encData.length != Storage.PAGE_SIZE) + throw new Error( + "Page size changed after encryption, make sure you use '/NoPadding'"); + byte[] data2 = cipherOut.doFinal(encData); + for (int i = 0; i < data.length; i++) { + if (data[i] != data2[i]) + throw new Error("Encryption provided by JRE does not work"); + } + + } catch (Exception e) { + throw new IOError(e); + } + + DBAbstract db = null; + + if (cacheType == DBCache.MRU) { + db = new DBCacheMRU(location, readonly, disableTransactions, cipherIn, + cipherOut, useRandomAccessFile, deleteFilesAfterCloseFlag, + mruCacheSize, lockingDisabled); + } else if (cacheType == DBCache.SOFT || cacheType == DBCache.HARD + || cacheType == DBCache.WEAK) { + db = new DBCacheRef(location, readonly, disableTransactions, cipherIn, + cipherOut, useRandomAccessFile, deleteFilesAfterCloseFlag, cacheType, + autoClearRefCacheOnLowMem, lockingDisabled); + } else if (cacheType == DBCache.NONE) { + db = new DBStore(location, readonly, disableTransactions, cipherIn, + cipherOut, useRandomAccessFile, deleteFilesAfterCloseFlag, + lockingDisabled); + } else { + throw new IllegalArgumentException("Unknown cache type: " + cacheType); + } + + if (closeOnJVMExit) { + db.addShutdownHook(); + } + + return db; + } + +} Index: graph/src/main/java/org/apache/jdbm/PageFile.java =================================================================== --- graph/src/main/java/org/apache/jdbm/PageFile.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/PageFile.java (Arbeitskopie) @@ -0,0 +1,386 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOError; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Iterator; + +import javax.crypto.Cipher; + +/** + * This class represents a random access file as a set of fixed size records. + * Each record has a physical record number, and records are cached in order to + * improve access. + *

+ * The set of dirty records on the in-use list constitutes a transaction.
+ * Later on, these records are sent to the transaction log for recovery.
+ *

+ * The PageFile is split across multiple files, each with a maximum size of
+ * 1GB.
+ */
+public final class PageFile {
+ final PageTransactionManager txnMgr;
+
+ /**
+ * Pages currently locked for read/update ops. When released the page goes to
+ * the dirty or clean list, depending on a flag. The file header page is
+ * normally locked plus the page that is currently being read or modified.
+ *
+ * @see PageIo#isDirty()
+ */
+ private final LongHashMap inUse = new LongHashMap();
+
+ /**
+ * Pages whose state is dirty.
+ */
+ private final LongHashMap dirty = new LongHashMap();
+ /**
+ * Pages in historical transaction(s) that have been written onto
+ * the log but which have not yet been committed to the database.
+ */
+ private final LongHashMap inTxn = new LongHashMap();
+
+ // transactions disabled?
+ final boolean transactionsDisabled;
+
+ /**
+ * An array of clean data to wipe clean pages.
+ */
+ static final byte[] CLEAN_DATA = new byte[Storage.PAGE_SIZE];
+
+ final Storage storage;
+ private Cipher cipherOut;
+ private Cipher cipherIn;
+
+ /**
+ * Creates a new object on the indicated filename. The file is opened in
+ * read/write mode.
+ *
+ * @param fileName the name of the file to open or create, without an
+ *          extension.
+ * @throws IOException whenever the creation of the underlying
+ *           RandomAccessFile throws it.
+ */
+ PageFile(String fileName, boolean readonly, boolean transactionsDisabled,
+ Cipher cipherIn, Cipher cipherOut, boolean useRandomAccessFile,
+ boolean lockingDisabled) throws IOException {
+ this.cipherIn = cipherIn;
+ this.cipherOut = cipherOut;
+ this.transactionsDisabled = transactionsDisabled;
+ if (fileName == null) {
+ this.storage = new StorageMemory(transactionsDisabled);
+ } else if (DBMaker.isZipFileLocation(fileName) != null)
+ this.storage = new StorageZip(DBMaker.isZipFileLocation(fileName));
+ // }else if (fileName.contains("!/"))
+ // this.storage = new StorageZip(fileName);
+ else if (useRandomAccessFile)
+ this.storage = new StorageDisk(fileName, readonly, lockingDisabled);
+ else
+ this.storage = new StorageDiskMapped(fileName, readonly,
+ transactionsDisabled, lockingDisabled);
+
+ if (this.storage.isReadonly() && !readonly)
+ throw new IllegalArgumentException(
+ "This type of storage is readonly, you should call readonly() on DBMaker");
+ if (!readonly && !transactionsDisabled) {
+ txnMgr = new PageTransactionManager(this, storage, cipherIn, cipherOut);
+ } else {
+ txnMgr = null;
+ }
+ }
+
+ public PageFile(String filename) throws IOException {
+ this(filename, false, false, null, null, false, false);
+ }
+
+ /**
+ * Gets a page from the file. The returned byte array is the in-memory copy of
+ * the record, and thus can be written (and subsequently released with a dirty
+ * flag in order to write the page back). If transactions are disabled,
+ * changes may be written directly
+ *
+ * @param pageId The record number to retrieve.
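+ * @return the in-memory copy of the page, to be released via release() when
+ *         done (with the dirty flag set if it was modified)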
+ */ + PageIo get(long pageId) throws IOException { + + // try in transaction list, dirty list, free list + PageIo node = inTxn.get(pageId); + if (node != null) { + inTxn.remove(pageId); + inUse.put(pageId, node); + return node; + } + node = dirty.get(pageId); + if (node != null) { + dirty.remove(pageId); + inUse.put(pageId, node); + return node; + } + + // sanity check: can't be on in use list + if (inUse.get(pageId) != null) { + throw new Error("double get for page " + pageId); + } + + // read node from file + if (cipherOut == null) { + node = new PageIo(pageId, storage.read(pageId)); + } else { + // decrypt if needed + ByteBuffer b = storage.read(pageId); + byte[] bb; + if (b.hasArray()) { + bb = b.array(); + } else { + bb = new byte[Storage.PAGE_SIZE]; + b.position(0); + b.get(bb, 0, Storage.PAGE_SIZE); + } + if (!JDBMUtils.allZeros(bb)) + try { + bb = cipherOut.doFinal(bb); + node = new PageIo(pageId, ByteBuffer.wrap(bb)); + } catch (Exception e) { + throw new IOError(e); + } + else { + node = new PageIo(pageId, ByteBuffer.wrap(PageFile.CLEAN_DATA) + .asReadOnlyBuffer()); + } + } + + inUse.put(pageId, node); + node.setClean(); + return node; + } + + /** + * Releases a page. + * + * @param pageId The record number to release. + * @param isDirty If true, the page was modified since the get(). + */ + void release(final long pageId, final boolean isDirty) throws IOException { + + final PageIo page = inUse.remove(pageId); + if (!page.isDirty() && isDirty) + page.setDirty(); + + if (page.isDirty()) { + dirty.put(pageId, page); + } else if (!transactionsDisabled && page.isInTransaction()) { + inTxn.put(pageId, page); + } + } + + /** + * Releases a page. + * + * @param page The page to release. + */ + void release(final PageIo page) throws IOException { + final long key = page.getPageId(); + inUse.remove(key); + if (page.isDirty()) { + // System.out.println( "Dirty: " + key + page ); + dirty.put(key, page); + } else if (!transactionsDisabled && page.isInTransaction()) { + inTxn.put(key, page); + } + } + + /** + * Discards a page (will not write the page even if it's dirty) + * + * @param page The page to discard. + */ + void discard(PageIo page) { + long key = page.getPageId(); + inUse.remove(key); + } + + /** + * Commits the current transaction by flushing all dirty buffers to disk. + */ + void commit() throws IOException { + // debugging... 
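+ // only the file header page may legitimately remain in use at commit time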
+ if (!inUse.isEmpty() && inUse.size() > 1) {
+ showList(inUse.valuesIterator());
+ throw new Error("in use list not empty at commit time (" + inUse.size()
+ + ")");
+ }
+
+ // System.out.println("committing...");
+
+ if (dirty.size() == 0) {
+ // if no dirty pages, skip commit process
+ return;
+ }
+
+ if (!transactionsDisabled) {
+ txnMgr.start();
+ }
+
+ // sort pages by IDs
+ long[] pageIds = new long[dirty.size()];
+ int c = 0;
+ for (Iterator i = dirty.valuesIterator(); i.hasNext();) {
+ pageIds[c] = i.next().getPageId();
+ c++;
+ }
+ Arrays.sort(pageIds);
+
+ for (long pageId : pageIds) {
+ PageIo node = dirty.get(pageId);
+
+ // System.out.println("node " + node + " map size now " + dirty.size());
+ if (transactionsDisabled) {
+ if (cipherIn != null)
+ storage.write(node.getPageId(),
+ ByteBuffer.wrap(JDBMUtils.encrypt(cipherIn, node.getData())));
+ else
+ storage.write(node.getPageId(), node.getData());
+ node.setClean();
+ } else {
+ txnMgr.add(node);
+ inTxn.put(node.getPageId(), node);
+ }
+ }
+ dirty.clear();
+ if (!transactionsDisabled) {
+ txnMgr.commit();
+ }
+ }
+
+ /**
+ * Rolls back the current transaction by discarding all dirty buffers
+ */
+ void rollback() throws IOException {
+ // debugging...
+ if (!inUse.isEmpty()) {
+ showList(inUse.valuesIterator());
+ throw new Error("in use list not empty at rollback time (" + inUse.size()
+ + ")");
+ }
+ // System.out.println("rollback...");
+ dirty.clear();
+
+ txnMgr.synchronizeLogFromDisk();
+
+ if (!inTxn.isEmpty()) {
+ showList(inTxn.valuesIterator());
+ throw new Error("in txn list not empty at rollback time (" + inTxn.size()
+ + ")");
+ }
+ }
+
+ /**
+ * Commits and closes the file.
+ */
+ void close() throws IOException {
+ if (!dirty.isEmpty()) {
+ commit();
+ }
+
+ if (!transactionsDisabled && txnMgr != null) {
+ txnMgr.shutdown();
+ }
+
+ if (!inTxn.isEmpty()) {
+ showList(inTxn.valuesIterator());
+ throw new Error("In transaction not empty");
+ }
+
+ // these checks are cheap enough to keep even in a production release
+ if (!dirty.isEmpty()) {
+ System.out.println("ERROR: dirty pages at close time");
+ showList(dirty.valuesIterator());
+ throw new Error("Dirty pages at close time");
+ }
+ if (!inUse.isEmpty()) {
+ System.out.println("ERROR: inUse pages at close time");
+ showList(inUse.valuesIterator());
+ throw new Error("inUse pages at close time");
+ }
+
+ storage.sync();
+ storage.forceClose();
+ }
+
+ /**
+ * Force closes the file and the underlying transaction manager. Used for
+ * testing purposes only.
+ */
+ void forceClose() throws IOException {
+ if (!transactionsDisabled) {
+ txnMgr.forceClose();
+ }
+ storage.forceClose();
+ }
+
+ /**
+ * Prints contents of a list
+ */
+ private void showList(Iterator i) {
+ int cnt = 0;
+ while (i.hasNext()) {
+ System.out.println("elem " + cnt + ": " + i.next());
+ cnt++;
+ }
+ }
+
+ /**
+ * Syncs a node to disk. This is called by the transaction manager's
+ * synchronization code.
+ */
+ void synch(PageIo node) throws IOException {
+ ByteBuffer data = node.getData();
+ if (data != null) {
+ if (cipherIn != null)
+ storage.write(node.getPageId(),
+ ByteBuffer.wrap(JDBMUtils.encrypt(cipherIn, data)));
+ else
+ storage.write(node.getPageId(), data);
+ }
+ }
+
+ /**
+ * Releases a node from the transaction list, if it was sitting there.
+ */
+ void releaseFromTransaction(PageIo node) throws IOException {
+ inTxn.remove(node.getPageId());
+ }
+
+ /**
+ * Synchronizes the file.
+ */ + void sync() throws IOException { + storage.sync(); + } + + public int getDirtyPageCount() { + return dirty.size(); + } + + public void deleteAllFiles() throws IOException { + storage.deleteAllFiles(); + } +} Index: graph/src/main/java/org/apache/jdbm/Serialization.java =================================================================== --- graph/src/main/java/org/apache/jdbm/Serialization.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/Serialization.java (Arbeitskopie) @@ -0,0 +1,1364 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import static org.apache.jdbm.SerializationHeader.*; + +import java.io.ByteArrayOutputStream; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.DataOutputStream; +import java.io.EOFException; +import java.io.IOException; +import java.lang.reflect.Array; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Hashtable; +import java.util.IdentityHashMap; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Properties; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.UUID; +import java.util.Vector; + +/** + * Serialization util. It reduces serialized data size for most common java + * types. + *

+ * The common pattern is a one-byte header which identifies the data type,
+ * followed by the size (if required) and the data.
+ *

+ * For unknown types, normal Java serialization is used.
+ *

+ *
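+ * A round-trip example (a minimal sketch using the public constructor):
+ *
+ * <pre>
+ * Serialization ser = new Serialization();
+ * byte[] buf = ser.serialize(Long.valueOf(5)); // a single header byte, LONG_5
+ * Object back = ser.deserialize(buf); // Long.valueOf(5)
+ * </pre>
+ *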

+ * Header byte values below 180 are reserved by the author for future use. If
+ * you want to customize this class, use values over 180 to be compatible with
+ * future updates.
+ */
+@SuppressWarnings({ "unchecked", "rawtypes" })
+public final class Serialization extends SerialClassInfo implements Serializer {
+
+ /**
+ * print statistics to STDOUT
+ */
+ static final boolean DEBUG = false;
+
+ static final String UTF8 = "UTF-8";
+
+ Serialization(DBAbstract db, long serialClassInfoRecid,
+ ArrayList info) throws IOException {
+ super(db, serialClassInfoRecid, info);
+ }
+
+ public Serialization() {
+ super(null, 0L, new ArrayList());
+ // Add java.lang.Object as registered class
+ registered.add(new ClassInfo(Object.class.getName(), new FieldInfo[] {},
+ false, false));
+ }
+
+ /**
+ * Serializes the object into a byte array.
+ */
+ public byte[] serialize(Object obj) throws IOException {
+ DataInputOutput ba = new DataInputOutput();
+
+ serialize(ba, obj);
+
+ return ba.toByteArray();
+ }
+
+ boolean isSerializable(Object obj) {
+ // TODO suboptimal code
+ try {
+ serialize(new DataOutputStream(new ByteArrayOutputStream()), obj);
+ return true;
+ } catch (Exception e) {
+ return false;
+ }
+ }
+
+ public void serialize(final DataOutput out, final Object obj)
+ throws IOException {
+ serialize(out, obj, null);
+ }
+
+ public void serialize(final DataOutput out, final Object obj,
+ FastArrayList objectStack) throws IOException {
+
+ /** try to find object on stack if it exists */
+ if (objectStack != null) {
+ int indexInObjectStack = objectStack.identityIndexOf(obj);
+ if (indexInObjectStack != -1) {
+ // object was already serialized, just write reference to it and return
+ out.write(OBJECT_STACK);
+ LongPacker.packInt(out, indexInObjectStack);
+ return;
+ }
+ // add this object to objectStack
+ objectStack.add(obj);
+ }
+
+ final Class clazz = obj != null ?
obj.getClass() : null;
+
+ /** first try to serialize object without initializing object stack */
+ if (obj == null) {
+ out.write(NULL);
+ return;
+ } else if (clazz == Boolean.class) {
+ if (((Boolean) obj).booleanValue())
+ out.write(BOOLEAN_TRUE);
+ else
+ out.write(BOOLEAN_FALSE);
+ return;
+ } else if (clazz == Integer.class) {
+ final int val = (Integer) obj;
+ writeInteger(out, val);
+ return;
+ } else if (clazz == Double.class) {
+ double v = (Double) obj;
+ if (v == -1d)
+ out.write(DOUBLE_MINUS_1);
+ else if (v == 0d)
+ out.write(DOUBLE_0);
+ else if (v == 1d)
+ out.write(DOUBLE_1);
+ else if (v >= 0 && v <= 255 && (int) v == v) {
+ out.write(DOUBLE_255);
+ out.write((int) v);
+ } else if (v >= Short.MIN_VALUE && v <= Short.MAX_VALUE && (short) v == v) {
+ out.write(DOUBLE_SHORT);
+ out.writeShort((int) v);
+ } else {
+ out.write(DOUBLE_FULL);
+ out.writeDouble(v);
+ }
+ return;
+ } else if (clazz == Float.class) {
+ float v = (Float) obj;
+ if (v == -1f)
+ out.write(FLOAT_MINUS_1);
+ else if (v == 0f)
+ out.write(FLOAT_0);
+ else if (v == 1f)
+ out.write(FLOAT_1);
+ else if (v >= 0 && v <= 255 && (int) v == v) {
+ out.write(FLOAT_255);
+ out.write((int) v);
+ } else if (v >= Short.MIN_VALUE && v <= Short.MAX_VALUE && (short) v == v) {
+ out.write(FLOAT_SHORT);
+ out.writeShort((int) v);
+
+ } else {
+ out.write(FLOAT_FULL);
+ out.writeFloat(v);
+ }
+ return;
+ } else if (clazz == BigInteger.class) {
+ out.write(BIGINTEGER);
+ byte[] buf = ((BigInteger) obj).toByteArray();
+ serializeByteArrayInt(out, buf);
+ return;
+ } else if (clazz == BigDecimal.class) {
+ out.write(BIGDECIMAL);
+ BigDecimal d = (BigDecimal) obj;
+ serializeByteArrayInt(out, d.unscaledValue().toByteArray());
+ LongPacker.packInt(out, d.scale());
+ return;
+ } else if (clazz == Long.class) {
+ final long val = (Long) obj;
+ writeLong(out, val);
+ return;
+ } else if (clazz == Short.class) {
+ short val = (Short) obj;
+ if (val == -1)
+ out.write(SHORT_MINUS_1);
+ else if (val == 0)
+ out.write(SHORT_0);
+ else if (val == 1)
+ out.write(SHORT_1);
+ else if (val > 0 && val < 255) {
+ out.write(SHORT_255);
+ out.write(val);
+ } else {
+ out.write(SHORT_FULL);
+ out.writeShort(val);
+ }
+ return;
+ } else if (clazz == Byte.class) {
+ byte val = (Byte) obj;
+ if (val == -1)
+ out.write(BYTE_MINUS_1);
+ else if (val == 0)
+ out.write(BYTE_0);
+ else if (val == 1)
+ out.write(BYTE_1);
+ else {
+ // BYTE_FULL pairs with readByte() in the deserializer
+ out.write(BYTE_FULL);
+ out.writeByte(val);
+ }
+ return;
+ } else if (clazz == Character.class) {
+ out.write(CHAR);
+ out.writeChar((Character) obj);
+ return;
+ } else if (clazz == String.class) {
+ String s = (String) obj;
+ if (s.length() == 0) {
+ out.write(STRING_EMPTY);
+ } else {
+ out.write(STRING);
+ serializeString(out, s);
+ }
+ return;
+ } else if (obj instanceof Class) {
+ out.write(CLASS);
+ serialize(out, ((Class) obj).getName());
+ return;
+ } else if (obj instanceof int[]) {
+ writeIntArray(out, (int[]) obj);
+ return;
+ } else if (obj instanceof long[]) {
+ writeLongArray(out, (long[]) obj);
+ return;
+ } else if (obj instanceof short[]) {
+ out.write(SHORT_ARRAY);
+ short[] a = (short[]) obj;
+ LongPacker.packInt(out, a.length);
+ for (short s : a)
+ out.writeShort(s);
+ return;
+ } else if (obj instanceof boolean[]) {
+ out.write(BOOLEAN_ARRAY);
+ boolean[] a = (boolean[]) obj;
+ LongPacker.packInt(out, a.length);
+ for (boolean s : a)
+ out.writeBoolean(s); // TODO pack 8 booleans to single byte
+ return;
+ } else if (obj instanceof double[]) {
+ out.write(DOUBLE_ARRAY);
+ double[] a = (double[]) obj;
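+ // layout: DOUBLE_ARRAY header (written above), packed length, raw doubles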
+ LongPacker.packInt(out, a.length);
+ for (double s : a)
+ out.writeDouble(s);
+ return;
+ } else if (obj instanceof float[]) {
+ out.write(FLOAT_ARRAY);
+ float[] a = (float[]) obj;
+ LongPacker.packInt(out, a.length);
+ for (float s : a)
+ out.writeFloat(s);
+ return;
+ } else if (obj instanceof char[]) {
+ out.write(CHAR_ARRAY);
+ char[] a = (char[]) obj;
+ LongPacker.packInt(out, a.length);
+ for (char s : a)
+ out.writeChar(s);
+ return;
+ } else if (obj instanceof byte[]) {
+ byte[] b = (byte[]) obj;
+ out.write(ARRAY_BYTE_INT);
+ serializeByteArrayInt(out, b);
+ return;
+ } else if (clazz == Date.class) {
+ out.write(DATE);
+ out.writeLong(((Date) obj).getTime());
+ return;
+ } else if (clazz == UUID.class) {
+ out.write(UUID);
+ serializeUUID(out, (UUID) obj);
+ return;
+ } else if (clazz == BTree.class) {
+ out.write(BTREE);
+ ((BTree) obj).writeExternal(out);
+ return;
+ } else if (clazz == HTree.class) {
+ out.write(HTREE);
+ ((HTree) obj).serialize(out);
+ return;
+ } else if (clazz == LinkedList.class) {
+ out.write(JDBMLINKEDLIST);
+ ((LinkedList) obj).serialize(out);
+ return;
+ }
+
+ /**
+ * classes below need the object stack, so initialize it if not already
+ * initialized
+ */
+ if (objectStack == null) {
+ objectStack = new FastArrayList();
+ objectStack.add(obj);
+ }
+
+ if (obj instanceof Object[]) {
+ Object[] b = (Object[]) obj;
+ boolean packableLongs = b.length <= 255;
+ if (packableLongs) {
+ // check if it contains packable longs
+ for (Object o : b) {
+ if (o != null
+ && (o.getClass() != Long.class || (((Long) o).longValue() < 0 && ((Long) o)
+ .longValue() != Long.MAX_VALUE))) {
+ packableLongs = false;
+ break;
+ }
+ }
+ }
+
+ if (packableLongs) {
+ // packable Longs are a special case, often used in JDBM to reference
+ // fields
+ out.write(ARRAY_OBJECT_PACKED_LONG);
+ out.write(b.length);
+ for (Object o : b) {
+ if (o == null)
+ LongPacker.packLong(out, 0);
+ else
+ LongPacker.packLong(out, ((Long) o).longValue() + 1);
+ }
+
+ } else {
+ out.write(ARRAY_OBJECT);
+ LongPacker.packInt(out, b.length);
+
+ // Write class id for components
+ Class componentType = obj.getClass().getComponentType();
+ registerClass(componentType);
+ // write class header
+ int classId = getClassId(componentType);
+ LongPacker.packInt(out, classId);
+
+ for (Object o : b)
+ serialize(out, o, objectStack);
+
+ }
+
+ } else if (clazz == ArrayList.class) {
+ ArrayList l = (ArrayList) obj;
+ boolean packableLongs = l.size() < 255;
+ if (packableLongs) {
+ // packable Longs are a special case, often used in JDBM to reference
+ // fields
+ for (Object o : l) {
+ if (o != null
+ && (o.getClass() != Long.class || (((Long) o).longValue() < 0 && ((Long) o)
+ .longValue() != Long.MAX_VALUE))) {
+ packableLongs = false;
+ break;
+ }
+ }
+ }
+ if (packableLongs) {
+ out.write(ARRAYLIST_PACKED_LONG);
+ out.write(l.size());
+ for (Object o : l) {
+ if (o == null)
+ LongPacker.packLong(out, 0);
+ else
+ LongPacker.packLong(out, ((Long) o).longValue() + 1);
+ }
+ } else {
+ serializeCollection(ARRAYLIST, out, obj, objectStack);
+ }
+
+ } else if (clazz == java.util.LinkedList.class) {
+ serializeCollection(LINKEDLIST, out, obj, objectStack);
+ } else if (clazz == Vector.class) {
+ serializeCollection(VECTOR, out, obj, objectStack);
+ } else if (clazz == TreeSet.class) {
+ TreeSet l = (TreeSet) obj;
+ out.write(TREESET);
+ LongPacker.packInt(out, l.size());
+ serialize(out, l.comparator(), objectStack);
+ for (Object o : l)
+ serialize(out, o, objectStack);
+ } else if (clazz == HashSet.class)
{ + serializeCollection(HASHSET, out, obj, objectStack); + } else if (clazz == LinkedHashSet.class) { + serializeCollection(LINKEDHASHSET, out, obj, objectStack); + } else if (clazz == TreeMap.class) { + TreeMap l = (TreeMap) obj; + out.write(TREEMAP); + LongPacker.packInt(out, l.size()); + serialize(out, l.comparator(), objectStack); + for (Object o : l.keySet()) { + serialize(out, o, objectStack); + serialize(out, l.get(o), objectStack); + } + } else if (clazz == HashMap.class) { + serializeMap(HASHMAP, out, obj, objectStack); + } else if (clazz == IdentityHashMap.class) { + serializeMap(IDENTITYHASHMAP, out, obj, objectStack); + } else if (clazz == LinkedHashMap.class) { + serializeMap(LINKEDHASHMAP, out, obj, objectStack); + } else if (clazz == Hashtable.class) { + serializeMap(HASHTABLE, out, obj, objectStack); + } else if (clazz == Properties.class) { + serializeMap(PROPERTIES, out, obj, objectStack); + } else if (clazz == Locale.class) { + out.write(LOCALE); + Locale l = (Locale) obj; + out.writeUTF(l.getLanguage()); + out.writeUTF(l.getCountry()); + out.writeUTF(l.getVariant()); + } else { + out.write(NORMAL); + writeObject(out, obj, objectStack); + } + + } + + static void serializeString(DataOutput out, String obj) throws IOException { + final int len = obj.length(); + LongPacker.packInt(out, len); + for (int i = 0; i < len; i++) { + int c = (int) obj.charAt(i); // TODO investigate if c could be negative + // here + LongPacker.packInt(out, c); + } + + } + + private void serializeUUID(DataOutput out, UUID uuid) throws IOException { + out.writeLong(uuid.getMostSignificantBits()); + out.writeLong(uuid.getLeastSignificantBits()); + } + + private void serializeMap(int header, DataOutput out, Object obj, + FastArrayList objectStack) throws IOException { + Map l = (Map) obj; + out.write(header); + LongPacker.packInt(out, l.size()); + for (Object o : l.keySet()) { + serialize(out, o, objectStack); + serialize(out, l.get(o), objectStack); + } + } + + private void serializeCollection(int header, DataOutput out, Object obj, + FastArrayList objectStack) throws IOException { + Collection l = (Collection) obj; + out.write(header); + LongPacker.packInt(out, l.size()); + + for (Object o : l) + serialize(out, o, objectStack); + + } + + private void serializeByteArrayInt(DataOutput out, byte[] b) + throws IOException { + LongPacker.packInt(out, b.length); + out.write(b); + } + + private void writeLongArray(DataOutput da, long[] obj) throws IOException { + long max = Long.MIN_VALUE; + long min = Long.MAX_VALUE; + for (long i : obj) { + max = Math.max(max, i); + min = Math.min(min, i); + } + + if (0 <= min && max <= 255) { + da.write(ARRAY_LONG_B); + LongPacker.packInt(da, obj.length); + for (long l : obj) + da.write((int) l); + } else if (0 <= min && max <= Long.MAX_VALUE) { + da.write(ARRAY_LONG_PACKED); + LongPacker.packInt(da, obj.length); + for (long l : obj) + LongPacker.packLong(da, l); + } else if (Short.MIN_VALUE <= min && max <= Short.MAX_VALUE) { + da.write(ARRAY_LONG_S); + LongPacker.packInt(da, obj.length); + for (long l : obj) + da.writeShort((short) l); + } else if (Integer.MIN_VALUE <= min && max <= Integer.MAX_VALUE) { + da.write(ARRAY_LONG_I); + LongPacker.packInt(da, obj.length); + for (long l : obj) + da.writeInt((int) l); + } else { + da.write(ARRAY_LONG_L); + LongPacker.packInt(da, obj.length); + for (long l : obj) + da.writeLong(l); + } + + } + + private void writeIntArray(DataOutput da, int[] obj) throws IOException { + int max = Integer.MIN_VALUE; + int min = 
Integer.MAX_VALUE;
+ for (int i : obj) {
+ max = Math.max(max, i);
+ min = Math.min(min, i);
+ }
+
+ boolean fitsInByte = 0 <= min && max <= 255;
+ boolean fitsInShort = Short.MIN_VALUE <= min && max <= Short.MAX_VALUE;
+
+ if (obj.length <= 255 && fitsInByte) {
+ da.write(ARRAY_INT_B_255);
+ da.write(obj.length);
+ for (int i : obj)
+ da.write(i);
+ } else if (fitsInByte) {
+ da.write(ARRAY_INT_B_INT);
+ LongPacker.packInt(da, obj.length);
+ for (int i : obj)
+ da.write(i);
+ } else if (0 <= min && max <= Integer.MAX_VALUE) {
+ da.write(ARRAY_INT_PACKED);
+ LongPacker.packInt(da, obj.length);
+ for (int l : obj)
+ LongPacker.packInt(da, l);
+ } else if (fitsInShort) {
+ da.write(ARRAY_INT_S);
+ LongPacker.packInt(da, obj.length);
+ for (int i : obj)
+ da.writeShort(i);
+ } else {
+ da.write(ARRAY_INT_I);
+ LongPacker.packInt(da, obj.length);
+ for (int i : obj)
+ da.writeInt(i);
+ }
+
+ }
+
+ private void writeInteger(DataOutput da, final int val) throws IOException {
+ if (val == -1)
+ da.write(INTEGER_MINUS_1);
+ else if (val == 0)
+ da.write(INTEGER_0);
+ else if (val == 1)
+ da.write(INTEGER_1);
+ else if (val == 2)
+ da.write(INTEGER_2);
+ else if (val == 3)
+ da.write(INTEGER_3);
+ else if (val == 4)
+ da.write(INTEGER_4);
+ else if (val == 5)
+ da.write(INTEGER_5);
+ else if (val == 6)
+ da.write(INTEGER_6);
+ else if (val == 7)
+ da.write(INTEGER_7);
+ else if (val == 8)
+ da.write(INTEGER_8);
+ else if (val == Integer.MIN_VALUE)
+ da.write(INTEGER_MINUS_MAX);
+ else if (val > 0 && val < 255) {
+ da.write(INTEGER_255);
+ da.write(val);
+ } else if (val < 0) {
+ da.write(INTEGER_PACK_NEG);
+ LongPacker.packInt(da, -val);
+ } else {
+ da.write(INTEGER_PACK);
+ LongPacker.packInt(da, val);
+ }
+ }
+
+ private void writeLong(DataOutput da, final long val) throws IOException {
+ if (val == -1)
+ da.write(LONG_MINUS_1);
+ else if (val == 0)
+ da.write(LONG_0);
+ else if (val == 1)
+ da.write(LONG_1);
+ else if (val == 2)
+ da.write(LONG_2);
+ else if (val == 3)
+ da.write(LONG_3);
+ else if (val == 4)
+ da.write(LONG_4);
+ else if (val == 5)
+ da.write(LONG_5);
+ else if (val == 6)
+ da.write(LONG_6);
+ else if (val == 7)
+ da.write(LONG_7);
+ else if (val == 8)
+ da.write(LONG_8);
+ else if (val == Long.MIN_VALUE)
+ da.write(LONG_MINUS_MAX);
+ else if (val > 0 && val < 255) {
+ da.write(LONG_255);
+ da.write((int) val);
+ } else if (val < 0) {
+ da.write(LONG_PACK_NEG);
+ LongPacker.packLong(da, -val);
+ } else {
+ da.write(LONG_PACK);
+ LongPacker.packLong(da, val);
+ }
+ }
+
+ /**
+ * Deserializes an object from a byte array
+ *
+ * @throws IOException
+ * @throws ClassNotFoundException
+ */
+ public Object deserialize(byte[] buf) throws ClassNotFoundException,
+ IOException {
+ DataInputOutput bs = new DataInputOutput(buf);
+ Object ret = deserialize(bs);
+ if (bs.available() != 0)
+ throw new InternalError("bytes left: " + bs.available());
+
+ return ret;
+ }
+
+ static String deserializeString(DataInput buf) throws IOException {
+ int len = LongPacker.unpackInt(buf);
+ char[] b = new char[len];
+ for (int i = 0; i < len; i++)
+ b[i] = (char) LongPacker.unpackInt(buf);
+
+ return new String(b);
+ }
+
+ public Object deserialize(DataInput is) throws IOException,
+ ClassNotFoundException {
+ return deserialize(is, null);
+ }
+
+ public Object deserialize(DataInput is, FastArrayList objectStack)
+ throws IOException, ClassNotFoundException {
+
+ Object ret = null;
+
+ final int head = is.readUnsignedByte();
+
+ /** first try to deserialize object without allocating object stack */
+
switch (head) { + case NULL: + break; + case BOOLEAN_TRUE: + ret = Boolean.TRUE; + break; + case BOOLEAN_FALSE: + ret = Boolean.FALSE; + break; + case INTEGER_MINUS_1: + ret = Integer.valueOf(-1); + break; + case INTEGER_0: + ret = Integer.valueOf(0); + break; + case INTEGER_1: + ret = Integer.valueOf(1); + break; + case INTEGER_2: + ret = Integer.valueOf(2); + break; + case INTEGER_3: + ret = Integer.valueOf(3); + break; + case INTEGER_4: + ret = Integer.valueOf(4); + break; + case INTEGER_5: + ret = Integer.valueOf(5); + break; + case INTEGER_6: + ret = Integer.valueOf(6); + break; + case INTEGER_7: + ret = Integer.valueOf(7); + break; + case INTEGER_8: + ret = Integer.valueOf(8); + break; + case INTEGER_MINUS_MAX: + ret = Integer.valueOf(Integer.MIN_VALUE); + break; + case INTEGER_255: + ret = Integer.valueOf(is.readUnsignedByte()); + break; + case INTEGER_PACK_NEG: + ret = Integer.valueOf(-LongPacker.unpackInt(is)); + break; + case INTEGER_PACK: + ret = Integer.valueOf(LongPacker.unpackInt(is)); + break; + case LONG_MINUS_1: + ret = Long.valueOf(-1); + break; + case LONG_0: + ret = Long.valueOf(0); + break; + case LONG_1: + ret = Long.valueOf(1); + break; + case LONG_2: + ret = Long.valueOf(2); + break; + case LONG_3: + ret = Long.valueOf(3); + break; + case LONG_4: + ret = Long.valueOf(4); + break; + case LONG_5: + ret = Long.valueOf(5); + break; + case LONG_6: + ret = Long.valueOf(6); + break; + case LONG_7: + ret = Long.valueOf(7); + break; + case LONG_8: + ret = Long.valueOf(8); + break; + case LONG_255: + ret = Long.valueOf(is.readUnsignedByte()); + break; + case LONG_PACK_NEG: + ret = Long.valueOf(-LongPacker.unpackLong(is)); + break; + case LONG_PACK: + ret = Long.valueOf(LongPacker.unpackLong(is)); + break; + case LONG_MINUS_MAX: + ret = Long.valueOf(Long.MIN_VALUE); + break; + case SHORT_MINUS_1: + ret = Short.valueOf((short) -1); + break; + case SHORT_0: + ret = Short.valueOf((short) 0); + break; + case SHORT_1: + ret = Short.valueOf((short) 1); + break; + case SHORT_255: + ret = Short.valueOf((short) is.readUnsignedByte()); + break; + case SHORT_FULL: + ret = Short.valueOf(is.readShort()); + break; + case BYTE_MINUS_1: + ret = Byte.valueOf((byte) -1); + break; + case BYTE_0: + ret = Byte.valueOf((byte) 0); + break; + case BYTE_1: + ret = Byte.valueOf((byte) 1); + break; + case BYTE_FULL: + ret = Byte.valueOf(is.readByte()); + break; + case SHORT_ARRAY: + int size = LongPacker.unpackInt(is); + ret = new short[size]; + for (int i = 0; i < size; i++) + ((short[]) ret)[i] = is.readShort(); + break; + case BOOLEAN_ARRAY: + size = LongPacker.unpackInt(is); + ret = new boolean[size]; + for (int i = 0; i < size; i++) + ((boolean[]) ret)[i] = is.readBoolean(); + break; + case DOUBLE_ARRAY: + size = LongPacker.unpackInt(is); + ret = new double[size]; + for (int i = 0; i < size; i++) + ((double[]) ret)[i] = is.readDouble(); + break; + case FLOAT_ARRAY: + size = LongPacker.unpackInt(is); + ret = new float[size]; + for (int i = 0; i < size; i++) + ((float[]) ret)[i] = is.readFloat(); + break; + case CHAR_ARRAY: + size = LongPacker.unpackInt(is); + ret = new char[size]; + for (int i = 0; i < size; i++) + ((char[]) ret)[i] = is.readChar(); + break; + case CHAR: + ret = Character.valueOf(is.readChar()); + break; + case FLOAT_MINUS_1: + ret = Float.valueOf(-1); + break; + case FLOAT_0: + ret = Float.valueOf(0); + break; + case FLOAT_1: + ret = Float.valueOf(1); + break; + case FLOAT_255: + ret = Float.valueOf(is.readUnsignedByte()); + break; + case FLOAT_SHORT: + ret = 
Float.valueOf(is.readShort()); + break; + case FLOAT_FULL: + ret = Float.valueOf(is.readFloat()); + break; + case DOUBLE_MINUS_1: + ret = Double.valueOf(-1); + break; + case DOUBLE_0: + ret = Double.valueOf(0); + break; + case DOUBLE_1: + ret = Double.valueOf(1); + break; + case DOUBLE_255: + ret = Double.valueOf(is.readUnsignedByte()); + break; + case DOUBLE_SHORT: + ret = Double.valueOf(is.readShort()); + break; + case DOUBLE_FULL: + ret = Double.valueOf(is.readDouble()); + break; + case BIGINTEGER: + ret = new BigInteger(deserializeArrayByteInt(is)); + break; + case BIGDECIMAL: + ret = new BigDecimal(new BigInteger(deserializeArrayByteInt(is)), + LongPacker.unpackInt(is)); + break; + case STRING: + ret = deserializeString(is); + break; + case STRING_EMPTY: + ret = JDBMUtils.EMPTY_STRING; + break; + + case CLASS: + ret = deserializeClass(is); + break; + case DATE: + ret = new Date(is.readLong()); + break; + case UUID: + ret = deserializeUUID(is); + break; + case ARRAY_INT_B_255: + ret = deserializeArrayIntB255(is); + break; + case ARRAY_INT_B_INT: + ret = deserializeArrayIntBInt(is); + break; + case ARRAY_INT_S: + ret = deserializeArrayIntSInt(is); + break; + case ARRAY_INT_I: + ret = deserializeArrayIntIInt(is); + break; + case ARRAY_INT_PACKED: + ret = deserializeArrayIntPack(is); + break; + case ARRAY_LONG_B: + ret = deserializeArrayLongB(is); + break; + case ARRAY_LONG_S: + ret = deserializeArrayLongS(is); + break; + case ARRAY_LONG_I: + ret = deserializeArrayLongI(is); + break; + case ARRAY_LONG_L: + ret = deserializeArrayLongL(is); + break; + case ARRAY_LONG_PACKED: + ret = deserializeArrayLongPack(is); + break; + case ARRAYLIST_PACKED_LONG: + ret = deserializeArrayListPackedLong(is); + break; + case ARRAY_BYTE_INT: + ret = deserializeArrayByteInt(is); + break; + case LOCALE: + ret = new Locale(is.readUTF(), is.readUTF(), is.readUTF()); + break; + case JDBMLINKEDLIST: + ret = LinkedList.deserialize(is, this); + break; + case HTREE: + ret = HTree.deserialize(is, this); + break; + case BTREE: + ret = BTree.readExternal(is, this); + break; + case BTREE_NODE_LEAF: + throw new InternalError("BPage header, wrong serializer used"); + case BTREE_NODE_NONLEAF: + throw new InternalError("BPage header, wrong serializer used"); + case JAVA_SERIALIZATION: + throw new InternalError( + "Wrong header, data were probably serialized with OutputStream, not with JDBM serialization"); + + case -1: + throw new EOFException(); + + } + + if (ret != null || head == NULL) { + if (objectStack != null) + objectStack.add(ret); + return ret; + } + + /** something else which needs object stack initialized */ + + if (objectStack == null) + objectStack = new FastArrayList(); + int oldObjectStackSize = objectStack.size(); + + switch (head) { + case NORMAL: + ret = readObject(is, objectStack); + break; + case OBJECT_STACK: + ret = objectStack.get(LongPacker.unpackInt(is)); + break; + case ARRAYLIST: + ret = deserializeArrayList(is, objectStack); + break; + case ARRAY_OBJECT: + ret = deserializeArrayObject(is, objectStack); + break; + case ARRAY_OBJECT_PACKED_LONG: + ret = deserializeArrayObjectPackedLong(is); + break; + case LINKEDLIST: + ret = deserializeLinkedList(is, objectStack); + break; + case TREESET: + ret = deserializeTreeSet(is, objectStack); + break; + case HASHSET: + ret = deserializeHashSet(is, objectStack); + break; + case LINKEDHASHSET: + ret = deserializeLinkedHashSet(is, objectStack); + break; + case VECTOR: + ret = deserializeVector(is, objectStack); + break; + case TREEMAP: + ret = 
deserializeTreeMap(is, objectStack); + break; + case HASHMAP: + ret = deserializeHashMap(is, objectStack); + break; + case IDENTITYHASHMAP: + ret = deserializeIdentityHashMap(is, objectStack); + break; + case LINKEDHASHMAP: + ret = deserializeLinkedHashMap(is, objectStack); + break; + case HASHTABLE: + ret = deserializeHashtable(is, objectStack); + break; + case PROPERTIES: + ret = deserializeProperties(is, objectStack); + break; + + default: + throw new InternalError("Unknown serialization header: " + head); + } + + if (head != OBJECT_STACK && objectStack.size() == oldObjectStackSize) { + // check if object was not already added to stack as part of collection + objectStack.add(ret); + } + + return ret; + } + + private Class deserializeClass(DataInput is) throws IOException, + ClassNotFoundException { + String className = (String) deserialize(is); + Class cls = Class.forName(className); + return cls; + } + + private byte[] deserializeArrayByteInt(DataInput is) throws IOException { + int size = LongPacker.unpackInt(is); + byte[] b = new byte[size]; + is.readFully(b); + return b; + } + + private long[] deserializeArrayLongL(DataInput is) throws IOException { + int size = LongPacker.unpackInt(is); + long[] ret = new long[size]; + for (int i = 0; i < size; i++) + ret[i] = is.readLong(); + return ret; + } + + private long[] deserializeArrayLongI(DataInput is) throws IOException { + int size = LongPacker.unpackInt(is); + long[] ret = new long[size]; + for (int i = 0; i < size; i++) + ret[i] = is.readInt(); + return ret; + } + + private long[] deserializeArrayLongS(DataInput is) throws IOException { + int size = LongPacker.unpackInt(is); + long[] ret = new long[size]; + for (int i = 0; i < size; i++) + ret[i] = is.readShort(); + return ret; + } + + private long[] deserializeArrayLongB(DataInput is) throws IOException { + int size = LongPacker.unpackInt(is); + long[] ret = new long[size]; + for (int i = 0; i < size; i++) { + ret[i] = is.readUnsignedByte(); + if (ret[i] < 0) + throw new EOFException(); + } + return ret; + } + + private int[] deserializeArrayIntIInt(DataInput is) throws IOException { + int size = LongPacker.unpackInt(is); + int[] ret = new int[size]; + for (int i = 0; i < size; i++) + ret[i] = is.readInt(); + return ret; + } + + private int[] deserializeArrayIntSInt(DataInput is) throws IOException { + int size = LongPacker.unpackInt(is); + int[] ret = new int[size]; + for (int i = 0; i < size; i++) + ret[i] = is.readShort(); + return ret; + } + + private int[] deserializeArrayIntBInt(DataInput is) throws IOException { + int size = LongPacker.unpackInt(is); + int[] ret = new int[size]; + for (int i = 0; i < size; i++) { + ret[i] = is.readUnsignedByte(); + if (ret[i] < 0) + throw new EOFException(); + } + return ret; + } + + private int[] deserializeArrayIntPack(DataInput is) throws IOException { + int size = LongPacker.unpackInt(is); + if (size < 0) + throw new EOFException(); + + int[] ret = new int[size]; + for (int i = 0; i < size; i++) { + ret[i] = LongPacker.unpackInt(is); + } + return ret; + } + + private long[] deserializeArrayLongPack(DataInput is) throws IOException { + int size = LongPacker.unpackInt(is); + if (size < 0) + throw new EOFException(); + + long[] ret = new long[size]; + for (int i = 0; i < size; i++) { + ret[i] = LongPacker.unpackLong(is); + } + return ret; + } + + private UUID deserializeUUID(DataInput is) throws IOException { + return new UUID(is.readLong(), is.readLong()); + } + + private int[] deserializeArrayIntB255(DataInput is) throws IOException { + 
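// inverse of writeIntArray's ARRAY_INT_B_255 branch: an unsigned-byte
+ // length (max 255) followed by one unsigned byte per element
+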
int size = is.readUnsignedByte(); + if (size < 0) + throw new EOFException(); + + int[] ret = new int[size]; + for (int i = 0; i < size; i++) { + ret[i] = is.readUnsignedByte(); + if (ret[i] < 0) + throw new EOFException(); + } + return ret; + } + + private Object[] deserializeArrayObject(DataInput is, + FastArrayList objectStack) throws IOException, ClassNotFoundException { + int size = LongPacker.unpackInt(is); + // Read class id for components + int classId = LongPacker.unpackInt(is); + Class clazz = classId2class.get(classId); + if (clazz == null) + clazz = Object.class; + + Object[] s = (Object[]) Array.newInstance(clazz, size); + objectStack.add(s); + for (int i = 0; i < size; i++) + s[i] = deserialize(is, objectStack); + return s; + } + + private Object[] deserializeArrayObjectPackedLong(DataInput is) + throws IOException, ClassNotFoundException { + int size = is.readUnsignedByte(); + Object[] s = new Object[size]; + for (int i = 0; i < size; i++) { + long l = LongPacker.unpackLong(is); + if (l == 0) + s[i] = null; + else + s[i] = Long.valueOf(l - 1); + } + return s; + } + + private ArrayList deserializeArrayList(DataInput is, + FastArrayList objectStack) throws IOException, ClassNotFoundException { + int size = LongPacker.unpackInt(is); + ArrayList s = new ArrayList(size); + objectStack.add(s); + for (int i = 0; i < size; i++) { + s.add(deserialize(is, objectStack)); + } + return s; + } + + private ArrayList deserializeArrayListPackedLong(DataInput is) + throws IOException, ClassNotFoundException { + int size = is.readUnsignedByte(); + if (size < 0) + throw new EOFException(); + + ArrayList s = new ArrayList(size); + for (int i = 0; i < size; i++) { + long l = LongPacker.unpackLong(is); + if (l == 0) + s.add(null); + else + s.add(Long.valueOf(l - 1)); + } + return s; + } + + private java.util.LinkedList deserializeLinkedList(DataInput is, + FastArrayList objectStack) throws IOException, ClassNotFoundException { + int size = LongPacker.unpackInt(is); + java.util.LinkedList s = new java.util.LinkedList(); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.add(deserialize(is, objectStack)); + return s; + } + + private Vector deserializeVector(DataInput is, + FastArrayList objectStack) throws IOException, ClassNotFoundException { + int size = LongPacker.unpackInt(is); + Vector s = new Vector(size); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.add(deserialize(is, objectStack)); + return s; + } + + private HashSet deserializeHashSet(DataInput is, + FastArrayList objectStack) throws IOException, ClassNotFoundException { + int size = LongPacker.unpackInt(is); + HashSet s = new HashSet(size); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.add(deserialize(is, objectStack)); + return s; + } + + private LinkedHashSet deserializeLinkedHashSet(DataInput is, + FastArrayList objectStack) throws IOException, ClassNotFoundException { + int size = LongPacker.unpackInt(is); + LinkedHashSet s = new LinkedHashSet(size); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.add(deserialize(is, objectStack)); + return s; + } + + private TreeSet deserializeTreeSet(DataInput is, + FastArrayList objectStack) throws IOException, ClassNotFoundException { + int size = LongPacker.unpackInt(is); + TreeSet s = new TreeSet(); + objectStack.add(s); + Comparator comparator = (Comparator) deserialize(is, objectStack); + if (comparator != null) + s = new TreeSet(comparator); + + for (int i = 0; i < size; i++) + s.add(deserialize(is, objectStack)); + return s; + } + + private 
TreeMap deserializeTreeMap(DataInput is,
+ FastArrayList objectStack) throws IOException, ClassNotFoundException {
+ int size = LongPacker.unpackInt(is);
+
+ TreeMap s = new TreeMap();
+ objectStack.add(s);
+ Comparator comparator = (Comparator) deserialize(is, objectStack);
+ if (comparator != null)
+ s = new TreeMap(comparator);
+ for (int i = 0; i < size; i++)
+ s.put(deserialize(is, objectStack), deserialize(is, objectStack));
+ return s;
+ }
+
+ private HashMap deserializeHashMap(DataInput is,
+ FastArrayList objectStack) throws IOException, ClassNotFoundException {
+ int size = LongPacker.unpackInt(is);
+
+ HashMap s = new HashMap(size);
+ objectStack.add(s);
+ for (int i = 0; i < size; i++)
+ s.put(deserialize(is, objectStack), deserialize(is, objectStack));
+ return s;
+ }
+
+ private IdentityHashMap deserializeIdentityHashMap(
+ DataInput is, FastArrayList objectStack) throws IOException,
+ ClassNotFoundException {
+ int size = LongPacker.unpackInt(is);
+
+ IdentityHashMap s = new IdentityHashMap(
+ size);
+ objectStack.add(s);
+ for (int i = 0; i < size; i++)
+ s.put(deserialize(is, objectStack), deserialize(is, objectStack));
+ return s;
+ }
+
+ private LinkedHashMap deserializeLinkedHashMap(DataInput is,
+ FastArrayList objectStack) throws IOException, ClassNotFoundException {
+ int size = LongPacker.unpackInt(is);
+
+ LinkedHashMap s = new LinkedHashMap(size);
+ objectStack.add(s);
+ for (int i = 0; i < size; i++)
+ s.put(deserialize(is, objectStack), deserialize(is, objectStack));
+ return s;
+ }
+
+ private Hashtable deserializeHashtable(DataInput is,
+ FastArrayList objectStack) throws IOException, ClassNotFoundException {
+ int size = LongPacker.unpackInt(is);
+
+ Hashtable s = new Hashtable(size);
+ objectStack.add(s);
+ for (int i = 0; i < size; i++)
+ s.put(deserialize(is, objectStack), deserialize(is, objectStack));
+ return s;
+ }
+
+ private Properties deserializeProperties(DataInput is,
+ FastArrayList objectStack) throws IOException, ClassNotFoundException {
+ int size = LongPacker.unpackInt(is);
+
+ Properties s = new Properties();
+ objectStack.add(s);
+ for (int i = 0; i < size; i++)
+ s.put(deserialize(is, objectStack), deserialize(is, objectStack));
+ return s;
+ }
+
+ /**
+ * Utility class similar to ArrayList, but with fast identity search.
+ */
+ static class FastArrayList {
+
+ private int size = 0;
+ private K[] elementData = (K[]) new Object[8];
+
+ K get(int index) {
+ if (index >= size)
+ throw new IndexOutOfBoundsException();
+ return elementData[index];
+ }
+
+ void add(K o) {
+ if (elementData.length == size) {
+ // grow array if necessary
+ elementData = Arrays.copyOf(elementData, elementData.length * 2);
+ }
+
+ elementData[size] = o;
+ size++;
+ }
+
+ int size() {
+ return size;
+ }
+
+ /**
+ * This method is the reason ArrayList is not used. It searches for an item
+ * in the list and returns its index, using identity (==) rather than
+ * equals(). One could argue that a TreeMap should be used instead, but we
+ * do not expect large object trees. This search is VERY FAST compared to
+ * Maps: it does not allocate new instances or use method calls.
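+ * For example, serialize() uses it to detect objects already on the stack
+ * (cyclic references).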
+ *
+ * @param obj
+ * @return index of object in list or -1 if not found
+ */
+ int identityIndexOf(Object obj) {
+ for (int i = 0; i < size; i++) {
+ if (obj == elementData[i])
+ return i;
+ }
+ return -1;
+ }
+
+ }
+
+}
Index: graph/src/main/java/org/apache/jdbm/PageTransactionManager.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/PageTransactionManager.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/PageTransactionManager.java (Arbeitskopie)
@@ -0,0 +1,314 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.TreeSet;
+
+import javax.crypto.Cipher;
+
+/**
+ * This class manages the transaction log that belongs to every {@link PageFile}
+ * . The transaction log is either clean, or in progress. In the latter case,
+ * the transaction manager takes care of a roll forward.
+ */
+// TODO: Handle the case where we are recovering lg9 and lg0, where we
+// should start with lg9 instead of lg0!
+
+public final class PageTransactionManager {
+ private PageFile owner;
+
+ // streams for transaction log.
+ private DataOutputStream oos;
+
+ /**
+ * In-core copy of transactions. We could read everything back from the log
+ * file, but the PageFile needs to keep the dirty pages in core anyway, so we
+ * might as well point to them and spare us a lot of hassle.
+ */
+ private ArrayList txn = new ArrayList();
+ private int curTxn = -1;
+
+ private Storage storage;
+ private Cipher cipherIn;
+ private Cipher cipherOut;
+
+ /**
+ * Instantiates a transaction manager instance. If recovery needs to be
+ * performed, it is done.
+ *
+ * @param owner the PageFile instance that owns this transaction mgr.
+ * @param storage
+ * @param cipherIn
+ * @param cipherOut
+ */
+ PageTransactionManager(PageFile owner, Storage storage, Cipher cipherIn,
+ Cipher cipherOut) throws IOException {
+ this.owner = owner;
+ this.storage = storage;
+ this.cipherIn = cipherIn;
+ this.cipherOut = cipherOut;
+ recover();
+ open();
+ }
+
+ /**
+ * Synchronizes log file data with the main database file.
+ *
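+ * (This flushes the in-memory transaction list and opens a fresh log.)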

+ * After this call, the main database file is guaranteed to be consistent and
+ * guaranteed to be the only file needed for backup purposes.
+ */
+ public void synchronizeLog() throws IOException {
+ synchronizeLogFromMemory();
+ }
+
+ /**
+ * Syncs in-core transactions to the data file and opens a fresh log
+ */
+ private void synchronizeLogFromMemory() throws IOException {
+ close();
+
+ TreeSet pageList = new TreeSet(PAGE_IO_COMPARTOR);
+
+ int numPages = 0;
+ int writtenPages = 0;
+
+ if (txn != null) {
+ // Add each page to the pageList, replacing the old copy of this
+ // page if necessary, thus avoiding writing the same page twice
+ for (Iterator k = txn.iterator(); k.hasNext();) {
+ PageIo page = k.next();
+ if (pageList.contains(page)) {
+ page.decrementTransactionCount();
+ } else {
+ writtenPages++;
+ pageList.add(page);
+ }
+ numPages++;
+ }
+
+ txn = null;
+ }
+
+ // Write the pages from the pageList to disk
+ synchronizePages(pageList, true);
+
+ owner.sync();
+ open();
+ }
+
+ /**
+ * Opens the log file
+ */
+ private void open() throws IOException {
+
+ oos = storage.openTransactionLog();
+ oos.writeShort(Magic.LOGFILE_HEADER);
+ oos.flush();
+ curTxn = -1;
+ }
+
+ /**
+ * Startup recovery on all files
+ */
+ private void recover() throws IOException {
+
+ DataInputStream ois = storage.readTransactionLog();
+
+ // if transaction log is empty, or does not exist
+ if (ois == null)
+ return;
+
+ while (true) {
+ ArrayList pages = null;
+ try {
+ int size = LongPacker.unpackInt(ois);
+ pages = new ArrayList(size);
+ for (int i = 0; i < size; i++) {
+ PageIo b = new PageIo();
+ b.readExternal(ois, cipherOut);
+ pages.add(b);
+ }
+ } catch (IOException e) {
+ // corrupted logfile, ignore rest of transactions
+ break;
+ }
+ synchronizePages(pages, false);
+
+ }
+ owner.sync();
+ ois.close();
+ storage.deleteTransactionLog();
+ }
+
+ /**
+ * Synchronizes the indicated pages with the owner.
+ */
+ private void synchronizePages(Iterable pages, boolean fromCore)
+ throws IOException {
+ // write pages vector elements to the data file.
+ for (PageIo cur : pages) {
+ owner.synch(cur);
+ if (fromCore) {
+ cur.decrementTransactionCount();
+ if (!cur.isInTransaction()) {
+ owner.releaseFromTransaction(cur);
+ }
+ }
+ }
+ }
+
+ /**
+ * Sets the clean flag on the pages.
+ */
+ private void setClean(ArrayList pages) throws IOException {
+ for (PageIo cur : pages) {
+ cur.setClean();
+ }
+ }
+
+ /**
+ * Discards the indicated pages and notifies the owner.
+ */
+ private void discardPages(ArrayList pages) throws IOException {
+ for (PageIo cur : pages) {
+
+ cur.decrementTransactionCount();
+ if (!cur.isInTransaction()) {
+ owner.releaseFromTransaction(cur);
+ }
+ }
+ }
+
+ /**
+ * Starts a transaction. This can block if all slots have been filled with
+ * full transactions, waiting for the synchronization thread to clean out
+ * slots.
+ */
+ void start() throws IOException {
+ curTxn++;
+ if (curTxn == 1) {
+ synchronizeLogFromMemory();
+ curTxn = 0;
+ }
+ txn = new ArrayList();
+ }
+
+ /**
+ * Indicates the page is part of the transaction.
+ */
+ void add(PageIo page) throws IOException {
+ page.incrementTransactionCount();
+ txn.add(page);
+ }
+
+ /**
+ * Commits the transaction to the log file.
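+ * Pages are appended to the log and marked clean; they remain in memory
+ * until the log is synchronized with the main file.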
+ */ + void commit() throws IOException { + LongPacker.packInt(oos, txn.size()); + for (PageIo page : txn) { + page.writeExternal(oos, cipherIn); + } + + sync(); + + // set clean flag to indicate pages have been written to log + setClean(txn); + + // open a new ObjectOutputStream in order to store + // newer states of PageIo + // oos = new DataOutputStream(new BufferedOutputStream(fos)); + } + + /** + * Flushes and syncs + */ + private void sync() throws IOException { + oos.flush(); + } + + /** + * Shutdowns the transaction manager. Resynchronizes outstanding logs. + */ + void shutdown() throws IOException { + synchronizeLogFromMemory(); + close(); + } + + /** + * Closes open files. + */ + private void close() throws IOException { + sync(); + oos.close(); + oos = null; + } + + /** + * Force closing the file without synchronizing pending transaction data. Used + * for testing purposes only. + */ + void forceClose() throws IOException { + oos.close(); + oos = null; + } + + /** + * Use the disk-based transaction log to synchronize the data file. + * Outstanding memory logs are discarded because they are believed to be + * inconsistent. + */ + void synchronizeLogFromDisk() throws IOException { + close(); + + if (txn != null) { + discardPages(txn); + txn = null; + } + + recover(); + open(); + } + + /** + * INNER CLASS. Comparator class for use by the tree set used to store the + * pages to write for this transaction. The PageIo objects are ordered by + * their page ids. + */ + private static final Comparator PAGE_IO_COMPARTOR = new Comparator() { + + public int compare(PageIo page1, PageIo page2) { + + if (page1.getPageId() == page2.getPageId()) { + return 0; + } else if (page1.getPageId() < page2.getPageId()) { + return -1; + } else { + return 1; + } + } + + }; + +} Index: graph/src/main/java/org/apache/jdbm/Magic.java =================================================================== --- graph/src/main/java/org/apache/jdbm/Magic.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/Magic.java (Arbeitskopie) @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +/** + * This interface contains magic cookies. + */ +public interface Magic { + /** + * Magic cookie at start of file + */ + short FILE_HEADER = 0x1350; + + /** + * Magic for pages. They're offset by the page type magic codes. + */ + short PAGE_MAGIC = 0x1351; + + /** + * Magics for pages in certain lists. 
+ */
+ short FREE_PAGE = 0;
+ short USED_PAGE = 1;
+ short TRANSLATION_PAGE = 2;
+ short FREELOGIDS_PAGE = 3;
+ short FREEPHYSIDS_PAGE = 4;
+ short FREEPHYSIDS_ROOT_PAGE = 5;
+
+ /**
+ * Number of lists in a file
+ */
+ short NLISTS = 6;
+
+ /**
+ * Magic for transaction file
+ */
+ short LOGFILE_HEADER = 0x1360;
+
+ /**
+ * Size of an externalized byte
+ */
+ short SZ_BYTE = 1;
+ /**
+ * Size of an externalized short
+ */
+ short SZ_SHORT = 2;
+
+ /**
+ * Size of an externalized int
+ */
+ short SZ_INT = 4;
+ /**
+ * Size of an externalized long
+ */
+ short SZ_LONG = 8;
+
+ /**
+ * Size of an externalized six-byte long
+ */
+ short SZ_SIX_BYTE_LONG = 6;
+
+ /** offsets in file header (zero page in file) */
+ short FILE_HEADER_O_MAGIC = 0; // short magic
+ short FILE_HEADER_O_LISTS = Magic.SZ_SHORT; // long[2*NLISTS]
+ int FILE_HEADER_O_ROOTS = FILE_HEADER_O_LISTS
+ + (Magic.NLISTS * 2 * Magic.SZ_LONG);
+ /**
+ * The number of "root" rowids available in the file.
+ */
+ int FILE_HEADER_NROOTS = 16;
+
+ short PAGE_HEADER_O_MAGIC = 0; // short magic
+ short PAGE_HEADER_O_NEXT = Magic.SZ_SHORT;
+ short PAGE_HEADER_O_PREV = PAGE_HEADER_O_NEXT + Magic.SZ_SIX_BYTE_LONG;
+ short PAGE_HEADER_SIZE = PAGE_HEADER_O_PREV + Magic.SZ_SIX_BYTE_LONG;
+
+ short PhysicalRowId_O_LOCATION = 0; // long page
+ // short PhysicalRowId_O_OFFSET = Magic.SZ_SIX_BYTE_LONG; // short offset
+ int PhysicalRowId_SIZE = Magic.SZ_SIX_BYTE_LONG;
+
+ short DATA_PAGE_O_FIRST = PAGE_HEADER_SIZE; // short firstrowid
+ short DATA_PAGE_O_DATA = (short) (DATA_PAGE_O_FIRST + Magic.SZ_SHORT);
+ short DATA_PER_PAGE = (short) (Storage.PAGE_SIZE - DATA_PAGE_O_DATA);
+
+}
Index: graph/src/main/java/org/apache/jdbm/RecordListener.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/RecordListener.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/RecordListener.java (Arbeitskopie)
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.IOException;
+
+/**
+ * A listener notified when a record is inserted, updated or removed.
+ *

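+ * <p/>
+ * A minimal implementing sketch (illustrative only; raw types for brevity):
+ * <pre>
+ * RecordListener printing = new RecordListener() {
+ *   public void recordInserted(Object key, Object value) { System.out.println("ins " + key); }
+ *   public void recordUpdated(Object key, Object oldValue, Object newValue) { System.out.println("upd " + key); }
+ *   public void recordRemoved(Object key, Object value) { System.out.println("del " + key); }
+ * };
+ * </pre>
+ * <p/>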
+ * NOTE: this class was used in JDBM2 to support secondary indexes. JDBM3 does
+ * not have secondary indexes, so this class is not publicly exposed.
+ *
+ * @param <K> key type
+ * @param <V> value type
+ */
+public interface RecordListener<K, V> {
+
+ void recordInserted(K key, V value) throws IOException;
+
+ void recordUpdated(K key, V oldValue, V newValue) throws IOException;
+
+ void recordRemoved(K key, V value) throws IOException;
+
+}
Index: graph/src/main/java/org/apache/jdbm/DB.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/DB.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/DB.java (Arbeitskopie)
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.NavigableSet;
+import java.util.Set;
+
+/**
+ * Database is the root class for creating and loading persistent collections.
+ * It also contains transaction operations.
+ *

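+ * <p/>
+ * Typical usage (a minimal sketch; the file name is illustrative and raw
+ * types are used for brevity):
+ * <pre>
+ * DB db = DBMaker.openFile("mydb").make();
+ * Map map = db.createHashMap("test");
+ * map.put("key", "value");
+ * db.commit(); // make the change persistent
+ * db.close();
+ * </pre>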
+ */
+public interface DB {
+
+ /**
+ * Closes the DB and releases resources. The DB cannot be used after it has
+ * been closed.
+ */
+ void close();
+
+ /** @return true if db was already closed */
+ boolean isClosed();
+
+ /**
+ * Clears the cache and removes all entries it contains. This may help
+ * garbage collection when a reference cache is used.
+ */
+ void clearCache();
+
+ /**
+ * Defragments storage so it consumes less space. It basically copies all
+ * records into a different store and then renames it, replacing the
+ * original store.
+ *

+ * Defrag has two steps: in the first, collections are rearranged so that
+ * records belonging to one collection are close to each other and read speed
+ * improves. In the second step all records are transferred sequentially,
+ * reclaiming all unused space. The first step is optional and may slow down
+ * defragmentation significantly, as it requires many random-access reads. The
+ * second step reads and writes data sequentially and is very fast, comparable
+ * to copying a file to a new location.
+ *

+ * This commits any uncommitted data. Defrag also requires free space, as the
+ * store is basically recreated at a new location.
+ *
+ * @param sortCollections if collection records should be rearranged during
+ * defragment, this takes some extra time
+ */
+ void defrag(boolean sortCollections);
+
+ /**
+ * Commit (make persistent) all changes since the beginning of the
+ * transaction. JDBM supports only a single transaction.
+ */
+ void commit();
+
+ /**
+ * Rollback (cancel) all changes since the beginning of the transaction.
+ * JDBM supports only a single transaction. This operation affects all maps
+ * created or loaded by this DB.
+ */
+ void rollback();
+
+ /**
+ * Calculates some database statistics such as collection sizes and record
+ * distributions. Can be useful for performance optimization and
+ * troubleshooting. This method can run for a very long time.
+ *
+ * @return statistics contained in a string
+ */
+ String calculateStatistics();
+
+ /**
+ * Copy database content into ZIP file
+ *
+ * @param zipFile
+ */
+ void copyToZip(String zipFile);
+
+ /**
+ * Get a Map which was already created and saved in DB. This map
+ * uses a disk based H*Tree and should have similar performance as
+ * HashMap.
+ *
+ * @param name of hash map
+ *
+ * @return map
+ */
+ <K, V> Map<K, V> getHashMap(String name);
+
+ /**
+ * Creates a Map which persists data into DB.
+ *
+ * @param name record name
+ * @return map
+ */
+ <K, V> Map<K, V> createHashMap(String name);
+
+ /**
+ * Creates a Hash Map which persists data into DB. The map will use custom
+ * serializers for keys and values. Leave keySerializer null to use the
+ * default serializer for keys.
+ *
+ * @param <K> key type
+ * @param <V> value type
+ * @param name record name
+ * @param keySerializer serializer to be used for keys, leave null to use
+ * default serializer
+ * @param valueSerializer serializer to be used for values
+ * @return map
+ */
+ <K, V> Map<K, V> createHashMap(String name, Serializer<K> keySerializer,
+ Serializer<V> valueSerializer);
+
+ <K> Set<K> createHashSet(String name);
+
+ <K> Set<K> getHashSet(String name);
+
+ <K> Set<K> createHashSet(String name, Serializer<K> keySerializer);
+
+ <K, V> NavigableMap<K, V> getTreeMap(String name);
+
+ /**
+ * Create a TreeMap which persists data into DB.
+ *
+ * @param <K> key type
+ * @param <V> value type
+ * @param name record name
+ * @return map
+ */
+ <K extends Comparable<K>, V> NavigableMap<K, V> createTreeMap(String name);
+
+ /**
+ * Creates a TreeMap which persists data into DB.
+ *
+ * @param <K> key type
+ * @param <V> value type
+ * @param name record name
+ * @param keyComparator Comparator used to sort keys
+ * @param keySerializer Serializer used for keys. This may reduce disk space
+ * usage
+ * @param valueSerializer Serializer used for values. This may reduce disk
+ * space usage
+ * @return map
+ */
+ <K, V> NavigableMap<K, V> createTreeMap(String name,
+ Comparator<K> keyComparator, Serializer<K> keySerializer,
+ Serializer<V> valueSerializer);
+
+ <K> NavigableSet<K> getTreeSet(String name);
+
+ <K> NavigableSet<K> createTreeSet(String name);
+
+ <K> NavigableSet<K> createTreeSet(String name, Comparator<K> keyComparator,
+ Serializer<K> keySerializer);
+
+ <K> List<K> createLinkedList(String name);
+
+ <K> List<K> createLinkedList(String name, Serializer<K> serializer);
+
+ <K> List<K> getLinkedList(String name);
+
+ /**
+ * Returns an unmodifiable map which contains all collection names and the
+ * collections themselves.
+ */
+ Map<String, Object> getCollections();
+
+ /** completely removes a collection from the store */
+ void deleteCollection(String name);
+
+ /**
+ * Java Collections return their size as an int. This may not be enough for
+ * JDBM collections. This method returns the number of elements in a JDBM
+ * collection as a long.
+ *
+ * @param collection created by JDBM
+ * @return number of elements in collection as long
+ */
+ long collectionSize(Object collection);
+
+}
Index: graph/src/main/java/org/apache/jdbm/RecordHeader.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/RecordHeader.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/RecordHeader.java (Arbeitskopie)
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+/**
+ * The data that comes at the start of a record of data. It stores both the
+ * current size and the available size for the record - the latter can be
+ * bigger than the former, which allows the record to grow without needing to
+ * be moved and which allows the system to put small records in larger free
+ * spots.
+ *

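+ * <p/>
+ * A worked example of the compressed encoding (numbers derived from
+ * convertAvailSize/deconvertAvailSize below): requesting an available size of
+ * 100000 gives shift = 100000 - 32767 = 67233 bytes, rounded up to
+ * 67233 / 254 + 1 = 265 units and stored as the short -265; decoding yields
+ * 32767 + 265 * 254 = 100077, the rounded-up available size. The largest
+ * encodable value is 32767 + 32768 * 254 = 8355839 bytes, which is exactly
+ * MAX_RECORD_SIZE.
+ * <p/>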
+ * In JDBM 1.0 both values were stored as four-byte integers. This was very
+ * wasteful. Now the available size is stored compressed in two bytes (the
+ * largest representable record is MAX_RECORD_SIZE = 8355839 bytes), and the
+ * current size is stored as a one-byte difference from the available size.
+ */
+public final class RecordHeader {
+ // offsets
+ private static final short O_CURRENTSIZE = 0; // byte currentSize
+ private static final short O_AVAILABLESIZE = Magic.SZ_BYTE; // short
+ // availableSize
+ static final int MAX_RECORD_SIZE = 8355839;
+ static final int SIZE = O_AVAILABLESIZE + Magic.SZ_SHORT;
+ /**
+ * Maximal difference between current and available size. The maximal value
+ * is reserved for currentSize 0, hence the -1.
+ */
+ static final int MAX_SIZE_SPACE = 255 - 1;
+
+ /**
+ * Returns the current size
+ */
+ static int getCurrentSize(final PageIo page, final short pos) {
+ int s = page.readByte(pos + O_CURRENTSIZE) & 0xFF;
+ if (s == MAX_SIZE_SPACE + 1)
+ return 0;
+ return getAvailableSize(page, pos) - s;
+ }
+
+ /**
+ * Sets the current size
+ */
+ static void setCurrentSize(final PageIo page, final short pos, int value) {
+ if (value == 0) {
+ page.writeByte(pos + O_CURRENTSIZE, (byte) (MAX_SIZE_SPACE + 1));
+ return;
+ }
+ int availSize = getAvailableSize(page, pos);
+ if (value < (availSize - MAX_SIZE_SPACE) || value > availSize)
+ throw new IllegalArgumentException(
+ "currentSize out of bounds, need to reallocate " + value + " - "
+ + availSize);
+ page.writeByte(pos + O_CURRENTSIZE, (byte) (availSize - value));
+ }
+
+ /**
+ * Returns the available size
+ */
+ static int getAvailableSize(final PageIo page, final short pos) {
+ return deconvertAvailSize(page.readShort(pos + O_AVAILABLESIZE));
+ }
+
+ /**
+ * Sets the available size
+ */
+ static void setAvailableSize(final PageIo page, final short pos, int value) {
+ if (value != roundAvailableSize(value))
+ throw new IllegalArgumentException("value is not rounded");
+ int oldCurrSize = getCurrentSize(page, pos);
+
+ page.writeShort(pos + O_AVAILABLESIZE, convertAvailSize(value));
+ setCurrentSize(page, pos, oldCurrSize);
+ }
+
+ static short convertAvailSize(final int recordSize) {
+ if (recordSize <= Short.MAX_VALUE)
+ return (short) recordSize;
+ else {
+ int shift = recordSize - Short.MAX_VALUE;
+ if (shift % MAX_SIZE_SPACE == 0)
+ shift = shift / MAX_SIZE_SPACE;
+ else
+ shift = 1 + shift / MAX_SIZE_SPACE;
+ shift = -shift;
+ return (short) (shift);
+ }
+
+ }
+
+ static int deconvertAvailSize(final short converted) {
+ if (converted >= 0)
+ return converted;
+ else {
+ int shifted = -converted;
+ shifted = shifted * MAX_SIZE_SPACE;
+ return Short.MAX_VALUE + shifted;
+ }
+
+ }
+
+ static int roundAvailableSize(int value) {
+ if (value > MAX_RECORD_SIZE)
+ throw new InternalError("Maximal record size (" + MAX_RECORD_SIZE
+ + ") exceeded: " + value);
+ return deconvertAvailSize(convertAvailSize(value));
+ }
+
+}
Index: graph/src/main/java/org/apache/jdbm/AdvancedObjectInputStream.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/AdvancedObjectInputStream.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/AdvancedObjectInputStream.java (Arbeitskopie)
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.DataInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInput; +import java.util.ArrayList; + +/** + * An alternative to java.io.ObjectInputStream which uses more + * efficient serialization + */ +public final class AdvancedObjectInputStream extends DataInputStream implements + ObjectInput { + + public AdvancedObjectInputStream(InputStream in) { + super(in); + } + + @Override + public Object readObject() throws ClassNotFoundException, IOException { + // first read class data + ArrayList info = SerialClassInfo.serializer + .deserialize(this); + + Serialization ser = new Serialization(null, 0, info); + return ser.deserialize(this); + } +} Index: graph/src/main/java/org/apache/jdbm/HTreeDirectory.java =================================================================== --- graph/src/main/java/org/apache/jdbm/HTreeDirectory.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/HTreeDirectory.java (Arbeitskopie) @@ -0,0 +1,601 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jdbm; + +import java.io.DataOutput; +import java.io.IOError; +import java.io.IOException; +import java.util.ArrayList; +import java.util.ConcurrentModificationException; +import java.util.Iterator; +import java.util.NoSuchElementException; + +/** + * Hashtable directory page. + */ +public final class HTreeDirectory { + + /** + * Maximum number of children in a directory. + *

+ * (Must be a power of 2 -- if you update this value, you must also update + * BIT_SIZE and MAX_DEPTH.) + *

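+ * <p/>
+ * The 256 child recids are kept in a 32 x 8 long[][] matrix (see _children
+ * and getRecid/putRecid below), so completely empty 8-slot subarrays can
+ * stay null and cost no memory.
+ * <p/>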
+ * !!!! do not change this, it affects storage format, there are also magic
+ * numbers which rely on 255 !!!
+ */
+ static final int MAX_CHILDREN = 256;
+
+ /**
+ * Number of significant bits per directory level.
+ */
+ static final int BIT_SIZE = 8; // log2(256) = 8
+
+ /**
+ * Maximum number of levels (zero-based)
+ *

+ * (4 * 8 bits = 32 bits, which is the size of an "int", and as you know,
+ * hashcodes in Java are "ints")
+ */
+ static final int MAX_DEPTH = 3; // 4 levels
+
+ /**
+ * Record ids of child nodes. They are kept in a matrix to save memory; some
+ * subarrays may be null.
+ */
+ private long[][] _children;
+
+ /**
+ * Depth of this directory page, zero-based
+ */
+ private byte _depth;
+
+ /**
+ * This directory's record ID in the DB. (transient)
+ */
+ private long _recid;
+
+ /** if this is root (depth=0), it contains size, otherwise -1 */
+ long size;
+
+ protected final HTree tree;
+
+ /**
+ * Public constructor used by serialization
+ */
+ public HTreeDirectory(HTree tree) {
+ this.tree = tree;
+ }
+
+ /**
+ * Construct a HashDirectory
+ *
+ * @param depth Depth of this directory node.
+ */
+ HTreeDirectory(HTree tree, byte depth) {
+ this.tree = tree;
+ _depth = depth;
+ _children = new long[32][];
+ }
+
+ /**
+ * Sets persistence context. This method must be called before any
+ * persistence-related operation.
+ *
+ * @param recid Record id of this directory.
+ */
+ void setPersistenceContext(long recid) {
+ this._recid = recid;
+ }
+
+ /**
+ * Get the record identifier used to load this hashtable.
+ */
+ long getRecid() {
+ return _recid;
+ }
+
+ /**
+ * Returns whether or not this directory is empty. A directory is empty when
+ * it no longer contains buckets or sub-directories.
+ */
+ boolean isEmpty() {
+ for (int i = 0; i < _children.length; i++) {
+ long[] sub = _children[i];
+ if (sub != null) {
+ for (int j = 0; j < 8; j++) {
+ if (sub[j] != 0) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Returns the value which is associated with the given key. Returns
+ * null if no association exists for this key.
+ *
+ * @param key key whose associated value is to be returned
+ */
+ V get(K key) throws IOException {
+ int hash = hashCode(key);
+ long child_recid = getRecid(hash);
+ if (child_recid == 0) {
+ // not bucket/node --> not found
+ return null;
+ } else {
+ Object node = tree.db.fetch(child_recid, tree.SERIALIZER);
+ // System.out.println("HashDirectory.get() child is : "+node);
+
+ if (node instanceof HTreeDirectory) {
+ // recurse into next directory level
+ HTreeDirectory<K, V> dir = (HTreeDirectory<K, V>) node;
+ dir.setPersistenceContext(child_recid);
+ return dir.get(key);
+ } else {
+ // node is a bucket
+ HTreeBucket<K, V> bucket = (HTreeBucket<K, V>) node;
+ return bucket.getValue(key);
+ }
+ }
+ }
+
+ private long getRecid(int hash) {
+ long[] sub = _children[hash >>> 3];
+ return sub == null ? 0 : sub[hash % 8];
+ }
+
+ private void putRecid(int hash, long recid) {
+ long[] sub = _children[hash >>> 3];
+ if (sub == null) {
+ sub = new long[8];
+ _children[hash >>> 3] = sub;
+ }
+ sub[hash % 8] = recid;
+ }
+
+ /**
+ * Associates the specified value with the specified key.
+ *
+ * @param key key with which the specified value is to be associated.
+ * @param value value to be associated with the specified key.
+ * @return object which was previously associated with the given key, or
+ * null if no association existed.
+ */ + Object put(final Object key, final Object value) throws IOException { + if (value == null) { + return remove(key); + } + int hash = hashCode(key); + long child_recid = getRecid(hash); + if (child_recid == 0) { + // no bucket/node here yet, let's create a bucket + HTreeBucket bucket = new HTreeBucket(tree, (byte) (_depth + 1)); + + // insert (key,value) pair in bucket + Object existing = bucket.addElement(key, value); + + long b_recid = tree.db.insert(bucket, tree.SERIALIZER, false); + putRecid(hash, b_recid); + + tree.db.update(_recid, this, tree.SERIALIZER); + + // System.out.println("Added: "+bucket); + return existing; + } else { + Object node = tree.db.fetch(child_recid, tree.SERIALIZER); + + if (node instanceof HTreeDirectory) { + // recursive insert in next directory level + HTreeDirectory dir = (HTreeDirectory) node; + dir.setPersistenceContext(child_recid); + return dir.put(key, value); + } else { + // node is a bucket + HTreeBucket bucket = (HTreeBucket) node; + if (bucket.hasRoom()) { + Object existing = bucket.addElement(key, value); + tree.db.update(child_recid, bucket, tree.SERIALIZER); + // System.out.println("Added: "+bucket); + return existing; + } else { + // overflow, so create a new directory + if (_depth == MAX_DEPTH) { + throw new RuntimeException("Cannot create deeper directory. " + + "Depth=" + _depth); + } + HTreeDirectory dir = new HTreeDirectory(tree, (byte) (_depth + 1)); + long dir_recid = tree.db.insert(dir, tree.SERIALIZER, false); + dir.setPersistenceContext(dir_recid); + + putRecid(hash, dir_recid); + tree.db.update(_recid, this, tree.SERIALIZER); + + // discard overflown bucket + tree.db.delete(child_recid); + + // migrate existing bucket elements + ArrayList keys = bucket.getKeys(); + ArrayList values = bucket.getValues(); + int entries = keys.size(); + for (int i = 0; i < entries; i++) { + dir.put(keys.get(i), values.get(i)); + } + + // (finally!) insert new element + return dir.put(key, value); + } + } + } + } + + /** + * Remove the value which is associated with the given key. If the key does + * not exist, this method simply ignores the operation. + * + * @param key key whose associated value is to be removed + * @return object which was associated with the given key, or + * null if no association existed with given key. 
+ */ + Object remove(Object key) throws IOException { + int hash = hashCode(key); + long child_recid = getRecid(hash); + if (child_recid == 0) { + // not bucket/node --> not found + return null; + } else { + Object node = tree.db.fetch(child_recid, tree.SERIALIZER); + // System.out.println("HashDirectory.remove() child is : "+node); + + if (node instanceof HTreeDirectory) { + // recurse into next directory level + HTreeDirectory dir = (HTreeDirectory) node; + dir.setPersistenceContext(child_recid); + Object existing = dir.remove(key); + if (existing != null) { + if (dir.isEmpty()) { + // delete empty directory + tree.db.delete(child_recid); + putRecid(hash, 0); + tree.db.update(_recid, this, tree.SERIALIZER); + } + } + return existing; + } else { + // node is a bucket + HTreeBucket bucket = (HTreeBucket) node; + Object existing = bucket.removeElement(key); + if (existing != null) { + if (bucket.getElementCount() >= 1) { + tree.db.update(child_recid, bucket, tree.SERIALIZER); + } else { + // delete bucket, it's empty + tree.db.delete(child_recid); + putRecid(hash, 0); + tree.db.update(_recid, this, tree.SERIALIZER); + } + } + return existing; + } + } + } + + /** + * Calculates the hashcode of a key, based on the current directory depth. + */ + private int hashCode(Object key) { + int hashMask = hashMask(); + int hash = key.hashCode(); + hash = hash & hashMask; + hash = hash >>> ((MAX_DEPTH - _depth) * BIT_SIZE); + hash = hash % MAX_CHILDREN; + /* + * System.out.println("HashDirectory.hashCode() is: 0x" + * +Integer.toHexString(hash) +" for object hashCode() 0x" + * +Integer.toHexString(key.hashCode())); + */ + return hash; + } + + /** + * Calculates the hashmask of this directory. The hashmask is the bit mask + * applied to a hashcode to retain only bits that are relevant to this + * directory level. 
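+ * <p/>
+ * For example, at depth 0 the mask is 0xFF000000, so hashCode() keeps the
+ * top 8 bits of the key's hash; at depth 1 the mask is 0x00FF0000, and so
+ * on: each directory level consumes the next BIT_SIZE bits of the 32-bit
+ * hash.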
+ */ + int hashMask() { + int bits = MAX_CHILDREN - 1; + int hashMask = bits << ((MAX_DEPTH - _depth) * BIT_SIZE); + /* + * System.out.println("HashDirectory.hashMask() is: 0x" + * +Integer.toHexString(hashMask)); + */ + return hashMask; + } + + /** + * Returns an enumeration of the keys contained in this + */ + Iterator keys() throws IOException { + return new HDIterator(true); + } + + /** + * Returns an enumeration of the values contained in this + */ + Iterator values() throws IOException { + return new HDIterator(false); + } + + public void writeExternal(DataOutput out) throws IOException { + out.writeByte(_depth); + if (_depth == 0) { + LongPacker.packLong(out, size); + } + + int zeroStart = 0; + for (int i = 0; i < MAX_CHILDREN; i++) { + if (getRecid(i) != 0) { + zeroStart = i; + break; + } + } + + out.write(zeroStart); + if (zeroStart == MAX_CHILDREN) + return; + + int zeroEnd = 0; + for (int i = MAX_CHILDREN - 1; i >= 0; i--) { + if (getRecid(i) != 0) { + zeroEnd = i; + break; + } + } + out.write(zeroEnd); + + for (int i = zeroStart; i <= zeroEnd; i++) { + LongPacker.packLong(out, getRecid(i)); + } + } + + public void readExternal(DataInputOutput in) throws IOException, + ClassNotFoundException { + _depth = in.readByte(); + if (_depth == 0) + size = LongPacker.unpackLong(in); + else + size = -1; + + _children = new long[32][]; + int zeroStart = in.readUnsignedByte(); + int zeroEnd = in.readUnsignedByte(); + + for (int i = zeroStart; i <= zeroEnd; i++) { + long recid = LongPacker.unpackLong(in); + if (recid != 0) + putRecid(i, recid); + } + + } + + public void defrag(DBStore r1, DBStore r2) throws IOException, + ClassNotFoundException { + for (long[] sub : _children) { + if (sub == null) + continue; + for (long child : sub) { + if (child == 0) + continue; + byte[] data = r1.fetchRaw(child); + r2.forceInsert(child, data); + Object t = tree.SERIALIZER.deserialize(new DataInputOutput(data)); + if (t instanceof HTreeDirectory) { + ((HTreeDirectory) t).defrag(r1, r2); + } + } + } + } + + void deleteAllChildren() throws IOException { + for (long[] ll : _children) { + if (ll != null) { + for (long l : ll) { + if (l != 0) { + tree.db.delete(l); + } + } + } + } + + } + + // ////////////////////////////////////////////////////////////////////// + // INNER CLASS + // ////////////////////////////////////////////////////////////////////// + + /** + * Utility class to enumerate keys/values in a HTree + */ + class HDIterator implements Iterator { + + /** + * True if we're iterating on keys, False if enumerating on values. + */ + private boolean _iterateKeys; + + /** + * Stacks of directories & last enumerated child position + */ + private ArrayList _dirStack; + private ArrayList _childStack; + + /** + * Current HashDirectory in the hierarchy + */ + private HTreeDirectory _dir; + + /** + * Current child position + */ + private int _child; + + /** + * Current bucket iterator + */ + private Iterator _iter; + + private A next; + + /** + * last item returned in next(), is used to remove() last item + */ + private A last; + + private int expectedModCount; + + /** + * Construct an iterator on this directory. + * + * @param iterateKeys True if iteration supplies keys, False if iterateKeys + * supplies values. 
+ */ + HDIterator(boolean iterateKeys) throws IOException { + _dirStack = new ArrayList(); + _childStack = new ArrayList(); + _dir = HTreeDirectory.this; + _child = -1; + _iterateKeys = iterateKeys; + expectedModCount = tree.modCount; + + prepareNext(); + next = next2(); + + } + + /** + * Returns the next object. + */ + public A next2() { + A next = null; + if (_iter != null && _iter.hasNext()) { + next = _iter.next(); + } else { + try { + prepareNext(); + } catch (IOException except) { + throw new IOError(except); + } + if (_iter != null && _iter.hasNext()) { + return next2(); + } + } + return next; + } + + /** + * Prepare internal state so we can answer hasMoreElements + *

+ * Actually, this code prepares an Enumeration on the next Bucket to + * enumerate. If no following bucket is found, the next Enumeration is set + * to null. + */ + private void prepareNext() throws IOException { + long child_recid = 0; + + // get next bucket/directory to enumerate + do { + _child++; + if (_child >= MAX_CHILDREN) { + + if (_dirStack.isEmpty()) { + // no more directory in the stack, we're finished + return; + } + + // try next node + _dir = (HTreeDirectory) _dirStack.remove(_dirStack.size() - 1); + _child = ((Integer) _childStack.remove(_childStack.size() - 1)) + .intValue(); + continue; + } + child_recid = _dir.getRecid(_child); + } while (child_recid == 0); + + if (child_recid == 0) { + throw new Error("child_recid cannot be 0"); + } + + Object node = tree.db.fetch(child_recid, tree.SERIALIZER); + // System.out.println("HDEnumeration.get() child is : "+node); + + if (node instanceof HTreeDirectory) { + // save current position + _dirStack.add(_dir); + _childStack.add(new Integer(_child)); + + _dir = (HTreeDirectory) node; + _child = -1; + + // recurse into + _dir.setPersistenceContext(child_recid); + prepareNext(); + } else { + // node is a bucket + HTreeBucket bucket = (HTreeBucket) node; + if (_iterateKeys) { + ArrayList keys2 = bucket.getKeys(); + _iter = keys2.iterator(); + } else { + _iter = bucket.getValues().iterator(); + } + } + } + + public boolean hasNext() { + return next != null; + } + + public A next() { + if (next == null) + throw new NoSuchElementException(); + if (expectedModCount != tree.modCount) + throw new ConcurrentModificationException(); + last = next; + next = next2(); + return last; + } + + public void remove() { + if (last == null) + throw new IllegalStateException(); + + if (expectedModCount != tree.modCount) + throw new ConcurrentModificationException(); + + // TODO current delete behaviour may change node layout. INVESTIGATE if + // this can happen! + tree.remove(last); + last = null; + expectedModCount++; + } + } + +} Index: graph/src/main/java/org/apache/jdbm/JDBMUtils.java =================================================================== --- graph/src/main/java/org/apache/jdbm/JDBMUtils.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/JDBMUtils.java (Arbeitskopie) @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jdbm; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOError; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Comparator; + +import javax.crypto.Cipher; + +/** + * Various utilities used in JDBM + */ +public final class JDBMUtils { + + /** + * empty string is used as dummy value to represent null values in HashSet and + * TreeSet + */ + static final String EMPTY_STRING = ""; + + public static byte[] encrypt(Cipher cipherIn, ByteBuffer b) { + if (cipherIn == null && b.hasArray()) + return b.array(); + byte[] bb = new byte[Storage.PAGE_SIZE]; + b.rewind(); + b.get(bb, 0, Storage.PAGE_SIZE); + return encrypt(cipherIn, bb); + } + + public static byte[] encrypt(Cipher cipherIn, byte[] b) { + if (cipherIn == null) + return b; + + try { + return cipherIn.doFinal(b); + } catch (Exception e) { + throw new IOError(e); + } + + } + + /** + * Compares comparables. Default comparator for most of java types + */ + static final Comparator COMPARABLE_COMPARATOR = new Comparator() { + public int compare(Comparable o1, Comparable o2) { + return o1 == null && o2 != null ? -1 : (o1 != null && o2 == null ? 1 : o1 + .compareTo(o2)); + } + }; + + static String formatSpaceUsage(long size) { + if (size < 1e4) + return size + "B"; + else if (size < 1e7) + return "" + Math.round(1D * size / 1024D) + "KB"; + else if (size < 1e10) + return "" + Math.round(1D * size / 1e6) + "MB"; + else + return "" + Math.round(1D * size / 1e9) + "GB"; + } + + static boolean allZeros(byte[] b) { + for (int i = 0; i < b.length; i++) { + if (b[i] != 0) + return false; + } + return true; + } + + static E max(E e1, E e2, Comparator comp) { + if (e1 == null) + return e2; + if (e2 == null) + return e1; + + if (comp == null) + comp = COMPARABLE_COMPARATOR; + return comp.compare(e1, e2) < 0 ? e2 : e1; + } + + static E min(E e1, E e2, Comparator comp) { + if (e1 == null) + return e2; + if (e2 == null) + return e1; + + if (comp == null) + comp = COMPARABLE_COMPARATOR; + + return comp.compare(e1, e2) > 0 ? e2 : e1; + } + + static final Serializer NULL_SERIALIZER = new Serializer() { + public void serialize(DataOutput out, Object obj) throws IOException { + out.writeByte(11); + } + + public Object deserialize(DataInput in) throws IOException, + ClassNotFoundException { + in.readByte(); + return null; + } + }; + +} Index: graph/src/main/java/org/apache/jdbm/PageIo.java =================================================================== --- graph/src/main/java/org/apache/jdbm/PageIo.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/PageIo.java (Arbeitskopie) @@ -0,0 +1,449 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.jdbm;
+
+import static org.apache.jdbm.Magic.DATA_PAGE_O_DATA;
+import static org.apache.jdbm.Magic.DATA_PAGE_O_FIRST;
+import static org.apache.jdbm.Magic.FILE_HEADER;
+import static org.apache.jdbm.Magic.FILE_HEADER_O_LISTS;
+import static org.apache.jdbm.Magic.FILE_HEADER_O_MAGIC;
+import static org.apache.jdbm.Magic.FILE_HEADER_O_ROOTS;
+import static org.apache.jdbm.Magic.PAGE_HEADER_O_MAGIC;
+import static org.apache.jdbm.Magic.PAGE_HEADER_O_NEXT;
+import static org.apache.jdbm.Magic.PAGE_HEADER_O_PREV;
+import static org.apache.jdbm.Magic.PhysicalRowId_O_LOCATION;
+
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.IOError;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import javax.crypto.Cipher;
+
+/**
+ * Wraps a page-sized ByteBuffer for reading and writing.
+ *

+ * The ByteBuffer may be a subview of a larger buffer (i.e. a large buffer
+ * mapped over a file). In this case the ByteBuffer has its limit, mark and
+ * other variables set to restrict its size.
+ *

+ * For reading, buffers may be shared. For example, StoreMemory just returns
+ * its pages without copying. In this case the buffer is marked as 'readonly'
+ * and needs to be copied before a write (Copy On Write - COW). COW is not
+ * necessary if transactions are disabled and changes cannot be rolled back.
+ *

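+ * <p/>
+ * A minimal usage sketch (the page id, offset and value are illustrative):
+ * <pre>
+ * PageIo page = new PageIo(1L, new byte[Storage.PAGE_SIZE]);
+ * page.writeSixByteLong(Magic.PAGE_HEADER_O_NEXT, 42L);
+ * long next = page.readSixByteLong(Magic.PAGE_HEADER_O_NEXT); // 42
+ * boolean dirty = page.isDirty(); // true - any write sets the dirty flag
+ * </pre>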
+ */
+public final class PageIo {
+
+ private long pageId;
+
+ private ByteBuffer data; // work area
+
+ /** buffer contains changes which were not yet written to disk. */
+ private boolean dirty = false;
+
+ private int transactionCount = 0;
+
+ /**
+ * Default constructor for serialization
+ */
+ public PageIo() {
+ // empty
+ }
+
+ /**
+ * Constructs a new PageIo instance working on the indicated buffer.
+ */
+ PageIo(long pageId, byte[] data) {
+ this.pageId = pageId;
+ this.data = ByteBuffer.wrap(data);
+ }
+
+ public PageIo(long pageId, ByteBuffer data) {
+ this.pageId = pageId;
+ this.data = data;
+ }
+
+ /**
+ * Frequent reads on a direct buffer may be slower than on a heap buffer.
+ * This method converts a native direct buffer to a heap buffer.
+ */
+ void ensureHeapBuffer() {
+ if (data.isDirect()) {
+ final byte[] bb = new byte[Storage.PAGE_SIZE];
+ data.get(bb, 0, Storage.PAGE_SIZE);
+ data = ByteBuffer.wrap(bb);
+ if (data.isReadOnly())
+ throw new InternalError();
+ }
+
+ }
+
+ /**
+ * Returns the underlying ByteBuffer
+ */
+ ByteBuffer getData() {
+ return data;
+ }
+
+ /**
+ * Returns the page number.
+ */
+ long getPageId() {
+ return pageId;
+ }
+
+ /**
+ * Sets the dirty flag
+ */
+ void setDirty() {
+ dirty = true;
+
+ if (data.isReadOnly()) {
+ // make copy if needed, so we can write into buffer
+ byte[] buf = new byte[Storage.PAGE_SIZE];
+ data.get(buf, 0, Storage.PAGE_SIZE);
+ data = ByteBuffer.wrap(buf);
+ }
+ }
+
+ /**
+ * Clears the dirty flag
+ */
+ void setClean() {
+ dirty = false;
+ }
+
+ /**
+ * Returns true if the dirty flag is set.
+ */
+ boolean isDirty() {
+ return dirty;
+ }
+
+ /**
+ * Returns true if the page is still dirty with respect to the transaction
+ * log.
+ */
+ boolean isInTransaction() {
+ return transactionCount != 0;
+ }
+
+ /**
+ * Increments the transaction count for this page, to signal that this page
+ * is in the log but not yet in the data file.
+ */
+ void incrementTransactionCount() {
+ transactionCount++;
+ }
+
+ /**
+ * Decrements the transaction count for this page, to signal that this page
+ * has been written from the log to the data file.
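+ * The count is raised by TransactionManager.add() when the page joins a
+ * transaction and lowered once the page has been flushed from the log, so a
+ * non-zero count means the page still has pending log entries (see
+ * isInTransaction).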
+ */ + void decrementTransactionCount() { + transactionCount--; + if (transactionCount < 0) + throw new Error("transaction count on page " + getPageId() + + " below zero!"); + + } + + /** + * Reads a byte from the indicated position + */ + public byte readByte(int pos) { + return data.get(pos); + } + + /** + * Writes a byte to the indicated position + */ + public void writeByte(int pos, byte value) { + setDirty(); + data.put(pos, value); + } + + /** + * Reads a short from the indicated position + */ + public short readShort(int pos) { + return data.getShort(pos); + } + + /** + * Writes a short to the indicated position + */ + public void writeShort(int pos, short value) { + setDirty(); + data.putShort(pos, value); + } + + /** + * Reads an int from the indicated position + */ + public int readInt(int pos) { + return data.getInt(pos); + } + + /** + * Writes an int to the indicated position + */ + public void writeInt(int pos, int value) { + setDirty(); + data.putInt(pos, value); + } + + /** + * Reads a long from the indicated position + */ + public long readLong(int pos) { + return data.getLong(pos); + } + + /** + * Writes a long to the indicated position + */ + public void writeLong(int pos, long value) { + setDirty(); + data.putLong(pos, value); + } + + /** + * Reads a long from the indicated position + */ + public long readSixByteLong(int pos) { + long ret = ((long) (data.get(pos + 0) & 0x7f) << 40) + | ((long) (data.get(pos + 1) & 0xff) << 32) + | ((long) (data.get(pos + 2) & 0xff) << 24) + | ((long) (data.get(pos + 3) & 0xff) << 16) + | ((long) (data.get(pos + 4) & 0xff) << 8) + | ((long) (data.get(pos + 5) & 0xff) << 0); + if ((data.get(pos + 0) & 0x80) != 0) + return -ret; + else + return ret; + + } + + /** + * Writes a long to the indicated position + */ + public void writeSixByteLong(int pos, long value) { + // if(value<0) throw new IllegalArgumentException(); + // if(value >> (6*8)!=0) + // throw new IllegalArgumentException("does not fit"); + int negativeBit = 0; + if (value < 0) { + value = -value; + negativeBit = 0x80; + } + + setDirty(); + data.put(pos + 0, (byte) ((0x7f & (value >> 40)) | negativeBit)); + data.put(pos + 1, (byte) (0xff & (value >> 32))); + data.put(pos + 2, (byte) (0xff & (value >> 24))); + data.put(pos + 3, (byte) (0xff & (value >> 16))); + data.put(pos + 4, (byte) (0xff & (value >> 8))); + data.put(pos + 5, (byte) (0xff & (value >> 0))); + + } + + // overrides java.lang.Object + + public String toString() { + return "PageIo(" + pageId + "," + dirty + ")"; + } + + public void readExternal(DataInputStream in, Cipher cipherOut) + throws IOException { + pageId = in.readLong(); + byte[] data2 = new byte[Storage.PAGE_SIZE]; + in.readFully(data2); + if (cipherOut == null || JDBMUtils.allZeros(data2)) + data = ByteBuffer.wrap(data2); + else + try { + data = ByteBuffer.wrap(cipherOut.doFinal(data2)); + } catch (Exception e) { + throw new IOError(e); + } + } + + public void writeExternal(DataOutput out, Cipher cipherIn) throws IOException { + out.writeLong(pageId); + out.write(JDBMUtils.encrypt(cipherIn, data.array())); + } + + public byte[] getByteArray() { + if (data.hasArray()) + return data.array(); + byte[] d = new byte[Storage.PAGE_SIZE]; + data.rewind(); + data.get(d, 0, Storage.PAGE_SIZE); + return d; + } + + public void writeByteArray(byte[] buf, int srcOffset, int offset, int length) { + setDirty(); + data.rewind(); + data.position(offset); + data.put(buf, srcOffset, length); + } + + public void fileHeaderCheckHead(boolean isNew) { + if (isNew) + 
writeShort(FILE_HEADER_O_MAGIC, Magic.FILE_HEADER);
+ else {
+ short magic = readShort(FILE_HEADER_O_MAGIC);
+ if (magic != FILE_HEADER)
+ throw new Error("CRITICAL: file header magic not OK " + magic);
+ }
+ }
+
+ /**
+ * Returns the first page of the indicated list
+ */
+ long fileHeaderGetFirstOf(int list) {
+ return readLong(fileHeaderOffsetOfFirst(list));
+ }
+
+ /**
+ * Sets the first page of the indicated list
+ */
+ void fileHeaderSetFirstOf(int list, long value) {
+ writeLong(fileHeaderOffsetOfFirst(list), value);
+ }
+
+ /**
+ * Returns the last page of the indicated list
+ */
+ long fileHeaderGetLastOf(int list) {
+ return readLong(fileHeaderOffsetOfLast(list));
+ }
+
+ /**
+ * Sets the last page of the indicated list
+ */
+ void fileHeaderSetLastOf(int list, long value) {
+ writeLong(fileHeaderOffsetOfLast(list), value);
+ }
+
+ /**
+ * Returns the offset of the "first" page of the indicated list
+ */
+ private short fileHeaderOffsetOfFirst(int list) {
+ return (short) (FILE_HEADER_O_LISTS + (2 * Magic.SZ_LONG * list));
+ }
+
+ /**
+ * Returns the offset of the "last" page of the indicated list
+ */
+ private short fileHeaderOffsetOfLast(int list) {
+ return (short) (fileHeaderOffsetOfFirst(list) + Magic.SZ_LONG);
+ }
+
+ /**
+ * Returns the indicated root rowid. A root rowid is a special rowid that
+ * needs to be kept between sessions. It could conceivably be stored in a
+ * special file, but as a large amount of space in the page header is wasted
+ * anyway, it's more useful to store it where it belongs.
+ *
+ */
+ long fileHeaderGetRoot(final int root) {
+ final short offset = (short) (FILE_HEADER_O_ROOTS + (root * Magic.SZ_LONG));
+ return readLong(offset);
+ }
+
+ /**
+ * Sets the indicated root rowid.
+ *
+ */
+ void fileHeaderSetRoot(final int root, final long rowid) {
+ final short offset = (short) (FILE_HEADER_O_ROOTS + (root * Magic.SZ_LONG));
+ writeLong(offset, rowid);
+ }
+
+ /**
+ * Returns true if the magic corresponds with a valid page magic.
+ */ + boolean pageHeaderMagicOk() { + int magic = pageHeaderGetMagic(); + return magic >= Magic.PAGE_MAGIC + && magic <= (Magic.PAGE_MAGIC + Magic.FREEPHYSIDS_ROOT_PAGE); + } + + /** + * For paranoia mode + */ + protected void pageHeaderParanoiaMagicOk() { + if (!pageHeaderMagicOk()) + throw new Error("CRITICAL: page header magic not OK " + + pageHeaderGetMagic()); + } + + short pageHeaderGetMagic() { + return readShort(PAGE_HEADER_O_MAGIC); + } + + long pageHeaderGetNext() { + pageHeaderParanoiaMagicOk(); + return readSixByteLong(PAGE_HEADER_O_NEXT); + } + + void pageHeaderSetNext(long next) { + pageHeaderParanoiaMagicOk(); + writeSixByteLong(PAGE_HEADER_O_NEXT, next); + } + + long pageHeaderGetPrev() { + pageHeaderParanoiaMagicOk(); + return readSixByteLong(PAGE_HEADER_O_PREV); + } + + void pageHeaderSetPrev(long prev) { + pageHeaderParanoiaMagicOk(); + writeSixByteLong(PAGE_HEADER_O_PREV, prev); + } + + void pageHeaderSetType(short type) { + writeShort(PAGE_HEADER_O_MAGIC, (short) (Magic.PAGE_MAGIC + type)); + } + + long pageHeaderGetLocation(final short pos) { + return readSixByteLong(pos + PhysicalRowId_O_LOCATION); + } + + void pageHeaderSetLocation(short pos, long value) { + writeSixByteLong(pos + PhysicalRowId_O_LOCATION, value); + } + + short dataPageGetFirst() { + return readShort(DATA_PAGE_O_FIRST); + } + + void dataPageSetFirst(short value) { + pageHeaderParanoiaMagicOk(); + if (value > 0 && value < DATA_PAGE_O_DATA) + throw new Error("DataPage.setFirst: offset " + value + " too small"); + writeShort(DATA_PAGE_O_FIRST, value); + } + +} Index: graph/src/main/java/org/apache/jdbm/HTreeBucket.java =================================================================== --- graph/src/main/java/org/apache/jdbm/HTreeBucket.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/HTreeBucket.java (Arbeitskopie) @@ -0,0 +1,339 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jdbm; + +import java.io.DataOutput; +import java.io.IOException; +import java.util.ArrayList; + +/** + * A bucket is a placeholder for multiple (key, value) pairs. Buckets are used + * to store collisions (same hash value) at all levels of an H*tree. + *

+ * There are two types of buckets: leaf and non-leaf. + *

+ * Non-leaf buckets are buckets which hold collisions which happen when the
+ * H*tree is not fully expanded. Keys in a non-leaf bucket can have different
+ * hash codes. Non-leaf buckets are limited to an arbitrary size. When this
+ * limit is reached, the H*tree should create a new HTreeDirectory node and
+ * distribute keys of the non-leaf buckets into the newly created
+ * HTreeDirectory.
+ *

+ * A leaf bucket is a bucket which contains keys which all have the same + * hashCode(). Leaf buckets stand at the bottom of an H*tree + * because the hashing algorithm cannot further discriminate between different + * keys based on their hash code. + */ +public final class HTreeBucket { + + /** + * The maximum number of elements (key, value) a non-leaf bucket can contain. + */ + public static final int OVERFLOW_SIZE = 16; + + /** + * Depth of this bucket. + */ + private byte _depth; + + /** + * Keys and values in this bucket. Keys are followed by values at + * KEYPOS+OVERFLOW_SIZE + */ + private Object[] _keysAndValues; + + private byte size = 0; + + private final HTree tree; + + /** + * Public constructor for serialization. + */ + public HTreeBucket(HTree tree) { + this.tree = tree; + } + + /** + * Construct a bucket with a given depth level. Depth level is the number of + * HashDirectory above this bucket. + */ + public HTreeBucket(HTree tree, byte level) { + this.tree = tree; + if (level > HTreeDirectory.MAX_DEPTH + 1) { + throw new IllegalArgumentException( + "Cannot create bucket with depth > MAX_DEPTH+1. " + "Depth=" + level); + } + _depth = level; + _keysAndValues = new Object[OVERFLOW_SIZE * 2]; + } + + /** + * Returns the number of elements contained in this bucket. + */ + public int getElementCount() { + return size; + } + + /** + * Returns whether or not this bucket is a "leaf bucket". + */ + public boolean isLeaf() { + return (_depth > HTreeDirectory.MAX_DEPTH); + } + + /** + * Returns true if bucket can accept at least one more element. + */ + public boolean hasRoom() { + if (isLeaf()) { + return true; // leaf buckets are never full + } else { + // non-leaf bucket + return (size < OVERFLOW_SIZE); + } + } + + /** + * Add an element (key, value) to this bucket. If an existing element has the + * same key, it is replaced silently. + * + * @return Object which was previously associated with the given key or + * null if no association existed. + */ + public V addElement(K key, V value) { + // find entry + byte existing = -1; + for (byte i = 0; i < size; i++) { + if (key.equals(_keysAndValues[i])) { + existing = i; + break; + } + } + + if (existing != -1) { + // replace existing element + Object before = _keysAndValues[existing + OVERFLOW_SIZE]; + if (before instanceof BTreeLazyRecord) { + BTreeLazyRecord rec = (BTreeLazyRecord) before; + before = rec.get(); + rec.delete(); + } + _keysAndValues[existing + OVERFLOW_SIZE] = value; + return (V) before; + } else { + // add new (key, value) pair + _keysAndValues[size] = key; + _keysAndValues[size + OVERFLOW_SIZE] = value; + size++; + return null; + } + } + + /** + * Remove an element, given a specific key. 
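+ * The hole left by the removed entry is filled by moving the bucket's last
+ * element into it, so the order of elements inside a bucket is not stable
+ * across removals (see the compaction step below).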
+ *
+ * @param key Key of the element to remove
+ * @return Removed element value, or null if not found
+ */
+ public V removeElement(K key) {
+ // find entry
+ byte existing = -1;
+ for (byte i = 0; i < size; i++) {
+ if (key.equals(_keysAndValues[i])) {
+ existing = i;
+ break;
+ }
+ }
+
+ if (existing != -1) {
+ Object o = _keysAndValues[existing + OVERFLOW_SIZE];
+ if (o instanceof BTreeLazyRecord) {
+ BTreeLazyRecord rec = (BTreeLazyRecord) o;
+ o = rec.get();
+ rec.delete();
+ }
+
+ // move last element to existing
+ size--;
+ _keysAndValues[existing] = _keysAndValues[size];
+ _keysAndValues[existing + OVERFLOW_SIZE] = _keysAndValues[size
+ + OVERFLOW_SIZE];
+
+ // and unset last element
+ _keysAndValues[size] = null;
+ _keysAndValues[size + OVERFLOW_SIZE] = null;
+
+ return (V) o;
+ } else {
+ // not found
+ return null;
+ }
+ }
+
+ /**
+ * Returns the value associated with a given key. If the given key is not
+ * found in this bucket, returns null.
+ */
+ public V getValue(K key) {
+ // find entry
+ byte existing = -1;
+ for (byte i = 0; i < size; i++) {
+ if (key.equals(_keysAndValues[i])) {
+ existing = i;
+ break;
+ }
+ }
+
+ if (existing != -1) {
+ Object o = _keysAndValues[existing + OVERFLOW_SIZE];
+ if (o instanceof BTreeLazyRecord)
+ return ((BTreeLazyRecord<V>) o).get();
+ else
+ return (V) o;
+ } else {
+ // key not found
+ return null;
+ }
+ }
+
+ /**
+ * Obtain the keys contained in this bucket. Keys are ordered to match their
+ * values, which can be obtained by calling getValues().
+ *

+ * The returned list is rebuilt on each call, but the contained references
+ * are shared with this bucket's internal array; please don't modify them
+ * outside the scope of this class.
+ */
+ ArrayList<K> getKeys() {
+ ArrayList<K> ret = new ArrayList<K>();
+ for (byte i = 0; i < size; i++) {
+ ret.add((K) _keysAndValues[i]);
+ }
+ return ret;
+ }
+
+ /**
+ * Obtain the values contained in this bucket. Values are ordered to match
+ * their keys, which can be obtained by calling getKeys().
+ *

+ * As an optimization, the Vector returned is the instance member of this + * class. Please don't modify outside the scope of this class. + */ + ArrayList getValues() { + ArrayList ret = new ArrayList(); + for (byte i = 0; i < size; i++) { + ret.add((V) _keysAndValues[i + OVERFLOW_SIZE]); + } + return ret; + + } + + public void writeExternal(DataOutput out) throws IOException { + out.write(_depth); + out.write(size); + + DataInputOutput out3 = tree.writeBufferCache.getAndSet(null); + if (out3 == null) + out3 = new DataInputOutput(); + else + out3.reset(); + + Serializer keySerializer = tree.keySerializer != null ? tree.keySerializer + : tree.getRecordManager().defaultSerializer(); + for (byte i = 0; i < size; i++) { + out3.reset(); + keySerializer.serialize(out3, _keysAndValues[i]); + LongPacker.packInt(out, out3.getPos()); + out.write(out3.getBuf(), 0, out3.getPos()); + + } + + // write values + if (tree.hasValues()) { + Serializer valSerializer = tree.valueSerializer != null ? tree.valueSerializer + : tree.getRecordManager().defaultSerializer(); + + for (byte i = 0; i < size; i++) { + Object value = _keysAndValues[i + OVERFLOW_SIZE]; + if (value == null) { + out.write(BTreeLazyRecord.NULL); + } else if (value instanceof BTreeLazyRecord) { + out.write(BTreeLazyRecord.LAZY_RECORD); + LongPacker.packLong(out, ((BTreeLazyRecord) value).recid); + } else { + // transform to byte array + out3.reset(); + valSerializer.serialize(out3, value); + + if (out3.getPos() > BTreeLazyRecord.MAX_INTREE_RECORD_SIZE) { + // store as separate record + long recid = tree.getRecordManager().insert(out3.toByteArray(), + BTreeLazyRecord.FAKE_SERIALIZER, true); + out.write(BTreeLazyRecord.LAZY_RECORD); + LongPacker.packLong(out, recid); + } else { + out.write(out3.getPos()); + out.write(out3.getBuf(), 0, out3.getPos()); + } + } + } + } + tree.writeBufferCache.set(out3); + + } + + public void readExternal(DataInputOutput in) throws IOException, + ClassNotFoundException { + _depth = in.readByte(); + size = in.readByte(); + + // read keys + Serializer keySerializer = tree.keySerializer != null ? tree.keySerializer + : tree.getRecordManager().defaultSerializer(); + _keysAndValues = (K[]) new Object[OVERFLOW_SIZE * 2]; + for (byte i = 0; i < size; i++) { + int expectedSize = LongPacker.unpackInt(in); + K key = (K) BTreeLazyRecord.fastDeser(in, keySerializer, expectedSize); + _keysAndValues[i] = key; + } + + // read values + if (tree.hasValues()) { + Serializer valSerializer = tree.valueSerializer != null ? 
tree.valueSerializer + : (Serializer) tree.getRecordManager().defaultSerializer(); + for (byte i = 0; i < size; i++) { + int header = in.readUnsignedByte(); + if (header == BTreeLazyRecord.NULL) { + _keysAndValues[i + OVERFLOW_SIZE] = null; + } else if (header == BTreeLazyRecord.LAZY_RECORD) { + long recid = LongPacker.unpackLong(in); + _keysAndValues[i + OVERFLOW_SIZE] = (new BTreeLazyRecord( + tree.getRecordManager(), recid, valSerializer)); + } else { + _keysAndValues[i + OVERFLOW_SIZE] = BTreeLazyRecord.fastDeser(in, + valSerializer, header); + } + } + } else { + for (byte i = 0; i < size; i++) { + if (_keysAndValues[i] != null) + _keysAndValues[i + OVERFLOW_SIZE] = JDBMUtils.EMPTY_STRING; + } + } + } +} Index: graph/src/main/java/org/apache/jdbm/PageManager.java =================================================================== --- graph/src/main/java/org/apache/jdbm/PageManager.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/PageManager.java (Arbeitskopie) @@ -0,0 +1,242 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.IOException; +import java.nio.ByteBuffer; + +/** + * This class manages the linked lists of pages that make up a file. + */ +public final class PageManager { + // our record file + final PageFile file; + + private PageIo headerBuf; + + /** + * Creates a new page manager using the indicated record file. + */ + PageManager(PageFile file) throws IOException { + this.file = file; + + // check the file headerBuf.fileHeader If the magic is 0, we assume a new + // file. Note that we hold on to the file header node. + headerBuf = file.get(0); + headerBuf.ensureHeapBuffer(); + headerBuf.fileHeaderCheckHead(headerBuf.readShort(0) == 0); + } + + /** + * Allocates a page of the indicated type. Returns recid of the page. + */ + long allocate(short type) throws IOException { + + if (type == Magic.FREE_PAGE) + throw new Error("allocate of free page?"); + + // do we have something on the free list? + long retval = headerBuf.fileHeaderGetFirstOf(Magic.FREE_PAGE); + boolean isNew = false; + + if (type != Magic.TRANSLATION_PAGE) { + + if (retval != 0) { + // yes. Point to it and make the next of that page the + // new first free page. + headerBuf.fileHeaderSetFirstOf(Magic.FREE_PAGE, getNext(retval)); + } else { + // nope. make a new record + retval = headerBuf.fileHeaderGetLastOf(Magic.FREE_PAGE); + if (retval == 0) + // very new file - allocate record #1 + retval = 1; + headerBuf.fileHeaderSetLastOf(Magic.FREE_PAGE, retval + 1); + isNew = true; + } + } else { + // translation pages have different allocation scheme + // and also have negative address + retval = headerBuf.fileHeaderGetLastOf(Magic.TRANSLATION_PAGE) - 1; + isNew = true; + } + + // Cool. 
We have a record; add it to the correct list.
+    PageIo pageHdr = file.get(retval);
+    if (isNew) {
+      pageHdr.pageHeaderSetType(type);
+    } else {
+      if (!pageHdr.pageHeaderMagicOk())
+        throw new Error("CRITICAL: page header magic for page "
+            + pageHdr.getPageId() + " not OK " + pageHdr.pageHeaderGetMagic());
+    }
+    long oldLast = headerBuf.fileHeaderGetLastOf(type);
+
+    // Clean data.
+    pageHdr.writeByteArray(PageFile.CLEAN_DATA, 0, 0, Storage.PAGE_SIZE);
+
+    pageHdr.pageHeaderSetType(type);
+    pageHdr.pageHeaderSetPrev(oldLast);
+    pageHdr.pageHeaderSetNext(0);
+
+    if (oldLast == 0)
+      // This was the first one of this type
+      headerBuf.fileHeaderSetFirstOf(type, retval);
+    headerBuf.fileHeaderSetLastOf(type, retval);
+    file.release(retval, true);
+
+    // If there's a previous, fix up its pointer
+    if (oldLast != 0) {
+      pageHdr = file.get(oldLast);
+      pageHdr.pageHeaderSetNext(retval);
+      file.release(oldLast, true);
+    }
+
+    return retval;
+  }
+
+  /**
+   * Frees a page of the indicated type.
+   */
+  void free(short type, long recid) throws IOException {
+    if (type == Magic.FREE_PAGE)
+      throw new Error("free free page?");
+    if (type == Magic.TRANSLATION_PAGE)
+      throw new Error("Translation page can not be deallocated");
+
+    if (recid == 0)
+      throw new Error("free header page?");
+
+    // get the page and read next and previous pointers
+    PageIo pageHdr = file.get(recid);
+    long prev = pageHdr.pageHeaderGetPrev();
+    long next = pageHdr.pageHeaderGetNext();
+
+    // put the page at the front of the free list.
+    pageHdr.pageHeaderSetType(Magic.FREE_PAGE);
+    pageHdr.pageHeaderSetNext(headerBuf.fileHeaderGetFirstOf(Magic.FREE_PAGE));
+    pageHdr.pageHeaderSetPrev(0);
+
+    headerBuf.fileHeaderSetFirstOf(Magic.FREE_PAGE, recid);
+    file.release(recid, true);
+
+    // remove the page from its old list
+    if (prev != 0) {
+      pageHdr = file.get(prev);
+      pageHdr.pageHeaderSetNext(next);
+      file.release(prev, true);
+    } else {
+      headerBuf.fileHeaderSetFirstOf(type, next);
+    }
+    if (next != 0) {
+      pageHdr = file.get(next);
+      pageHdr.pageHeaderSetPrev(prev);
+      file.release(next, true);
+    } else {
+      headerBuf.fileHeaderSetLastOf(type, prev);
+    }
+  }
+
+  /**
+   * Returns the page following the indicated page
+   */
+  long getNext(long page) throws IOException {
+    try {
+      return file.get(page).pageHeaderGetNext();
+    } finally {
+      file.release(page, false);
+    }
+  }
+
+  /**
+   * Returns the page before the indicated page
+   */
+  long getPrev(long page) throws IOException {
+    try {
+      return file.get(page).pageHeaderGetPrev();
+    } finally {
+      file.release(page, false);
+    }
+  }
+
+  /**
+   * Returns the first page on the indicated list.
+   */
+  long getFirst(short type) throws IOException {
+    return headerBuf.fileHeaderGetFirstOf(type);
+  }
+
+  /**
+   * Returns the last page on the indicated list.
+   */
+  long getLast(short type) throws IOException {
+    return headerBuf.fileHeaderGetLastOf(type);
+  }
+
+  /**
+   * Commit all pending (in-memory) data by flushing the page manager. This
+   * forces a flush of all outstanding pages (thus it is an implicit
+   * {@link PageFile#commit} as well).
+   */
+  void commit() throws IOException {
+    // write the header out
+    file.release(headerBuf);
+    file.commit();
+
+    // and obtain it again
+    headerBuf = file.get(0);
+    headerBuf.ensureHeapBuffer();
+    headerBuf.fileHeaderCheckHead(headerBuf.readShort(0) == 0);
+  }
+
+  /**
+   * Rolls back the page manager: the cached header is discarded together with
+   * all uncommitted pages, and the header is then re-read from the file.
+   */
+  void rollback() throws IOException {
+    // release header
+    file.discard(headerBuf);
+    file.rollback();
+    // and obtain it again
+    headerBuf = file.get(0);
+    headerBuf.ensureHeapBuffer();
+    headerBuf.fileHeaderCheckHead(headerBuf.readShort(0) == 0);
+  }
+
+  /**
+   * Closes the page manager. This flushes the page manager and releases the
+   * lock on the file header.
+   */
+  void close() throws IOException {
+    file.release(headerBuf);
+    file.commit();
+    headerBuf = null;
+  }
+
+  /**
+   * PageManager permanently locks the zero page, and we need this for backups
+   */
+  ByteBuffer getHeaderBufData() {
+    return headerBuf.getData();
+  }
+
+  public PageIo getFileHeader() {
+    return headerBuf;
+  }
+}
Index: graph/src/main/java/org/apache/jdbm/Serializer.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/Serializer.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/Serializer.java (Arbeitskopie)
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jdbm;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Interface used to provide a serialization mechanism other than a class'
+ * normal serialization.
+ */
+public interface Serializer {
+
+  /**
+   * Serialize the content of an object into a byte array.
+   *
+   * @param out DataOutput to save the object into
+   * @param obj Object to serialize
+   */
+  public void serialize(DataOutput out, A obj) throws IOException;
+
+  /**
+   * Deserialize the content of an object from a byte array.
+   *
+   * @param in to read serialized data from
+   * @return deserialized object
+   * @throws IOException
+   * @throws ClassNotFoundException
+   */
+  public A deserialize(DataInput in) throws IOException, ClassNotFoundException;
+
+}
Index: graph/src/main/java/org/apache/jdbm/DataInputOutput.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/DataInputOutput.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/DataInputOutput.java (Arbeitskopie)
@@ -0,0 +1,302 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Utility class which implements DataInput and DataOutput on top of a byte[]
+ * buffer with minimal overhead.
+ */
+public final class DataInputOutput implements DataInput, DataOutput,
+    ObjectInput, ObjectOutput {
+
+  private int pos = 0;
+  private int count = 0;
+  private byte[] buf;
+
+  public DataInputOutput() {
+    buf = new byte[8];
+  }
+
+  public DataInputOutput(byte[] data) {
+    buf = data;
+    count = data.length;
+  }
+
+  public byte[] getBuf() {
+    return buf;
+  }
+
+  public int getPos() {
+    return pos;
+  }
+
+  public void reset() {
+    pos = 0;
+    count = 0;
+  }
+
+  /** flips the buffer from write mode to read mode */
+  public void resetForReading() {
+    count = pos;
+    pos = 0;
+  }
+
+  public void reset(byte[] b) {
+    pos = 0;
+    buf = b;
+    count = b.length;
+  }
+
+  public byte[] toByteArray() {
+    byte[] d = new byte[pos];
+    System.arraycopy(buf, 0, d, 0, pos);
+    return d;
+  }
+
+  public int available() {
+    return count - pos;
+  }
+
+  public void readFully(byte[] b) throws IOException {
+    readFully(b, 0, b.length);
+  }
+
+  public void readFully(byte[] b, int off, int len) throws IOException {
+    System.arraycopy(buf, pos, b, off, len);
+    pos += len;
+  }
+
+  public int skipBytes(int n) throws IOException {
+    pos += n;
+    return n;
+  }
+
+  public boolean readBoolean() throws IOException {
+    return buf[pos++] == 1;
+  }
+
+  public byte readByte() throws IOException {
+    return buf[pos++];
+  }
+
+  public int readUnsignedByte() throws IOException {
+    return buf[pos++] & 0xff;
+  }
+
+  public short readShort() throws IOException {
+    return (short) (((short) (buf[pos++] & 0xff) << 8) | ((short) (buf[pos++] & 0xff) << 0));
+  }
+
+  public int readUnsignedShort() throws IOException {
+    return (((int) (buf[pos++] & 0xff) << 8) | ((int) (buf[pos++] & 0xff) << 0));
+  }
+
+  public char readChar() throws IOException {
+    return (char) readInt();
+  }
+
+  public int readInt() throws IOException {
+    return (((buf[pos++] & 0xff) << 24) | ((buf[pos++] & 0xff) << 16)
+        | ((buf[pos++] & 0xff) << 8) | ((buf[pos++] & 0xff) << 0));
+  }
+
+  public long readLong() throws IOException {
+    return (((long) (buf[pos++] & 0xff) << 56)
+        | ((long) (buf[pos++] & 0xff) << 48)
+        | ((long) (buf[pos++] & 0xff) << 40)
+        | ((long) (buf[pos++] & 0xff) << 32)
+        | ((long) (buf[pos++] & 0xff) << 24)
+        | ((long) (buf[pos++] & 0xff) << 16)
+        | ((long) (buf[pos++] & 0xff) << 8) | ((long) (buf[pos++] & 0xff) << 0));
+  }
+
+  public float readFloat() throws IOException {
+    return Float.intBitsToFloat(readInt());
+  }
+
+  public double readDouble() throws IOException {
+    return Double.longBitsToDouble(readLong());
+  }
+
+  public String readLine() throws IOException {
+    return readUTF();
+  }
+
+  public String readUTF() throws IOException {
+    return Serialization.deserializeString(this);
+  }
+
+  /**
+   * make sure there will be enough space in the buffer to write N bytes
+   */
+  private void ensureAvail(int n) {
+    if (pos + n >= buf.length) {
+      int newSize
= Math.max(pos + n, buf.length * 2); + buf = Arrays.copyOf(buf, newSize); + } + } + + public void write(int b) throws IOException { + ensureAvail(1); + buf[pos++] = (byte) b; + } + + public void write(byte[] b) throws IOException { + write(b, 0, b.length); + } + + public void write(byte[] b, int off, int len) throws IOException { + ensureAvail(len); + System.arraycopy(b, off, buf, pos, len); + pos += len; + } + + public void writeBoolean(boolean v) throws IOException { + ensureAvail(1); + buf[pos++] = (byte) (v ? 1 : 0); + } + + public void writeByte(int v) throws IOException { + ensureAvail(1); + buf[pos++] = (byte) (v); + } + + public void writeShort(int v) throws IOException { + ensureAvail(2); + buf[pos++] = (byte) (0xff & (v >> 8)); + buf[pos++] = (byte) (0xff & (v >> 0)); + + } + + public void writeChar(int v) throws IOException { + writeInt(v); + } + + public void writeInt(int v) throws IOException { + ensureAvail(4); + buf[pos++] = (byte) (0xff & (v >> 24)); + buf[pos++] = (byte) (0xff & (v >> 16)); + buf[pos++] = (byte) (0xff & (v >> 8)); + buf[pos++] = (byte) (0xff & (v >> 0)); + + } + + public void writeLong(long v) throws IOException { + ensureAvail(8); + buf[pos++] = (byte) (0xff & (v >> 56)); + buf[pos++] = (byte) (0xff & (v >> 48)); + buf[pos++] = (byte) (0xff & (v >> 40)); + buf[pos++] = (byte) (0xff & (v >> 32)); + buf[pos++] = (byte) (0xff & (v >> 24)); + buf[pos++] = (byte) (0xff & (v >> 16)); + buf[pos++] = (byte) (0xff & (v >> 8)); + buf[pos++] = (byte) (0xff & (v >> 0)); + } + + public void writeFloat(float v) throws IOException { + ensureAvail(4); + writeInt(Float.floatToIntBits(v)); + } + + public void writeDouble(double v) throws IOException { + ensureAvail(8); + writeLong(Double.doubleToLongBits(v)); + } + + public void writeBytes(String s) throws IOException { + writeUTF(s); + } + + public void writeChars(String s) throws IOException { + writeUTF(s); + } + + public void writeUTF(String s) throws IOException { + Serialization.serializeString(this, s); + } + + /** helper method to write data directly from PageIo */ + public void writeFromByteBuffer(ByteBuffer b, int offset, int length) { + ensureAvail(length); + b.position(offset); + b.get(buf, pos, length); + pos += length; + } + + // temp var used for Externalizable + SerialClassInfo serializer; + // temp var used for Externalizable + Serialization.FastArrayList objectStack; + + public Object readObject() throws ClassNotFoundException, IOException { + // is here just to implement ObjectInput + // Fake method which reads data from serializer. 
+    // We could probably implement a separate wrapper for this, but I want to
+    // save class space.
+    return serializer.deserialize(this, objectStack);
+  }
+
+  public int read() throws IOException {
+    // is here just to implement ObjectInput
+    return readUnsignedByte();
+  }
+
+  public int read(byte[] b) throws IOException {
+    // is here just to implement ObjectInput
+    readFully(b);
+    return b.length;
+  }
+
+  public int read(byte[] b, int off, int len) throws IOException {
+    // is here just to implement ObjectInput
+    readFully(b, off, len);
+    return len;
+  }
+
+  public long skip(long n) throws IOException {
+    // is here just to implement ObjectInput
+    pos += n;
+    return n;
+  }
+
+  public void close() throws IOException {
+    // is here just to implement ObjectInput
+    // do nothing
+  }
+
+  public void writeObject(Object obj) throws IOException {
+    // is here just to implement ObjectOutput
+    serializer.serialize(this, obj, objectStack);
+  }
+
+  public void flush() throws IOException {
+    // is here just to implement ObjectOutput
+    // do nothing
+  }
+
+}
Index: graph/src/main/java/org/apache/jdbm/LogicalRowIdManager.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/LogicalRowIdManager.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/LogicalRowIdManager.java (Arbeitskopie)
@@ -0,0 +1,239 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * This class manages the linked lists of logical rowid pages.
+ */ +public final class LogicalRowIdManager { + // our record file and associated page manager + private final PageFile file; + private final PageManager pageman; + static final short ELEMS_PER_PAGE = (short) ((Storage.PAGE_SIZE - Magic.PAGE_HEADER_SIZE) / Magic.PhysicalRowId_SIZE); + + private long[] freeRecordsInTransRowid = new long[4]; + private int freeRecordsInTransSize = 0; + + /** number of free logical rowids on logical free page, is SHORT */ + static final int OFFSET_FREE_COUNT = Magic.PAGE_HEADER_SIZE; + static final int FREE_HEADER_SIZE = Magic.PAGE_HEADER_SIZE + Magic.SZ_SHORT; + /** maximal number of free logical per page */ + static final int FREE_RECORDS_PER_PAGE = (Storage.PAGE_SIZE - FREE_HEADER_SIZE) / 6; + + /** + * Creates a log rowid manager using the indicated record file and page + * manager + */ + LogicalRowIdManager(PageFile file, PageManager pageman) throws IOException { + this.file = file; + this.pageman = pageman; + } + + /** + * Creates a new logical rowid pointing to the indicated physical id + * + * @param physloc physical location to point to + * @return logical recid + */ + long insert(final long physloc) throws IOException { + // check whether there's a free rowid to reuse + long retval = getFreeSlot(); + if (retval == 0) { + // no. This means that we bootstrap things by allocating + // a new translation page and freeing all the rowids on it. + long firstPage = pageman.allocate(Magic.TRANSLATION_PAGE); + short curOffset = Magic.PAGE_HEADER_SIZE; + for (int i = 0; i < ELEMS_PER_PAGE; i++) { + putFreeSlot(((-firstPage) << Storage.PAGE_SIZE_SHIFT) + + (long) curOffset); + + curOffset += Magic.PhysicalRowId_SIZE; + } + + retval = getFreeSlot(); + if (retval == 0) { + throw new Error("couldn't obtain free translation"); + } + } + // write the translation. + update(retval, physloc); + return retval; + } + + /** + * Insert at forced location, use only for defragmentation !! + * + * @param logicalRowId + * @param physLoc + * @throws IOException + */ + void forceInsert(final long logicalRowId, final long physLoc) + throws IOException { + if (fetch(logicalRowId) != 0) + throw new Error("can not forceInsert, record already exists: " + + logicalRowId); + + update(logicalRowId, physLoc); + } + + /** + * Releases the indicated logical rowid. 
+   */
+  void delete(final long logicalrowid) throws IOException {
+    // zero out the old location; this is needed for defragmentation
+    final long pageId = -(logicalrowid >>> Storage.PAGE_SIZE_SHIFT);
+    final PageIo xlatPage = file.get(pageId);
+    xlatPage.pageHeaderSetLocation(
+        (short) (logicalrowid & Storage.OFFSET_MASK), 0);
+    file.release(pageId, true);
+    putFreeSlot(logicalrowid);
+  }
+
+  /**
+   * Updates the mapping
+   *
+   * @param logicalrowid The logical rowid
+   * @param physloc The physical rowid
+   */
+  void update(final long logicalrowid, final long physloc) throws IOException {
+
+    final long pageId = -(logicalrowid >>> Storage.PAGE_SIZE_SHIFT);
+    final PageIo xlatPage = file.get(pageId);
+    xlatPage.pageHeaderSetLocation(
+        (short) (logicalrowid & Storage.OFFSET_MASK), physloc);
+    file.release(pageId, true);
+  }
+
+  /**
+   * Returns a mapping
+   *
+   * @param logicalrowid The logical rowid
+   * @return The physical rowid, or 0 if it does not exist
+   */
+  long fetch(long logicalrowid) throws IOException {
+    final long pageId = -(logicalrowid >>> Storage.PAGE_SIZE_SHIFT);
+    final long last = pageman.getLast(Magic.TRANSLATION_PAGE);
+    if (last - 1 > pageId)
+      return 0;
+
+    final short offset = (short) (logicalrowid & Storage.OFFSET_MASK);
+
+    final PageIo xlatPage = file.get(pageId);
+    final long ret = xlatPage.pageHeaderGetLocation(offset);
+
+    file.release(pageId, false);
+    return ret;
+  }
+
+  void commit() throws IOException {
+    if (freeRecordsInTransSize == 0)
+      return;
+
+    long freeRecPageId = pageman.getLast(Magic.FREELOGIDS_PAGE);
+    if (freeRecPageId == 0) {
+      // allocate new
+      freeRecPageId = pageman.allocate(Magic.FREELOGIDS_PAGE);
+    }
+    PageIo freeRecPage = file.get(freeRecPageId);
+    // write all uncommitted free records
+    for (int rowPos = 0; rowPos < freeRecordsInTransSize; rowPos++) {
+      short count = freeRecPage.readShort(OFFSET_FREE_COUNT);
+      if (count == FREE_RECORDS_PER_PAGE) {
+        // allocate a new free recid page
+        file.release(freeRecPage);
+        freeRecPageId = pageman.allocate(Magic.FREELOGIDS_PAGE);
+        freeRecPage = file.get(freeRecPageId);
+        freeRecPage.writeShort(FREE_RECORDS_PER_PAGE, (short) 0);
+        count = 0;
+      }
+      final int offset = (count) * 6 + FREE_HEADER_SIZE;
+      // write free recid and increase counter
+      freeRecPage.writeSixByteLong(offset, freeRecordsInTransRowid[rowPos]);
+      count++;
+      freeRecPage.writeShort(OFFSET_FREE_COUNT, count);
+    }
+    file.release(freeRecPage);
+
+    clearFreeRecidsInTransaction();
+  }
+
+  private void clearFreeRecidsInTransaction() {
+    if (freeRecordsInTransRowid.length > 128)
+      freeRecordsInTransRowid = new long[4];
+    freeRecordsInTransSize = 0;
+  }
+
+  void rollback() throws IOException {
+    clearFreeRecidsInTransaction();
+  }
+
+  /**
+   * Returns a free logical rowid, or 0 if nothing was found.
+   */
+  long getFreeSlot() throws IOException {
+    if (freeRecordsInTransSize != 0) {
+      return freeRecordsInTransRowid[--freeRecordsInTransSize];
+    }
+
+    final long logicFreePageId = pageman.getLast(Magic.FREELOGIDS_PAGE);
+    if (logicFreePageId == 0) {
+      return 0;
+    }
+    PageIo logicFreePage = file.get(logicFreePageId);
+    short recCount = logicFreePage.readShort(OFFSET_FREE_COUNT);
+    if (recCount <= 0) {
+      throw new InternalError();
+    }
+
+    final int offset = (recCount - 1) * 6 + FREE_HEADER_SIZE;
+    final long ret = logicFreePage.readSixByteLong(offset);
+
+    recCount--;
+
+    if (recCount > 0) {
+      // decrease counter and zero out the old record
+      logicFreePage.writeSixByteLong(offset, 0);
+      logicFreePage.writeShort(OFFSET_FREE_COUNT, recCount);
+      file.release(logicFreePage);
+    } else {
+      // release this page
+      file.release(logicFreePage);
+      pageman.free(Magic.FREELOGIDS_PAGE, logicFreePageId);
+    }
+
+    return ret;
+  }
+
+  /**
+   * Puts the indicated rowid on the free list
+   */
+  void putFreeSlot(long rowid) throws IOException {
+    // ensure capacity
+    if (freeRecordsInTransSize == freeRecordsInTransRowid.length)
+      freeRecordsInTransRowid = Arrays.copyOf(freeRecordsInTransRowid,
+          freeRecordsInTransRowid.length * 4);
+    // add record and increase size
+    freeRecordsInTransRowid[freeRecordsInTransSize] = rowid;
+    freeRecordsInTransSize++;
+  }
+
+}
Index: graph/src/main/java/org/apache/jdbm/PhysicalRowIdManager.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/PhysicalRowIdManager.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/PhysicalRowIdManager.java (Arbeitskopie)
@@ -0,0 +1,359 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jdbm;
+
+import static org.apache.jdbm.Storage.PAGE_SIZE;
+
+import java.io.IOException;
+
+/**
+ * This class manages physical row ids, and their data.
+ */
+public final class PhysicalRowIdManager {
+
+  // The file we're talking to and the associated page manager.
+  final private PageFile file;
+  final private PageManager pageman;
+  final PhysicalFreeRowIdManager freeman;
+  static final private short DATA_PER_PAGE = (short) (PAGE_SIZE - Magic.DATA_PAGE_O_DATA);
+  // caches the offset after the last allocation, so we don't have to iterate
+  // through the page on every allocation
+  private long cachedLastAllocatedRecordPage = Long.MIN_VALUE;
+  private short cachedLastAllocatedRecordOffset = Short.MIN_VALUE;
+
+  /**
+   * Creates a new rowid manager using the indicated record file and page
+   * manager.
+   */
+  PhysicalRowIdManager(PageFile file, PageManager pageManager)
+      throws IOException {
+    this.file = file;
+    this.pageman = pageManager;
+    this.freeman = new PhysicalFreeRowIdManager(file, pageManager);
+  }
+
+  /**
+   * Inserts a new record.
Returns the new physical rowid.
+   */
+  long insert(final byte[] data, final int start, final int length)
+      throws IOException {
+    if (length < 1)
+      throw new IllegalArgumentException("Length is <1");
+    if (start < 0)
+      throw new IllegalArgumentException("negative start");
+
+    long retval = alloc(length);
+    write(retval, data, start, length);
+    return retval;
+  }
+
+  /**
+   * Updates an existing record. Returns the possibly changed physical rowid.
+   */
+  long update(long rowid, final byte[] data, final int start, final int length)
+      throws IOException {
+    // fetch the record header
+    PageIo page = file.get(rowid >>> Storage.PAGE_SIZE_SHIFT);
+    short head = (short) (rowid & Storage.OFFSET_MASK);
+    int availSize = RecordHeader.getAvailableSize(page, head);
+    if (length > availSize ||
+    // difference between free and available space can be only 254.
+    // if bigger, we need to reallocate and free the page
+        availSize - length > RecordHeader.MAX_SIZE_SPACE) {
+      // not enough space - we need to copy to a new rowid.
+      file.release(page);
+      free(rowid);
+      rowid = alloc(length);
+    } else {
+      file.release(page);
+    }
+
+    // 'nuff space, write it in and return the rowid.
+    write(rowid, data, start, length);
+    return rowid;
+  }
+
+  void fetch(final DataInputOutput out, final long rowid) throws IOException {
+    // fetch the record header
+    long current = rowid >>> Storage.PAGE_SIZE_SHIFT;
+    PageIo page = file.get(current);
+    final short head = (short) (rowid & Storage.OFFSET_MASK);
+
+    // allocate a return buffer
+    // byte[] retval = new byte[ head.getCurrentSize() ];
+    final int size = RecordHeader.getCurrentSize(page, head);
+    if (size == 0) {
+      file.release(current, false);
+      return;
+    }
+
+    // copy bytes in
+    int leftToRead = size;
+    short dataOffset = (short) (head + RecordHeader.SIZE);
+    while (leftToRead > 0) {
+      // copy current page's data to return buffer
+      int toCopy = PAGE_SIZE - dataOffset;
+      if (leftToRead < toCopy) {
+        toCopy = leftToRead;
+      }
+
+      out.writeFromByteBuffer(page.getData(), dataOffset, toCopy);
+
+      // Go to the next page
+      leftToRead -= toCopy;
+      // out.flush();
+      file.release(page);
+
+      if (leftToRead > 0) {
+        current = pageman.getNext(current);
+        page = file.get(current);
+        dataOffset = Magic.DATA_PAGE_O_DATA;
+      }
+    }
+
+    // return retval;
+  }
+
+  /**
+   * Allocate a new rowid with the indicated size.
+   */
+  private long alloc(int size) throws IOException {
+    size = RecordHeader.roundAvailableSize(size);
+    long retval = freeman.getFreeRecord(size);
+    if (retval == 0) {
+      retval = allocNew(size, pageman.getLast(Magic.USED_PAGE));
+    }
+    return retval;
+  }
+
+  /**
+   * Allocates a new rowid. The second parameter is there to allow for a
+   * recursive call - it indicates where the search should start.
+   */
+  private long allocNew(int size, long start) throws IOException {
+    PageIo curPage;
+    if (start == 0
+        ||
+        // last page was completely filled?
+        cachedLastAllocatedRecordPage == start
+        && cachedLastAllocatedRecordOffset == PAGE_SIZE) {
+      // we need to create a new page.
+      start = pageman.allocate(Magic.USED_PAGE);
+      curPage = file.get(start);
+      curPage.dataPageSetFirst(Magic.DATA_PAGE_O_DATA);
+      cachedLastAllocatedRecordOffset = Magic.DATA_PAGE_O_DATA;
+      cachedLastAllocatedRecordPage = curPage.getPageId();
+      RecordHeader.setAvailableSize(curPage, Magic.DATA_PAGE_O_DATA, 0);
+      RecordHeader.setCurrentSize(curPage, Magic.DATA_PAGE_O_DATA, 0);
+    } else {
+      curPage = file.get(start);
+    }
+
+    // follow the rowids on this page to get to the last one. We don't
+    // fall off, because this is the last page, remember?
+    short pos = curPage.dataPageGetFirst();
+    if (pos == 0) {
+      // page is exactly filled by the last page of a record
+      file.release(curPage);
+      return allocNew(size, 0);
+    }
+
+    short hdr = pos;
+
+    if (cachedLastAllocatedRecordPage != curPage.getPageId()) {
+      // position was not cached, have to find it again
+      int availSize = RecordHeader.getAvailableSize(curPage, hdr);
+      while (availSize != 0 && pos < PAGE_SIZE) {
+        pos += availSize + RecordHeader.SIZE;
+        if (pos == PAGE_SIZE) {
+          // Again, a filled page.
+          file.release(curPage);
+          return allocNew(size, 0);
+        }
+        hdr = pos;
+        availSize = RecordHeader.getAvailableSize(curPage, hdr);
+      }
+    } else {
+      hdr = cachedLastAllocatedRecordOffset;
+      pos = cachedLastAllocatedRecordOffset;
+    }
+
+    if (pos == RecordHeader.SIZE) { // TODO why is this here?
+      // the last record exactly filled the page. Restart forcing
+      // a new page.
+      file.release(curPage);
+    }
+
+    if (hdr > Storage.PAGE_SIZE - 16) {
+      file.release(curPage);
+      // there is not enough space on the current page, so force a new page
+      return allocNew(size, 0);
+    }
+
+    // we have the position, now tack on extra pages until we've got
+    // enough space.
+    long retval = (start << Storage.PAGE_SIZE_SHIFT) + (long) pos;
+    int freeHere = PAGE_SIZE - pos - RecordHeader.SIZE;
+    if (freeHere < size) {
+      // check whether the last page would have only a small bit left.
+      // if yes, increase the allocation. A small bit is a record
+      // header plus 16 bytes.
+      int lastSize = (size - freeHere) % DATA_PER_PAGE;
+      if (size < DATA_PER_PAGE
+          && (DATA_PER_PAGE - lastSize) < (RecordHeader.SIZE + 16)) {
+        size += (DATA_PER_PAGE - lastSize);
+        size = RecordHeader.roundAvailableSize(size);
+      }
+
+      // write out the header now so we don't have to come back.
+      RecordHeader.setAvailableSize(curPage, hdr, size);
+      file.release(start, true);
+
+      int neededLeft = size - freeHere;
+      // Refactor these two pages!
+      while (neededLeft >= DATA_PER_PAGE) {
+        start = pageman.allocate(Magic.USED_PAGE);
+        curPage = file.get(start);
+        curPage.dataPageSetFirst((short) 0); // no rowids, just data
+        file.release(start, true);
+        neededLeft -= DATA_PER_PAGE;
+      }
+      if (neededLeft > 0) {
+        // done with whole chunks, allocate last fragment.
+        start = pageman.allocate(Magic.USED_PAGE);
+        curPage = file.get(start);
+        curPage.dataPageSetFirst((short) (Magic.DATA_PAGE_O_DATA + neededLeft));
+        file.release(start, true);
+        cachedLastAllocatedRecordOffset = (short) (Magic.DATA_PAGE_O_DATA + neededLeft);
+        cachedLastAllocatedRecordPage = curPage.getPageId();
+      }
+    } else {
+      // just update the current page. If there's less than 16 bytes
+      // left, we increase the allocation (16 bytes is an arbitrary
+      // number).
+      if (freeHere - size <= (16 + RecordHeader.SIZE)) {
+        size = freeHere;
+      }
+      RecordHeader.setAvailableSize(curPage, hdr, size);
+      file.release(start, true);
+      cachedLastAllocatedRecordOffset = (short) (hdr + RecordHeader.SIZE + size);
+      cachedLastAllocatedRecordPage = curPage.getPageId();
+    }
+    return retval;
+  }
+
+  void free(final long id) throws IOException {
+    // get the rowid, and write a zero current size into it.
+    final long curPageId = id >>> Storage.PAGE_SIZE_SHIFT;
+    final PageIo curPage = file.get(curPageId);
+    final short offset = (short) (id & Storage.OFFSET_MASK);
+    RecordHeader.setCurrentSize(curPage, offset, 0);
+    int size = RecordHeader.getAvailableSize(curPage, offset);
+
+    // trim the size if the record spreads across multiple pages
+    if (offset + RecordHeader.SIZE + size > PAGE_SIZE
+        + (PAGE_SIZE - Magic.DATA_PAGE_O_DATA)) {
+      // minus the data remaining on this page
+      int numOfPagesToSkip = (size
+          - (Storage.PAGE_SIZE - (offset - RecordHeader.SIZE)))
+          / (PAGE_SIZE - Magic.DATA_PAGE_O_DATA);
+      size = size - numOfPagesToSkip * (PAGE_SIZE - Magic.DATA_PAGE_O_DATA);
+      RecordHeader.setAvailableSize(curPage, offset, size);
+
+      // get next page
+      long nextPage = curPage.pageHeaderGetNext();
+      file.release(curPage);
+
+      // release pages
+      for (int i = 0; i < numOfPagesToSkip; i++) {
+        PageIo page = file.get(nextPage);
+        long nextPage2 = page.pageHeaderGetNext();
+        file.release(page);
+        pageman.free(Magic.USED_PAGE, nextPage);
+        nextPage = nextPage2;
+      }
+
+    } else {
+      file.release(curPage);
+    }
+
+    // write the rowid to the free list
+    freeman.putFreeRecord(id, size);
+  }
+
+  /**
+   * Writes out data to a rowid. Assumes that any resizing has been done.
+   */
+  private void write(final long rowid, final byte[] data, final int start,
+      final int length) throws IOException {
+    long current = rowid >>> Storage.PAGE_SIZE_SHIFT;
+    PageIo page = file.get(current);
+    final short hdr = (short) (rowid & Storage.OFFSET_MASK);
+    RecordHeader.setCurrentSize(page, hdr, length);
+    if (length == 0) {
+      file.release(current, true);
+      return;
+    }
+
+    // copy bytes in
+    int offsetInBuffer = start;
+    int leftToWrite = length;
+    short dataOffset = (short) (hdr + RecordHeader.SIZE);
+    while (leftToWrite > 0) {
+      // copy the buffer's data into the current page
+      int toCopy = PAGE_SIZE - dataOffset;
+
+      if (leftToWrite < toCopy) {
+        toCopy = leftToWrite;
+      }
+      page.writeByteArray(data, offsetInBuffer, dataOffset, toCopy);
+
+      // Go to the next page
+      leftToWrite -= toCopy;
+      offsetInBuffer += toCopy;
+
+      file.release(current, true);
+
+      if (leftToWrite > 0) {
+        current = pageman.getNext(current);
+        page = file.get(current);
+        dataOffset = Magic.DATA_PAGE_O_DATA;
+      }
+    }
+  }
+
+  void rollback() throws IOException {
+    cachedLastAllocatedRecordPage = Long.MIN_VALUE;
+    cachedLastAllocatedRecordOffset = Short.MIN_VALUE;
+    freeman.rollback();
+  }
+
+  void commit() throws IOException {
+    freeman.commit();
+  }
}
Index: graph/src/main/java/org/apache/jdbm/SerialClassInfo.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/SerialClassInfo.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/SerialClassInfo.java (Arbeitskopie)
@@ -0,0 +1,563 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.Externalizable; +import java.io.IOException; +import java.io.InvalidClassException; +import java.io.NotSerializableException; +import java.io.ObjectStreamClass; +import java.io.ObjectStreamField; +import java.io.Serializable; +import java.lang.reflect.Constructor; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.jdbm.Serialization.FastArrayList; + +/** + * This class stores information about serialized classes and fields. + */ +public abstract class SerialClassInfo { + + static final Serializer> serializer = new Serializer>() { + + @Override + public void serialize(DataOutput out, ArrayList obj) + throws IOException { + LongPacker.packInt(out, obj.size()); + for (ClassInfo ci : obj) { + out.writeUTF(ci.getName()); + out.writeBoolean(ci.isEnum); + out.writeBoolean(ci.isExternalizable); + if (ci.isExternalizable) + continue; // no fields + + LongPacker.packInt(out, ci.fields.size()); + for (FieldInfo fi : ci.fields) { + out.writeUTF(fi.getName()); + out.writeBoolean(fi.isPrimitive()); + out.writeUTF(fi.getType()); + } + } + } + + @Override + public ArrayList deserialize(DataInput in) throws IOException, + ClassNotFoundException { + + int size = LongPacker.unpackInt(in); + ArrayList ret = new ArrayList(size); + + for (int i = 0; i < size; i++) { + String className = in.readUTF(); + boolean isEnum = in.readBoolean(); + boolean isExternalizable = in.readBoolean(); + + int fieldsNum = isExternalizable ? 0 : LongPacker.unpackInt(in); + FieldInfo[] fields = new FieldInfo[fieldsNum]; + for (int j = 0; j < fieldsNum; j++) { + fields[j] = new FieldInfo(in.readUTF(), in.readBoolean(), + in.readUTF(), Class.forName(className)); + } + ret.add(new ClassInfo(className, fields, isEnum, isExternalizable)); + } + return ret; + } + }; + + long serialClassInfoRecid; + + public SerialClassInfo(DBAbstract db, long serialClassInfoRecid, + ArrayList registered) { + this.db = db; + this.serialClassInfoRecid = serialClassInfoRecid; + this.registered = registered; + } + + /** + * Stores info about single class stored in JDBM. 
Roughly corresponds to
+   * 'java.io.ObjectStreamClass'
+   */
+  static class ClassInfo {
+
+    private final String name;
+    private final List fields = new ArrayList();
+    private final Map name2fieldInfo = new HashMap();
+    private final Map name2fieldId = new HashMap();
+    private ObjectStreamField[] objectStreamFields;
+
+    final boolean isEnum;
+
+    final boolean isExternalizable;
+
+    ClassInfo(final String name, final FieldInfo[] fields,
+        final boolean isEnum, final boolean isExternalizable) {
+      this.name = name;
+      this.isEnum = isEnum;
+      this.isExternalizable = isExternalizable;
+
+      for (FieldInfo f : fields) {
+        this.name2fieldId.put(f.getName(), this.fields.size());
+        this.fields.add(f);
+        this.name2fieldInfo.put(f.getName(), f);
+      }
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public FieldInfo[] getFields() {
+      return (FieldInfo[]) fields.toArray(new FieldInfo[fields.size()]);
+    }
+
+    public FieldInfo getField(String name) {
+      return name2fieldInfo.get(name);
+    }
+
+    public int getFieldId(String name) {
+      Integer fieldId = name2fieldId.get(name);
+      if (fieldId != null)
+        return fieldId;
+      return -1;
+    }
+
+    public FieldInfo getField(int serialId) {
+      return fields.get(serialId);
+    }
+
+    public int addFieldInfo(FieldInfo field) {
+      name2fieldId.put(field.getName(), fields.size());
+      name2fieldInfo.put(field.getName(), field);
+      fields.add(field);
+      return fields.size() - 1;
+    }
+
+    public ObjectStreamField[] getObjectStreamFields() {
+      return objectStreamFields;
+    }
+
+    public void setObjectStreamFields(ObjectStreamField[] objectStreamFields) {
+      this.objectStreamFields = objectStreamFields;
+    }
+
+  }
+
+  /**
+   * Stores info about single field stored in JDBM. Roughly corresponds to
+   * 'java.io.ObjectStreamField'
+   */
+  static class FieldInfo {
+    private final String name;
+    private final boolean primitive;
+    private final String type;
+    private Class typeClass;
+    // Class containing this field
+    private final Class clazz;
+    private Object setter;
+    private int setterIndex;
+    private Object getter;
+    private int getterIndex;
+
+    public FieldInfo(String name, boolean primitive, String type, Class clazz) {
+      this.name = name;
+      this.primitive = primitive;
+      this.type = type;
+      this.clazz = clazz;
+      try {
+        this.typeClass = Class.forName(type);
+      } catch (ClassNotFoundException e) {
+        this.typeClass = null;
+      }
+      initSetter();
+      initGetter();
+    }
+
+    private void initSetter() {
+      // Set setter
+      String setterName = "set" + firstCharCap(name);
+      String fieldSetterName = clazz.getName() + "#" + setterName;
+
+      Class aClazz = clazz;
+
+      // iterate over class hierarchy, until root class
+      while (aClazz != Object.class) {
+        // check if there is a setter method
+        try {
+          Method m = aClazz.getMethod(setterName, typeClass);
+          if (m != null) {
+            setter = m;
+            return;
+          }
+        } catch (Exception e) {
+          // e.printStackTrace();
+        }
+
+        // no setter method, access the field directly
+        try {
+          Field f = aClazz.getDeclaredField(name);
+          // security manager may not be happy about this
+          if (!f.isAccessible())
+            f.setAccessible(true);
+          setter = f;
+          return;
+        } catch (Exception e) {
+          // e.printStackTrace();
+        }
+        // move to superclass
+        aClazz = aClazz.getSuperclass();
+      }
+    }
+
+    private void initGetter() {
+      // Set getter
+      String getterName = "get" + firstCharCap(name);
+      String fieldGetterName = clazz.getName() + "#" + getterName;
+
+      Class aClazz = clazz;
+
+      // iterate over class hierarchy, until root class
+      while (aClazz != Object.class) {
+        // check if there is a getter method
+        try {
+          Method m = aClazz.getMethod(getterName);
+          if (m
!= null) { + getter = m; + return; + } + } catch (Exception e) { + // e.printStackTrace(); + } + + // no get method, access field directly + try { + Field f = aClazz.getDeclaredField(name); + // security manager may not be happy about this + if (!f.isAccessible()) + f.setAccessible(true); + getter = f; + return; + } catch (Exception e) { + // e.printStackTrace(); + } + // move to superclass + aClazz = aClazz.getSuperclass(); + } + } + + public FieldInfo(ObjectStreamField sf, Class clazz) { + this(sf.getName(), sf.isPrimitive(), sf.getType().getName(), clazz); + } + + public String getName() { + return name; + } + + public boolean isPrimitive() { + return primitive; + } + + public String getType() { + return type; + } + + private String firstCharCap(String s) { + return Character.toUpperCase(s.charAt(0)) + s.substring(1); + } + } + + ArrayList registered; + Map class2classId = new HashMap(); + Map classId2class = new HashMap(); + + final DBAbstract db; + + public void registerClass(Class clazz) throws IOException { + if (clazz != Object.class) + assertClassSerializable(clazz); + + if (containsClass(clazz)) + return; + + ObjectStreamField[] streamFields = getFields(clazz); + FieldInfo[] fields = new FieldInfo[streamFields.length]; + for (int i = 0; i < fields.length; i++) { + ObjectStreamField sf = streamFields[i]; + fields[i] = new FieldInfo(sf, clazz); + } + + ClassInfo i = new ClassInfo(clazz.getName(), fields, clazz.isEnum(), + Externalizable.class.isAssignableFrom(clazz)); + class2classId.put(clazz, registered.size()); + classId2class.put(registered.size(), clazz); + registered.add(i); + + if (db != null) + db.update(serialClassInfoRecid, (Serialization) this, + db.defaultSerializationSerializer); + + } + + private ObjectStreamField[] getFields(Class clazz) { + ObjectStreamField[] fields = null; + ClassInfo classInfo = null; + Integer classId = class2classId.get(clazz); + if (classId != null) { + classInfo = registered.get(classId); + fields = classInfo.getObjectStreamFields(); + } + if (fields == null) { + ObjectStreamClass streamClass = ObjectStreamClass.lookup(clazz); + FastArrayList fieldsList = new FastArrayList(); + while (streamClass != null) { + for (ObjectStreamField f : streamClass.getFields()) { + fieldsList.add(f); + } + clazz = clazz.getSuperclass(); + streamClass = ObjectStreamClass.lookup(clazz); + } + fields = new ObjectStreamField[fieldsList.size()]; + for (int i = 0; i < fields.length; i++) { + fields[i] = fieldsList.get(i); + } + if (classInfo != null) + classInfo.setObjectStreamFields(fields); + } + return fields; + } + + private void assertClassSerializable(Class clazz) + throws NotSerializableException, InvalidClassException { + if (containsClass(clazz)) + return; + + if (!Serializable.class.isAssignableFrom(clazz)) + throw new NotSerializableException(clazz.getName()); + } + + public Object getFieldValue(String fieldName, Object object) { + try { + registerClass(object.getClass()); + } catch (IOException e) { + e.printStackTrace(); + } + ClassInfo classInfo = registered.get(class2classId.get(object.getClass())); + return getFieldValue(classInfo.getField(fieldName), object); + } + + public Object getFieldValue(FieldInfo fieldInfo, Object object) { + + Object fieldAccessor = fieldInfo.getter; + try { + if (fieldAccessor instanceof Method) { + Method m = (Method) fieldAccessor; + return m.invoke(object); + } else { + Field f = (Field) fieldAccessor; + return f.get(object); + } + } catch (Exception e) { + + } + + throw new NoSuchFieldError(object.getClass() + "." 
+ fieldInfo.getName());
+  }
+
+  public void setFieldValue(String fieldName, Object object, Object value) {
+    try {
+      registerClass(object.getClass());
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    ClassInfo classInfo = registered.get(class2classId.get(object.getClass()));
+    setFieldValue(classInfo.getField(fieldName), object, value);
+  }
+
+  public void setFieldValue(FieldInfo fieldInfo, Object object, Object value) {
+
+    Object fieldAccessor = fieldInfo.setter;
+    try {
+      if (fieldAccessor instanceof Method) {
+        Method m = (Method) fieldAccessor;
+        m.invoke(object, value);
+      } else {
+        Field f = (Field) fieldAccessor;
+        f.set(object, value);
+      }
+      return;
+    } catch (Throwable e) {
+      e.printStackTrace();
+    }
+
+    throw new NoSuchFieldError(object.getClass() + "." + fieldInfo.getName());
+  }
+
+  public boolean containsClass(Class clazz) {
+    return (class2classId.get(clazz) != null);
+  }
+
+  public int getClassId(Class clazz) {
+    Integer classId = class2classId.get(clazz);
+    if (classId != null) {
+      return classId;
+    }
+    throw new Error("Class is not registered: " + clazz);
+  }
+
+  public void writeObject(DataOutput out, Object obj, FastArrayList objectStack)
+      throws IOException {
+    registerClass(obj.getClass());
+
+    // write class header
+    int classId = getClassId(obj.getClass());
+    LongPacker.packInt(out, classId);
+    ClassInfo classInfo = registered.get(classId);
+
+    if (classInfo.isExternalizable) {
+      Externalizable o = (Externalizable) obj;
+      DataInputOutput out2 = (DataInputOutput) out;
+      try {
+        out2.serializer = this;
+        out2.objectStack = objectStack;
+        o.writeExternal(out2);
+      } finally {
+        out2.serializer = null;
+        out2.objectStack = null;
+      }
+      return;
+    }
+
+    if (classInfo.isEnum) {
+      int ordinal = ((Enum) obj).ordinal();
+      LongPacker.packInt(out, ordinal);
+    }
+
+    ObjectStreamField[] fields = getFields(obj.getClass());
+    LongPacker.packInt(out, fields.length);
+
+    for (ObjectStreamField f : fields) {
+      // write field ID
+      int fieldId = classInfo.getFieldId(f.getName());
+      if (fieldId == -1) {
+        // field does not exist in the class definition stored in the db;
+        // probably a new field was added, so add a field descriptor
+        fieldId = classInfo.addFieldInfo(new FieldInfo(f, obj.getClass()));
+        db.update(serialClassInfoRecid, (Serialization) this,
+            db.defaultSerializationSerializer);
+      }
+      LongPacker.packInt(out, fieldId);
+      // and write value
+      Object fieldValue = getFieldValue(classInfo.getField(fieldId), obj);
+      serialize(out, fieldValue, objectStack);
+    }
+  }
+
+  public Object readObject(DataInput in, FastArrayList objectStack)
+      throws IOException {
+    // read class header
+    try {
+      int classId = LongPacker.unpackInt(in);
+      ClassInfo classInfo = registered.get(classId);
+      // Class clazz = Class.forName(classInfo.getName());
+      Class clazz = classId2class.get(classId);
+      if (clazz == null)
+        clazz = Class.forName(classInfo.getName());
+      assertClassSerializable(clazz);
+
+      Object o;
+
+      if (classInfo.isEnum) {
+        int ordinal = LongPacker.unpackInt(in);
+        o = clazz.getEnumConstants()[ordinal];
+      } else {
+        o = createInstance(clazz, Object.class);
+      }
+
+      objectStack.add(o);
+
+      if (classInfo.isExternalizable) {
+        Externalizable oo = (Externalizable) o;
+        DataInputOutput in2 = (DataInputOutput) in;
+        try {
+          in2.serializer = this;
+          in2.objectStack = objectStack;
+          oo.readExternal(in2);
+        } finally {
+          in2.serializer = null;
+          in2.objectStack = null;
+        }
+
+      } else {
+        int fieldCount = LongPacker.unpackInt(in);
+        for (int i = 0; i < fieldCount; i++) {
+          int fieldId =
LongPacker.unpackInt(in);
+          FieldInfo f = classInfo.getField(fieldId);
+          Object fieldValue = deserialize(in, objectStack);
+          setFieldValue(f, o, fieldValue);
+        }
+      }
+      return o;
+    } catch (Exception e) {
+      throw new Error("Could not instantiate class", e);
+    }
+  }
+
+  // TODO dependency on nonpublic JVM API
+  static private sun.reflect.ReflectionFactory rf = sun.reflect.ReflectionFactory
+      .getReflectionFactory();
+
+  private static Map class2constructor = new HashMap();
+
+  /**
+   * Little trick to create a new instance without using the constructor.
+   * Taken from http://www.javaspecialists.eu/archive/Issue175.html
+   */
+  private static T createInstance(Class clazz, Class parent) {
+
+    try {
+      Constructor intConstr = class2constructor.get(clazz);
+
+      if (intConstr == null) {
+        Constructor objDef = parent.getDeclaredConstructor();
+        intConstr = rf.newConstructorForSerialization(clazz, objDef);
+        class2constructor.put(clazz, intConstr);
+      }
+
+      return clazz.cast(intConstr.newInstance());
+    } catch (RuntimeException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new IllegalStateException("Cannot create object", e);
+    }
+  }
+
+  protected abstract Object deserialize(DataInput in, FastArrayList objectStack)
+      throws IOException, ClassNotFoundException;
+
+  protected abstract void serialize(DataOutput out, Object fieldValue,
+      FastArrayList objectStack) throws IOException;
+
+}
Index: graph/src/main/java/org/apache/jdbm/AdvancedObjectOutputStream.java
===================================================================
--- graph/src/main/java/org/apache/jdbm/AdvancedObjectOutputStream.java (Revision 0)
+++ graph/src/main/java/org/apache/jdbm/AdvancedObjectOutputStream.java (Arbeitskopie)
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.jdbm; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.ObjectOutput; +import java.io.OutputStream; +import java.util.ArrayList; + +import org.apache.jdbm.SerialClassInfo.ClassInfo; + +/** + * An alternative to java.io.ObjectOutputStream which uses more + * efficient serialization + */ +public final class AdvancedObjectOutputStream extends DataOutputStream + implements ObjectOutput { + + public AdvancedObjectOutputStream(OutputStream out) { + super(out); + } + + @Override + public void writeObject(Object obj) throws IOException { + ArrayList registered = new ArrayList(); + Serialization ser = new Serialization(null, 0, registered); + + byte[] data = ser.serialize(obj); + // write class info first + SerialClassInfo.serializer.serialize(this, registered); + // and write data + write(data); + } +} Index: graph/src/main/java/org/apache/jdbm/StorageMemory.java =================================================================== --- graph/src/main/java/org/apache/jdbm/StorageMemory.java (Revision 0) +++ graph/src/main/java/org/apache/jdbm/StorageMemory.java (Arbeitskopie) @@ -0,0 +1,124 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jdbm; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOError; +import java.io.IOException; +import java.nio.ByteBuffer; + +/** + * Storage which keeps all data in memory. Data are lost after storage is + * closed. 
+ */
+public final class StorageMemory implements Storage {
+
+  private LongHashMap pages = new LongHashMap();
+  private boolean transactionsDisabled;
+
+  StorageMemory(boolean transactionsDisabled) {
+    this.transactionsDisabled = transactionsDisabled;
+  }
+
+  @Override
+  public ByteBuffer read(long pageNumber) throws IOException {
+
+    byte[] data = pages.get(pageNumber);
+    if (data == null) {
+      // out of bounds, so just return empty data
+      return ByteBuffer.wrap(PageFile.CLEAN_DATA).asReadOnlyBuffer();
+    } else {
+      ByteBuffer b = ByteBuffer.wrap(data);
+      if (!transactionsDisabled)
+        return b.asReadOnlyBuffer();
+      else
+        return b;
+    }
+  }
+
+  @Override
+  public void write(long pageNumber, ByteBuffer data) throws IOException {
+    if (data.capacity() != PAGE_SIZE)
+      throw new IllegalArgumentException();
+
+    byte[] b = pages.get(pageNumber);
+
+    if (transactionsDisabled && data.hasArray() && data.array() == b) {
+      // the data was already written directly into our array
+      return;
+    }
+
+    if (b == null)
+      b = new byte[PAGE_SIZE];
+
+    data.position(0);
+    data.get(b, 0, PAGE_SIZE);
+    pages.put(pageNumber, b);
+  }
+
+  @Override
+  public void sync() throws IOException {
+  }
+
+  @Override
+  public void forceClose() throws IOException {
+    pages = null;
+  }
+
+  private ByteArrayOutputStream transLog;
+
+  @Override
+  public DataInputStream readTransactionLog() {
+    if (transLog == null)
+      return null;
+    DataInputStream ret = new DataInputStream(new ByteArrayInputStream(
+        transLog.toByteArray()));
+    // read stream header
+    try {
+      ret.readShort();
+    } catch (IOException e) {
+      throw new IOError(e);
+    }
+    return ret;
+  }
+
+  @Override
+  public void deleteTransactionLog() {
+    transLog = null;
+  }
+
+  @Override
+  public DataOutputStream openTransactionLog() throws IOException {
+    if (transLog == null)
+      transLog = new ByteArrayOutputStream();
+    return new DataOutputStream(transLog);
+  }
+
+  @Override
+  public void deleteAllFiles() throws IOException {
+  }
+
+  @Override
+  public boolean isReadonly() {
+    return false;
+  }
+}
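For reviewers: a minimal round-trip through the new Serializer and DataInputOutput classes. This sketch is not part of the patch; it assumes the type parameter Serializer<A> that was stripped from the interface declaration above, and it uses only methods visible in this diff (writeUTF/readUTF delegate to JDBM's own string encoding, so the pair is symmetric).

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.jdbm.DataInputOutput;
import org.apache.jdbm.Serializer;

public class SerializerRoundTrip {

  // a custom serializer for String values
  static final Serializer<String> STRING = new Serializer<String>() {
    public void serialize(DataOutput out, String obj) throws IOException {
      out.writeUTF(obj);
    }

    public String deserialize(DataInput in) throws IOException {
      return in.readUTF();
    }
  };

  public static void main(String[] args) throws IOException {
    DataInputOutput buf = new DataInputOutput();
    STRING.serialize(buf, "hello");
    buf.resetForReading(); // flip the buffer from write mode to read mode
    System.out.println(STRING.deserialize(buf)); // prints "hello"
  }
}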
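Both rowid managers pack a page number and an in-page offset into a single long: the page id is shifted left by Storage.PAGE_SIZE_SHIFT and the offset occupies the low bits under Storage.OFFSET_MASK (logical rowids additionally negate the page id, since translation pages use negative addresses). A worked sketch of that arithmetic; the constant values are assumptions, since Storage itself is not part of this diff:

public class RowIdPacking {
  // assumed to mirror org.apache.jdbm.Storage; not taken from this diff
  static final int PAGE_SIZE_SHIFT = 12;  // log2 of a 4096-byte page
  static final long OFFSET_MASK = 0xFFFL; // PAGE_SIZE - 1

  static long pack(long pageId, short offset) {
    return (pageId << PAGE_SIZE_SHIFT) + offset;
  }

  public static void main(String[] args) {
    long rowid = pack(3, (short) 100);
    long pageId = rowid >>> PAGE_SIZE_SHIFT;      // 3
    short offset = (short) (rowid & OFFSET_MASK); // 100
    System.out.println(pageId + ":" + offset);    // prints "3:100"
  }
}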
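And a hedged sketch exercising StorageMemory through the Storage interface. The constructor is package-private, so the sketch must live in org.apache.jdbm; the read-back behaviour for unwritten pages (CLEAN_DATA, i.e. all zeroes) is taken from the code above.

package org.apache.jdbm;

import java.nio.ByteBuffer;

public class StorageMemoryExample {
  public static void main(String[] args) throws Exception {
    // transactions disabled: read() may hand back the live page array
    StorageMemory store = new StorageMemory(true);

    byte[] page = new byte[Storage.PAGE_SIZE];
    page[0] = 42;
    store.write(7, ByteBuffer.wrap(page));

    System.out.println(store.read(7).get(0));  // 42
    System.out.println(store.read(99).get(0)); // 0: unwritten pages read as CLEAN_DATA
  }
}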