diff -ruN drlvm-0703-svn/build/make/components/vm/gc_gen.xml drlvm-0703-new/build/make/components/vm/gc_gen.xml
--- drlvm-0703-svn/build/make/components/vm/gc_gen.xml 2007-07-03 23:22:15.000000000 +0800
+++ drlvm-0703-new/build/make/components/vm/gc_gen.xml 2007-07-04 01:24:55.000000000 +0800
@@ -53,15 +53,16 @@
diff -ruN drlvm-0703-svn/vm/gc_gen/src/common/gc_block.h drlvm-0703-new/vm/gc_gen/src/common/gc_block.h
--- drlvm-0703-svn/vm/gc_gen/src/common/gc_block.h 2007-07-03 23:22:27.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/common/gc_block.h 2007-07-04 01:42:45.000000000 +0800
@@ -355,4 +355,3 @@
#endif //#ifndef _BLOCK_H_
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/common/gc_common.cpp drlvm-0703-new/vm/gc_gen/src/common/gc_common.cpp
--- drlvm-0703-svn/vm/gc_gen/src/common/gc_common.cpp 2007-07-03 23:22:27.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/common/gc_common.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -335,4 +335,3 @@
return;
}
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/common/gc_common.h drlvm-0703-new/vm/gc_gen/src/common/gc_common.h
--- drlvm-0703-svn/vm/gc_gen/src/common/gc_common.h 2007-07-03 23:22:27.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/common/gc_common.h 2007-07-04 01:42:45.000000000 +0800
@@ -109,6 +109,7 @@
GC_CAUSE_NIL,
GC_CAUSE_NOS_IS_FULL,
GC_CAUSE_LOS_IS_FULL,
+ GC_CAUSE_POS_IS_FULL,
GC_CAUSE_RUNTIME_FORCE_GC
};
diff -ruN drlvm-0703-svn/vm/gc_gen/src/common/gc_metadata.cpp drlvm-0703-new/vm/gc_gen/src/common/gc_metadata.cpp
--- drlvm-0703-svn/vm/gc_gen/src/common/gc_metadata.cpp 2007-07-03 23:22:27.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/common/gc_metadata.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -368,4 +368,3 @@
}
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/common/space_tuner.cpp drlvm-0703-new/vm/gc_gen/src/common/space_tuner.cpp
--- drlvm-0703-svn/vm/gc_gen/src/common/space_tuner.cpp 2007-07-03 23:22:27.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/common/space_tuner.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -163,7 +163,7 @@
}
#include "../thread/collector.h"
-#include "../mark_sweep/lspace.h"
+#include "../los/lspace.h"
static POINTER_SIZE_INT non_los_live_obj_size;
static POINTER_SIZE_INT los_live_obj_size;
@@ -520,4 +520,3 @@
}
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp drlvm-0703-new/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
--- drlvm-0703-svn/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp 2007-07-03 23:22:24.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -24,7 +24,7 @@
#include "../thread/mutator.h"
#include "../common/gc_metadata.h"
#include "../trace_forward/fspace.h"
-#include "../mark_sweep/lspace.h"
+#include "../los/lspace.h"
#include "../gen/gen.h"
#include "../common/space_tuner.h"
diff -ruN drlvm-0703-svn/vm/gc_gen/src/gen/gen.cpp drlvm-0703-new/vm/gc_gen/src/gen/gen.cpp
--- drlvm-0703-svn/vm/gc_gen/src/gen/gen.cpp 2007-07-03 23:22:28.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/gen/gen.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -270,9 +270,11 @@
Space* gc_get_nos(GC_Gen* gc){ return (Space*)gc->nos;}
Space* gc_get_mos(GC_Gen* gc){ return (Space*)gc->mos;}
Space* gc_get_los(GC_Gen* gc){ return (Space*)gc->los;}
+Space* gc_get_pos(GC_Gen* gc) { return NULL; }
void gc_set_nos(GC_Gen* gc, Space* nos){ gc->nos = (Fspace*)nos;}
void gc_set_mos(GC_Gen* gc, Space* mos){ gc->mos = (Mspace*)mos;}
void gc_set_los(GC_Gen* gc, Space* los){ gc->los = (Lspace*)los;}
+void gc_set_pos(GC_Gen* gc, Space* pos) {}
void* mos_alloc(unsigned size, Allocator *allocator){return mspace_alloc(size, allocator);}
void* nos_alloc(unsigned size, Allocator *allocator){return fspace_alloc(size, allocator);}
diff -ruN drlvm-0703-svn/vm/gc_gen/src/gen/gen.h drlvm-0703-new/vm/gc_gen/src/gen/gen.h
--- drlvm-0703-svn/vm/gc_gen/src/gen/gen.h 2007-07-03 23:22:28.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/gen/gen.h 2007-07-04 01:42:45.000000000 +0800
@@ -26,7 +26,7 @@
#include "../thread/gc_thread.h"
#include "../trace_forward/fspace.h"
#include "../mark_compact/mspace.h"
-#include "../mark_sweep/lspace.h"
+#include "../los/lspace.h"
#include "../finalizer_weakref/finalizer_weakref_metadata.h"
enum Write_Barrier_Kind{
@@ -155,9 +155,11 @@
Space* gc_get_nos(GC_Gen* gc);
Space* gc_get_mos(GC_Gen* gc);
Space* gc_get_los(GC_Gen* gc);
+Space* gc_get_pos(GC_Gen* gc);
void gc_set_nos(GC_Gen* gc, Space* nos);
void gc_set_mos(GC_Gen* gc, Space* mos);
void gc_set_los(GC_Gen* gc, Space* los);
+void gc_set_pos(GC_Gen* gc, Space* pos);
unsigned int gc_get_processor_num(GC_Gen* gc);
@@ -181,4 +183,3 @@
#endif /* ifndef _GC_GEN_H_ */
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/los/free_area_pool.cpp drlvm-0703-new/vm/gc_gen/src/los/free_area_pool.cpp
--- drlvm-0703-svn/vm/gc_gen/src/los/free_area_pool.cpp 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/los/free_area_pool.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Ji Qi, 2006/10/05
+ */
+
+#include "free_area_pool.h"
+
+void free_area_pool_init(Free_Area_Pool* pool)
+{
+ for(unsigned int i = 0; i < NUM_FREE_LIST; i ++){
+ Bidir_List* list = (Bidir_List*)(&pool->sized_area_list[i]);
+ list->next = list->prev = list;
+ ((Lockable_Bidir_List*)list)->lock = 0;
+ ((Lockable_Bidir_List*)list)->zero = 0;
+ }
+
+ memset((void*)pool->list_bit_flag, 0, NUM_FLAG_WORDS << BIT_SHIFT_TO_BYTES_PER_WORD);
+ return;
+}
+
+void free_area_pool_reset(Free_Area_Pool* pool)
+{
+ free_area_pool_init(pool);
+}
+
+Free_Area* free_pool_find_size_area(Free_Area_Pool* pool, POINTER_SIZE_INT size)
+{
+ assert(size >= GC_OBJ_SIZE_THRESHOLD);
+
+ size = ALIGN_UP_TO_KILO(size);
+ unsigned int index = pool_list_index_with_size(size);
+ /* Get first list index that is not empty */
+ index = pool_list_get_next_flag(pool, index);
+ assert(index <= NUM_FREE_LIST);
+
+ /*No free area left*/
+ if(index == NUM_FREE_LIST)
+ return NULL;
+
+ Bidir_List* list = (Bidir_List*)&pool->sized_area_list[index];
+ Free_Area* area = (Free_Area*)list->next;
+
+ if(index != MAX_LIST_INDEX)
+ return area;
+
+ /* Else, for last bucket MAX_LIST_INDEX, we must traverse it */
+ while( area != (Free_Area*)list ){
+ if(area->size >= size) return area;
+ area = (Free_Area*)(area->next);
+ }
+
+ return NULL;
+}
diff -ruN drlvm-0703-svn/vm/gc_gen/src/los/free_area_pool.h drlvm-0703-new/vm/gc_gen/src/los/free_area_pool.h
--- drlvm-0703-svn/vm/gc_gen/src/los/free_area_pool.h 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/los/free_area_pool.h 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Ji Qi, 2006/10/05
+ */
+
+#ifndef _BUDDY_H_
+#define _BUDDY_H_
+
+#include "../common/gc_common.h"
+#include "../utils/bit_ops.h"
+#include "../utils/bidir_list.h"
+
+#define ADDRESS_IS_KB_ALIGNED(addr) (!(((POINTER_SIZE_INT)addr) & ((1 << BIT_SHIFT_TO_KILO)-1)))
+#define ALIGN_UP_TO_KILO(addr) (((POINTER_SIZE_INT)(addr) + (KB - 1)) & (~(KB- 1)))
+#define ALIGN_DOWN_TO_KILO(addr) ((POINTER_SIZE_INT)(addr) & (~(KB- 1)))
+
+#define NUM_FREE_LIST 128
+
+typedef struct Lockable_Bidir_List{
+ /* <-- First couple of fields overloaded as Bidir_List */
+ POINTER_SIZE_INT zero;
+ Bidir_List* next;
+ Bidir_List* prev;
+ /* END of Bidir_List --> */
+ SpinLock lock;
+}Lockable_Bidir_List;
+
+typedef struct Free_Area{
+ /* <-- First couple of fields overloaded as Bidir_List */
+ POINTER_SIZE_INT zero;
+ Bidir_List* next;
+ Bidir_List* prev;
+ /* END of Bidir_List --> */
+ POINTER_SIZE_INT size;
+}Free_Area;
+
+/* This is the only interface for new area creation. If the new area size is smaller than the threshold, return NULL. */
+inline Free_Area* free_area_new(void* start, POINTER_SIZE_INT size)
+{
+ assert(ADDRESS_IS_KB_ALIGNED(start));
+ assert(ADDRESS_IS_KB_ALIGNED(size));
+
+ Free_Area* area = (Free_Area*)start;
+ area->zero = 0;
+ area->next = area->prev = (Bidir_List*)area;
+ area->size = size;
+
+ if( size < GC_OBJ_SIZE_THRESHOLD) return NULL;
+ else return area;
+}
+
+#define NUM_FLAG_WORDS (NUM_FREE_LIST >> BIT_SHIFT_TO_BITS_PER_WORD)
+
+typedef struct Free_Area_Pool{
+ Lockable_Bidir_List sized_area_list[NUM_FREE_LIST];
+ /* each list corresponds to one bit in below vector */
+ POINTER_SIZE_INT list_bit_flag[NUM_FLAG_WORDS];
+}Free_Area_Pool;
+
+#define MAX_LIST_INDEX (NUM_FREE_LIST - 1)
+
+inline void pool_list_set_flag(Free_Area_Pool* pool, unsigned int index)
+{
+ words_set_bit(pool->list_bit_flag, NUM_FLAG_WORDS, index);
+}
+
+inline void pool_list_clear_flag(Free_Area_Pool* pool, unsigned int index)
+{
+ words_clear_bit(pool->list_bit_flag, NUM_FLAG_WORDS, index);
+}
+
+inline unsigned int pool_list_get_next_flag(Free_Area_Pool* pool, unsigned int start_idx)
+{
+ return words_get_next_set_lsb(pool->list_bit_flag, NUM_FLAG_WORDS, start_idx);
+}
+
+inline unsigned int pool_list_index_with_size(POINTER_SIZE_INT size)
+{
+ assert(size >= GC_OBJ_SIZE_THRESHOLD);
+
+ unsigned int index;
+ index = (unsigned int) (size >> BIT_SHIFT_TO_KILO);
+ if(index > MAX_LIST_INDEX) index = MAX_LIST_INDEX;
+ return index;
+}
+
+inline Free_Area* free_pool_add_area(Free_Area_Pool* pool, Free_Area* free_area)
+{
+ assert( free_area->size >= GC_OBJ_SIZE_THRESHOLD);
+
+ unsigned int index = pool_list_index_with_size(free_area->size);
+ bidir_list_add_item((Bidir_List*)&(pool->sized_area_list[index]), (Bidir_List*)free_area);
+
+ /* set bit flag of the list */
+ pool_list_set_flag(pool, index);
+ return free_area;
+}
+
+inline void free_pool_remove_area(Free_Area_Pool* pool, Free_Area* free_area)
+{
+ unsigned int index = pool_list_index_with_size(free_area->size);
+ bidir_list_remove_item((Bidir_List*)free_area);
+
+ /* set bit flag of the list */
+ Bidir_List* list = (Bidir_List*)&(pool->sized_area_list[index]);
+ if(list->next == list){
+ pool_list_clear_flag(pool, index);
+ }
+}
+
+void free_area_pool_init(Free_Area_Pool* p_buddy);
+void free_area_pool_reset(Free_Area_Pool* p_buddy);
+Free_Area* free_pool_find_size_area(Free_Area_Pool* pool, POINTER_SIZE_INT size);
+
+#endif /*ifndef _BUDDY_H_*/
diff -ruN drlvm-0703-svn/vm/gc_gen/src/los/lspace_alloc_collect.cpp drlvm-0703-new/vm/gc_gen/src/los/lspace_alloc_collect.cpp
--- drlvm-0703-svn/vm/gc_gen/src/los/lspace_alloc_collect.cpp 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/los/lspace_alloc_collect.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,458 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Ji Qi, 2006/10/05
+ */
+
+#include "lspace.h"
+#include "../gen/gen.h"
+#include "../common/space_tuner.h"
+
+static void free_pool_lock_nr_list(Free_Area_Pool* pool, unsigned int list_index)
+{
+ Lockable_Bidir_List* list_head = &pool->sized_area_list[list_index];
+ lock(list_head->lock);
+}
+
+static void free_pool_unlock_nr_list(Free_Area_Pool* pool, unsigned int list_index)
+{
+ Lockable_Bidir_List* list_head = &pool->sized_area_list[list_index];
+ unlock(list_head->lock);
+}
+
+static unsigned int free_pool_nr_list_is_empty(Free_Area_Pool* pool, unsigned int list_index)
+{
+ Bidir_List* head = (Bidir_List*)(&pool->sized_area_list[list_index]);
+ return (head->next == head);
+}
+static void* free_pool_former_lists_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int list_hint, POINTER_SIZE_INT size)
+{
+ Free_Area* free_area;
+ void* p_result;
+ POINTER_SIZE_SINT remain_size;
+ POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size);
+ unsigned int new_list_nr = 0;
+ Lockable_Bidir_List* head = &pool->sized_area_list[list_hint];
+
+ assert(list_hint < MAX_LIST_INDEX);
+
+ free_pool_lock_nr_list(pool, list_hint);
+ /*Other LOS allocation may race with this one, so check list status here.*/
+ if(free_pool_nr_list_is_empty(pool, list_hint)){
+ free_pool_unlock_nr_list(pool, list_hint);
+ return NULL;
+ }
+
+ free_area = (Free_Area*)(head->next);
+ /*if the list head is not NULL, it definitely satisfies the request. */
+ remain_size = free_area->size - alloc_size;
+ assert(remain_size >= 0);
+ if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
+ new_list_nr = pool_list_index_with_size(remain_size);
+ p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
+ if(new_list_nr == list_hint){
+ free_area->size = remain_size;
+ free_pool_unlock_nr_list(pool, list_hint);
+ return p_result;
+ }else{
+ free_pool_remove_area(pool, free_area);
+ free_pool_unlock_nr_list(pool, list_hint);
+ free_area->size = remain_size;
+ free_pool_lock_nr_list(pool, new_list_nr);
+ free_pool_add_area(pool, free_area);
+ free_pool_unlock_nr_list(pool, new_list_nr);
+ return p_result;
+ }
+ }
+ else
+ {
+ free_pool_remove_area(pool, free_area);
+ free_pool_unlock_nr_list(pool, list_hint);
+ p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
+ if(remain_size > 0){
+ assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
+ free_area->size = remain_size;
+ }
+ return p_result;
+ }
+ assert(0);
+ return NULL;
+}
+
+static void* free_pool_last_list_atomic_take_area_piece(Free_Area_Pool* pool, POINTER_SIZE_INT size)
+{
+ void* p_result;
+ POINTER_SIZE_SINT remain_size = 0;
+ POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size);
+ Free_Area* free_area = NULL;
+ Free_Area* new_area = NULL;
+ unsigned int new_list_nr = 0;
+ Lockable_Bidir_List* head = &(pool->sized_area_list[MAX_LIST_INDEX]);
+
+ free_pool_lock_nr_list(pool, MAX_LIST_INDEX );
+ /*The last list is empty.*/
+ if(free_pool_nr_list_is_empty(pool, MAX_LIST_INDEX)){
+ free_pool_unlock_nr_list(pool, MAX_LIST_INDEX );
+ return NULL;
+ }
+
+ free_area = (Free_Area*)(head->next);
+ while( free_area != (Free_Area*)head ){
+ remain_size = free_area->size - alloc_size;
+ if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
+ new_list_nr = pool_list_index_with_size(remain_size);
+ p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
+ if(new_list_nr == MAX_LIST_INDEX){
+ free_area->size = remain_size;
+ free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
+ return p_result;
+ }else{
+ free_pool_remove_area(pool, free_area);
+ free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
+ free_area->size = remain_size;
+ free_pool_lock_nr_list(pool, new_list_nr);
+ free_pool_add_area(pool, free_area);
+ free_pool_unlock_nr_list(pool, new_list_nr);
+ return p_result;
+ }
+ }
+ else if(remain_size >= 0)
+ {
+ free_pool_remove_area(pool, free_area);
+ free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
+ p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
+ if(remain_size > 0){
+ assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
+ free_area->size = remain_size;
+ }
+ return p_result;
+ }
+ else free_area = (Free_Area*)free_area->next;
+ }
+ /*No adequate area in the last list*/
+ free_pool_unlock_nr_list(pool, MAX_LIST_INDEX );
+ return NULL;
+}
+
+void* lspace_try_alloc(Lspace* lspace, POINTER_SIZE_INT alloc_size){
+ void* p_result = NULL;
+ Free_Area_Pool* pool = lspace->free_pool;
+ unsigned int list_hint = pool_list_index_with_size(alloc_size);
+ list_hint = pool_list_get_next_flag(pool, list_hint);
+
+ while((!p_result) && (list_hint <= MAX_LIST_INDEX)){
+ /*List hint is not the last list, so look for it in former lists.*/
+ if(list_hint < MAX_LIST_INDEX){
+ p_result = free_pool_former_lists_atomic_take_area_piece(pool, list_hint, alloc_size);
+ if(p_result){
+ memset(p_result, 0, alloc_size);
+ POINTER_SIZE_INT vold = lspace->alloced_size;
+ POINTER_SIZE_INT vnew = vold + alloc_size;
+ while( vold != atomic_casptrsz(&lspace->alloced_size, vnew, vold) ){
+ vold = lspace->alloced_size;
+ vnew = vold + alloc_size;
+ }
+ return p_result;
+ }else{
+ list_hint ++;
+ list_hint = pool_list_get_next_flag(pool, list_hint);
+ continue;
+ }
+ }
+ /*List hint is the last list, so look for it in the last list.*/
+ else
+ {
+ p_result = free_pool_last_list_atomic_take_area_piece(pool, alloc_size);
+ if(p_result){
+ memset(p_result, 0, alloc_size);
+ POINTER_SIZE_INT vold = lspace->alloced_size;
+ POINTER_SIZE_INT vnew = vold + alloc_size;
+ while( vold != atomic_casptrsz(&lspace->alloced_size, vnew, vold) ){
+ vold = lspace->alloced_size;
+ vnew = vold + alloc_size;
+ }
+ return p_result;
+ }
+ else break;
+ }
+ }
+ return p_result;
+}
+
+void* lspace_alloc(POINTER_SIZE_INT size, Allocator *allocator)
+{
+ unsigned int try_count = 0;
+ void* p_result = NULL;
+ POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size);
+ Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc);
+ Free_Area_Pool* pool = lspace->free_pool;
+
+ while( try_count < 2 ){
+ if(p_result = lspace_try_alloc(lspace, alloc_size))
+ return p_result;
+
+ /*Failed: no adequate area was found in any list, so run a GC first, then try again.*/
+ if(try_count == 0){
+ vm_gc_lock_enum();
+ /*Check again whether there is space for the object, because other mutator
+ threads may have issued a GC while we were waiting for the gc lock*/
+ if(p_result = lspace_try_alloc(lspace, alloc_size)){
+ vm_gc_unlock_enum();
+ return p_result;
+ }
+ lspace->failure_size = round_up_to_size(alloc_size, KB);
+
+ gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL);
+
+ if(lspace->success_ptr){
+ p_result = lspace->success_ptr;
+ lspace->success_ptr = NULL;
+ vm_gc_unlock_enum();
+ return p_result;
+ }
+ vm_gc_unlock_enum();
+ try_count ++;
+ }else{
+ try_count ++;
+ }
+ }
+ return NULL;
+}
+
+void lspace_compute_object_target(Collector* collector, Lspace* lspace)
+{
+ void* dest_addr = lspace->heap_start;
+ unsigned int iterate_index = 0;
+ Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
+
+ assert(!collector->rem_set);
+ collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
+#ifdef USE_32BITS_HASHCODE
+ collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
+#endif
+
+ while( p_obj ){
+ assert( obj_is_marked_in_vt(p_obj));
+ unsigned int obj_size = vm_object_size(p_obj);
+ assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)lspace->heap_end);
+#ifdef USE_32BITS_HASHCODE
+ obj_size += hashcode_is_attached(p_obj)? GC_OBJECT_ALIGNMENT : 0 ;
+ Obj_Info_Type obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, null, null);
+#else
+ Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
+#endif
+
+ if( obj_info != 0 ) {
+ collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
+ collector_remset_add_entry(collector, (Partial_Reveal_Object **)obj_info);
+ }
+
+ obj_set_fw_in_oi(p_obj, dest_addr);
+ dest_addr = (void *)ALIGN_UP_TO_KILO(((POINTER_SIZE_INT) dest_addr + obj_size));
+ p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
+ }
+
+ pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
+ collector->rem_set = NULL;
+#ifdef USE_32BITS_HASHCODE
+ pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
+ collector->hashcode_set = NULL;
+#endif
+
+ lspace->scompact_fa_start = dest_addr;
+ lspace->scompact_fa_end= lspace->heap_end;
+ return;
+}
+
+void lspace_sliding_compact(Collector* collector, Lspace* lspace)
+{
+ unsigned int iterate_index = 0;
+ Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
+ Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
+
+ while( p_obj ){
+ assert( obj_is_marked_in_vt(p_obj));
+#ifdef USE_32BITS_HASHCODE
+ obj_clear_dual_bits_in_vt(p_obj);
+#else
+ obj_unmark_in_vt(p_obj);
+#endif
+
+ unsigned int obj_size = vm_object_size(p_obj);
+#ifdef USE_32BITS_HASHCODE
+ obj_size += (obj_is_sethash_in_vt(p_obj))?GC_OBJECT_ALIGNMENT:0;
+#endif
+ Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
+ POINTER_SIZE_INT target_obj_end = (POINTER_SIZE_INT)p_target_obj + obj_size;
+ if( p_obj != p_target_obj){
+ memmove(p_target_obj, p_obj, obj_size);
+ }
+ set_obj_info(p_target_obj, 0);
+ p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
+ }
+
+ return;
+}
+
+void lspace_reset_after_collection(Lspace* lspace)
+{
+ GC* gc = lspace->gc;
+ Space_Tuner* tuner = gc->tuner;
+ POINTER_SIZE_INT trans_size = tuner->tuning_size;
+ POINTER_SIZE_INT new_fa_size = 0;
+ assert(!(trans_size%GC_BLOCK_SIZE_BYTES));
+
+ /* Reset the pool first because its info is useless now. */
+ free_area_pool_reset(lspace->free_pool);
+
+ switch(tuner->kind){
+ case TRANS_FROM_MOS_TO_LOS:{
+ if(lspace->move_object){
+ assert(tuner->force_tune);
+ Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks;
+ lspace->heap_end = (void*)mos_first_block;
+ assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
+ new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
+ Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
+ if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
+ }else{
+ void* origin_end = lspace->heap_end;
+ lspace->heap_end = (void*)(((GC_Gen*)gc)->mos->blocks);
+ /*The assumption in lspace_get_next_marked_object, that the first word of each KB unit
+ must be zero when iterating the lspace, does not hold here*/
+ Free_Area* trans_fa = free_area_new(origin_end, trans_size);
+ if(trans_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, trans_fa);
+ }
+ lspace->committed_heap_size += trans_size;
+ break;
+ }
+ case TRANS_FROM_LOS_TO_MOS:{
+ assert(lspace->move_object);
+ assert(tuner->tuning_size);
+ Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks;
+ assert( (POINTER_SIZE_INT)lspace->heap_end - trans_size == (POINTER_SIZE_INT)mos_first_block );
+ lspace->heap_end = (void*)mos_first_block;
+ lspace->committed_heap_size -= trans_size;
+ /*LOS_Shrink: We don't have to scan the lspace to build the free pool when slide-compacting LOS*/
+ assert((POINTER_SIZE_INT)lspace->scompact_fa_end > (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size);
+ new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start - tuner->tuning_size;
+ Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
+ if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
+ break;
+ }
+ default:{
+ if(lspace->move_object){
+ assert(tuner->kind == TRANS_NOTHING);
+ assert(!tuner->tuning_size);
+ new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
+ Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
+ if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
+ }
+ break;
+ }
+ }
+
+ /*For_statistic: LOS information.*/
+ lspace->alloced_size = 0;
+ lspace->surviving_size = 0;
+
+ los_boundary = lspace->heap_end;
+}
+
+void lspace_sweep(Lspace* lspace)
+{
+ unsigned int mark_bit_idx = 0;
+ POINTER_SIZE_INT cur_size = 0;
+ void *cur_area_start, *cur_area_end;
+
+ /*If it is TRANS_FROM_MOS_TO_LOS now, we must clear the fa already added in lspace_reset_after_collection*/
+ free_area_pool_reset(lspace->free_pool);
+
+ Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start;
+ Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx);
+ if(p_next_obj){
+ obj_unmark_in_vt(p_next_obj);
+ /* We need this because, in a hybrid of gen_mode and non_gen_mode, LOS is marked only in
+ non_gen_mode and is not reset in gen_mode. When switching back from gen_mode to non_gen_mode,
+ an object marked last time would be treated as already marked and not scanned in this cycle. */
+ obj_clear_dual_bits_in_oi(p_next_obj);
+ /*For_statistic: sum up the size of surviving large objects, useful to decide LOS extension.*/
+ unsigned int obj_size = vm_object_size(p_next_obj);
+#ifdef USE_32BITS_HASHCODE
+ obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
+#endif
+ lspace->surviving_size += ALIGN_UP_TO_KILO(obj_size);
+ }
+
+ cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj);
+ cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
+ unsigned int hash_extend_size = 0;
+
+ Free_Area* cur_area = NULL;
+ while(cur_area_end){
+ cur_area = NULL;
+ cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
+
+ if(cur_size){
+ //debug
+ assert(cur_size >= KB);
+ cur_area = free_area_new(cur_area_start, cur_size);
+ if( cur_area ) free_pool_add_area(lspace->free_pool, cur_area);
+ }
+ /* successfully create an area */
+
+ p_prev_obj = p_next_obj;
+ p_next_obj = lspace_get_next_marked_object(lspace, &mark_bit_idx);
+ if(p_next_obj){
+ obj_unmark_in_vt(p_next_obj);
+ obj_clear_dual_bits_in_oi(p_next_obj);
+ /*For_statistic: sum up the size of surviving large objects, useful to decide LOS extension.*/
+ unsigned int obj_size = vm_object_size(p_next_obj);
+#ifdef USE_32BITS_HASHCODE
+ obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
+#endif
+ lspace->surviving_size += ALIGN_UP_TO_KILO(obj_size);
+ }
+
+#ifdef USE_32BITS_HASHCODE
+ hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)p_prev_obj))?GC_OBJECT_ALIGNMENT:0;
+#endif
+ cur_area_start = (void*)ALIGN_UP_TO_KILO((POINTER_SIZE_INT)p_prev_obj + vm_object_size(p_prev_obj) + hash_extend_size);
+ cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
+
+ }
+
+ /* cur_area_end == NULL */
+ cur_area_end = (void*)ALIGN_DOWN_TO_KILO(lspace->heap_end);
+ cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
+ if(cur_size){
+ //debug
+ assert(cur_size >= KB);
+ cur_area = free_area_new(cur_area_start, cur_size);
+ if( cur_area ) free_pool_add_area(lspace->free_pool, cur_area);
+ }
+
+ mark_bit_idx = 0;
+ assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx));
+
+ /*Update the survive ratio here. If we tuned LOS this time, the ratio is computed against the new committed size.*/
+ /*Fixme: We should keep the surviving size from last time, and set the denominator to last_survive + current_alloc*/
+ lspace->survive_ratio = (float)lspace->surviving_size / (float)lspace->committed_heap_size;
+
+ return;
+
+}
diff -ruN drlvm-0703-svn/vm/gc_gen/src/los/lspace.cpp drlvm-0703-new/vm/gc_gen/src/los/lspace.cpp
--- drlvm-0703-svn/vm/gc_gen/src/los/lspace.cpp 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/los/lspace.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Ji Qi, 2006/10/05
+ */
+
+#include "lspace.h"
+
+void* los_boundary = NULL;
+Boolean* p_global_lspace_move_obj;
+
+struct GC_Gen;
+void gc_set_los(GC_Gen* gc, Space* lspace);
+
+extern POINTER_SIZE_INT min_los_size_bytes;
+extern POINTER_SIZE_INT min_none_los_size_bytes;
+void lspace_initialize(GC* gc, void* start, POINTER_SIZE_INT lspace_size)
+{
+ Lspace* lspace = (Lspace*)STD_MALLOC( sizeof(Lspace));
+ assert(lspace);
+ memset(lspace, 0, sizeof(Lspace));
+
+ /* commit lspace mem */
+ void* reserved_base = start;
+ POINTER_SIZE_INT committed_size = lspace_size;
+ if(!large_page_hint)
+ vm_commit_mem(reserved_base, lspace_size);
+ memset(reserved_base, 0, lspace_size);
+
+ min_los_size_bytes -= LOS_HEAD_RESERVE_FOR_HEAP_NULL;
+ lspace->committed_heap_size = committed_size - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
+ lspace->reserved_heap_size = gc->reserved_heap_size - min_none_los_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
+ lspace->heap_start = (void*)((POINTER_SIZE_INT)reserved_base + LOS_HEAD_RESERVE_FOR_HEAP_NULL);
+ lspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + committed_size);
+
+ lspace->gc = gc;
+ /*LOS_Shrink:*/
+ lspace->move_object = FALSE;
+
+ /*Set up the free area pool (buddy lists)*/
+ lspace->free_pool = (Free_Area_Pool*)STD_MALLOC(sizeof(Free_Area_Pool));
+ free_area_pool_init(lspace->free_pool);
+ Free_Area* initial_fa = (Free_Area*)lspace->heap_start;
+ initial_fa->size = lspace->committed_heap_size;
+ free_pool_add_area(lspace->free_pool, initial_fa);
+
+ lspace->num_collections = 0;
+ lspace->time_collections = 0;
+ lspace->survive_ratio = 0.5f;
+
+ gc_set_los((GC_Gen*)gc, (Space*)lspace);
+ p_global_lspace_move_obj = &(lspace->move_object);
+ los_boundary = lspace->heap_end;
+
+ return;
+}
+
+void lspace_destruct(Lspace* lspace)
+{
+ STD_FREE(lspace);
+ lspace = NULL;
+ return;
+}
+
+#include "../common/fix_repointed_refs.h"
+
+/* this is a minor collection; the lspace is not swept, so we need to clear the mark bits */
+void lspace_fix_after_copy_nursery(Collector* collector, Lspace* lspace)
+{
+ unsigned int mark_bit_idx = 0;
+ Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx);
+ while( p_obj){
+ assert(obj_is_marked_in_vt(p_obj));
+ obj_unmark_in_vt(p_obj);
+ object_fix_ref_slots(p_obj);
+ p_obj = lspace_get_next_marked_object(lspace, &mark_bit_idx);
+ }
+}
+
+void lspace_fix_repointed_refs(Collector* collector, Lspace* lspace)
+{
+ unsigned int start_pos = 0;
+ Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &start_pos);
+ while( p_obj){
+ assert(obj_is_marked_in_vt(p_obj));
+ object_fix_ref_slots(p_obj);
+ p_obj = lspace_get_next_marked_object(lspace, &start_pos);
+ }
+}
+
+void lspace_collection(Lspace* lspace)
+{
+ /* the heap is already marked; we only need to sweep here. */
+ lspace->num_collections ++;
+ lspace_reset_after_collection(lspace);
+ /*When sliding compaction is applied to the lspace, we don't need to sweep it anymore.
+ What's more, the assumption in lspace_get_next_marked_object that the first word of each
+ KB unit must be zero when iterating the lspace does not hold in that case*/
+ if(!lspace->move_object) lspace_sweep(lspace);
+ else lspace->surviving_size = (POINTER_SIZE_INT)lspace->scompact_fa_start - (POINTER_SIZE_INT)lspace->heap_start;
+ lspace->move_object = FALSE;
+ return;
+}
+
+POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace)
+{
+ return lspace->failure_size;
+}
diff -ruN drlvm-0703-svn/vm/gc_gen/src/los/lspace.h drlvm-0703-new/vm/gc_gen/src/los/lspace.h
--- drlvm-0703-svn/vm/gc_gen/src/los/lspace.h 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/los/lspace.h 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Ji Qi, 2006/10/05
+ */
+
+#ifndef _LSPACE_H_
+#define _LSPACE_H_
+
+#include "../common/gc_common.h"
+#include "../thread/gc_thread.h"
+#include "free_area_pool.h"
+#ifdef USE_32BITS_HASHCODE
+#include "../common/hashcode.h"
+#endif
+
+/*Fixme: This macro is for handling HEAP_NULL issues caused by JIT OPT*/
+#ifdef COMPRESS_REFERENCE
+ #define LOS_HEAD_RESERVE_FOR_HEAP_NULL ( SPACE_ALLOC_UNIT )
+#else
+ #define LOS_HEAD_RESERVE_FOR_HEAP_NULL ( 0*KB )
+#endif
+
+typedef struct Lspace{
+ /* <-- first couple of fields are overloaded as Space */
+ void* heap_start;
+ void* heap_end;
+ POINTER_SIZE_INT reserved_heap_size;
+ POINTER_SIZE_INT committed_heap_size;
+ unsigned int num_collections;
+ int64 time_collections;
+ float survive_ratio;
+ unsigned int collect_algorithm;
+ GC* gc;
+ /*LOS_Shrink: this field indicates sliding compaction of the lspace */
+ Boolean move_object;
+ /*For_statistic: size allocated since the last LOS collection, i.e. the last major collection*/
+ volatile POINTER_SIZE_INT alloced_size;
+ /*For_statistic: size survived after lspace_sweep*/
+ POINTER_SIZE_INT surviving_size;
+ /* END of Space --> */
+
+ Free_Area_Pool* free_pool;
+ /*Size of the allocation that caused the lspace alloc failure.
+ *It is used to assign an area to the failed allocation inside the GC.
+ *Reset in every gc_assign_free_area_to_mutators
+ */
+ POINTER_SIZE_INT failure_size;
+ void* success_ptr;
+
+ void* scompact_fa_start;
+ void* scompact_fa_end;
+}Lspace;
+
+void lspace_initialize(GC* gc, void* reserved_base, POINTER_SIZE_INT lspace_size);
+void lspace_destruct(Lspace* lspace);
+Managed_Object_Handle lspace_alloc(POINTER_SIZE_INT size, Allocator* allocator);
+void* lspace_try_alloc(Lspace* lspace, POINTER_SIZE_INT alloc_size);
+void lspace_sliding_compact(Collector* collector, Lspace* lspace);
+void lspace_compute_object_target(Collector* collector, Lspace* lspace);
+void lspace_sweep(Lspace* lspace);
+void lspace_reset_after_collection(Lspace* lspace);
+void lspace_collection(Lspace* lspace);
+
+inline POINTER_SIZE_INT lspace_free_memory_size(Lspace* lspace){ /* FIXME:: */ return 0; }
+inline POINTER_SIZE_INT lspace_committed_size(Lspace* lspace){ return lspace->committed_heap_size; }
+
+inline Partial_Reveal_Object* lspace_get_next_marked_object( Lspace* lspace, unsigned int* iterate_index)
+{
+ POINTER_SIZE_INT next_area_start = (POINTER_SIZE_INT)lspace->heap_start + (*iterate_index) * KB;
+ BOOLEAN reach_heap_end = 0;
+ unsigned int hash_extend_size = 0;
+
+ while(!reach_heap_end){
+ //FIXME: This while should be an if; try it!
+ while(!*((POINTER_SIZE_INT*)next_area_start)){
+ assert(((Free_Area*)next_area_start)->size);
+ next_area_start += ((Free_Area*)next_area_start)->size;
+ }
+ if(next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
+ //If there is a living object at this addr, return it, and update iterate_index
+
+#ifdef USE_32BITS_HASHCODE
+ hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)next_area_start))?GC_OBJECT_ALIGNMENT:0;
+#endif
+
+ if(obj_is_marked_in_vt((Partial_Reveal_Object*)next_area_start)){
+ POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start) + hash_extend_size);
+ *iterate_index = (unsigned int)((next_area_start + obj_size - (POINTER_SIZE_INT)lspace->heap_start) >> BIT_SHIFT_TO_KILO);
+ return (Partial_Reveal_Object*)next_area_start;
+ //If this is a dead object, go on to find a living one.
+ }else{
+ POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)+ hash_extend_size);
+ next_area_start += obj_size;
+ }
+ }else{
+ reach_heap_end = 1;
+ }
+ }
+ return NULL;
+
+}
+
+inline Partial_Reveal_Object* lspace_get_first_marked_object(Lspace* lspace, unsigned int* mark_bit_idx)
+{
+ return lspace_get_next_marked_object(lspace, mark_bit_idx);
+}
+
+void lspace_fix_after_copy_nursery(Collector* collector, Lspace* lspace);
+
+void lspace_fix_repointed_refs(Collector* collector, Lspace* lspace);
+
+POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace);
+
+#endif /*_LSPACE_H_ */
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp drlvm-0703-new/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp 2007-07-03 23:22:22.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -209,4 +209,3 @@
}
#endif
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_alloc.cpp drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_alloc.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_alloc.cpp 2007-07-03 23:22:22.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_alloc.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -75,4 +75,3 @@
return p_return;
}
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp 2007-07-03 23:22:22.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -339,4 +339,3 @@
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_collect_compact.h drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_collect_compact.h
--- drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_collect_compact.h 2007-07-03 23:22:22.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_collect_compact.h 2007-07-04 01:42:45.000000000 +0800
@@ -54,4 +54,3 @@
#endif /* _MSPACE_COLLECT_COMPACT_H_ */
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace.cpp drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace.cpp 2007-07-03 23:22:22.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -167,4 +167,3 @@
}
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp 2007-07-03 23:22:22.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -20,7 +20,7 @@
#include "mspace_collect_compact.h"
#include "../trace_forward/fspace.h"
-#include "../mark_sweep/lspace.h"
+#include "../los/lspace.h"
#include "../finalizer_weakref/finalizer_weakref.h"
#include "../gen/gen.h"
#include "../common/fix_repointed_refs.h"
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp 2007-07-03 23:22:22.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_move_compact.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -20,7 +20,7 @@
#include "mspace_collect_compact.h"
#include "../trace_forward/fspace.h"
-#include "../mark_sweep/lspace.h"
+#include "../los/lspace.h"
#include "../finalizer_weakref/finalizer_weakref.h"
#ifdef USE_32BITS_HASHCODE
#include "../common/hashcode.h"
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp 2007-07-03 23:22:22.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -20,7 +20,7 @@
#include "mspace_collect_compact.h"
#include "../trace_forward/fspace.h"
-#include "../mark_sweep/lspace.h"
+#include "../los/lspace.h"
#include "../finalizer_weakref/finalizer_weakref.h"
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/free_area_pool.cpp drlvm-0703-new/vm/gc_gen/src/mark_sweep/free_area_pool.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/free_area_pool.cpp 2007-07-03 23:22:29.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/free_area_pool.cpp 1970-01-01 08:00:00.000000000 +0800
@@ -1,68 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Ji Qi, 2006/10/05
- */
-
-#include "free_area_pool.h"
-
-void free_area_pool_init(Free_Area_Pool* pool)
-{
- for(unsigned int i = 0; i < NUM_FREE_LIST; i ++){
- Bidir_List* list = (Bidir_List*)(&pool->sized_area_list[i]);
- list->next = list->prev = list;
- ((Lockable_Bidir_List*)list)->lock = 0;
- ((Lockable_Bidir_List*)list)->zero = 0;
- }
-
- memset((void*)pool->list_bit_flag, 0, NUM_FLAG_WORDS << BIT_SHIFT_TO_BYTES_PER_WORD);
- return;
-}
-
-void free_area_pool_reset(Free_Area_Pool* pool)
-{
- free_area_pool_init(pool);
-}
-
-Free_Area* free_pool_find_size_area(Free_Area_Pool* pool, POINTER_SIZE_INT size)
-{
- assert(size >= GC_OBJ_SIZE_THRESHOLD);
-
- size = ALIGN_UP_TO_KILO(size);
- unsigned int index = pool_list_index_with_size(size);
- /* Get first list index that is not empty */
- index = pool_list_get_next_flag(pool, index);
- assert(index <= NUM_FREE_LIST);
-
- /*No free area left*/
- if(index == NUM_FREE_LIST)
- return NULL;
-
- Bidir_List* list = (Bidir_List*)&pool->sized_area_list[index];
- Free_Area* area = (Free_Area*)list->next;
-
- if(index != MAX_LIST_INDEX)
- return area;
-
- /* Else, for last bucket MAX_LIST_INDEX, we must traverse it */
- while( area != (Free_Area*)list ){
- if(area->size >= size) return area;
- area = (Free_Area*)(area->next);
- }
-
- return NULL;
-}
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/free_area_pool.h drlvm-0703-new/vm/gc_gen/src/mark_sweep/free_area_pool.h
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/free_area_pool.h 2007-07-03 23:22:29.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/free_area_pool.h 1970-01-01 08:00:00.000000000 +0800
@@ -1,130 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Ji Qi, 2006/10/05
- */
-
-#ifndef _BUDDY_H_
-#define _BUDDY_H_
-
-#include "../common/gc_common.h"
-#include "../utils/bit_ops.h"
-#include "../utils/bidir_list.h"
-
-#define ADDRESS_IS_KB_ALIGNED(addr) (!(((POINTER_SIZE_INT)addr) & ((1 << BIT_SHIFT_TO_KILO)-1)))
-#define ALIGN_UP_TO_KILO(addr) (((POINTER_SIZE_INT)(addr) + (KB - 1)) & (~(KB- 1)))
-#define ALIGN_DOWN_TO_KILO(addr) ((POINTER_SIZE_INT)(addr) & (~(KB- 1)))
-
-#define NUM_FREE_LIST 128
-
-typedef struct Lockable_Bidir_List{
- /* <-- First couple of fields overloadded as Bidir_List */
- POINTER_SIZE_INT zero;
- Bidir_List* next;
- Bidir_List* prev;
- /* END of Bidir_List --> */
- SpinLock lock;
-}Lockable_Bidir_List;
-
-typedef struct Free_Area{
- /* <-- First couple of fields overloadded as Bidir_List */
- POINTER_SIZE_INT zero;
- Bidir_List* next;
- Bidir_List* prev;
- /* END of Bidir_List --> */
- POINTER_SIZE_INT size;
-}Free_Area;
-
-/* this is the only interface for new area creation. If the new area size is smaller than threshold, return NULL*/
-inline Free_Area* free_area_new(void* start, POINTER_SIZE_INT size)
-{
- assert(ADDRESS_IS_KB_ALIGNED(start));
- assert(ADDRESS_IS_KB_ALIGNED(size));
-
- Free_Area* area = (Free_Area*)start;
- area->zero = 0;
- area->next = area->prev = (Bidir_List*)area;
- area->size = size;
-
- if( size < GC_OBJ_SIZE_THRESHOLD) return NULL;
- else return area;
-}
-
-#define NUM_FLAG_WORDS (NUM_FREE_LIST >> BIT_SHIFT_TO_BITS_PER_WORD)
-
-typedef struct Free_Area_Pool{
- Lockable_Bidir_List sized_area_list[NUM_FREE_LIST];
- /* each list corresponds to one bit in below vector */
- POINTER_SIZE_INT list_bit_flag[NUM_FLAG_WORDS];
-}Free_Area_Pool;
-
-#define MAX_LIST_INDEX (NUM_FREE_LIST - 1)
-
-inline void pool_list_set_flag(Free_Area_Pool* pool, unsigned int index)
-{
- words_set_bit(pool->list_bit_flag, NUM_FLAG_WORDS, index);
-}
-
-inline void pool_list_clear_flag(Free_Area_Pool* pool, unsigned int index)
-{
- words_clear_bit(pool->list_bit_flag, NUM_FLAG_WORDS, index);
-}
-
-inline unsigned int pool_list_get_next_flag(Free_Area_Pool* pool, unsigned int start_idx)
-{
- return words_get_next_set_lsb(pool->list_bit_flag, NUM_FLAG_WORDS, start_idx);
-}
-
-inline unsigned int pool_list_index_with_size(POINTER_SIZE_INT size)
-{
- assert(size >= GC_OBJ_SIZE_THRESHOLD);
-
- unsigned int index;
- index = (unsigned int) (size >> BIT_SHIFT_TO_KILO);
- if(index > MAX_LIST_INDEX) index = MAX_LIST_INDEX;
- return index;
-}
-
-inline Free_Area* free_pool_add_area(Free_Area_Pool* pool, Free_Area* free_area)
-{
- assert( free_area->size >= GC_OBJ_SIZE_THRESHOLD);
-
- unsigned int index = pool_list_index_with_size(free_area->size);
- bidir_list_add_item((Bidir_List*)&(pool->sized_area_list[index]), (Bidir_List*)free_area);
-
- /* set bit flag of the list */
- pool_list_set_flag(pool, index);
- return free_area;
-}
-
-inline void free_pool_remove_area(Free_Area_Pool* pool, Free_Area* free_area)
-{
- unsigned int index = pool_list_index_with_size(free_area->size);
- bidir_list_remove_item((Bidir_List*)free_area);
-
- /* set bit flag of the list */
- Bidir_List* list = (Bidir_List*)&(pool->sized_area_list[index]);
- if(list->next == list){
- pool_list_clear_flag(pool, index);
- }
-}
-
-void free_area_pool_init(Free_Area_Pool* p_buddy);
-void free_area_pool_reset(Free_Area_Pool* p_buddy);
-Free_Area* free_pool_find_size_area(Free_Area_Pool* pool, unsigned int size);
-
-#endif /*ifdef _BUDDY_H_*/
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp drlvm-0703-new/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp 2007-07-03 23:22:29.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp 1970-01-01 08:00:00.000000000 +0800
@@ -1,458 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Ji Qi, 2006/10/05
- */
-
-#include "lspace.h"
-#include "../gen/gen.h"
-#include "../common/space_tuner.h"
-
-static void free_pool_lock_nr_list(Free_Area_Pool* pool, unsigned int list_index)
-{
- Lockable_Bidir_List* list_head = &pool->sized_area_list[list_index];
- lock(list_head->lock);
-}
-
-static void free_pool_unlock_nr_list(Free_Area_Pool* pool, unsigned int list_index)
-{
- Lockable_Bidir_List* list_head = &pool->sized_area_list[list_index];
- unlock(list_head->lock);
-}
-
-static unsigned int free_pool_nr_list_is_empty(Free_Area_Pool* pool, unsigned int list_index)
-{
- Bidir_List* head = (Bidir_List*)(&pool->sized_area_list[list_index]);
- return (head->next == head);
-}
-static void* free_pool_former_lists_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int list_hint, POINTER_SIZE_INT size)
-{
- Free_Area* free_area;
- void* p_result;
- POINTER_SIZE_SINT remain_size;
- POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size);
- unsigned int new_list_nr = 0;
- Lockable_Bidir_List* head = &pool->sized_area_list[list_hint];
-
- assert(list_hint < MAX_LIST_INDEX);
-
- free_pool_lock_nr_list(pool, list_hint);
- /*Other LOS allocation may race with this one, so check list status here.*/
- if(free_pool_nr_list_is_empty(pool, list_hint)){
- free_pool_unlock_nr_list(pool, list_hint);
- return NULL;
- }
-
- free_area = (Free_Area*)(head->next);
- /*if the list head is not NULL, it definitely satisfies the request. */
- remain_size = free_area->size - alloc_size;
- assert(remain_size >= 0);
- if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
- new_list_nr = pool_list_index_with_size(remain_size);
- p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
- if(new_list_nr == list_hint){
- free_area->size = remain_size;
- free_pool_unlock_nr_list(pool, list_hint);
- return p_result;
- }else{
- free_pool_remove_area(pool, free_area);
- free_pool_unlock_nr_list(pool, list_hint);
- free_area->size = remain_size;
- free_pool_lock_nr_list(pool, new_list_nr);
- free_pool_add_area(pool, free_area);
- free_pool_unlock_nr_list(pool, new_list_nr);
- return p_result;
- }
- }
- else
- {
- free_pool_remove_area(pool, free_area);
- free_pool_unlock_nr_list(pool, list_hint);
- p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
- if(remain_size > 0){
- assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
- free_area->size = remain_size;
- }
- return p_result;
- }
- assert(0);
- return NULL;
-}
-
-static void* free_pool_last_list_atomic_take_area_piece(Free_Area_Pool* pool, POINTER_SIZE_INT size)
-{
- void* p_result;
- POINTER_SIZE_SINT remain_size = 0;
- POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size);
- Free_Area* free_area = NULL;
- Free_Area* new_area = NULL;
- unsigned int new_list_nr = 0;
- Lockable_Bidir_List* head = &(pool->sized_area_list[MAX_LIST_INDEX]);
-
- free_pool_lock_nr_list(pool, MAX_LIST_INDEX );
- /*The last list is empty.*/
- if(free_pool_nr_list_is_empty(pool, MAX_LIST_INDEX)){
- free_pool_unlock_nr_list(pool, MAX_LIST_INDEX );
- return NULL;
- }
-
- free_area = (Free_Area*)(head->next);
- while( free_area != (Free_Area*)head ){
- remain_size = free_area->size - alloc_size;
- if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
- new_list_nr = pool_list_index_with_size(remain_size);
- p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
- if(new_list_nr == MAX_LIST_INDEX){
- free_area->size = remain_size;
- free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
- return p_result;
- }else{
- free_pool_remove_area(pool, free_area);
- free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
- free_area->size = remain_size;
- free_pool_lock_nr_list(pool, new_list_nr);
- free_pool_add_area(pool, free_area);
- free_pool_unlock_nr_list(pool, new_list_nr);
- return p_result;
- }
- }
- else if(remain_size >= 0)
- {
- free_pool_remove_area(pool, free_area);
- free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
- p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
- if(remain_size > 0){
- assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
- free_area->size = remain_size;
- }
- return p_result;
- }
- else free_area = (Free_Area*)free_area->next;
- }
- /*No adequate area in the last list*/
- free_pool_unlock_nr_list(pool, MAX_LIST_INDEX );
- return NULL;
-}
-
-void* lspace_try_alloc(Lspace* lspace, POINTER_SIZE_INT alloc_size){
- void* p_result = NULL;
- Free_Area_Pool* pool = lspace->free_pool;
- unsigned int list_hint = pool_list_index_with_size(alloc_size);
- list_hint = pool_list_get_next_flag(pool, list_hint);
-
- while((!p_result) && (list_hint <= MAX_LIST_INDEX)){
- /*List hint is not the last list, so look for it in former lists.*/
- if(list_hint < MAX_LIST_INDEX){
- p_result = free_pool_former_lists_atomic_take_area_piece(pool, list_hint, alloc_size);
- if(p_result){
- memset(p_result, 0, alloc_size);
- POINTER_SIZE_INT vold = lspace->alloced_size;
- POINTER_SIZE_INT vnew = vold + alloc_size;
- while( vold != atomic_casptrsz(&lspace->alloced_size, vnew, vold) ){
- vold = lspace->alloced_size;
- vnew = vold + alloc_size;
- }
- return p_result;
- }else{
- list_hint ++;
- list_hint = pool_list_get_next_flag(pool, list_hint);
- continue;
- }
- }
- /*List hint is the last list, so look for it in the last list.*/
- else
- {
- p_result = free_pool_last_list_atomic_take_area_piece(pool, alloc_size);
- if(p_result){
- memset(p_result, 0, alloc_size);
- POINTER_SIZE_INT vold = lspace->alloced_size;
- POINTER_SIZE_INT vnew = vold + alloc_size;
- while( vold != atomic_casptrsz(&lspace->alloced_size, vnew, vold) ){
- vold = lspace->alloced_size;
- vnew = vold + alloc_size;
- }
- return p_result;
- }
- else break;
- }
- }
- return p_result;
-}
-
-void* lspace_alloc(POINTER_SIZE_INT size, Allocator *allocator)
-{
- unsigned int try_count = 0;
- void* p_result = NULL;
- POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size);
- Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc);
- Free_Area_Pool* pool = lspace->free_pool;
-
- while( try_count < 2 ){
- if(p_result = lspace_try_alloc(lspace, alloc_size))
- return p_result;
-
- /*Failled, no adequate area found in all lists, so GC at first, then get another try.*/
- if(try_count == 0){
- vm_gc_lock_enum();
- /*Check again if there is space for the obj, for maybe other mutator
- threads issus a GC in the time gap of waiting the gc lock*/
- if(p_result = lspace_try_alloc(lspace, alloc_size)){
- vm_gc_unlock_enum();
- return p_result;
- }
- lspace->failure_size = round_up_to_size(alloc_size, KB);
-
- gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL);
-
- if(lspace->success_ptr){
- p_result = lspace->success_ptr;
- lspace->success_ptr = NULL;
- vm_gc_unlock_enum();
- return p_result;
- }
- vm_gc_unlock_enum();
- try_count ++;
- }else{
- try_count ++;
- }
- }
- return NULL;
-}
-
-void lspace_compute_object_target(Collector* collector, Lspace* lspace)
-{
- void* dest_addr = lspace->heap_start;
- unsigned int iterate_index = 0;
- Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
-
- assert(!collector->rem_set);
- collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
-#ifdef USE_32BITS_HASHCODE
- collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
-#endif
-
- while( p_obj ){
- assert( obj_is_marked_in_vt(p_obj));
- unsigned int obj_size = vm_object_size(p_obj);
- assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)lspace->heap_end);
-#ifdef USE_32BITS_HASHCODE
- obj_size += hashcode_is_attached(p_obj)? GC_OBJECT_ALIGNMENT : 0 ;
- Obj_Info_Type obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, null, null);
-#else
- Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
-#endif
-
- if( obj_info != 0 ) {
- collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
- collector_remset_add_entry(collector, (Partial_Reveal_Object **)obj_info);
- }
-
- obj_set_fw_in_oi(p_obj, dest_addr);
- dest_addr = (void *)ALIGN_UP_TO_KILO(((POINTER_SIZE_INT) dest_addr + obj_size));
- p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
- }
-
- pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
- collector->rem_set = NULL;
-#ifdef USE_32BITS_HASHCODE
- pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
- collector->hashcode_set = NULL;
-#endif
-
- lspace->scompact_fa_start = dest_addr;
- lspace->scompact_fa_end= lspace->heap_end;
- return;
-}
-
-void lspace_sliding_compact(Collector* collector, Lspace* lspace)
-{
- unsigned int iterate_index = 0;
- Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
- Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
-
- while( p_obj ){
- assert( obj_is_marked_in_vt(p_obj));
-#ifdef USE_32BITS_HASHCODE
- obj_clear_dual_bits_in_vt(p_obj);
-#else
- obj_unmark_in_vt(p_obj);
-#endif
-
- unsigned int obj_size = vm_object_size(p_obj);
-#ifdef USE_32BITS_HASHCODE
- obj_size += (obj_is_sethash_in_vt(p_obj))?GC_OBJECT_ALIGNMENT:0;
-#endif
- Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
- POINTER_SIZE_INT target_obj_end = (POINTER_SIZE_INT)p_target_obj + obj_size;
- if( p_obj != p_target_obj){
- memmove(p_target_obj, p_obj, obj_size);
- }
- set_obj_info(p_target_obj, 0);
- p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
- }
-
- return;
-}
-
-void lspace_reset_after_collection(Lspace* lspace)
-{
- GC* gc = lspace->gc;
- Space_Tuner* tuner = gc->tuner;
- POINTER_SIZE_INT trans_size = tuner->tuning_size;
- POINTER_SIZE_INT new_fa_size = 0;
- assert(!(trans_size%GC_BLOCK_SIZE_BYTES));
-
- /* Reset the pool first because its info is useless now. */
- free_area_pool_reset(lspace->free_pool);
-
- switch(tuner->kind){
- case TRANS_FROM_MOS_TO_LOS:{
- if(lspace->move_object){
- assert(tuner->force_tune);
- Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks;
- lspace->heap_end = (void*)mos_first_block;
- assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
- new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
- Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
- if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
- }else{
- void* origin_end = lspace->heap_end;
- lspace->heap_end = (void*)(((GC_Gen*)gc)->mos->blocks);
-        /*Note: the assumption made by lspace_get_next_marked_object, that the first
-          word of a free KB unit is zero, does not hold here.*/
- Free_Area* trans_fa = free_area_new(origin_end, trans_size);
- if(trans_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, trans_fa);
- }
- lspace->committed_heap_size += trans_size;
- break;
- }
- case TRANS_FROM_LOS_TO_MOS:{
- assert(lspace->move_object);
- assert(tuner->tuning_size);
- Block* mos_first_block = ((GC_Gen*)gc)->mos->blocks;
- assert( (POINTER_SIZE_INT)lspace->heap_end - trans_size == (POINTER_SIZE_INT)mos_first_block );
- lspace->heap_end = (void*)mos_first_block;
- lspace->committed_heap_size -= trans_size;
- /*LOS_Shrink: We don't have to scan lspace to build free pool when slide compact LOS*/
- assert((POINTER_SIZE_INT)lspace->scompact_fa_end > (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size);
- new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start - tuner->tuning_size;
- Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
- if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
- break;
- }
- default:{
- if(lspace->move_object){
- assert(tuner->kind == TRANS_NOTHING);
- assert(!tuner->tuning_size);
- new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
- Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
- if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
- }
- break;
- }
- }
-
- /*For_statistic los information.*/
- lspace->alloced_size = 0;
- lspace->surviving_size = 0;
-
- los_boundary = lspace->heap_end;
-}
-
-void lspace_sweep(Lspace* lspace)
-{
- unsigned int mark_bit_idx = 0;
- POINTER_SIZE_INT cur_size = 0;
- void *cur_area_start, *cur_area_end;
-
-  /*If it is TRANS_FROM_MOS_TO_LOS now, we must clear the free areas already added in lspace_reset_after_collection*/
- free_area_pool_reset(lspace->free_pool);
-
- Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start;
- Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx);
- if(p_next_obj){
- obj_unmark_in_vt(p_next_obj);
- /* we need this because, in hybrid situation of gen_mode and non_gen_mode, LOS will only be marked
- in non_gen_mode, and not reset in gen_mode. When it switches back from gen_mode to non_gen_mode,
- the last time marked object is thought to be already marked and not scanned for this cycle. */
- obj_clear_dual_bits_in_oi(p_next_obj);
-    /*For_statistic: sum up the size of survived large objects, useful for deciding LOS extension.*/
-    unsigned int obj_size = vm_object_size(p_next_obj);
-#ifdef USE_32BITS_HASHCODE
- obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
-#endif
- lspace->surviving_size += ALIGN_UP_TO_KILO(obj_size);
- }
-
- cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj);
- cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
- unsigned int hash_extend_size = 0;
-
- Free_Area* cur_area = NULL;
- while(cur_area_end){
- cur_area = NULL;
- cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
-
- if(cur_size){
- //debug
- assert(cur_size >= KB);
- cur_area = free_area_new(cur_area_start, cur_size);
- if( cur_area ) free_pool_add_area(lspace->free_pool, cur_area);
- }
- /* successfully create an area */
-
- p_prev_obj = p_next_obj;
- p_next_obj = lspace_get_next_marked_object(lspace, &mark_bit_idx);
- if(p_next_obj){
- obj_unmark_in_vt(p_next_obj);
- obj_clear_dual_bits_in_oi(p_next_obj);
-      /*For_statistic: sum up the size of survived large objects, useful for deciding LOS extension.*/
- unsigned int obj_size = vm_object_size(p_next_obj);
-#ifdef USE_32BITS_HASHCODE
- obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
-#endif
- lspace->surviving_size += ALIGN_UP_TO_KILO(obj_size);
- }
-
-#ifdef USE_32BITS_HASHCODE
- hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)p_prev_obj))?GC_OBJECT_ALIGNMENT:0;
-#endif
- cur_area_start = (void*)ALIGN_UP_TO_KILO((POINTER_SIZE_INT)p_prev_obj + vm_object_size(p_prev_obj) + hash_extend_size);
- cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
-
- }
-
- /* cur_area_end == NULL */
- cur_area_end = (void*)ALIGN_DOWN_TO_KILO(lspace->heap_end);
- cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
- if(cur_size){
- //debug
- assert(cur_size >= KB);
- cur_area = free_area_new(cur_area_start, cur_size);
- if( cur_area ) free_pool_add_area(lspace->free_pool, cur_area);
- }
-
- mark_bit_idx = 0;
- assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx));
-
- /*Update survive ratio here. If we tune LOS this time, the ratio is computed by the new committed size.*/
- /*Fixme: We should keep the surviving size of last time, and set denominator to last_survive + current_alloc*/
- lspace->survive_ratio = (float)lspace->surviving_size / (float)lspace->committed_heap_size;
-
- return;
-
-}
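
The statistics update in lspace_try_alloc above bumps lspace->alloced_size with a compare-and-swap retry loop (atomic_casptrsz) rather than a lock. A minimal stand-alone sketch of the same accumulate-with-CAS idiom, using std::atomic as a stand-in for DRLVM's helper (all names below are illustrative, not part of the patch):

#include <atomic>
#include <cstddef>

/* Illustrative stand-in for the volatile alloced_size field updated by atomic_casptrsz. */
static std::atomic<std::size_t> alloced_size{0};

/* Add alloc_size to the shared counter without taking a lock, retrying the
 * compare-and-swap until it succeeds, as the loop in lspace_try_alloc does. */
void add_alloced_size(std::size_t alloc_size)
{
  std::size_t old_val = alloced_size.load();
  while(!alloced_size.compare_exchange_weak(old_val, old_val + alloc_size)){
    /* old_val is reloaded by compare_exchange_weak on failure; just retry */
  }
}
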
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/lspace.cpp drlvm-0703-new/vm/gc_gen/src/mark_sweep/lspace.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/lspace.cpp 2007-07-03 23:22:29.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/lspace.cpp 1970-01-01 08:00:00.000000000 +0800
@@ -1,122 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Ji Qi, 2006/10/05
- */
-
-#include "lspace.h"
-
-void* los_boundary = NULL;
-Boolean* p_global_lspace_move_obj;
-
-struct GC_Gen;
-void gc_set_los(GC_Gen* gc, Space* lspace);
-
-extern POINTER_SIZE_INT min_los_size_bytes;
-extern POINTER_SIZE_INT min_none_los_size_bytes;
-void lspace_initialize(GC* gc, void* start, POINTER_SIZE_INT lspace_size)
-{
- Lspace* lspace = (Lspace*)STD_MALLOC( sizeof(Lspace));
- assert(lspace);
- memset(lspace, 0, sizeof(Lspace));
-
-  /* commit lspace memory */
- void* reserved_base = start;
- POINTER_SIZE_INT committed_size = lspace_size;
- if(!large_page_hint)
- vm_commit_mem(reserved_base, lspace_size);
- memset(reserved_base, 0, lspace_size);
-
- min_los_size_bytes -= LOS_HEAD_RESERVE_FOR_HEAP_NULL;
- lspace->committed_heap_size = committed_size - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
- lspace->reserved_heap_size = gc->reserved_heap_size - min_none_los_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
- lspace->heap_start = (void*)((POINTER_SIZE_INT)reserved_base + LOS_HEAD_RESERVE_FOR_HEAP_NULL);
- lspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + committed_size);
-
- lspace->gc = gc;
- /*LOS_Shrink:*/
- lspace->move_object = FALSE;
-
- /*Treat with free area buddies*/
- lspace->free_pool = (Free_Area_Pool*)STD_MALLOC(sizeof(Free_Area_Pool));
- free_area_pool_init(lspace->free_pool);
- Free_Area* initial_fa = (Free_Area*)lspace->heap_start;
- initial_fa->size = lspace->committed_heap_size;
- free_pool_add_area(lspace->free_pool, initial_fa);
-
- lspace->num_collections = 0;
- lspace->time_collections = 0;
- lspace->survive_ratio = 0.5f;
-
- gc_set_los((GC_Gen*)gc, (Space*)lspace);
- p_global_lspace_move_obj = &(lspace->move_object);
- los_boundary = lspace->heap_end;
-
- return;
-}
-
-void lspace_destruct(Lspace* lspace)
-{
- STD_FREE(lspace);
- lspace = NULL;
- return;
-}
-
-#include "../common/fix_repointed_refs.h"
-
-/* this is minor collection, lspace is not swept, so we need clean markbits */
-void lspace_fix_after_copy_nursery(Collector* collector, Lspace* lspace)
-{
- unsigned int mark_bit_idx = 0;
- Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx);
- while( p_obj){
- assert(obj_is_marked_in_vt(p_obj));
- obj_unmark_in_vt(p_obj);
- object_fix_ref_slots(p_obj);
- p_obj = lspace_get_next_marked_object(lspace, &mark_bit_idx);
- }
-}
-
-void lspace_fix_repointed_refs(Collector* collector, Lspace* lspace)
-{
- unsigned int start_pos = 0;
- Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &start_pos);
- while( p_obj){
- assert(obj_is_marked_in_vt(p_obj));
- object_fix_ref_slots(p_obj);
- p_obj = lspace_get_next_marked_object(lspace, &start_pos);
- }
-}
-
-void lspace_collection(Lspace* lspace)
-{
- /* heap is marked already, we need only sweep here. */
- lspace->num_collections ++;
- lspace_reset_after_collection(lspace);
-  /*When sliding-compacting lspace we don't need to sweep it anymore.
-    What's more, the assumption made by lspace_get_next_marked_object, that the first
-    word of a free KB unit is zero, no longer holds.*/
- if(!lspace->move_object) lspace_sweep(lspace);
- else lspace->surviving_size = (POINTER_SIZE_INT)lspace->scompact_fa_start - (POINTER_SIZE_INT)lspace->heap_start;
- lspace->move_object = FALSE;
- return;
-}
-
-POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace)
-{
- return lspace->failure_size;
-}
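
lspace_compute_object_target and lspace_sliding_compact, earlier in this patch, split LOS compaction into two passes: first every marked object is assigned a forwarding address, then the survivors are moved with memmove. A self-contained sketch of that two-pass scheme over a toy byte buffer (the ToyObj type and function names are illustrative assumptions, not DRLVM's API):

#include <cstdint>
#include <cstring>
#include <vector>

/* Toy model: each object is a byte range inside one buffer; objs is sorted by offset,
 * like the address-ordered iteration of lspace_get_next_marked_object. */
struct ToyObj { std::size_t offset; std::size_t size; bool marked; std::size_t forward; };

std::size_t toy_sliding_compact(std::uint8_t *heap, std::vector<ToyObj> &objs)
{
  std::size_t dest = 0;
  for(ToyObj &o : objs)                     /* pass 1: assign targets (compute_object_target) */
    if(o.marked){ o.forward = dest; dest += o.size; }

  for(ToyObj &o : objs)                     /* pass 2: slide survivors down (sliding_compact) */
    if(o.marked && o.forward != o.offset){
      std::memmove(heap + o.forward, heap + o.offset, o.size);  /* dest <= source, so overlap is safe */
      o.offset = o.forward;
    }
  return dest;  /* start of the free area, analogous to lspace->scompact_fa_start */
}
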
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/lspace.h drlvm-0703-new/vm/gc_gen/src/mark_sweep/lspace.h
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/lspace.h 2007-07-03 23:22:29.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/lspace.h 1970-01-01 08:00:00.000000000 +0800
@@ -1,129 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Ji Qi, 2006/10/05
- */
-
-#ifndef _LSPACE_H_
-#define _LSPACE_H_
-
-#include "../common/gc_common.h"
-#include "../thread/gc_thread.h"
-#include "free_area_pool.h"
-#ifdef USE_32BITS_HASHCODE
-#include "../common/hashcode.h"
-#endif
-
-/*Fixme: This macro is for handling HEAP_NULL issues caused by JIT OPT*/
-#ifdef COMPRESS_REFERENCE
- #define LOS_HEAD_RESERVE_FOR_HEAP_NULL ( SPACE_ALLOC_UNIT )
-#else
- #define LOS_HEAD_RESERVE_FOR_HEAP_NULL ( 0*KB )
-#endif
-
-typedef struct Lspace{
-  /* <-- first couple of fields are overloaded as Space */
- void* heap_start;
- void* heap_end;
- POINTER_SIZE_INT reserved_heap_size;
- POINTER_SIZE_INT committed_heap_size;
- unsigned int num_collections;
- int64 time_collections;
- float survive_ratio;
- unsigned int collect_algorithm;
- GC* gc;
- /*LOS_Shrink:This field stands for sliding compact to lspace */
- Boolean move_object;
-  /*For_statistic: size allocated since the last LOS collection, i.e. the last major collection*/
- volatile POINTER_SIZE_INT alloced_size;
- /*For_statistic: size survived after lspace_sweep*/
- POINTER_SIZE_INT surviving_size;
- /* END of Space --> */
-
- Free_Area_Pool* free_pool;
-  /*Size of the allocation that caused the lspace alloc failure.
-   *It is used to assign a free area to the failed allocation inside the GC.
-   *Reset in every gc_assign_free_area_to_mutators.
- */
- POINTER_SIZE_INT failure_size;
- void* success_ptr;
-
- void* scompact_fa_start;
- void* scompact_fa_end;
-}Lspace;
-
-void lspace_initialize(GC* gc, void* reserved_base, POINTER_SIZE_INT lspace_size);
-void lspace_destruct(Lspace* lspace);
-Managed_Object_Handle lspace_alloc(POINTER_SIZE_INT size, Allocator* allocator);
-void* lspace_try_alloc(Lspace* lspace, POINTER_SIZE_INT alloc_size);
-void lspace_sliding_compact(Collector* collector, Lspace* lspace);
-void lspace_compute_object_target(Collector* collector, Lspace* lspace);
-void lspace_sweep(Lspace* lspace);
-void lspace_reset_after_collection(Lspace* lspace);
-void lspace_collection(Lspace* lspace);
-
-inline POINTER_SIZE_INT lspace_free_memory_size(Lspace* lspace){ /* FIXME:: */ return 0; }
-inline POINTER_SIZE_INT lspace_committed_size(Lspace* lspace){ return lspace->committed_heap_size; }
-
-inline Partial_Reveal_Object* lspace_get_next_marked_object( Lspace* lspace, unsigned int* iterate_index)
-{
- POINTER_SIZE_INT next_area_start = (POINTER_SIZE_INT)lspace->heap_start + (*iterate_index) * KB;
- BOOLEAN reach_heap_end = 0;
- unsigned int hash_extend_size = 0;
-
- while(!reach_heap_end){
-    //FIXME: This while should be if, try it!
- while(!*((POINTER_SIZE_INT*)next_area_start)){
- assert(((Free_Area*)next_area_start)->size);
- next_area_start += ((Free_Area*)next_area_start)->size;
- }
- if(next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
- //If there is a living object at this addr, return it, and update iterate_index
-
-#ifdef USE_32BITS_HASHCODE
- hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)next_area_start))?GC_OBJECT_ALIGNMENT:0;
-#endif
-
- if(obj_is_marked_in_vt((Partial_Reveal_Object*)next_area_start)){
- POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start) + hash_extend_size);
- *iterate_index = (unsigned int)((next_area_start + obj_size - (POINTER_SIZE_INT)lspace->heap_start) >> BIT_SHIFT_TO_KILO);
- return (Partial_Reveal_Object*)next_area_start;
- //If this is a dead object, go on to find a living one.
- }else{
- POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)+ hash_extend_size);
- next_area_start += obj_size;
- }
- }else{
- reach_heap_end = 1;
- }
- }
- return NULL;
-
-}
-
-inline Partial_Reveal_Object* lspace_get_first_marked_object(Lspace* lspace, unsigned int* mark_bit_idx)
-{
- return lspace_get_next_marked_object(lspace, mark_bit_idx);
-}
-
-void lspace_fix_after_copy_nursery(Collector* collector, Lspace* lspace);
-
-void lspace_fix_repointed_refs(Collector* collector, Lspace* lspace);
-
-POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace);
-
-#endif /*_LSPACE_H_ */
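
lspace_get_next_marked_object above walks the LOS at 1 KB granularity: *iterate_index counts kilobytes from heap_start, free areas are skipped because their first word is zero, and after a live object is returned the index is advanced past the object's KB-aligned extent. A small sketch of just that index arithmetic (the constants and helper below are assumptions chosen to match ALIGN_UP_TO_KILO and BIT_SHIFT_TO_KILO, which are defined elsewhere in the patch):

#include <cassert>
#include <cstddef>

const std::size_t KB_BYTES = 1024;           /* assumed value of KB */
const unsigned BIT_SHIFT_TO_KILO_BITS = 10;  /* assumed value of BIT_SHIFT_TO_KILO */

inline std::size_t align_up_to_kilo(std::size_t v){ return (v + KB_BYTES - 1) & ~(KB_BYTES - 1); }

/* Given a marked object at obj_addr, compute the next value of *iterate_index,
 * i.e. the KB index just past the object, as lspace_get_next_marked_object does. */
unsigned next_iterate_index(std::size_t heap_start, std::size_t obj_addr, std::size_t obj_size)
{
  assert(obj_addr % KB_BYTES == 0);          /* LOS objects start on KB boundaries */
  std::size_t obj_end = obj_addr + align_up_to_kilo(obj_size);
  return (unsigned)((obj_end - heap_start) >> BIT_SHIFT_TO_KILO_BITS);
}
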
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_alloc.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,330 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sspace.h"
+#include "sspace_chunk.h"
+#include "sspace_mark_sweep.h"
+#include "../gen/gen.h"
+
+static Boolean slot_is_alloc_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
+{
+ unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+ unsigned int word_index = color_bits_index / BITS_PER_WORD;
+ unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+
+ return table[word_index] & (cur_alloc_color << index_in_word);
+}
+
+static void alloc_slot_in_table(POINTER_SIZE_INT *table, unsigned int slot_index)
+{
+ assert(!slot_is_alloc_in_table(table, slot_index));
+
+ unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+ unsigned int word_index = color_bits_index / BITS_PER_WORD;
+ unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+
+ table[word_index] |= cur_alloc_color << index_in_word;
+}
+
+static unsigned int first_free_index_in_color_word(POINTER_SIZE_INT word)
+{
+ unsigned int index = 0;
+
+ while(index < BITS_PER_WORD){
+ if(!(word & (cur_mark_color << index)))
+ return index;
+ index += COLOR_BITS_PER_OBJ;
+ }
+
+ assert(0); /* There must be a free obj in this table word */
+ return MAX_SLOT_INDEX;
+}
+
+static Boolean next_free_index_in_color_word(POINTER_SIZE_INT word, unsigned int &index)
+{
+ while(index < BITS_PER_WORD){
+ if(!(word & (cur_alloc_color << index)))
+ return TRUE;
+ index += COLOR_BITS_PER_OBJ;
+ }
+ return FALSE;
+}
+
+static unsigned int composed_slot_index(unsigned int word_index, unsigned int index_in_word)
+{
+ unsigned int color_bits_index = word_index*BITS_PER_WORD + index_in_word;
+ return color_bits_index/COLOR_BITS_PER_OBJ;
+}
+
+static unsigned int next_free_slot_index_in_table(POINTER_SIZE_INT *table, unsigned int slot_index, unsigned int slot_num)
+{
+ assert(slot_is_alloc_in_table(table, slot_index));
+
+ unsigned int max_word_index = ((slot_num-1) * COLOR_BITS_PER_OBJ) / BITS_PER_WORD;
+ Boolean found = FALSE;
+
+ unsigned int color_bits_index = slot_index * COLOR_BITS_PER_OBJ;
+ unsigned int word_index = color_bits_index / BITS_PER_WORD;
+ unsigned int index_in_word = color_bits_index % BITS_PER_WORD;
+
+ while(word_index < max_word_index){
+ found = next_free_index_in_color_word(table[word_index], index_in_word);
+ if(found)
+ return composed_slot_index(word_index, index_in_word);
+ ++word_index;
+ index_in_word = 0;
+ }
+
+ index_in_word = 0;
+ found = next_free_index_in_color_word(table[word_index], index_in_word);
+ if(found)
+ return composed_slot_index(word_index, index_in_word);
+
+ return MAX_SLOT_INDEX;
+}
+
+/* Used for collecting pfc */
+void chunk_set_slot_index(Chunk_Header* chunk, unsigned int first_free_word_index)
+{
+ unsigned int index_in_word = first_free_index_in_color_word(chunk->table[first_free_word_index]);
+ assert(index_in_word != MAX_SLOT_INDEX);
+ chunk->slot_index = composed_slot_index(first_free_word_index, index_in_word);
+}
+
+
+/* 1. No synchronization is needed: this is a mutator-local chunk, whether it is a small or a medium obj chunk.
+ * 2. If this chunk runs out of space, the chunk pointer is cleared.
+ * It is therefore important that the argument passed to this function is the mutator's local chunk pointer.
+ */
+static void *alloc_in_chunk(Chunk_Header* &chunk)
+{
+ POINTER_SIZE_INT *table = chunk->table;
+ unsigned int slot_index = chunk->slot_index;
+
+ void *p_obj = (void*)((POINTER_SIZE_INT)chunk->base + ((POINTER_SIZE_INT)chunk->slot_size * slot_index));
+ alloc_slot_in_table(table, slot_index);
+ if(chunk->status & CHUNK_NEED_ZEROING)
+ memset(p_obj, 0, chunk->slot_size);
+#ifdef SSPACE_VERIFY
+ sspace_verify_free_area((POINTER_SIZE_INT*)p_obj, chunk->slot_size);
+#endif
+
+ chunk->slot_index = next_free_slot_index_in_table(table, slot_index, chunk->slot_num);
+ if(chunk->slot_index == MAX_SLOT_INDEX){
+ chunk->status = CHUNK_USED | CHUNK_NORMAL;
+ chunk = NULL;
+ }
+
+ return p_obj;
+}
+
+/* alloc a small object without finalizer in sspace, without getting a new free chunk */
+void *sspace_fast_alloc(unsigned size, Allocator *allocator)
+{
+ if(size > SUPER_OBJ_THRESHOLD) return NULL;
+
+ if(size <= MEDIUM_OBJ_THRESHOLD){ /* small object */
+ size = SMALL_SIZE_ROUNDUP(size);
+ Chunk_Header **small_chunks = ((Mutator*)allocator)->small_chunks;
+ unsigned int index = SMALL_SIZE_TO_INDEX(size);
+
+ if(!small_chunks[index]){
+ Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
+ Chunk_Header *chunk = sspace_get_small_pfc(sspace, index);
+ //if(!chunk)
+ //chunk = sspace_steal_small_pfc(sspace, index);
+ if(!chunk) return NULL;
+ small_chunks[index] = chunk;
+ }
+ return alloc_in_chunk(small_chunks[index]);
+ } else if(size <= LARGE_OBJ_THRESHOLD){ /* medium object */
+ size = MEDIUM_SIZE_ROUNDUP(size);
+ Chunk_Header **medium_chunks = ((Mutator*)allocator)->medium_chunks;
+ unsigned int index = MEDIUM_SIZE_TO_INDEX(size);
+
+ if(!medium_chunks[index]){
+ Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
+ Chunk_Header *chunk = sspace_get_medium_pfc(sspace, index);
+ //if(!chunk)
+ //chunk = sspace_steal_medium_pfc(sspace, index);
+ if(!chunk) return NULL;
+ medium_chunks[index] = chunk;
+ }
+ return alloc_in_chunk(medium_chunks[index]);
+ } else { /* large object */
+ assert(size <= SUPER_OBJ_THRESHOLD);
+ size = LARGE_SIZE_ROUNDUP(size);
+ unsigned int index = LARGE_SIZE_TO_INDEX(size);
+ Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
+ Chunk_Header *chunk = sspace_get_large_pfc(sspace, index);
+ //if(!chunk)
+ //chunk = sspace_steal_large_pfc(sspace, index);
+ if(!chunk) return NULL;
+ void *p_obj = alloc_in_chunk(chunk);
+ if(chunk)
+ sspace_put_large_pfc(sspace, chunk, index);
+ return p_obj;
+ }
+}
+
+static void *alloc_small_obj(unsigned size, Allocator *allocator)
+{
+ assert(size <= MEDIUM_OBJ_THRESHOLD);
+ assert(!(size & SMALL_GRANULARITY_LOW_MASK));
+
+ Chunk_Header **small_chunks = ((Mutator*)allocator)->small_chunks;
+ unsigned int index = SMALL_SIZE_TO_INDEX(size);
+ if(!small_chunks[index]){
+ Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
+ Chunk_Header *chunk = sspace_get_small_pfc(sspace, index);
+ //if(!chunk)
+ //chunk = sspace_steal_small_pfc(sspace, index);
+ if(!chunk){
+ chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace);
+ if(chunk){
+ normal_chunk_init(chunk, size);
+ } else {
+ /*chunk = sspace_steal_small_pfc(sspace, index);
+ if(!chunk)*/ return NULL;
+ }
+ }
+ chunk->status |= CHUNK_IN_USE | CHUNK_NORMAL;
+ small_chunks[index] = chunk;
+ }
+
+ return alloc_in_chunk(small_chunks[index]);
+}
+
+static void *alloc_medium_obj(unsigned size, Allocator *allocator)
+{
+ assert((size > MEDIUM_OBJ_THRESHOLD) && (size <= LARGE_OBJ_THRESHOLD));
+ assert(!(size & MEDIUM_GRANULARITY_LOW_MASK));
+
+ Chunk_Header **medium_chunks = ((Mutator*)allocator)->medium_chunks;
+ unsigned int index = MEDIUM_SIZE_TO_INDEX(size);
+ if(!medium_chunks[index]){
+ Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
+ Chunk_Header *chunk = sspace_get_medium_pfc(sspace, index);
+ //if(!chunk)
+ //chunk = sspace_steal_medium_pfc(sspace, index);
+ if(!chunk){
+ chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace);
+ if(chunk){
+ normal_chunk_init(chunk, size);
+ } else {
+ /*chunk = sspace_steal_medium_pfc(sspace, index);
+ if(!chunk) */return NULL;
+ }
+ }
+ chunk->status |= CHUNK_IN_USE | CHUNK_NORMAL;
+ medium_chunks[index] = chunk;
+ }
+
+ return alloc_in_chunk(medium_chunks[index]);
+}
+
+/* FIXME:: this is a simple version. It may return NULL even though other mutators have put PFCs back into the pools */
+static void *alloc_large_obj(unsigned size, Allocator *allocator)
+{
+ assert((size > LARGE_OBJ_THRESHOLD) && (size <= SUPER_OBJ_THRESHOLD));
+ assert(!(size & LARGE_GRANULARITY_LOW_MASK));
+
+ Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
+ unsigned int index = LARGE_SIZE_TO_INDEX(size);
+ Chunk_Header *chunk = sspace_get_large_pfc(sspace, index);
+ //if(!chunk)
+ //chunk = sspace_steal_large_pfc(sspace, index);
+ if(!chunk){
+ chunk = (Chunk_Header*)sspace_get_normal_free_chunk(sspace);
+ if(chunk){
+ normal_chunk_init(chunk, size);
+ } else {
+ /*chunk = sspace_steal_large_pfc(sspace, index);
+ if(!chunk)*/ return NULL;
+ }
+ }
+ chunk->status |= CHUNK_NORMAL;
+
+ void *p_obj = alloc_in_chunk(chunk);
+ if(chunk)
+ sspace_put_large_pfc(sspace, chunk, index);
+ return p_obj;
+}
+
+static void *alloc_super_obj(unsigned size, Allocator *allocator)
+{
+ assert(size > SUPER_OBJ_THRESHOLD);
+
+ Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)allocator->gc);
+ unsigned int chunk_size = SUPER_SIZE_ROUNDUP(size);
+ assert(chunk_size > SUPER_OBJ_THRESHOLD);
+ assert(!(chunk_size & CHUNK_GRANULARITY_LOW_MASK));
+
+ Chunk_Header *chunk;
+ if(chunk_size <= HYPER_OBJ_THRESHOLD)
+ chunk = (Chunk_Header*)sspace_get_abnormal_free_chunk(sspace, chunk_size);
+ else
+ chunk = (Chunk_Header*)sspace_get_hyper_free_chunk(sspace, chunk_size, FALSE);
+
+ if(!chunk) return NULL;
+ abnormal_chunk_init(chunk, chunk_size, size);
+ chunk->status = CHUNK_IN_USE | CHUNK_ABNORMAL;
+ chunk->table[0] = cur_alloc_color;
+ set_super_obj_mask(chunk->base);
+ assert(get_obj_info_raw((Partial_Reveal_Object*)chunk->base) & SUPER_OBJ_MASK);
+ //printf("Obj: %x size: %x\t", (POINTER_SIZE_INT)chunk->base, size);
+ return chunk->base;
+}
+
+static void *sspace_try_alloc(unsigned size, Allocator *allocator)
+{
+ if(size <= MEDIUM_OBJ_THRESHOLD)
+ return alloc_small_obj(SMALL_SIZE_ROUNDUP(size), allocator);
+ else if(size <= LARGE_OBJ_THRESHOLD)
+ return alloc_medium_obj(MEDIUM_SIZE_ROUNDUP(size), allocator);
+ else if(size <= SUPER_OBJ_THRESHOLD)
+ return alloc_large_obj(LARGE_SIZE_ROUNDUP(size), allocator);
+ else
+ return alloc_super_obj(size, allocator);
+}
+
+/* FIXME:: the collection should be separated from the allocation */
+void *sspace_alloc(unsigned size, Allocator *allocator)
+{
+ void *p_obj = NULL;
+
+  /* First, try to allocate the object from the thread-local chunks */
+ p_obj = sspace_try_alloc(size, allocator);
+ if(p_obj) return p_obj;
+
+ vm_gc_lock_enum();
+  /* After acquiring the lock, try again in case another thread has already triggered a collection */
+ p_obj = sspace_try_alloc(size, allocator);
+ if(p_obj){
+ vm_gc_unlock_enum();
+ return p_obj;
+ }
+ gc_reclaim_heap(allocator->gc, GC_CAUSE_POS_IS_FULL);
+ vm_gc_unlock_enum();
+
+#ifdef SSPACE_CHUNK_INFO
+ printf("Failure size: %x\n", size);
+#endif
+
+ p_obj = sspace_try_alloc(size, allocator);
+
+ return p_obj;
+}
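
The allocator above tracks slot state in a per-chunk color table: each slot owns COLOR_BITS_PER_OBJ (two) bits packed into machine words, and a slot counts as allocated when its current allocation color bit is set. A stand-alone sketch of the bit arithmetic behind slot_is_alloc_in_table and alloc_slot_in_table (the word width and the color value below are illustrative assumptions):

#include <cassert>
#include <cstdint>

const unsigned COLOR_BITS_PER_OBJ = 2;                   /* as in sspace_chunk.h */
const unsigned BITS_PER_WORD = sizeof(std::uintptr_t) * 8;
const std::uintptr_t ALLOC_COLOR = 0x1;                  /* stands in for cur_alloc_color */

bool slot_is_alloc(const std::uintptr_t *table, unsigned slot_index)
{
  unsigned bit = slot_index * COLOR_BITS_PER_OBJ;
  return (table[bit / BITS_PER_WORD] >> (bit % BITS_PER_WORD)) & ALLOC_COLOR;
}

void slot_set_alloc(std::uintptr_t *table, unsigned slot_index)
{
  assert(!slot_is_alloc(table, slot_index));
  unsigned bit = slot_index * COLOR_BITS_PER_OBJ;
  table[bit / BITS_PER_WORD] |= ALLOC_COLOR << (bit % BITS_PER_WORD);
}
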
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_chunk.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,652 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sspace_chunk.h"
+
+/* PFC stands for partially free chunk */
+#define SMALL_PFC_POOL_NUM SMALL_LOCAL_CHUNK_NUM
+#define MEDIUM_PFC_POOL_NUM MEDIUM_LOCAL_CHUNK_NUM
+#define LARGE_PFC_POOL_NUM ((SUPER_OBJ_THRESHOLD - LARGE_OBJ_THRESHOLD) >> LARGE_GRANULARITY_BITS)
+#define NUM_ALIGNED_FREE_CHUNK_BUCKET (HYPER_OBJ_THRESHOLD >> NORMAL_CHUNK_SHIFT_COUNT)
+#define NUM_UNALIGNED_FREE_CHUNK_BUCKET (HYPER_OBJ_THRESHOLD >> CHUNK_GRANULARITY_BITS)
+
+
+/* PFC stands for partially free chunk */
+static Pool *small_pfc_pools[SMALL_PFC_POOL_NUM];
+static Pool *medium_pfc_pools[MEDIUM_PFC_POOL_NUM];
+static Pool *large_pfc_pools[LARGE_PFC_POOL_NUM];
+static Free_Chunk_List aligned_free_chunk_lists[NUM_ALIGNED_FREE_CHUNK_BUCKET];
+static Free_Chunk_List unaligned_free_chunk_lists[NUM_UNALIGNED_FREE_CHUNK_BUCKET];
+static Free_Chunk_List hyper_free_chunk_list;
+
+static Boolean small_pfc_steal_flags[SMALL_PFC_POOL_NUM];
+static Boolean medium_pfc_steal_flags[MEDIUM_PFC_POOL_NUM];
+static Boolean large_pfc_steal_flags[LARGE_PFC_POOL_NUM];
+
+void sspace_init_chunks(Sspace *sspace)
+{
+ unsigned int i;
+
+ /* Init small obj partially free chunk pools */
+ for(i=SMALL_PFC_POOL_NUM; i--;){
+ small_pfc_steal_flags[i] = FALSE;
+ small_pfc_pools[i] = sync_pool_create();
+ }
+
+ /* Init medium obj partially free chunk pools */
+ for(i=MEDIUM_PFC_POOL_NUM; i--;){
+ medium_pfc_steal_flags[i] = FALSE;
+ medium_pfc_pools[i] = sync_pool_create();
+ }
+
+ /* Init large obj partially free chunk pools */
+ for(i=LARGE_PFC_POOL_NUM; i--;){
+ large_pfc_steal_flags[i] = FALSE;
+ large_pfc_pools[i] = sync_pool_create();
+ }
+
+ /* Init aligned free chunk lists */
+ for(i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+ free_chunk_list_init(&aligned_free_chunk_lists[i]);
+
+  /* Init unaligned free chunk lists */
+ for(i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+ free_chunk_list_init(&unaligned_free_chunk_lists[i]);
+
+  /* Init the hyper free chunk list */
+ free_chunk_list_init(&hyper_free_chunk_list);
+
+ /* Init Sspace struct's chunk fields */
+ sspace->small_pfc_pools = small_pfc_pools;
+ sspace->medium_pfc_pools = medium_pfc_pools;
+ sspace->large_pfc_pools = large_pfc_pools;
+ sspace->aligned_free_chunk_lists = aligned_free_chunk_lists;
+ sspace->unaligned_free_chunk_lists = unaligned_free_chunk_lists;
+ sspace->hyper_free_chunk_list = &hyper_free_chunk_list;
+
+ /* Init the first free chunk: from heap start to heap end */
+ Free_Chunk *free_chunk = (Free_Chunk*)sspace->heap_start;
+ free_chunk->adj_next = (Chunk_Heaer_Basic*)sspace->heap_end;
+ POINTER_SIZE_INT chunk_size = sspace->reserved_heap_size;
+ assert(chunk_size > CHUNK_GRANULARITY && !(chunk_size % CHUNK_GRANULARITY));
+ sspace_put_free_chunk(sspace, free_chunk);
+}
+
+static void pfc_pool_set_steal_flag(Pool *pool, unsigned int steal_threshold, unsigned int &steal_flag)
+{
+ Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pool);
+ while(chunk){
+ steal_threshold--;
+ if(!steal_threshold)
+ break;
+ chunk = chunk->next;
+ }
+ steal_flag = steal_threshold ? FALSE : TRUE;
+}
+
+static void empty_pool(Pool *pool)
+{
+ pool->top = (Stack_Top)NULL;
+ pool->cur = NULL;
+}
+
+void sspace_clear_chunk_list(GC *gc)
+{
+ unsigned int i;
+ unsigned int collector_num = gc->num_collectors;
+ unsigned int steal_threshold;
+
+ steal_threshold = collector_num << SMALL_PFC_STEAL_THRESHOLD;
+ for(i=SMALL_PFC_POOL_NUM; i--;){
+ Pool *pool = small_pfc_pools[i];
+ pfc_pool_set_steal_flag(pool, steal_threshold, small_pfc_steal_flags[i]);
+ empty_pool(pool);
+ }
+
+ steal_threshold = collector_num << MEDIUM_PFC_STEAL_THRESHOLD;
+ for(i=MEDIUM_PFC_POOL_NUM; i--;){
+ Pool *pool = medium_pfc_pools[i];
+ pfc_pool_set_steal_flag(pool, steal_threshold, medium_pfc_steal_flags[i]);
+ empty_pool(pool);
+ }
+
+ steal_threshold = collector_num << LARGE_PFC_STEAL_THRESHOLD;
+ for(i=LARGE_PFC_POOL_NUM; i--;){
+ Pool *pool = large_pfc_pools[i];
+ pfc_pool_set_steal_flag(pool, steal_threshold, large_pfc_steal_flags[i]);
+ empty_pool(pool);
+ }
+
+ for(i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+ free_chunk_list_clear(&aligned_free_chunk_lists[i]);
+
+ for(i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+ free_chunk_list_clear(&unaligned_free_chunk_lists[i]);
+
+ free_chunk_list_clear(&hyper_free_chunk_list);
+
+  /* release the local small and medium obj chunks of each mutator */
+ Mutator *mutator = gc->mutator_list;
+ while(mutator){
+ Chunk_Header **chunks = mutator->small_chunks;
+ for(i=SMALL_LOCAL_CHUNK_NUM; i--;)
+ chunks[i] = NULL;
+ chunks = mutator->medium_chunks;
+ for(i=MEDIUM_LOCAL_CHUNK_NUM; i--;)
+ chunks[i] = NULL;
+ mutator = mutator->next;
+ }
+}
+
+/* Simply put the free chunk into the corresponding list.
+ * Adjacent free chunks are not merged here;
+ * the merging job is done during sweeping.
+ */
+static void list_put_free_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+ chunk->status = CHUNK_FREE;
+ chunk->adj_prev = NULL;
+ chunk->prev = NULL;
+
+ lock(list->lock);
+ chunk->next = list->head;
+ if(list->head)
+ list->head->prev = chunk;
+ list->head = chunk;
+ assert(list->chunk_num < ~((unsigned int)0));
+ ++list->chunk_num;
+ unlock(list->lock);
+}
+
+static Free_Chunk *free_list_get_head(Free_Chunk_List *list)
+{
+ lock(list->lock);
+ Free_Chunk *chunk = list->head;
+ if(chunk){
+ list->head = chunk->next;
+ if(list->head)
+ list->head->prev = NULL;
+ assert(list->chunk_num);
+ --list->chunk_num;
+ assert(chunk->status == CHUNK_FREE);
+ }
+ unlock(list->lock);
+ return chunk;
+}
+
+void sspace_put_free_chunk(Sspace *sspace, Free_Chunk *chunk)
+{
+ POINTER_SIZE_INT chunk_size = CHUNK_SIZE(chunk);
+ assert(!(chunk_size % CHUNK_GRANULARITY));
+
+ if(chunk_size > HYPER_OBJ_THRESHOLD)
+ list_put_free_chunk(sspace->hyper_free_chunk_list, chunk);
+ else if(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK) && !(chunk_size & NORMAL_CHUNK_LOW_MASK))
+ list_put_free_chunk(&sspace->aligned_free_chunk_lists[ALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
+ else
+ list_put_free_chunk(&sspace->unaligned_free_chunk_lists[UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size)], chunk);
+}
+
+static Free_Chunk *partition_normal_free_chunk(Sspace *sspace, Free_Chunk *chunk)
+{
+ assert(CHUNK_SIZE(chunk) > NORMAL_CHUNK_SIZE_BYTES);
+
+ Chunk_Heaer_Basic *adj_next = chunk->adj_next;
+ Free_Chunk *normal_chunk = (Free_Chunk*)(((POINTER_SIZE_INT)chunk + NORMAL_CHUNK_SIZE_BYTES-1) & NORMAL_CHUNK_HIGH_MASK);
+
+ if(chunk != normal_chunk){
+ assert(chunk < normal_chunk);
+ chunk->adj_next = (Chunk_Heaer_Basic*)normal_chunk;
+ sspace_put_free_chunk(sspace, chunk);
+ }
+ normal_chunk->adj_next = (Chunk_Heaer_Basic*)((POINTER_SIZE_INT)normal_chunk + NORMAL_CHUNK_SIZE_BYTES);
+ if(normal_chunk->adj_next != adj_next){
+ assert(normal_chunk->adj_next < adj_next);
+ Free_Chunk *back_chunk = (Free_Chunk*)normal_chunk->adj_next;
+ back_chunk->adj_next = adj_next;
+ sspace_put_free_chunk(sspace, back_chunk);
+ }
+
+ normal_chunk->status = CHUNK_FREE;
+ return normal_chunk;
+}
+
+/* Partition the free chunk to two free chunks:
+ * the first one's size is chunk_size
+ * the second will be inserted into free chunk list according to its size
+ */
+static void partition_abnormal_free_chunk(Sspace *sspace,Free_Chunk *chunk, unsigned int chunk_size)
+{
+ assert(CHUNK_SIZE(chunk) > chunk_size);
+
+ Free_Chunk *back_chunk = (Free_Chunk*)((POINTER_SIZE_INT)chunk + chunk_size);
+ back_chunk->adj_next = chunk->adj_next;
+ chunk->adj_next = (Chunk_Heaer_Basic*)back_chunk;
+ sspace_put_free_chunk(sspace, back_chunk);
+}
+
+Free_Chunk *sspace_get_normal_free_chunk(Sspace *sspace)
+{
+ Free_Chunk_List *aligned_lists = sspace->aligned_free_chunk_lists;
+ Free_Chunk_List *unaligned_lists = sspace->unaligned_free_chunk_lists;
+ Free_Chunk_List *list = NULL;
+ Free_Chunk *chunk = NULL;
+
+ /* Search in aligned chunk lists first */
+ unsigned int index = 0;
+ while(index < NUM_ALIGNED_FREE_CHUNK_BUCKET){
+ list = &aligned_lists[index];
+ if(list->head)
+ chunk = free_list_get_head(list);
+ if(chunk){
+ if(CHUNK_SIZE(chunk) > NORMAL_CHUNK_SIZE_BYTES)
+ chunk = partition_normal_free_chunk(sspace, chunk);
+ //zeroing_free_chunk(chunk);
+ return chunk;
+ }
+ index++;
+ }
+ assert(!chunk);
+
+  /* Search the unaligned chunk lists for a larger chunk.
+     (NORMAL_CHUNK_SIZE_BYTES + (NORMAL_CHUNK_SIZE_BYTES-CHUNK_GRANULARITY))
+     is the smallest size that guarantees the chunk contains a fully aligned normal chunk.
+ */
+ index = UNALIGNED_CHUNK_SIZE_TO_INDEX((NORMAL_CHUNK_SIZE_BYTES<<1) - CHUNK_GRANULARITY);
+ while(index < NUM_UNALIGNED_FREE_CHUNK_BUCKET){
+ list = &unaligned_lists[index];
+ if(list->head)
+ chunk = free_list_get_head(list);
+ if(chunk){
+ chunk = partition_normal_free_chunk(sspace, chunk);
+ assert(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK));
+ //zeroing_free_chunk(chunk);
+ return chunk;
+ }
+ index++;
+ }
+ assert(!chunk);
+
+ /* search in the hyper free chunk list */
+ chunk = sspace_get_hyper_free_chunk(sspace, NORMAL_CHUNK_SIZE_BYTES, TRUE);
+ assert(!((POINTER_SIZE_INT)chunk & NORMAL_CHUNK_LOW_MASK));
+
+ return chunk;
+}
+
+Free_Chunk *sspace_get_abnormal_free_chunk(Sspace *sspace, unsigned int chunk_size)
+{
+ assert(chunk_size > CHUNK_GRANULARITY);
+ assert(!(chunk_size % CHUNK_GRANULARITY));
+ assert(chunk_size <= HYPER_OBJ_THRESHOLD);
+
+ Free_Chunk_List *unaligned_lists = sspace->unaligned_free_chunk_lists;
+ Free_Chunk_List *list = NULL;
+ Free_Chunk *chunk = NULL;
+ unsigned int index = 0;
+
+  /* Search the lists whose chunk sizes are multiples of chunk_size */
+ unsigned int search_size = chunk_size;
+ while(search_size <= HYPER_OBJ_THRESHOLD){
+ index = UNALIGNED_CHUNK_SIZE_TO_INDEX(search_size);
+ list = &unaligned_lists[index];
+ if(list->head)
+ chunk = free_list_get_head(list);
+ if(chunk){
+ if(search_size > chunk_size)
+ partition_abnormal_free_chunk(sspace, chunk, chunk_size);
+ zeroing_free_chunk(chunk);
+ return chunk;
+ }
+ search_size += chunk_size;
+ }
+ assert(!chunk);
+
+ /* search in the hyper free chunk list */
+ chunk = sspace_get_hyper_free_chunk(sspace, chunk_size, FALSE);
+ if(chunk) return chunk;
+
+ /* Search again in abnormal chunk lists */
+ index = UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size);
+ while(index < NUM_UNALIGNED_FREE_CHUNK_BUCKET){
+ list = &unaligned_lists[index];
+ if(list->head)
+ chunk = free_list_get_head(list);
+ if(chunk){
+ if(index > UNALIGNED_CHUNK_SIZE_TO_INDEX(chunk_size))
+ partition_abnormal_free_chunk(sspace, chunk, chunk_size);
+ zeroing_free_chunk(chunk);
+ return chunk;
+ }
+ ++index;
+ }
+
+ return chunk;
+}
+
+Free_Chunk *sspace_get_hyper_free_chunk(Sspace *sspace, unsigned int chunk_size, Boolean is_normal_chunk)
+{
+ assert(chunk_size >= CHUNK_GRANULARITY);
+ assert(!(chunk_size % CHUNK_GRANULARITY));
+
+ Free_Chunk_List *list = sspace->hyper_free_chunk_list;
+ lock(list->lock);
+ Free_Chunk **p_next = &list->head;
+ Free_Chunk *chunk = list->head;
+ while(chunk){
+ if(CHUNK_SIZE(chunk) >= chunk_size){
+ Free_Chunk *next_chunk = chunk->next;
+ *p_next = next_chunk;
+ if(next_chunk){
+ if(chunk != list->head)
+ next_chunk->prev = (Free_Chunk *)p_next; /* utilize an assumption: next is the first field of Free_Chunk */
+ else
+ next_chunk->prev = NULL;
+ }
+ break;
+ }
+ p_next = &chunk->next;
+ chunk = chunk->next;
+ }
+ unlock(list->lock);
+
+ if(chunk){
+ if(is_normal_chunk)
+ chunk = partition_normal_free_chunk(sspace, chunk);
+ else if(CHUNK_SIZE(chunk) > chunk_size)
+ partition_abnormal_free_chunk(sspace, chunk, chunk_size);
+ if(!is_normal_chunk)
+ zeroing_free_chunk(chunk);
+ }
+
+ return chunk;
+}
+
+#define min_value(x, y) (((x) < (y)) ? (x) : (y))
+
+Chunk_Header *sspace_steal_small_pfc(Sspace *sspace, unsigned int index)
+{
+ Chunk_Header *pfc = NULL;
+ unsigned int max_index = min_value(index + SMALL_PFC_STEAL_NUM + 1, SMALL_PFC_POOL_NUM);
+ ++index;
+ for(; index < max_index; ++index){
+ if(!small_pfc_steal_flags[index]) continue;
+ pfc = sspace_get_small_pfc(sspace, index);
+ if(pfc) return pfc;
+ }
+ return NULL;
+}
+Chunk_Header *sspace_steal_medium_pfc(Sspace *sspace, unsigned int index)
+{
+ Chunk_Header *pfc = NULL;
+ unsigned int max_index = min_value(index + MEDIUM_PFC_STEAL_NUM + 1, MEDIUM_PFC_POOL_NUM);
+ ++index;
+ for(; index < max_index; ++index){
+ if(!medium_pfc_steal_flags[index]) continue;
+ pfc = sspace_get_medium_pfc(sspace, index);
+ if(pfc) return pfc;
+ }
+ return NULL;
+}
+Chunk_Header *sspace_steal_large_pfc(Sspace *sspace, unsigned int index)
+{
+ Chunk_Header *pfc = NULL;
+ unsigned int max_index = min_value(index + LARGE_PFC_STEAL_NUM + 1, LARGE_PFC_POOL_NUM);
+ ++index;
+ for(; index < max_index; ++index){
+ if(!large_pfc_steal_flags[index]) continue;
+ pfc = sspace_get_large_pfc(sspace, index);
+ if(pfc) return pfc;
+ }
+ return NULL;
+}
+
+/* This computation doesn't take the free-list locks, so its result is only approximate, which is good enough here. */
+POINTER_SIZE_INT sspace_free_memory_size(Sspace *sspace)
+{
+ POINTER_SIZE_INT free_size = 0;
+
+ vm_gc_lock_enum();
+
+ for(unsigned int i=NUM_ALIGNED_FREE_CHUNK_BUCKET; i--;)
+ free_size += NORMAL_CHUNK_SIZE_BYTES * (i+1) * sspace->aligned_free_chunk_lists[i].chunk_num;
+
+ for(unsigned int i=NUM_UNALIGNED_FREE_CHUNK_BUCKET; i--;)
+ free_size += CHUNK_GRANULARITY * (i+1) * sspace->unaligned_free_chunk_lists[i].chunk_num;
+
+ Free_Chunk *hyper_chunk = sspace->hyper_free_chunk_list->head;
+ while(hyper_chunk){
+ free_size += CHUNK_SIZE(hyper_chunk);
+ hyper_chunk = hyper_chunk->next;
+ }
+
+ vm_gc_unlock_enum();
+
+ return free_size;
+}
+
+
+#ifdef SSPACE_CHUNK_INFO
+
+extern POINTER_SIZE_INT alloc_mask_in_table;
+static POINTER_SIZE_INT free_mem_size;
+
+static unsigned int word_set_bit_num(POINTER_SIZE_INT word)
+{
+ unsigned int count = 0;
+
+ while(word){
+ word &= word - 1;
+ ++count;
+ }
+ return count;
+}
+
+static unsigned int pfc_info(Chunk_Header *chunk, Boolean before_gc)
+{
+ POINTER_SIZE_INT *table = ((Chunk_Header*)chunk)->table;
+ unsigned int slot_num = chunk->slot_num;
+ unsigned int live_num = 0;
+
+ unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
+  for(unsigned int i=0; i<index_word_num; ++i)
+    live_num += word_set_bit_num(table[i]);
+  if(before_gc){
+    /* alloc-color bits padded into the last table word are not real objects; subtract them */
+    unsigned int fake_live_num_in_last_word = (slot_num % SLOT_NUM_PER_WORD_IN_TABLE) ? (SLOT_NUM_PER_WORD_IN_TABLE - slot_num % SLOT_NUM_PER_WORD_IN_TABLE) : 0;
+    assert(live_num >= fake_live_num_in_last_word);
+    live_num -= fake_live_num_in_last_word;
+  }
+ assert(live_num <= slot_num);
+ return live_num;
+}
+
+enum Obj_Type {
+ SMALL_OBJ,
+ MEDIUM_OBJ,
+ LARGE_OBJ
+};
+static unsigned int index_to_size(unsigned int index, Obj_Type type)
+{
+ if(type == SMALL_OBJ)
+ return SMALL_INDEX_TO_SIZE(index);
+ if(type == MEDIUM_OBJ)
+ return MEDIUM_INDEX_TO_SIZE(index);
+ assert(type == LARGE_OBJ);
+ return LARGE_INDEX_TO_SIZE(index);
+}
+
+static void pfc_pools_info(Sspace *sspace, Pool **pools, unsigned int pool_num, Obj_Type type, Boolean before_gc)
+{
+ unsigned int index;
+
+ for(index = 0; index < pool_num; ++index){
+ Pool *pool = pools[index];
+ Chunk_Header *chunk = NULL;
+ unsigned int chunk_counter = 0;
+ unsigned int slot_num = 0;
+ unsigned int live_num = 0;
+ pool_iterator_init(pool);
+ while(chunk = (Chunk_Header*)pool_iterator_next(pool)){
+ ++chunk_counter;
+ slot_num += chunk->slot_num;
+ live_num += pfc_info(chunk, before_gc);
+ }
+ if(slot_num){
+ printf("Size: %x\tchunk num: %d\tlive obj: %d\ttotal obj: %d\tLive Ratio: %f\n", index_to_size(index, type), chunk_counter, live_num, slot_num, (float)live_num/slot_num);
+ assert(live_num < slot_num);
+ free_mem_size += index_to_size(index, type) * (slot_num-live_num);
+ assert(free_mem_size < sspace->committed_heap_size);
+ }
+ }
+}
+
+enum Chunk_Type {
+ ALIGNED_CHUNK,
+ UNALIGNED_CHUNK
+};
+static unsigned int chunk_index_to_size(unsigned int index, Chunk_Type type)
+{
+ if(type == ALIGNED_CHUNK)
+ return ALIGNED_CHUNK_INDEX_TO_SIZE(index);
+ assert(type == UNALIGNED_CHUNK);
+ return UNALIGNED_CHUNK_INDEX_TO_SIZE(index);
+}
+
+static void free_lists_info(Sspace *sspace, Free_Chunk_List *lists, unsigned int list_num, Chunk_Type type)
+{
+ unsigned int index;
+
+ for(index = 0; index < list_num; ++index){
+ Free_Chunk *chunk = lists[index].head;
+ unsigned int chunk_counter = 0;
+ while(chunk){
+ ++chunk_counter;
+ unsigned int chunk_size = CHUNK_SIZE(chunk);
+ assert(chunk_size <= HYPER_OBJ_THRESHOLD);
+ free_mem_size += chunk_size;
+ assert(free_mem_size < sspace->committed_heap_size);
+ chunk = chunk->next;
+ }
+ printf("Free Size: %x\tnum: %d\n", chunk_index_to_size(index, type), chunk_counter);
+ }
+}
+
+void sspace_chunks_info(Sspace *sspace, Boolean before_gc)
+{
+ if(!before_gc) return;
+
+ printf("\n\nSMALL PFC INFO:\n\n");
+ pfc_pools_info(sspace, small_pfc_pools, SMALL_PFC_POOL_NUM, SMALL_OBJ, before_gc);
+
+ printf("\n\nMEDIUM PFC INFO:\n\n");
+ pfc_pools_info(sspace, medium_pfc_pools, MEDIUM_PFC_POOL_NUM, MEDIUM_OBJ, before_gc);
+
+ printf("\n\nLARGE PFC INFO:\n\n");
+ pfc_pools_info(sspace, large_pfc_pools, LARGE_PFC_POOL_NUM, LARGE_OBJ, before_gc);
+
+ printf("\n\nALIGNED FREE CHUNK INFO:\n\n");
+ free_lists_info(sspace, aligned_free_chunk_lists, NUM_ALIGNED_FREE_CHUNK_BUCKET, ALIGNED_CHUNK);
+
+ printf("\n\nUNALIGNED FREE CHUNK INFO:\n\n");
+ free_lists_info(sspace, unaligned_free_chunk_lists, NUM_UNALIGNED_FREE_CHUNK_BUCKET, UNALIGNED_CHUNK);
+
+ printf("\n\nSUPER FREE CHUNK INFO:\n\n");
+ Free_Chunk_List *list = &hyper_free_chunk_list;
+ Free_Chunk *chunk = list->head;
+ while(chunk){
+ printf("Size: %x\n", CHUNK_SIZE(chunk));
+ free_mem_size += CHUNK_SIZE(chunk);
+ assert(free_mem_size < sspace->committed_heap_size);
+ chunk = chunk->next;
+ }
+ printf("\n\nFree mem ratio: %f\n\n", (float)free_mem_size / sspace->committed_heap_size);
+ free_mem_size = 0;
+}
+
+#endif
+
+#ifdef SSPACE_ALLOC_INFO
+
+#define MEDIUM_THRESHOLD 256
+#define LARGE_THRESHOLD (1024)
+#define SUPER_THRESHOLD (6*KB)
+#define HYPER_THRESHOLD (64*KB)
+
+#define SMALL_OBJ_ARRAY_NUM (MEDIUM_THRESHOLD >> 2)
+#define MEDIUM_OBJ_ARRAY_NUM (LARGE_THRESHOLD >> 4)
+#define LARGE_OBJ_ARRAY_NUM (SUPER_THRESHOLD >> 6)
+#define SUPER_OBJ_ARRAY_NUM (HYPER_THRESHOLD >> 10)
+
+volatile unsigned int small_obj_num[SMALL_OBJ_ARRAY_NUM];
+volatile unsigned int medium_obj_num[MEDIUM_OBJ_ARRAY_NUM];
+volatile unsigned int large_obj_num[LARGE_OBJ_ARRAY_NUM];
+volatile unsigned int super_obj_num[SUPER_OBJ_ARRAY_NUM];
+volatile unsigned int hyper_obj_num;
+
+void sspace_alloc_info(unsigned int size)
+{
+ if(size <= MEDIUM_THRESHOLD)
+ atomic_inc32(&small_obj_num[(size>>2)-1]);
+ else if(size <= LARGE_THRESHOLD)
+ atomic_inc32(&medium_obj_num[(size>>4)-1]);
+ else if(size <= SUPER_THRESHOLD)
+ atomic_inc32(&large_obj_num[(size>>6)-1]);
+ else if(size <= HYPER_THRESHOLD)
+ atomic_inc32(&super_obj_num[(size>>10)-1]);
+ else
+ atomic_inc32(&hyper_obj_num);
+}
+
+void sspace_alloc_info_summary(void)
+{
+ unsigned int i;
+
+ printf("\n\nNORMAL OBJ\n\n");
+ for(i = 0; i < SMALL_OBJ_ARRAY_NUM; i++){
+ printf("Size: %x\tnum: %d\n", (i+1)<<2, small_obj_num[i]);
+ small_obj_num[i] = 0;
+ }
+
+ i = ((MEDIUM_THRESHOLD + (1<<4))>>4) - 1;
+ for(; i < MEDIUM_OBJ_ARRAY_NUM; i++){
+ printf("Size: %x\tnum: %d\n", (i+1)<<4, medium_obj_num[i]);
+ medium_obj_num[i] = 0;
+ }
+
+ i = ((LARGE_THRESHOLD + (1<<6))>>6) - 1;
+ for(; i < LARGE_OBJ_ARRAY_NUM; i++){
+ printf("Size: %x\tnum: %d\n", (i+1)<<6, large_obj_num[i]);
+ large_obj_num[i] = 0;
+ }
+
+ i = ((SUPER_THRESHOLD + (1<<10))>>10) - 1;
+ for(; i < SUPER_OBJ_ARRAY_NUM; i++){
+ printf("Size: %x\tnum: %d\n", (i+1)<<10, super_obj_num[i]);
+ super_obj_num[i] = 0;
+ }
+
+ printf("\n\nHYPER OBJ\n\n");
+ printf("num: %d\n", hyper_obj_num);
+ hyper_obj_num = 0;
+}
+
+#endif
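
The pool counts above are derived from the size-class thresholds declared in sspace_chunk.h below: small classes are spaced at the object alignment up to 128 bytes, medium classes at 8 bytes up to 256 bytes, and large classes at 128 bytes up to 1 KB. A tiny worked sketch of the size-to-pool-index mapping for a 32-bit build (the constants are taken from the header below; sizes are assumed to be already rounded up to their class granularity):

#include <cassert>

/* Mirrors sspace_chunk.h for a 32-bit build (GC_OBJECT_ALIGNMENT_BITS == 2). */
const unsigned MEDIUM_OBJ_THRESHOLD = 128, LARGE_OBJ_THRESHOLD = 256, SUPER_OBJ_THRESHOLD = 1024;
const unsigned SMALL_GRANULARITY_BITS = 2, MEDIUM_GRANULARITY_BITS = 3, LARGE_GRANULARITY_BITS = 7;

/* Which partially free chunk (PFC) pool serves an object of the given rounded-up size:
 * e.g. size 4 -> small index 0, size 128 -> small index 31,
 *      size 136 -> medium index 0, size 384 -> large index 0. */
unsigned size_to_pfc_index(unsigned size)
{
  if(size <= MEDIUM_OBJ_THRESHOLD)
    return (size >> SMALL_GRANULARITY_BITS) - 1;
  if(size <= LARGE_OBJ_THRESHOLD)
    return ((size - MEDIUM_OBJ_THRESHOLD) >> MEDIUM_GRANULARITY_BITS) - 1;
  assert(size <= SUPER_OBJ_THRESHOLD);
  return ((size - LARGE_OBJ_THRESHOLD) >> LARGE_GRANULARITY_BITS) - 1;
}
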
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_chunk.h drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_chunk.h
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_chunk.h 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_chunk.h 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _SSPACE_CHUNK_H_
+#define _SSPACE_CHUNK_H_
+
+#include "sspace.h"
+
+enum Chunk_Status {
+ CHUNK_NIL = 0,
+ CHUNK_FREE = 0x1,
+ CHUNK_IN_USE = 0x2,
+ CHUNK_USED = 0x4,
+ CHUNK_NORMAL = 0x10,
+ CHUNK_ABNORMAL = 0x20,
+ CHUNK_NEED_ZEROING = 0x100
+};
+
+typedef volatile POINTER_SIZE_INT Chunk_Status_t;
+
+typedef struct Chunk_Heaer_Basic {
+ Chunk_Heaer_Basic *next;
+ Chunk_Status_t status;
+ Chunk_Heaer_Basic *adj_prev; // adjacent previous chunk, for merging continuous free chunks
+ Chunk_Heaer_Basic *adj_next; // adjacent next chunk
+} Chunk_Heaer_Basic;
+
+typedef struct Chunk_Header {
+ /* Beginning of Chunk_Heaer_Basic */
+ Chunk_Header *next; /* pointing to the next pfc in the pfc pool */
+ Chunk_Status_t status;
+ Chunk_Heaer_Basic *adj_prev; // adjacent previous chunk, for merging continuous free chunks
+ Chunk_Heaer_Basic *adj_next; // adjacent next chunk
+ /* End of Chunk_Heaer_Basic */
+ void *base;
+ unsigned int slot_size;
+ unsigned int slot_num;
+  unsigned int slot_index; /* index of the first free slot in this chunk */
+ POINTER_SIZE_INT table[1];
+} Chunk_Header;
+
+
+#define NORMAL_CHUNK_SHIFT_COUNT 16
+#define NORMAL_CHUNK_SIZE_BYTES (1 << NORMAL_CHUNK_SHIFT_COUNT)
+#define NORMAL_CHUNK_LOW_MASK ((POINTER_SIZE_INT)(NORMAL_CHUNK_SIZE_BYTES - 1))
+#define NORMAL_CHUNK_HIGH_MASK (~NORMAL_CHUNK_LOW_MASK)
+#define NORMAL_CHUNK_HEADER(addr) ((Chunk_Header*)((POINTER_SIZE_INT)(addr) & NORMAL_CHUNK_HIGH_MASK))
+#define ABNORMAL_CHUNK_HEADER(addr) ((Chunk_Header*)((POINTER_SIZE_INT)addr & CHUNK_GRANULARITY_HIGH_MASK))
+
+#define MAX_SLOT_INDEX 0xFFffFFff
+#define COLOR_BITS_PER_OBJ 2 // should be powers of 2
+#define SLOT_NUM_PER_WORD_IN_TABLE (BITS_PER_WORD /COLOR_BITS_PER_OBJ)
+
+/* Two equations:
+ * 1. CHUNK_HEADER_VARS_SIZE_BYTES + NORMAL_CHUNK_TABLE_SIZE_BYTES + slot_size*NORMAL_CHUNK_SLOT_NUM = NORMAL_CHUNK_SIZE_BYTES
+ * 2. (BITS_PER_BYTE * NORMAL_CHUNK_TABLE_SIZE_BYTES)/COLOR_BITS_PER_OBJ >= NORMAL_CHUNK_SLOT_NUM
+ * ===>
+ * NORMAL_CHUNK_SLOT_NUM <= BITS_PER_BYTE*(NORMAL_CHUNK_SIZE_BYTES - CHUNK_HEADER_VARS_SIZE_BYTES) / (BITS_PER_BYTE*slot_size + COLOR_BITS_PER_OBJ)
+ * ===>
+ * NORMAL_CHUNK_SLOT_NUM = BITS_PER_BYTE*(NORMAL_CHUNK_SIZE_BYTES - CHUNK_HEADER_VARS_SIZE_BYTES) / (BITS_PER_BYTE*slot_size + COLOR_BITS_PER_OBJ)
+ */
+
+#define CHUNK_HEADER_VARS_SIZE_BYTES ((POINTER_SIZE_INT)&(((Chunk_Header*)0)->table))
+#define NORMAL_CHUNK_SLOT_AREA_SIZE_BITS (BITS_PER_BYTE * (NORMAL_CHUNK_SIZE_BYTES - CHUNK_HEADER_VARS_SIZE_BYTES))
+#define SIZE_BITS_PER_SLOT(chunk) (BITS_PER_BYTE * chunk->slot_size + COLOR_BITS_PER_OBJ)
+
+#define NORMAL_CHUNK_SLOT_NUM(chunk) (NORMAL_CHUNK_SLOT_AREA_SIZE_BITS / SIZE_BITS_PER_SLOT(chunk))
+#define NORMAL_CHUNK_TABLE_SIZE_BYTES(chunk) (((NORMAL_CHUNK_SLOT_NUM(chunk) + SLOT_NUM_PER_WORD_IN_TABLE-1) / SLOT_NUM_PER_WORD_IN_TABLE) * BYTES_PER_WORD)
+#define NORMAL_CHUNK_HEADER_SIZE_BYTES(chunk) (CHUNK_HEADER_VARS_SIZE_BYTES + NORMAL_CHUNK_TABLE_SIZE_BYTES(chunk))
+
+#define NORMAL_CHUNK_BASE(chunk) ((void*)((POINTER_SIZE_INT)(chunk) + NORMAL_CHUNK_HEADER_SIZE_BYTES(chunk)))
+#define ABNORMAL_CHUNK_BASE(chunk) ((void*)((POINTER_SIZE_INT)(chunk) + sizeof(Chunk_Header)))
+
+#define CHUNK_END(chunk) ((chunk)->adj_next)
+#define CHUNK_SIZE(chunk) ((POINTER_SIZE_INT)chunk->adj_next - (POINTER_SIZE_INT)chunk)
+
+inline void *slot_index_to_addr(Chunk_Header *chunk, unsigned int index)
+{ return (void*)((POINTER_SIZE_INT)chunk->base + chunk->slot_size * index); }
+
+inline unsigned int slot_addr_to_index(Chunk_Header *chunk, void *addr)
+{ return (unsigned int)(((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)chunk->base) / chunk->slot_size); }
+
+typedef struct Free_Chunk {
+ /* Beginning of Chunk_Heaer_Basic */
+ Free_Chunk *next; /* pointing to the next free Free_Chunk */
+ Chunk_Status_t status;
+ Chunk_Heaer_Basic *adj_prev; // adjacent previous chunk, for merging continuous free chunks
+ Chunk_Heaer_Basic *adj_next; // adjacent next chunk
+ /* End of Chunk_Heaer_Basic */
+ Free_Chunk *prev; /* pointing to the prev free Free_Chunk */
+} Free_Chunk;
+
+typedef struct Free_Chunk_List {
+ Free_Chunk *head; /* get new free chunk from head */
+ Free_Chunk *tail; /* put free chunk to tail */
+ unsigned int chunk_num;
+ SpinLock lock;
+} Free_Chunk_List;
+
+/*
+typedef union Chunk{
+ Chunk_Header header;
+ Free_Chunk free_chunk;
+ unsigned char raw_bytes[NORMAL_CHUNK_SIZE_BYTES];
+} Chunk;
+*/
+
+inline void free_chunk_list_init(Free_Chunk_List *list)
+{
+ list->head = NULL;
+ list->tail = NULL;
+ list->chunk_num = 0;
+ list->lock = FREE_LOCK;
+}
+
+inline void free_chunk_list_clear(Free_Chunk_List *list)
+{
+ list->head = NULL;
+ list->tail = NULL;
+ list->chunk_num = 0;
+ assert(list->lock == FREE_LOCK);
+}
+
+/* Padding the last index word in table to facilitate allocation */
+inline void chunk_pad_last_index_word(Chunk_Header *chunk, POINTER_SIZE_INT alloc_mask)
+{
+ unsigned int ceiling_index_in_last_word = (chunk->slot_num * COLOR_BITS_PER_OBJ) % BITS_PER_WORD;
+ if(!ceiling_index_in_last_word)
+ return;
+ POINTER_SIZE_INT padding_mask = ~((1 << ceiling_index_in_last_word) - 1);
+ padding_mask &= alloc_mask;
+ unsigned int last_word_index = (chunk->slot_num-1) / SLOT_NUM_PER_WORD_IN_TABLE;
+ chunk->table[last_word_index] |= padding_mask;
+}
+
+extern POINTER_SIZE_INT alloc_mask_in_table;
+/* Used for allocating a fixed-size chunk from free area lists */
+inline void normal_chunk_init(Chunk_Header *chunk, unsigned int slot_size)
+{
+ assert(chunk->status == CHUNK_FREE);
+ assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + NORMAL_CHUNK_SIZE_BYTES);
+
+ chunk->next = NULL;
+ chunk->status = CHUNK_NEED_ZEROING;
+ chunk->slot_size = slot_size;
+ chunk->slot_num = NORMAL_CHUNK_SLOT_NUM(chunk);
+ chunk->slot_index = 0;
+ chunk->base = NORMAL_CHUNK_BASE(chunk);
+ memset(chunk->table, 0, NORMAL_CHUNK_TABLE_SIZE_BYTES(chunk));//memset table
+ chunk_pad_last_index_word(chunk, alloc_mask_in_table);
+}
+
+/* Used for allocating a chunk for large object from free area lists */
+inline void abnormal_chunk_init(Chunk_Header *chunk, unsigned int chunk_size, unsigned int obj_size)
+{
+ assert(chunk->status == CHUNK_FREE);
+ assert((POINTER_SIZE_INT)chunk->adj_next == (POINTER_SIZE_INT)chunk + chunk_size);
+
+ chunk->next = NULL;
+ chunk->status = CHUNK_NIL;
+ chunk->slot_size = obj_size;
+ chunk->slot_num = 1;
+ chunk->slot_index = 0;
+ chunk->base = ABNORMAL_CHUNK_BASE(chunk);
+}
+
+
+#ifdef POINTER64
+ #define GC_OBJECT_ALIGNMENT_BITS 3
+#else
+ #define GC_OBJECT_ALIGNMENT_BITS 2
+#endif
+
+#define MEDIUM_OBJ_THRESHOLD (128)
+#define LARGE_OBJ_THRESHOLD (256)
+#define SUPER_OBJ_THRESHOLD (1024)
+#define HYPER_OBJ_THRESHOLD (128*KB)
+
+#define SMALL_GRANULARITY_BITS (GC_OBJECT_ALIGNMENT_BITS)
+#define MEDIUM_GRANULARITY_BITS (SMALL_GRANULARITY_BITS + 1)
+#define LARGE_GRANULARITY_BITS 7
+#define CHUNK_GRANULARITY_BITS 10
+
+#define SMALL_GRANULARITY (1 << SMALL_GRANULARITY_BITS)
+#define MEDIUM_GRANULARITY (1 << MEDIUM_GRANULARITY_BITS)
+#define LARGE_GRANULARITY (1 << LARGE_GRANULARITY_BITS)
+#define CHUNK_GRANULARITY (1 << CHUNK_GRANULARITY_BITS)
+
+#define SMALL_GRANULARITY_LOW_MASK ((POINTER_SIZE_INT)(SMALL_GRANULARITY-1))
+#define SMALL_GRANULARITY_HIGH_MASK (~SMALL_GRANULARITY_LOW_MASK)
+#define MEDIUM_GRANULARITY_LOW_MASK ((POINTER_SIZE_INT)(MEDIUM_GRANULARITY-1))
+#define MEDIUM_GRANULARITY_HIGH_MASK (~MEDIUM_GRANULARITY_LOW_MASK)
+#define LARGE_GRANULARITY_LOW_MASK ((POINTER_SIZE_INT)(LARGE_GRANULARITY-1))
+#define LARGE_GRANULARITY_HIGH_MASK (~LARGE_GRANULARITY_LOW_MASK)
+#define CHUNK_GRANULARITY_LOW_MASK ((POINTER_SIZE_INT)(CHUNK_GRANULARITY-1))
+#define CHUNK_GRANULARITY_HIGH_MASK (~CHUNK_GRANULARITY_LOW_MASK)
+
+#define SMALL_LOCAL_CHUNK_NUM (MEDIUM_OBJ_THRESHOLD >> SMALL_GRANULARITY_BITS)
+#define MEDIUM_LOCAL_CHUNK_NUM ((LARGE_OBJ_THRESHOLD - MEDIUM_OBJ_THRESHOLD) >> MEDIUM_GRANULARITY_BITS)
+
+#define SMALL_SIZE_ROUNDUP(size) (size)
+#define MEDIUM_SIZE_ROUNDUP(size) (((size) + MEDIUM_GRANULARITY-1) & MEDIUM_GRANULARITY_HIGH_MASK)
+#define LARGE_SIZE_ROUNDUP(size) (((size) + LARGE_GRANULARITY-1) & LARGE_GRANULARITY_HIGH_MASK)
+#define SUPER_OBJ_TOTAL_SIZE(size) (sizeof(Chunk_Header) + (size))
+#define SUPER_SIZE_ROUNDUP(size) ((SUPER_OBJ_TOTAL_SIZE(size) + CHUNK_GRANULARITY-1) & CHUNK_GRANULARITY_HIGH_MASK)
+
+#define SMALL_SIZE_TO_INDEX(size) (((size) >> SMALL_GRANULARITY_BITS) - 1)
+#define MEDIUM_SIZE_TO_INDEX(size) ((((size)-MEDIUM_OBJ_THRESHOLD) >> MEDIUM_GRANULARITY_BITS) - 1)
+#define LARGE_SIZE_TO_INDEX(size) ((((size)-LARGE_OBJ_THRESHOLD) >> LARGE_GRANULARITY_BITS) - 1)
+#define ALIGNED_CHUNK_SIZE_TO_INDEX(size) (((size) >> NORMAL_CHUNK_SHIFT_COUNT) - 1)
+#define UNALIGNED_CHUNK_SIZE_TO_INDEX(size) (((size) >> CHUNK_GRANULARITY_BITS) - 1)
+
+#define SMALL_INDEX_TO_SIZE(index) (((index) + 1) << SMALL_GRANULARITY_BITS)
+#define MEDIUM_INDEX_TO_SIZE(index) ((((index) + 1) << MEDIUM_GRANULARITY_BITS) + MEDIUM_OBJ_THRESHOLD)
+#define LARGE_INDEX_TO_SIZE(index) ((((index) + 1) << LARGE_GRANULARITY_BITS) + LARGE_OBJ_THRESHOLD)
+#define ALIGNED_CHUNK_INDEX_TO_SIZE(index) (((index) + 1) << NORMAL_CHUNK_SHIFT_COUNT)
+#define UNALIGNED_CHUNK_INDEX_TO_SIZE(index) (((index) + 1) << CHUNK_GRANULARITY_BITS)
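+
+/* Worked example (illustrative; assumes a 32-bit build, so GC_OBJECT_ALIGNMENT_BITS == 2 and
+ SMALL_GRANULARITY == 4): a 100-byte object is "small" (<= MEDIUM_OBJ_THRESHOLD):
+ SMALL_SIZE_ROUNDUP(100) == 100, SMALL_SIZE_TO_INDEX(100) == (100 >> 2) - 1 == 24, and
+ SMALL_INDEX_TO_SIZE(24) == 100 again. A 200-byte object is "medium":
+ MEDIUM_SIZE_ROUNDUP(200) == 200 (8-byte granularity) and
+ MEDIUM_SIZE_TO_INDEX(200) == ((200 - 128) >> 3) - 1 == 8, with MEDIUM_INDEX_TO_SIZE(8) == 200. */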
+
+#define SMALL_PFC_STEAL_NUM 3
+#define MEDIUM_PFC_STEAL_NUM 3
+#define LARGE_PFC_STEAL_NUM 3
+
+#define SMALL_PFC_STEAL_THRESHOLD 3
+#define MEDIUM_PFC_STEAL_THRESHOLD 3
+#define LARGE_PFC_STEAL_THRESHOLD 3
+
+
+inline Chunk_Header *sspace_get_small_pfc(Sspace *sspace, unsigned int index)
+{
+ Pool *pfc_pool = sspace->small_pfc_pools[index];
+ Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
+ assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
+ return chunk;
+}
+inline void sspace_put_small_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index)
+{
+ assert(chunk);
+
+ Pool *pfc_pool = sspace->small_pfc_pools[index];
+ pool_put_entry(pfc_pool, chunk);
+}
+
+inline Chunk_Header *sspace_get_medium_pfc(Sspace *sspace, unsigned int index)
+{
+ Pool *pfc_pool = sspace->medium_pfc_pools[index];
+ Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
+ assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
+ return chunk;
+}
+inline void sspace_put_medium_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index)
+{
+ assert(chunk);
+
+ Pool *pfc_pool = sspace->medium_pfc_pools[index];
+ pool_put_entry(pfc_pool, chunk);
+}
+
+inline Chunk_Header *sspace_get_large_pfc(Sspace *sspace, unsigned int index)
+{
+ Pool *pfc_pool = sspace->large_pfc_pools[index];
+ Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool);
+ assert(!chunk || chunk->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
+ return chunk;
+}
+inline void sspace_put_large_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int index)
+{
+ assert(chunk);
+
+ Pool *pfc_pool = sspace->large_pfc_pools[index];
+ pool_put_entry(pfc_pool, chunk);
+}
+
+/*
+inline Chunk_Header *sspace_get_pfc(Sspace *sspace, unsigned int size)
+{
+ assert(size <= SUPER_OBJ_THRESHOLD);
+
+ if(size > LARGE_OBJ_THRESHOLD)
+ return sspace_get_large_pfc(sspace, size);
+ else if(size > MEDIUM_OBJ_THRESHOLD)
+ return sspace_get_medium_pfc(sspace, size);
+ return sspace_get_small_pfc(sspace, size);
+}
+*/
+
+inline void sspace_put_pfc(Sspace *sspace, Chunk_Header *chunk, unsigned int size)
+{
+ assert(size <= SUPER_OBJ_THRESHOLD);
+
+ chunk->status = CHUNK_NORMAL | CHUNK_NEED_ZEROING;
+ unsigned int index;
+
+ if(size > LARGE_OBJ_THRESHOLD){
+ assert(!(size & LARGE_GRANULARITY_LOW_MASK));
+ assert((size > LARGE_OBJ_THRESHOLD) && (size <= SUPER_OBJ_THRESHOLD));
+ index = LARGE_SIZE_TO_INDEX(size);
+ sspace_put_large_pfc(sspace, chunk, index);
+ } else if(size > MEDIUM_OBJ_THRESHOLD){
+ assert(!(size & MEDIUM_GRANULARITY_LOW_MASK));
+ assert((size > MEDIUM_OBJ_THRESHOLD) && (size <= LARGE_OBJ_THRESHOLD));
+ index = MEDIUM_SIZE_TO_INDEX(size);
+ sspace_put_medium_pfc(sspace, chunk, index);
+ } else {
+ assert(!(size & SMALL_GRANULARITY_LOW_MASK));
+ assert(size <= MEDIUM_OBJ_THRESHOLD);
+ index = SMALL_SIZE_TO_INDEX(size);
+ sspace_put_small_pfc(sspace, chunk, index);
+ }
+}
+
+
+extern void sspace_init_chunks(Sspace *sspace);
+extern void sspace_clear_chunk_list(GC *gc);
+extern void sspace_put_free_chunk(Sspace *sspace, Free_Chunk *chunk);
+extern Free_Chunk *sspace_get_normal_free_chunk(Sspace *sspace);
+extern Free_Chunk *sspace_get_abnormal_free_chunk(Sspace *sspace, unsigned int chunk_size);
+extern Free_Chunk *sspace_get_hyper_free_chunk(Sspace *sspace, unsigned int chunk_size, Boolean is_normal_chunk);
+extern Chunk_Header *sspace_steal_small_pfc(Sspace *sspace, unsigned int index);
+extern Chunk_Header *sspace_steal_medium_pfc(Sspace *sspace, unsigned int index);
+extern Chunk_Header *sspace_steal_large_pfc(Sspace *sspace, unsigned int index);
+
+extern void zeroing_free_chunk(Free_Chunk *chunk);
+
+
+#endif //#ifndef _SSPACE_CHUNK_H_
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace.cpp drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace.cpp 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sspace.h"
+#include "sspace_chunk.h"
+#include "../gen/gen.h"
+#include "../common/gc_space.h"
+#include "sspace_verify.h"
+
+struct GC_Gen;
+
+void sspace_initialize(GC *gc, void *start, unsigned int sspace_size, unsigned int commit_size)
+{
+ /* With an sspace in the heap, the heap must be composed of either a single sspace or an sspace plus a NOS.
+ * In either case, the reserved size and the committed size of the sspace must be the same.
+ * Because the sspace is collected only by mark-sweep, it cannot be shrunk,
+ * so there is no need for dynamic space resizing.
+ */
+ assert(sspace_size == commit_size);
+
+ Sspace *sspace = (Sspace*)STD_MALLOC(sizeof(Sspace));
+ assert(sspace);
+ memset(sspace, 0, sizeof(Sspace));
+
+ sspace->reserved_heap_size = sspace_size;
+
+ void *reserved_base = start;
+
+ /* commit sspace mem */
+ if(!large_page_hint)
+ vm_commit_mem(reserved_base, commit_size);
+ memset(reserved_base, 0, commit_size);
+ sspace->committed_heap_size = commit_size;
+
+ sspace->heap_start = reserved_base;
+ sspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + sspace_size);
+
+ sspace->num_collections = 0;
+ sspace->time_collections = 0;
+ sspace->survive_ratio = 0.2f;
+
+ sspace->move_object = FALSE;
+ sspace->gc = gc;
+
+ sspace_init_chunks(sspace);
+
+ gc_set_pos((GC_Gen*)gc, (Space*)sspace);
+#ifdef SSPACE_VERIFY
+ sspace_verify_init(gc);
+#endif
+ return;
+}
+
+static void sspace_destruct_chunks(Sspace *sspace) { return; }
+
+void sspace_destruct(Sspace *sspace)
+{
+ //FIXME:: when mapping the to-half, the decommission start address should change
+ sspace_destruct_chunks(sspace);
+ STD_FREE(sspace);
+}
+
+void mutator_init_small_chunks(Mutator *mutator)
+{
+ unsigned int size = sizeof(Chunk_Header*) * (SMALL_LOCAL_CHUNK_NUM + MEDIUM_LOCAL_CHUNK_NUM);
+ Chunk_Header **chunks = (Chunk_Header**)STD_MALLOC(size);
+ memset(chunks, 0, size);
+ mutator->small_chunks = chunks;
+ mutator->medium_chunks = chunks + SMALL_LOCAL_CHUNK_NUM;
+}
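+
+/* For reference (illustrative; assumes a 32-bit build): the array above holds
+ SMALL_LOCAL_CHUNK_NUM == 128 >> 2 == 32 small-size pointers plus
+ MEDIUM_LOCAL_CHUNK_NUM == (256 - 128) >> 3 == 16 medium-size pointers, i.e. 48 per mutator. */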
+
+extern void mark_sweep_sspace(Collector *collector);
+
+void sspace_collection(Sspace *sspace)
+{
+ GC *gc = sspace->gc;
+ sspace->num_collections++;
+
+#ifdef SSPACE_ALLOC_INFO
+ sspace_alloc_info_summary();
+#endif
+#ifdef SSPACE_CHUNK_INFO
+ sspace_chunks_info(sspace, TRUE);
+#endif
+
+#ifdef SSPACE_VERIFY
+ sspace_verify_vtable_mark(gc);
+#endif
+
+#ifdef SSPACE_TIME
+ sspace_gc_time(gc, TRUE);
+#endif
+
+ pool_iterator_init(gc->metadata->gc_rootset_pool);
+ sspace_clear_chunk_list(gc);
+
+ collector_execute_task(gc, (TaskType)mark_sweep_sspace, (Space*)sspace);
+
+#ifdef SSPACE_TIME
+ sspace_gc_time(gc, FALSE);
+#endif
+
+#ifdef SSPACE_CHUNK_INFO
+ sspace_chunks_info(sspace, FALSE);
+#endif
+
+}
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace.h drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace.h
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace.h 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace.h 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _SWEEP_SPACE_H_
+#define _SWEEP_SPACE_H_
+
+#include "../thread/gc_thread.h"
+#include "../thread/collector_alloc.h"
+#include "../thread/mutator.h"
+#include "../common/gc_common.h"
+
+/*
+ * The sweep space (sspace) accommodates objects managed by mark-sweep collection
+ */
+
+#define ONLY_SSPACE_IN_HEAP
+
+struct Free_Chunk_List;
+
+typedef struct Sspace {
+ /* <-- first couple of fields are overloaded as Space */
+ void *heap_start;
+ void *heap_end;
+ unsigned int reserved_heap_size;
+ unsigned int committed_heap_size;
+ unsigned int num_collections;
+ int64 time_collections;
+ float survive_ratio;
+ unsigned int collect_algorithm;
+ GC *gc;
+ Boolean move_object;
+ /* Size allocated since the last collection. Not available for fspace now. */
+ unsigned int alloced_size;
+ /* For statistics: not available for fspace now */
+ unsigned int surviving_size;
+ /* END of Space --> */
+
+ Pool **small_pfc_pools;
+ Pool **medium_pfc_pools;
+ Pool **large_pfc_pools;
+ Free_Chunk_List *aligned_free_chunk_lists;
+ Free_Chunk_List *unaligned_free_chunk_lists;
+ Free_Chunk_List *hyper_free_chunk_list;
+} Sspace;
+
+void sspace_initialize(GC *gc, void *start, unsigned int sspace_size, unsigned int commit_size);
+void sspace_destruct(Sspace *sspace);
+
+void *sspace_fast_alloc(unsigned size, Allocator *allocator);
+void *sspace_alloc(unsigned size, Allocator *allocator);
+
+void sspace_reset_for_allocation(Sspace *sspace);
+
+void sspace_collection(Sspace *sspace);
+
+void mutator_init_small_chunks(Mutator *mutator);
+void collector_init_free_chunk_list(Collector *collector);
+
+POINTER_SIZE_INT sspace_free_memory_size(Sspace *sspace);
+
+#endif // _SWEEP_SPACE_H_
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_mark.cpp drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_mark.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_mark.cpp 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_mark.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sspace_mark_sweep.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
+{
+ Partial_Reveal_Object *p_obj = read_slot(p_ref);
+ if( p_obj == NULL) return;
+
+ assert(address_belongs_to_gc_heap(p_obj, collector->gc));
+ if(obj_mark_in_table(p_obj)){
+ assert(p_obj);
+ collector_tracestack_push(collector, p_obj);
+ }
+}
+
+static FORCE_INLINE void scan_object(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+ if(!object_has_ref_field(p_obj)) return;
+
+ REF *p_ref;
+
+ if(object_is_array(p_obj)){ /* scan array object */
+ Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+ unsigned int array_length = array->array_len;
+
+ p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+ for (unsigned int i = 0; i < array_length; i++)
+ scan_slot(collector, p_ref+i);
+
+ return;
+ }
+
+ /* scan non-array object */
+ unsigned int num_refs = object_ref_field_num(p_obj);
+ int *ref_iterator = object_ref_iterator_init(p_obj);
+
+ for(unsigned int i=0; i<num_refs; i++){
+ p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+ scan_slot(collector, p_ref);
+ }
+
+#ifndef BUILD_IN_REFERENT
+ scan_weak_reference(collector, p_obj, scan_slot);
+#endif
+}
+
+static void trace_object(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+ scan_object(collector, p_obj);
+
+ Vector_Block *trace_stack = (Vector_Block*)collector->trace_stack;
+ while(!vector_stack_is_empty(trace_stack)){
+ p_obj = (Partial_Reveal_Object*)vector_stack_pop(trace_stack);
+ scan_object(collector, p_obj);
+ trace_stack = collector->trace_stack;
+ }
+}
+
+/* NOTE:: This is another marking version: marking in the color bitmap table.
+ Originally, we have to mark an object before putting it onto the mark stack, to
+ guarantee there is only one occurrence of the object in the mark stack. This in turn
+ guarantees there is only one occurrence of a repointed ref slot in the repset (slots
+ are put into that set when the object is scanned). If the same object were put onto the
+ mark stack twice, it would be scanned twice and its ref slots would be recorded twice.
+ A problem then occurs when a ref slot has already been updated with the new position:
+ the second time, the value in the ref slot is not the old position as expected.
+ The code needs to read the original obj header for the forwarding pointer, and with the
+ new value it would read something nonsensical, since the object has not been moved yet.
+ This can be worked around if we want, but doing so requires atomic instructions for
+ marking, which is undesirable. So we abandoned that design and no longer use the
+ repset to remember repointed slots.
+*/
+
+/* for marking phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+void sspace_mark_scan(Collector *collector)
+{
+ GC *gc = collector->gc;
+ GC_Metadata *metadata = gc->metadata;
+
+ /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
+ unsigned int num_active_collectors = gc->num_active_collectors;
+ atomic_cas32(&num_finished_collectors, 0, num_active_collectors);
+
+ collector->trace_stack = free_task_pool_get_entry(metadata);
+
+ Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+ /* first step: copy all root objects to mark tasks.
+ FIXME:: can be done sequentially before coming here to eliminate atomic ops */
+ while(root_set){
+ POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ REF *p_ref = (REF *)*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+
+ Partial_Reveal_Object *p_obj = read_slot(p_ref);
+ /* root ref can't be NULL (a remset may have NULL ref entries, but this function is only for MAJOR_COLLECTION) */
+ assert(p_obj!=NULL);
+ /* we have to mark the object before putting it into the mark task, because
+ two slots may contain the same object. It would then be scanned twice
+ and its ref slots would be recorded twice. A problem occurs after a ref slot
+ is updated the first time with the new position: the second time, the value
+ in the ref slot is no longer the old position as expected.
+ This can be worked around if we want.
+ */
+ assert(address_belongs_to_gc_heap(p_obj, gc));
+ if(obj_mark_in_table(p_obj))
+ collector_tracestack_push(collector, p_obj);
+ }
+ root_set = pool_iterator_next(metadata->gc_rootset_pool);
+ }
+ /* put back the last trace_stack task */
+ pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+
+ /* second step: iterate over the mark tasks and scan objects */
+ /* get a task buf for the mark stack */
+ collector->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+ Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
+
+ while(mark_task){
+ POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+ while(!vector_block_iterator_end(mark_task,iter)){
+ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
+ iter = vector_block_iterator_advance(mark_task,iter);
+
+ /* FIXME:: we should not let mark_task become empty while working; others may want to steal from it.
+ Degenerate my stack into mark_task, and grab another mark_task. */
+ trace_object(collector, p_obj);
+ }
+ /* run out one task, put back to the pool and grab another task */
+ vector_stack_clear(mark_task);
+ pool_put_entry(metadata->free_task_pool, mark_task);
+ mark_task = pool_get_entry(metadata->mark_task_pool);
+ }
+
+ /* termination detection. This is also a barrier.
+ NOTE:: We can simply spin waiting on num_finished_collectors, because each
+ newly generated task is sure to be processed eventually by the collector that generated it.
+ So the code below is only a load-balance optimization. */
+ atomic_inc32(&num_finished_collectors);
+ while(num_finished_collectors != num_active_collectors){
+ if(!pool_is_empty(metadata->mark_task_pool)){
+ atomic_dec32(&num_finished_collectors);
+ goto retry;
+ }
+ }
+
+ /* put back the last mark stack to the free pool */
+ mark_task = (Vector_Block*)collector->trace_stack;
+ vector_stack_clear(mark_task);
+ pool_put_entry(metadata->free_task_pool, mark_task);
+ collector->trace_stack = NULL;
+
+ return;
+}
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sspace_mark_sweep.h"
+#include "sspace_verify.h"
+#include "../gen/gen.h"
+#include "../thread/collector.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+
+POINTER_SIZE_INT alloc_mask_in_table = ~BLACK_MASK_IN_TABLE;
+POINTER_SIZE_INT mark_mask_in_table = BLACK_MASK_IN_TABLE;
+POINTER_SIZE_INT cur_alloc_color = OBJ_COLOR_WHITE;
+POINTER_SIZE_INT cur_mark_color = OBJ_COLOR_BLACK;
+
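+/* Illustrative note: at startup cur_alloc_color == OBJ_COLOR_WHITE (0x1), cur_mark_color ==
+ OBJ_COLOR_BLACK (0x2), alloc_mask_in_table == 0x5555... and mark_mask_in_table == 0xAAAA...;
+ each flip below swaps the two colors and complements both masks, so the bits that meant "live"
+ in the finished cycle become the allocation color of the next cycle. */
+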
+static void ops_color_flip(void)
+{
+ POINTER_SIZE_INT temp = cur_alloc_color;
+ cur_alloc_color = cur_mark_color;
+ cur_mark_color = temp;
+ alloc_mask_in_table = ~alloc_mask_in_table;
+ mark_mask_in_table = ~mark_mask_in_table;
+}
+
+void collector_init_free_chunk_list(Collector *collector)
+{
+ Free_Chunk_List *list = (Free_Chunk_List*)STD_MALLOC(sizeof(Free_Chunk_List));
+ free_chunk_list_init(list);
+ collector->free_chunk_list = list;
+}
+
+extern Chunk_Heaer_Basic *volatile next_chunk_for_sweep;
+static void gc_init_chunk_for_sweep(GC *gc, Sspace *sspace)
+{
+ next_chunk_for_sweep = (Chunk_Heaer_Basic*)space_heap_start((Space*)sspace);
+ next_chunk_for_sweep->adj_prev = NULL;
+
+ unsigned int i = gc->num_collectors;
+ while(i--){
+ Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+ assert(!list->head);
+ assert(!list->tail);
+ assert(list->lock == FREE_LOCK);
+ }
+}
+
+
+static volatile unsigned int num_marking_collectors = 0;
+static volatile unsigned int num_sweeping_collectors = 0;
+
+void mark_sweep_sspace(Collector *collector)
+{
+ GC *gc = collector->gc;
+ Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc);
+
+ unsigned int num_active_collectors = gc->num_active_collectors;
+
+ /* Pass 1: **************************************************
+ mark all live objects in heap ****************************/
+ unsigned int old_num = atomic_cas32(&num_marking_collectors, 0, num_active_collectors+1);
+
+ sspace_mark_scan(collector);
+
+ old_num = atomic_inc32(&num_marking_collectors);
+ if( ++old_num == num_active_collectors ){
+ /* last collector's world here */
+#ifdef SSPACE_TIME
+ sspace_mark_time(FALSE);
+#endif
+ if(!IGNORE_FINREF )
+ collector_identify_finref(collector);
+#ifndef BUILD_IN_REFERENT
+ else {
+ gc_set_weakref_sets(gc);
+ gc_update_weakref_ignore_finref(gc);
+ }
+#endif
+ gc_init_chunk_for_sweep(gc, sspace);
+ /* let other collectors go */
+ num_marking_collectors++;
+ }
+ while(num_marking_collectors != num_active_collectors + 1);
+
+ /* Pass 2: **************************************************
+ sweep dead objects ***************************************/
+ atomic_cas32( &num_sweeping_collectors, 0, num_active_collectors);
+
+ sspace_sweep(collector, sspace);
+
+ atomic_inc32(&num_sweeping_collectors);
+ while(num_sweeping_collectors != num_active_collectors);
+
+ if( collector->thread_handle != 0 )
+ return;
+
+ /* Leftover: ************************************************ */
+#ifdef SSPACE_TIME
+ sspace_sweep_time(FALSE);
+#endif
+
+ gc_collect_free_chunks(gc, sspace);
+
+#ifdef SSPACE_TIME
+ sspace_merge_time(FALSE);
+#endif
+
+ ops_color_flip();
+ gc->root_set = NULL; // FIXME:: should be moved to a more appropriate place
+ gc_set_pool_clear(gc->metadata->gc_rootset_pool);
+
+#ifdef SSPACE_VERIFY
+ sspace_verify_after_collection(gc);
+#endif
+}
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_mark_sweep.h 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _SSPACE_MARK_SWEEP_H_
+#define _SSPACE_MARK_SWEEP_H_
+
+#include "sspace_chunk.h"
+#include "sspace_verify.h"
+
+#define PFC_REUSABLE_RATIO 0.1
+
+enum Obj_Color {
+ OBJ_COLOR_BLUE = 0x0,
+ OBJ_COLOR_WHITE = 0x1,
+ OBJ_COLOR_BLACK = 0x2,
+ OBJ_COLOR_GRAY = 0x3,
+ OBJ_COLOR_MASK = 0x3
+};
+
+#ifdef POINTER64
+ #define BLACK_MASK_IN_TABLE ((POINTER_SIZE_INT)0xAAAAAAAAAAAAAAAA)
+#else
+ #define BLACK_MASK_IN_TABLE ((POINTER_SIZE_INT)0xAAAAAAAA)
+#endif
+
+extern POINTER_SIZE_INT alloc_mask_in_table;
+extern POINTER_SIZE_INT mark_mask_in_table;
+extern POINTER_SIZE_INT cur_alloc_color;
+extern POINTER_SIZE_INT cur_mark_color;
+
+#define SUPER_OBJ_MASK ((Obj_Info_Type)0x1) /* the lowest bit in obj info */
+
+inline void set_super_obj_mask(void *large_obj)
+{ ((Partial_Reveal_Object*)large_obj)->obj_info |= SUPER_OBJ_MASK; }
+
+inline Boolean is_super_obj(Partial_Reveal_Object *obj)
+{
+ //return get_obj_info_raw(obj) & SUPER_OBJ_MASK;
+ if(vm_object_size(obj) > SUPER_OBJ_THRESHOLD){
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+inline POINTER_SIZE_INT *get_color_word_in_table(Partial_Reveal_Object *obj, unsigned int &index_in_word)
+{
+ Chunk_Header *chunk;
+ unsigned int index;
+
+ if(is_super_obj(obj)){
+ chunk = ABNORMAL_CHUNK_HEADER(obj);
+ index = 0;
+ } else {
+ chunk = NORMAL_CHUNK_HEADER(obj);
+ index = slot_addr_to_index(chunk, obj);
+ }
+ unsigned int word_index = index / SLOT_NUM_PER_WORD_IN_TABLE;
+ index_in_word = COLOR_BITS_PER_OBJ * (index % SLOT_NUM_PER_WORD_IN_TABLE);
+
+ return &chunk->table[word_index];
+}
+
+/* Accurate marking: returns TRUE if this collector marked the object, FALSE if another collector already did */
+inline Boolean obj_mark_in_table(Partial_Reveal_Object *obj)
+{
+ volatile POINTER_SIZE_INT *p_color_word;
+ unsigned int index_in_word;
+ p_color_word = get_color_word_in_table(obj, index_in_word);
+ assert(p_color_word);
+
+ POINTER_SIZE_INT color_bits_mask = ~((POINTER_SIZE_INT)OBJ_COLOR_MASK << index_in_word);
+ POINTER_SIZE_INT mark_color = cur_mark_color << index_in_word;
+
+ POINTER_SIZE_INT old_word = *p_color_word;
+ POINTER_SIZE_INT new_word = (old_word & color_bits_mask) | mark_color;
+ while(new_word != old_word) {
+ POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)p_color_word, (void*)new_word, (void*)old_word);
+ if(temp == old_word){
+#ifdef SSPACE_VERIFY
+ assert(obj_is_marked_in_vt(obj));
+ obj_unmark_in_vt(obj);
+ sspace_verify_mark(obj, vm_object_size(obj));
+#endif
+ return TRUE;
+ }
+ old_word = *p_color_word;
+ new_word = (old_word & color_bits_mask) | mark_color;
+ }
+
+ return FALSE;
+}
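+
+/* Example for obj_mark_in_table (illustrative; assumes COLOR_BITS_PER_OBJ == 2, so 16 slots per
+ 32-bit table word): for the object in slot 5 of its chunk, index_in_word == 10; with
+ cur_mark_color == OBJ_COLOR_BLACK (0x2) the CAS loop above sets bits 11:10 of the chunk's
+ table[0] to binary 10, i.e. it ORs in 0x800 after clearing the old color of that slot. */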
+
+extern void sspace_mark_scan(Collector *collector);
+extern void sspace_sweep(Collector *collector, Sspace *sspace);
+extern void gc_collect_free_chunks(GC *gc, Sspace *sspace);
+
+extern void chunk_set_slot_index(Chunk_Header* chunk, unsigned int first_free_word_index);
+
+#endif // _SSPACE_MARK_SWEEP_H_
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_sweep.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sspace_chunk.h"
+#include "sspace_mark_sweep.h"
+
+
+Chunk_Heaer_Basic *volatile next_chunk_for_sweep;
+
+
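+/* Population count: "word &= word - 1" clears the lowest set bit, so the loop below iterates
+ once per set bit in the word. */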
+static unsigned int word_set_bit_num(POINTER_SIZE_INT word)
+{
+ unsigned int count = 0;
+
+ while(word){
+ word &= word - 1;
+ ++count;
+ }
+ return count;
+}
+
+static Chunk_Heaer_Basic *sspace_get_next_sweep_chunk(Collector *collector, Sspace *sspace)
+{
+ Chunk_Heaer_Basic *cur_sweep_chunk = next_chunk_for_sweep;
+
+ Chunk_Heaer_Basic *sspace_ceiling = (Chunk_Heaer_Basic*)space_heap_end((Space*)sspace);
+ while(cur_sweep_chunk < sspace_ceiling){
+ Chunk_Heaer_Basic *next_sweep_chunk = CHUNK_END(cur_sweep_chunk);
+
+ Chunk_Heaer_Basic *temp = (Chunk_Heaer_Basic*)atomic_casptr((volatile void **)&next_chunk_for_sweep, next_sweep_chunk, cur_sweep_chunk);
+ if(temp == cur_sweep_chunk){
+ if(next_sweep_chunk < sspace_ceiling)
+ next_sweep_chunk->adj_prev = cur_sweep_chunk;
+ return cur_sweep_chunk;
+ }
+ cur_sweep_chunk = next_chunk_for_sweep;
+ }
+
+ return NULL;
+}
+
+static void collector_add_free_chunk(Collector *collector, Free_Chunk *chunk)
+{
+ Free_Chunk_List *list = collector->free_chunk_list;
+
+ chunk->status = CHUNK_FREE | CHUNK_IN_USE;
+ chunk->next = list->head;
+ chunk->prev = NULL;
+ if(list->head)
+ list->head->prev = chunk;
+ else
+ list->tail = chunk;
+ list->head = chunk;
+}
+
+void zeroing_free_chunk(Free_Chunk *chunk)
+{
+ assert(chunk->status == CHUNK_FREE);
+
+ void *start = (void*)((POINTER_SIZE_INT)chunk + sizeof(Free_Chunk));
+ POINTER_SIZE_INT size = CHUNK_SIZE(chunk) - sizeof(Free_Chunk);
+ memset(start, 0, size);
+}
+
+/* Zeroing should be optimized by doing it while sweeping the index words */
+static void zeroing_free_areas_in_pfc(Chunk_Header *chunk, unsigned int live_num)
+{
+ assert(live_num);
+
+ assert(chunk->status & CHUNK_NORMAL);
+ unsigned int slot_num = chunk->slot_num;
+ unsigned int slot_size = chunk->slot_size;
+ POINTER_SIZE_INT chunk_base = (POINTER_SIZE_INT)chunk->base;
+ POINTER_SIZE_INT *table = chunk->table;
+
+ POINTER_SIZE_INT base = (POINTER_SIZE_INT)NULL;
+ assert(slot_num >= live_num);
+ unsigned int free_slot_num = slot_num - live_num;
+ unsigned int cur_free_slot_num = 0;
+ unsigned int slot_index = chunk->slot_index;
+ unsigned int word_index = slot_index / SLOT_NUM_PER_WORD_IN_TABLE;
+ assert(live_num >= slot_index);
+ live_num -= slot_index;
+ POINTER_SIZE_INT index_word = table[word_index];
+ POINTER_SIZE_INT mark_color = cur_mark_color << (COLOR_BITS_PER_OBJ * (slot_index % SLOT_NUM_PER_WORD_IN_TABLE));
+ for(; slot_index < slot_num; ++slot_index){
+ assert(!(index_word & ~mark_mask_in_table));
+ if(index_word & mark_color){
+ if(cur_free_slot_num){
+ memset((void*)base, 0, slot_size*cur_free_slot_num);
+ assert(free_slot_num >= cur_free_slot_num);
+ free_slot_num -= cur_free_slot_num;
+ cur_free_slot_num = 0;
+ if(!free_slot_num) break;
+ }
+ assert(live_num);
+ --live_num;
+ } else {
+ if(cur_free_slot_num){
+ ++cur_free_slot_num;
+ } else {
+ base = chunk_base + slot_size * slot_index;
+ cur_free_slot_num = 1;
+ if(!live_num) break;
+ }
+ }
+ mark_color <<= COLOR_BITS_PER_OBJ;
+ if(!mark_color){
+ mark_color = cur_mark_color;
+ ++word_index;
+ index_word = table[word_index];
+ while(index_word == mark_mask_in_table && cur_free_slot_num == 0 && slot_index < slot_num){
+ slot_index += SLOT_NUM_PER_WORD_IN_TABLE;
+ ++word_index;
+ index_word = table[word_index];
+ assert(live_num >= SLOT_NUM_PER_WORD_IN_TABLE);
+ live_num -= SLOT_NUM_PER_WORD_IN_TABLE;
+ }
+ while(index_word == 0 && cur_free_slot_num > 0 && slot_index < slot_num){
+ slot_index += SLOT_NUM_PER_WORD_IN_TABLE;
+ ++word_index;
+ index_word = table[word_index];
+ cur_free_slot_num += SLOT_NUM_PER_WORD_IN_TABLE;
+ }
+ }
+ }
+ assert((cur_free_slot_num>0 && live_num==0) || (cur_free_slot_num==0 && live_num>0));
+ if(cur_free_slot_num)
+ memset((void*)base, 0, slot_size*free_slot_num);
+}
+
+static void collector_sweep_normal_chunk(Collector *collector, Sspace *sspace, Chunk_Header *chunk)
+{
+ unsigned int slot_num = chunk->slot_num;
+ unsigned int live_num = 0;
+ unsigned int first_free_word_index = MAX_SLOT_INDEX;
+ POINTER_SIZE_INT *table = chunk->table;
+
+ unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
+ for(unsigned int i=0; i<index_word_num; ++i){
+ table[i] &= mark_mask_in_table;
+ unsigned int live_num_in_word = (table[i] == mark_mask_in_table) ? SLOT_NUM_PER_WORD_IN_TABLE : word_set_bit_num(table[i]);
+ live_num += live_num_in_word;
+ if((first_free_word_index == MAX_SLOT_INDEX) && (live_num_in_word < SLOT_NUM_PER_WORD_IN_TABLE)){
+ first_free_word_index = i;
+ chunk_set_slot_index(chunk, first_free_word_index);
+ }
+ }
+
+#ifdef SSPACE_VERIFY
+ collector->live_obj_num += live_num;
+ //printf("Chunk: %x live obj: %d slot num: %d\n", (POINTER_SIZE_INT)chunk, live_num, slot_num);
+#endif
+ if(!live_num){ /* all objects in this chunk are dead */
+ collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+ } else if((float)(slot_num-live_num)/slot_num > PFC_REUSABLE_RATIO){ /* most objects in this chunk are swept, add chunk to pfc list*/
+#ifdef SSPACE_VERIFY
+ //zeroing_free_areas_in_pfc((Chunk_Header*)chunk, live_num);
+#endif
+ chunk_pad_last_index_word((Chunk_Header*)chunk, mark_mask_in_table);
+ sspace_put_pfc(sspace, chunk, chunk->slot_size);
+ }
+ /* the rest: chunks with free rate < 0.1. we don't use them */
+#ifdef SSPACE_VERIFY
+ //else// if(live_num < slot_num)
+ //zeroing_free_areas_in_pfc((Chunk_Header*)chunk, live_num);
+#endif
+}
+
+void sspace_sweep(Collector *collector, Sspace *sspace)
+{
+ Chunk_Heaer_Basic *chunk;
+#ifdef SSPACE_VERIFY
+ collector->live_obj_num = 0;
+#endif
+
+ chunk = sspace_get_next_sweep_chunk(collector, sspace);
+ while(chunk){
+ /* chunk is free before GC */
+ if(chunk->status == CHUNK_FREE){
+ collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+ } else if(chunk->status & CHUNK_NORMAL){ /* chunk is used as a normal sized obj chunk */
+ collector_sweep_normal_chunk(collector, sspace, (Chunk_Header*)chunk);
+ } else { /* chunk is used as a super obj chunk */
+ assert(chunk->status & (CHUNK_IN_USE | CHUNK_ABNORMAL));
+ POINTER_SIZE_INT *table = ((Chunk_Header*)chunk)->table;
+ table[0] &= mark_mask_in_table;
+ if(!table[0]){
+ collector_add_free_chunk(collector, (Free_Chunk*)chunk);
+ }
+#ifdef SSPACE_VERIFY
+ else {
+ collector->live_obj_num++;
+ }
+#endif
+ }
+
+ chunk = sspace_get_next_sweep_chunk(collector, sspace);
+ }
+}
+
+static void free_list_detach_chunk(Free_Chunk_List *list, Free_Chunk *chunk)
+{
+ if(chunk->prev)
+ chunk->prev->next = chunk->next;
+ else // chunk is the head
+ list->head = chunk->next;
+ if(chunk->next)
+ chunk->next->prev = chunk->prev;
+}
+
+void gc_collect_free_chunks(GC *gc, Sspace *sspace)
+{
+ Free_Chunk *sspace_ceiling = (Free_Chunk*)space_heap_end((Space*)sspace);
+
+ Free_Chunk_List free_chunk_list;
+ free_chunk_list.head = NULL;
+ free_chunk_list.tail = NULL;
+
+ /* Collect free chunks from collectors to one list */
+ for(unsigned int i=0; i<gc->num_collectors; ++i){
+ Free_Chunk_List *list = gc->collectors[i]->free_chunk_list;
+ if(free_chunk_list.tail){
+ free_chunk_list.head->prev = list->tail;
+ } else {
+ free_chunk_list.tail = list->tail;
+ }
+ if(list->head){
+ list->tail->next = free_chunk_list.head;
+ free_chunk_list.head = list->head;
+ }
+ list->head = NULL;
+ list->tail = NULL;
+ }
+
+ Free_Chunk *chunk = free_chunk_list.head;
+ while(chunk){
+ assert(chunk->status == (CHUNK_FREE | CHUNK_IN_USE));
+ /* Remove current chunk from the chunk list */
+ free_chunk_list.head = chunk->next;
+ if(free_chunk_list.head)
+ free_chunk_list.head->prev = NULL;
+ /* Check if the following adjacent chunks are free */
+ Free_Chunk *back_chunk = (Free_Chunk*)chunk->adj_next;
+ while(back_chunk < sspace_ceiling && back_chunk->status == (CHUNK_FREE | CHUNK_IN_USE)){
+ /* Remove back_chunk from list */
+ free_list_detach_chunk(&free_chunk_list, back_chunk);
+ chunk->adj_next = back_chunk->adj_next;
+ back_chunk = (Free_Chunk*)chunk->adj_next;
+ }
+ /* Check if the prev adjacent chunks are free */
+ Free_Chunk *prev_chunk = (Free_Chunk*)chunk->adj_prev;
+ while(prev_chunk && prev_chunk->status == (CHUNK_FREE | CHUNK_IN_USE)){
+ /* Remove prev_chunk from list */
+ free_list_detach_chunk(&free_chunk_list, prev_chunk);
+ prev_chunk->adj_next = chunk->adj_next;
+ chunk = prev_chunk;
+ prev_chunk = (Free_Chunk*)chunk->adj_prev;
+ }
+
+ //zeroing_free_chunk(chunk);
+
+ /* put the free chunk to the according free chunk list */
+ sspace_put_free_chunk(sspace, chunk);
+
+ chunk = free_chunk_list.head;
+ }
+}
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_verify.cpp drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_verify.cpp
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_verify.cpp 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_verify.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,542 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sspace_verify.h"
+#include "sspace_chunk.h"
+#include "sspace_mark_sweep.h"
+#include "../utils/vector_block.h"
+#include "../gen/gen.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
+#ifdef SSPACE_VERIFY
+
+#define VERIFY_CARD_SIZE_BYTES_SHIFT 12
+#define VERIFY_CARD_SIZE_BYTES (1 << VERIFY_CARD_SIZE_BYTES_SHIFT)
+#define VERIFY_CARD_LOW_MASK (VERIFY_CARD_SIZE_BYTES - 1)
+#define VERIFY_CARD_HIGH_MASK (~VERIFY_CARD_LOW_MASK)
+
+#define VERIFY_MAX_OBJ_SIZE_BYTES (1 << (32-VERIFY_CARD_SIZE_BYTES_SHIFT))
+
+typedef struct Verify_Card {
+ SpinLock lock;
+ Vector_Block *block;
+} Verify_Card;
+
+typedef unsigned int Obj_Addr;
+
+static GC *gc_in_verify = NULL;
+static Verify_Card *alloc_verify_cards = NULL;
+static Verify_Card *mark_verify_cards = NULL;
+static POINTER_SIZE_INT card_num = 0;
+static POINTER_SIZE_INT alloc_obj = 0;
+volatile POINTER_SIZE_INT live_obj_in_mark = 0;
+
+void sspace_verify_init(GC *gc)
+{
+ gc_in_verify = gc;
+
+ POINTER_SIZE_INT heap_size = gc_gen_total_memory_size((GC_Gen*)gc);
+ card_num = heap_size >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+ POINTER_SIZE_INT cards_size = sizeof(Verify_Card) * card_num;
+
+ alloc_verify_cards = (Verify_Card*)STD_MALLOC(cards_size);
+ memset(alloc_verify_cards, 0, cards_size);
+
+ mark_verify_cards = (Verify_Card*)STD_MALLOC(cards_size);
+ memset(mark_verify_cards, 0, cards_size);
+}
+
+static Obj_Addr compose_obj_addr(unsigned int offset, unsigned int size)
+{
+ assert(size < VERIFY_MAX_OBJ_SIZE_BYTES);
+ return offset | (size << VERIFY_CARD_SIZE_BYTES_SHIFT);
+}
+
+static void *decompose_obj_addr(Obj_Addr obj_addr, POINTER_SIZE_INT card_index, unsigned int & size)
+{
+ assert(card_index < card_num);
+ POINTER_SIZE_INT card_offset = obj_addr & VERIFY_CARD_LOW_MASK;
+ POINTER_SIZE_INT heap_offset = VERIFY_CARD_SIZE_BYTES * card_index + card_offset;
+ size = (obj_addr & VERIFY_CARD_HIGH_MASK) >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+ assert(size < VERIFY_MAX_OBJ_SIZE_BYTES);
+ return (void*)(heap_offset + (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+}
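+
+/* Worked example (illustrative): an object at heap offset 0x5A10 with rounded size 0x30 falls in
+ card 0x5 with card_offset 0xA10; compose_obj_addr(0xA10, 0x30) packs it as 0xA10 | (0x30 << 12)
+ == 0x30A10, and decompose_obj_addr(0x30A10, 0x5, size) recovers the heap address and size == 0x30. */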
+
+static Boolean obj_addr_overlapped(Obj_Addr addr1, Obj_Addr addr2)
+{
+ unsigned int offset1 = addr1 & VERIFY_CARD_LOW_MASK;
+ unsigned int size1 = (addr1 & VERIFY_CARD_HIGH_MASK) >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+ unsigned int ceiling1 = offset1 + size1;
+ unsigned int offset2 = addr2 & VERIFY_CARD_LOW_MASK;
+ unsigned int size2 = (addr2 & VERIFY_CARD_HIGH_MASK) >> VERIFY_CARD_SIZE_BYTES_SHIFT;
+ unsigned int ceiling2 = offset2 + size2;
+
+ unsigned int reason = 0;
+ if(offset1 == offset2)
+ reason = 1;
+ if((offset1 < offset2) && (ceiling1 > offset2))
+ reason = 2;
+ if((offset2 < offset1) && (ceiling2 > offset1))
+ reason = 3;
+ if(!reason)
+ return FALSE;
+ printf("\nreason: %d\nold offset: %x size: %d\nnew offset: %x size: %d", reason, (POINTER_SIZE_INT)offset1, size1, (POINTER_SIZE_INT)offset2, size2);
+ return TRUE;
+}
+
+static Vector_Block *create_vector_block(unsigned int size)
+{
+ Vector_Block *block = (Vector_Block*)STD_MALLOC(size);
+ vector_block_init(block, size);
+ return block;
+}
+
+static void verify_card_get_block(Verify_Card *card)
+{
+ lock(card->lock);
+ if(card->block){
+ unlock(card->lock);
+ return;
+ }
+ card->block = create_vector_block(VECTOR_BLOCK_DATA_SIZE_BYTES);
+ unlock(card->lock);
+}
+
+void sspace_verify_alloc(void *addr, unsigned int size)
+{
+ assert(address_belongs_to_gc_heap(addr, gc_in_verify));
+ ++alloc_obj;
+
+ unsigned int heap_offset = (unsigned int)((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+ unsigned int card_offset = heap_offset & VERIFY_CARD_LOW_MASK;
+ Verify_Card *card = &alloc_verify_cards[heap_offset >> VERIFY_CARD_SIZE_BYTES_SHIFT];
+
+ verify_card_get_block(card);
+ Vector_Block *block = card->block;
+
+ Obj_Addr obj_addr = compose_obj_addr(card_offset, size);
+
+ lock(card->lock);
+ Obj_Addr *p_addr = block->head;
+ while(p_addr < block->tail){
+ assert(!obj_addr_overlapped(obj_addr, *p_addr));
+ p_addr++;
+ }
+ vector_block_add_entry(block, obj_addr);
+ unlock(card->lock);
+}
+
+/* size is the rounded-up size */
+static Boolean obj_position_is_correct(void *addr, unsigned int size)
+{
+ Chunk_Header *chunk = NULL;
+
+ if(size <= SUPER_OBJ_THRESHOLD)
+ chunk = NORMAL_CHUNK_HEADER(addr);
+ else
+ chunk = ABNORMAL_CHUNK_HEADER(addr);
+ if(chunk->slot_size != size) return FALSE;
+ if(((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)chunk->base) % size != 0) return FALSE;
+ return TRUE;
+}
+
+/* size is real size of obj */
+void sspace_verify_mark(void *addr, unsigned int size)
+{
+ assert(address_belongs_to_gc_heap(addr, gc_in_verify));
+ atomic_inc32(&live_obj_in_mark);
+
+ unsigned int heap_offset = (unsigned int)((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+ unsigned int card_offset = heap_offset & VERIFY_CARD_LOW_MASK;
+ Verify_Card *card = &mark_verify_cards[heap_offset >> VERIFY_CARD_SIZE_BYTES_SHIFT];
+
+ verify_card_get_block(card);
+ Vector_Block *block = card->block;
+
+ if(size <= MEDIUM_OBJ_THRESHOLD)
+ size = SMALL_SIZE_ROUNDUP(size);
+ else if(size <= LARGE_OBJ_THRESHOLD)
+ size = MEDIUM_SIZE_ROUNDUP(size);
+ else if(size <= SUPER_OBJ_THRESHOLD)
+ size = LARGE_SIZE_ROUNDUP(size);
+ assert(obj_position_is_correct(addr, size));
+ Obj_Addr obj_addr = compose_obj_addr(card_offset, size);
+
+ lock(card->lock);
+ Obj_Addr *p_addr = block->head;
+ while(p_addr < block->tail){
+ assert(!obj_addr_overlapped(obj_addr, *p_addr));
+ p_addr++;
+ }
+ vector_block_add_entry(block, obj_addr);
+ unlock(card->lock);
+}
+
+static void reverify_mark(void *addr, unsigned int size)
+{
+ assert(address_belongs_to_gc_heap(addr, gc_in_verify));
+
+ unsigned int heap_offset = (unsigned int)((POINTER_SIZE_INT)addr - (POINTER_SIZE_INT)gc_heap_base(gc_in_verify));
+ unsigned int card_offset = heap_offset & VERIFY_CARD_LOW_MASK;
+ Verify_Card *card = &mark_verify_cards[heap_offset >> VERIFY_CARD_SIZE_BYTES_SHIFT];
+
+ Vector_Block *block = card->block;
+ assert(block);
+
+ Obj_Addr obj_addr = compose_obj_addr(card_offset, size);
+
+ Obj_Addr *p_addr = block->head;
+ while(p_addr < block->tail){
+ if(obj_addr == *p_addr){
+ *p_addr = 0;
+ break;
+ }
+ p_addr++;
+ }
+ assert(p_addr < block->tail);
+}
+
+static void check_mark_cards(void)
+{
+ for(POINTER_SIZE_INT i=0; i<card_num; ++i){
+ Vector_Block *block = mark_verify_cards[i].block;
+ if(!block) continue;
+ Obj_Addr *p_addr = block->head;
+ while(p_addr < block->tail){
+ if(*p_addr){
+ unsigned int size = 0;
+ void *addr = NULL;
+ addr = decompose_obj_addr(*p_addr, i, size);
+ printf("Extra mark obj: %x size: %d\n", (POINTER_SIZE_INT)addr, size);
+ }
+ p_addr++;
+ }
+ vector_block_clear(block);
+ }
+}
+
+static void clear_verify_cards(void)
+{
+ for(POINTER_SIZE_INT i=0; i<card_num; ++i){
+ Verify_Card *card = &alloc_verify_cards[i];
+ if(card->block)
+ vector_block_clear(card->block);
+ }
+}
+
+static void summarize_sweep_verify(GC *gc)
+{
+ POINTER_SIZE_INT live_obj_num = 0;
+ for(unsigned int i=0; i<gc->num_collectors; ++i){
+ live_obj_num += gc->collectors[i]->live_obj_num;
+ }
+ printf("Live obj in sweeping: %d\n", live_obj_num);
+}
+
+void sspace_verify_free_area(POINTER_SIZE_INT *start, POINTER_SIZE_INT size)
+{
+ POINTER_SIZE_INT *p_value = start;
+
+ assert(!(size % BYTES_PER_WORD));
+ size /= BYTES_PER_WORD;
+ while(size--)
+ assert(!*p_value++);
+}
+
+void sspace_verify_after_collection(GC *gc)
+{
+ printf("Allocated obj: %d\n", alloc_obj);
+ alloc_obj = 0;
+ printf("Live obj in marking: %d\n", live_obj_in_mark);
+ live_obj_in_mark = 0;
+
+ summarize_sweep_verify(gc);
+
+ clear_verify_cards();
+
+ Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc);
+ Chunk_Header *chunk = (Chunk_Header*)space_heap_start((Space*)sspace);
+ Chunk_Header *sspace_ceiling = (Chunk_Header*)space_heap_end((Space*)sspace);
+ POINTER_SIZE_INT total_live_obj = 0;
+
+ for(; chunk < sspace_ceiling; chunk = (Chunk_Header*)CHUNK_END(chunk)){
+ /* chunk is free before GC */
+ if(chunk->status == CHUNK_FREE){
+ unsigned int header_size = sizeof(Free_Chunk);
+ //sspace_verify_free_area((POINTER_SIZE_INT*)((POINTER_SIZE_INT)chunk + header_size), (POINTER_SIZE_INT)chunk->adj_next - (POINTER_SIZE_INT)chunk - header_size);
+ continue;
+ }
+ if(chunk->status & CHUNK_ABNORMAL){
+ assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_IN_USE));
+ assert(chunk->slot_size > SUPER_OBJ_THRESHOLD);
+ Partial_Reveal_Object *obj = (Partial_Reveal_Object*)chunk->base;
+ assert(chunk->slot_size == vm_object_size(obj));
+ assert(get_obj_info_raw(obj) & SUPER_OBJ_MASK);
+ }
+ /* chunk is used as a normal sized obj chunk */
+ unsigned int slot_num = chunk->slot_num;
+ POINTER_SIZE_INT *table = chunk->table;
+ POINTER_SIZE_INT live_obj_in_chunk = 0;
+
+ unsigned int word_index = 0;
+ for(unsigned int i=0; i<slot_num; ++i){
+ unsigned int index_in_word = COLOR_BITS_PER_OBJ * (i % SLOT_NUM_PER_WORD_IN_TABLE);
+ word_index = i / SLOT_NUM_PER_WORD_IN_TABLE;
+ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)chunk->base + i * chunk->slot_size);
+ /* after ops_color_flip() the bits of live objects carry cur_alloc_color */
+ if(table[word_index] & (cur_alloc_color << index_in_word)){
+ assert(vm_object_size(p_obj) <= chunk->slot_size);
+ reverify_mark(p_obj, chunk->slot_size);
+ ++live_obj_in_chunk;
+ } else {
+ //sspace_verify_free_area((POINTER_SIZE_INT*)p_obj, chunk->slot_size);
+ }
+ }
+ total_live_obj += live_obj_in_chunk;
+ }
+ printf("Live obj after collection: %d\n", total_live_obj);
+ check_mark_cards();
+}
+
+/*
+void sspace_verify_super_obj(GC *gc)
+{
+ Sspace *sspace = (Sspace*)gc_get_pos((GC_Gen*)gc);
+ Chunk_Header *chunk = (Chunk_Header*)space_heap_start((Space*)sspace);
+ Chunk_Header *sspace_ceiling = (Chunk_Header*)space_heap_end((Space*)sspace);
+
+ for(; chunk < sspace_ceiling; chunk = (Chunk_Header*)CHUNK_END(chunk)){
+ if(chunk->status & CHUNK_ABNORMAL){
+ assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_IN_USE));
+ assert(chunk->slot_size > SUPER_OBJ_THRESHOLD);
+ Partial_Reveal_Object *obj = (Partial_Reveal_Object*)chunk->base;
+ assert(chunk->slot_size == vm_object_size(obj));
+ assert(get_obj_info_raw(obj) & SUPER_OBJ_MASK);
+ }
+ }
+}
+*/
+
+
+/* sspace verify marking with vtable marking in advance */
+
+static Pool *trace_pool = NULL;
+static Vector_Block *trace_stack = NULL;
+POINTER_SIZE_INT live_obj_in_verify_marking = 0;
+
+static Boolean obj_mark_in_vtable(GC *gc, Partial_Reveal_Object *obj)
+{
+ assert(address_belongs_to_gc_heap(obj, gc));
+ assert((vm_object_size(obj) <= SUPER_OBJ_THRESHOLD) || (get_obj_info_raw(obj) & SUPER_OBJ_MASK));
+ Boolean marked = obj_mark_in_vt(obj);
+#ifdef SSPACE_VERIFY
+ if(marked) live_obj_in_verify_marking++;
+#endif
+ return marked;
+}
+
+static void tracestack_push(void *p_obj)
+{
+ vector_stack_push(trace_stack, (POINTER_SIZE_INT)p_obj);
+
+ if( !vector_stack_is_full(trace_stack)) return;
+
+ pool_put_entry(trace_pool, trace_stack);
+ trace_stack = free_task_pool_get_entry(&gc_metadata);
+ assert(trace_stack);
+}
+
+static FORCE_INLINE void scan_slot(GC *gc, REF *p_ref)
+{
+ Partial_Reveal_Object *p_obj = read_slot(p_ref);
+ if( p_obj == NULL) return;
+
+ if(obj_mark_in_vtable(gc, p_obj))
+ tracestack_push(p_obj);
+
+ return;
+}
+
+static FORCE_INLINE void scan_object(GC *gc, Partial_Reveal_Object *p_obj)
+{
+ if(!object_has_ref_field(p_obj) ) return;
+
+ REF *p_ref;
+
+ if (object_is_array(p_obj)) { /* scan array object */
+
+ Partial_Reveal_Array *array = (Partial_Reveal_Array*)p_obj;
+ unsigned int array_length = array->array_len;
+
+ p_ref = (REF*)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+
+ for (unsigned int i = 0; i < array_length; i++) {
+ scan_slot(gc, p_ref+i);
+ }
+ } else { /* scan non-array object */
+
+ unsigned int num_refs = object_ref_field_num(p_obj);
+
+ int *ref_iterator = object_ref_iterator_init(p_obj);
+
+ for(unsigned int i=0; i<num_refs; i++){
+ p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+ scan_slot(gc, p_ref);
+ }
+ }
+}
+
+static void trace_object(GC *gc, Partial_Reveal_Object *p_obj)
+{
+ scan_object(gc, p_obj);
+
+ while(!vector_stack_is_empty(trace_stack)){
+ p_obj = (Partial_Reveal_Object*)vector_stack_pop(trace_stack);
+ scan_object(gc, p_obj);
+ }
+}
+
+void sspace_verify_vtable_mark(GC *gc)
+{
+ GC_Metadata *metadata = gc->metadata;
+ Pool *rootset_pool = metadata->gc_rootset_pool;
+
+ trace_stack = free_task_pool_get_entry(metadata);
+ trace_pool = sync_pool_create();
+
+ pool_iterator_init(rootset_pool);
+ Vector_Block *root_set = pool_iterator_next(rootset_pool);
+
+ while(root_set){
+ POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set, iter)){
+ REF *p_ref = (REF*)*iter;
+ iter = vector_block_iterator_advance(root_set, iter);
+
+ Partial_Reveal_Object *p_obj = read_slot(p_ref);
+ assert(p_obj!=NULL);
+ if(obj_mark_in_vtable(gc, p_obj))
+ tracestack_push(p_obj);
+ }
+ root_set = pool_iterator_next(metadata->gc_rootset_pool);
+ }
+ /* put back the last trace_stack task */
+ pool_put_entry(trace_pool, trace_stack);
+
+ /* second step: iterate over the mark tasks and scan objects */
+ /* get a task buf for the mark stack */
+ trace_stack = free_task_pool_get_entry(metadata);
+
+ Vector_Block *mark_task = pool_get_entry(trace_pool);
+
+ while(mark_task){
+ POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
+ while(!vector_block_iterator_end(mark_task, iter)){
+ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
+ iter = vector_block_iterator_advance(mark_task, iter);
+
+ trace_object(gc, p_obj);
+ }
+ /* run out one task, put back to the pool and grab another task */
+ vector_stack_clear(mark_task);
+ pool_put_entry(metadata->free_task_pool, mark_task);
+ mark_task = pool_get_entry(trace_pool);
+ }
+
+ /* put back the last mark stack to the free pool */
+ vector_stack_clear(trace_stack);
+ pool_put_entry(metadata->free_task_pool, trace_stack);
+ trace_stack = NULL;
+ sync_pool_destruct(trace_pool);
+ trace_pool = NULL;
+ printf("Live obj in vtable marking: %d\n", live_obj_in_verify_marking);
+ live_obj_in_verify_marking = 0;
+}
+
+
+#endif
+
+
+
+#ifdef SSPACE_TIME
+
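+/* Read the IA-32 time-stamp counter: 0x0F 0x31 is the rdtsc opcode (MSVC inline asm);
+ the 64-bit result is left in EDX:EAX, which MSVC returns for a uint64 function. */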
+inline uint64 tsc()
+{
+ __asm _emit 0x0F;
+ __asm _emit 0x31
+}
+
+#define CPU_HZ 3000000 // CPU cycles per millisecond (assumes a 3 GHz CPU)
+
+static uint64 gc_start_time;
+static uint64 mark_start_time;
+static uint64 sweep_start_time;
+static uint64 merge_start_time;
+
+void sspace_gc_time(GC *gc, Boolean before_gc)
+{
+ if(before_gc){
+ gc_start_time = tsc();
+ mark_start_time = gc_start_time;
+ } else {
+ uint64 end_time = tsc();
+ assert(end_time > gc_start_time);
+ printf("\n\nGC %d time: %dms\n\n", gc->num_collections, (end_time-gc_start_time) / CPU_HZ);
+ }
+}
+
+void sspace_mark_time(Boolean before_mark)
+{
+ assert(before_mark == FALSE);
+ if(before_mark){
+ mark_start_time = tsc();
+ } else {
+ uint64 end_time = tsc();
+ assert(end_time > mark_start_time);
+ printf("\nMark time: %dms\n", (end_time-mark_start_time) / CPU_HZ);
+ sweep_start_time = end_time;
+ }
+}
+
+void sspace_sweep_time(Boolean before_sweep)
+{
+ assert(before_sweep == FALSE);
+ if(before_sweep){
+ sweep_start_time = tsc();
+ } else {
+ uint64 end_time = tsc();
+ assert(end_time > sweep_start_time);
+ printf("\nSweep time: %dms\n", (end_time-sweep_start_time) / CPU_HZ);
+ merge_start_time = end_time;
+ }
+}
+
+void sspace_merge_time(Boolean before_merge)
+{
+ assert(before_merge == FALSE);
+ if(before_merge){
+ merge_start_time = tsc();
+ } else {
+ uint64 end_time = tsc();
+ assert(end_time > merge_start_time);
+ printf("\nMerge time: %dms\n\n", (end_time-merge_start_time) / CPU_HZ);
+ }
+}
+
+#endif
diff -ruN drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_verify.h drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_verify.h
--- drlvm-0703-svn/vm/gc_gen/src/mark_sweep/sspace_verify.h 1970-01-01 08:00:00.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/mark_sweep/sspace_verify.h 2007-07-04 01:42:45.000000000 +0800
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _SSPACE_VERIFY_H_
+#define _SSPACE_VERIFY_H_
+
+#include "../common/gc_common.h"
+
+#define SSPACE_VERIFY
+#define SSPACE_CHUNK_INFO
+#define SSPACE_ALLOC_INFO
+//#define SSPACE_TIME
+
+struct Sspace;
+
+void sspace_verify_init(GC *gc);
+void sspace_verify_alloc(void *addr, unsigned int size);
+void sspace_verify_vtable_mark(GC *gc);
+void sspace_verify_mark(void *addr, unsigned int size);
+void sspace_verify_free_area(POINTER_SIZE_INT *start, POINTER_SIZE_INT size);
+void sspace_verify_after_collection(GC *gc);
+
+void sspace_chunks_info(Sspace *sspace, Boolean before_gc);
+void sspace_alloc_info(unsigned int size);
+void sspace_alloc_info_summary(void);
+
+void sspace_gc_time(GC *gc, Boolean before_gc);
+void sspace_mark_time(Boolean before_mark);
+void sspace_sweep_time(Boolean before_sweep);
+void sspace_merge_time(Boolean before_merge);
+
+#endif // _SSPACE_VERIFY_H_
diff -ruN drlvm-0703-svn/vm/gc_gen/src/thread/collector.h drlvm-0703-new/vm/gc_gen/src/thread/collector.h
--- drlvm-0703-svn/vm/gc_gen/src/thread/collector.h 2007-07-03 23:22:25.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/thread/collector.h 2007-07-04 01:42:45.000000000 +0800
@@ -22,8 +22,11 @@
#define _COLLECTOR_H_
#include "../common/gc_space.h"
+#include "../mark_sweep/sspace_verify.h"
+
struct Block_Header;
struct Stealable_Stack;
+struct Free_Chunk_List;
#define NORMAL_SIZE_SEGMENT_GRANULARITY_BITS 8
#define NORMAL_SIZE_SEGMENT_GRANULARITY (1 << NORMAL_SIZE_SEGMENT_GRANULARITY_BITS)
@@ -63,6 +66,11 @@
Block_Header* cur_compact_block;
Block_Header* cur_target_block;
+ Free_Chunk_List *free_chunk_list;
+#ifdef SSPACE_VERIFY
+ POINTER_SIZE_INT live_obj_num;
+#endif
+
void(*task_func)(void*) ; /* current task */
POINTER_SIZE_INT non_los_live_obj_size;
diff -ruN drlvm-0703-svn/vm/gc_gen/src/thread/mutator.h drlvm-0703-new/vm/gc_gen/src/thread/mutator.h
--- drlvm-0703-svn/vm/gc_gen/src/thread/mutator.h 2007-07-03 23:22:25.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/thread/mutator.h 2007-07-04 01:42:45.000000000 +0800
@@ -23,6 +23,8 @@
#include "../common/gc_space.h"
+struct Chunk_Header;
+
/* Mutator thread local information for GC */
typedef struct Mutator {
/* <-- first couple of fields are overloaded as Allocator */
@@ -37,6 +39,8 @@
Vector_Block* rem_set;
Vector_Block* obj_with_fin;
+ Chunk_Header **small_chunks;
+ Chunk_Header **medium_chunks;
Mutator* next; /* The gc info area associated with the next active thread. */
} Mutator;
diff -ruN drlvm-0703-svn/vm/gc_gen/src/verify/verifier_common.cpp drlvm-0703-new/vm/gc_gen/src/verify/verifier_common.cpp
--- drlvm-0703-svn/vm/gc_gen/src/verify/verifier_common.cpp 2007-07-03 23:22:24.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/verify/verifier_common.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -253,4 +253,3 @@
printf(" %-14s: %-7s | Before %10d | After %10d |\n", "hashcode", "NUM", gc_verifier->num_hash_before_gc, gc_verifier->num_hash_after_gc);
}
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/verify/verifier_scanner.cpp drlvm-0703-new/vm/gc_gen/src/verify/verifier_scanner.cpp
--- drlvm-0703-svn/vm/gc_gen/src/verify/verifier_scanner.cpp 2007-07-03 23:22:24.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/verify/verifier_scanner.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -418,4 +418,3 @@
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/verify/verify_gc_effect.cpp drlvm-0703-new/vm/gc_gen/src/verify/verify_gc_effect.cpp
--- drlvm-0703-svn/vm/gc_gen/src/verify/verify_gc_effect.cpp 2007-07-03 23:22:24.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/verify/verify_gc_effect.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -528,4 +528,3 @@
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/verify/verify_gc_effect.h drlvm-0703-new/vm/gc_gen/src/verify/verify_gc_effect.h
--- drlvm-0703-svn/vm/gc_gen/src/verify/verify_gc_effect.h 2007-07-03 23:22:24.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/verify/verify_gc_effect.h 2007-07-04 01:42:45.000000000 +0800
@@ -97,4 +97,3 @@
#endif
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/verify/verify_live_heap.cpp drlvm-0703-new/vm/gc_gen/src/verify/verify_live_heap.cpp
--- drlvm-0703-svn/vm/gc_gen/src/verify/verify_live_heap.cpp 2007-07-03 23:22:24.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/verify/verify_live_heap.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -147,4 +147,3 @@
-
diff -ruN drlvm-0703-svn/vm/gc_gen/src/verify/verify_mutator_effect.cpp drlvm-0703-new/vm/gc_gen/src/verify/verify_mutator_effect.cpp
--- drlvm-0703-svn/vm/gc_gen/src/verify/verify_mutator_effect.cpp 2007-07-03 23:22:24.000000000 +0800
+++ drlvm-0703-new/vm/gc_gen/src/verify/verify_mutator_effect.cpp 2007-07-04 01:42:45.000000000 +0800
@@ -433,4 +433,3 @@
-