Index: vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java =================================================================== --- vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java (revision 493420) +++ vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java (working copy) @@ -22,15 +22,18 @@ import org.apache.harmony.drlvm.VMHelper; import org.vmmagic.unboxed.*; +import org.vmmagic.pragma.*; public class GCHelper { - static {System.loadLibrary("gc_gen");} + static { + System.loadLibrary("gc_gen"); + helperCallback(); + } public static final int TLS_GC_OFFSET = TLSGCOffset(); - public static Address alloc(int objSize, int allocationHandle) { - + public static Address alloc(int objSize, int allocationHandle) throws InlinePragma { Address TLS_BASE = VMHelper.getTlsBaseAddress(); Address allocator_addr = TLS_BASE.plus(TLS_GC_OFFSET); @@ -49,7 +52,46 @@ return VMHelper.newResolvedUsingAllocHandleAndSize(objSize, allocationHandle); } - + + private static final int ARRAY_LEN_OFFSET = 8; + private static final int GC_OBJECT_ALIGNMENT = 4; //TODO: EM64 or IPF could have 8! + + public static Address allocArray(int arrayLen, int elemSize, int allocationHandle) throws InlinePragma { + if (arrayLen >= 0) { + int firstElementOffset = ARRAY_LEN_OFFSET + (elemSize==8?8:4); + int size = firstElementOffset + elemSize*arrayLen; + size = (((size + (GC_OBJECT_ALIGNMENT - 1)) & (~(GC_OBJECT_ALIGNMENT - 1)))); + + Address arrayAddress = alloc(size, allocationHandle); //never null! + arrayAddress.store(arrayLen, Offset.fromInt(ARRAY_LEN_OFFSET)); + return arrayAddress; + } + return VMHelper.newVectorUsingAllocHandle(arrayLen, elemSize, allocationHandle); + } + + /** NOS (nursery object space) is higher in address than other spaces. + The boundary currently is produced in GC initialization. It can + be a constant in future. + */ + + //public static final int NOS_BOUNDARY = getNosBoundary(); + public static final int NOS_BOUNDARY = getNosBoundary(); + public static boolean GEN_MODE = getGenMode(); + + public static void write_barrier_slot_rem(Address p_objBase, Address p_objSlot, Address p_source) throws InlinePragma { + + /* If the slot is in NOS or the target is not in NOS, we simply return*/ + if(p_objSlot.toInt() >= NOS_BOUNDARY || p_source.toInt() < NOS_BOUNDARY || !GEN_MODE) { + p_objSlot.store(p_source); + return; + } + + VMHelper.writeBarrier(p_objBase, p_objSlot, p_source); + } + + private static native int helperCallback(); + private static native boolean getGenMode(); + private static native int getNosBoundary(); private static native int TLSGCOffset(); } Index: vm/gc_gen/src/common/fix_repointed_refs.h =================================================================== --- vm/gc_gen/src/common/fix_repointed_refs.h (revision 0) +++ vm/gc_gen/src/common/fix_repointed_refs.h (revision 0) @@ -0,0 +1,119 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
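For reference, the write_barrier_slot_rem helper added to GCHelper above relies on NOS (the nursery) sitting at the highest addresses of the heap, so one unsigned comparison against NOS_BOUNDARY classifies both the written slot and the stored target. The C++ sketch below mirrors that filter under stated assumptions; nos_boundary, gen_mode and remember_slot are illustrative stand-ins, not the real gc_gen symbols.

#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for the real GC globals (assumed values/names).
static uintptr_t nos_boundary = 0x40000000;   // NOS occupies [nos_boundary, heap end)
static bool      gen_mode     = true;         // generational collection enabled?

static void remember_slot(void** slot)        // stub for the remembered-set record
{
    printf("remembered slot %p\n", (void*)slot);
}

// Sketch of the write_barrier_slot_rem filter: only a store that makes an
// old-space object point into NOS needs to be remembered; every other store
// just writes the slot directly.
static void write_barrier_slot_rem(void** slot, void* target)
{
    bool slot_in_nos   = (uintptr_t)slot   >= nos_boundary;
    bool target_in_nos = (uintptr_t)target >= nos_boundary;

    if (!gen_mode || slot_in_nos || !target_in_nos) {
        *slot = target;            // fast path, no barrier work
        return;
    }

    remember_slot(slot);           // old-to-young pointer: record it
    *slot = target;
}

int main()
{
    void* slot = nullptr;
    void* young_obj = (void*)(nos_boundary + 0x100);   // fabricated address, never dereferenced
    write_barrier_slot_rem(&slot, young_obj);
    return 0;
}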
+ */ + +/** + * @author Xiao-Feng Li, 2006/12/12 + */ + +#ifndef _FIX_REPOINTED_REFS_H_ +#define _FIX_REPOINTED_REFS_H_ + +#include "gc_common.h" +extern Boolean IS_MOVE_COMPACT; + +inline void slot_fix(Partial_Reveal_Object** p_ref) +{ + Partial_Reveal_Object* p_obj = *p_ref; + if(!p_obj) return; + + if(IS_MOVE_COMPACT){ + if(obj_is_moved(p_obj)) + *p_ref = obj_get_fw_in_table(p_obj); + }else{ + if(obj_is_fw_in_oi(p_obj) && obj_is_moved(p_obj)){ + /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens. + * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time. + * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted, + * for whose which can be scanned in MOS & NOS must have been set fw bit in oi. + */ + assert((unsigned int)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS); + *p_ref = obj_get_fw_in_oi(p_obj); + } + } + + return; +} + +inline void object_fix_ref_slots(Partial_Reveal_Object* p_obj) +{ + if( !object_has_ref_field(p_obj) ) return; + + /* scan array object */ + if (object_is_array(p_obj)) { + Partial_Reveal_Object* array = p_obj; + assert(!obj_is_primitive_array(array)); + + int32 array_length = vector_get_length((Vector_Handle) array); + for (int i = 0; i < array_length; i++) { + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i); + slot_fix(p_ref); + } + return; + } + + /* scan non-array object */ + int *offset_scanner = init_object_scanner(p_obj); + while (true) { + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj); + if (p_ref == NULL) break; /* terminating ref slot */ + + slot_fix(p_ref); + offset_scanner = offset_next_ref(offset_scanner); + } + + return; +} + +inline void block_fix_ref_after_copying(Block_Header* curr_block) +{ + unsigned int cur_obj = (unsigned int)curr_block->base; + unsigned int block_end = (unsigned int)curr_block->free; + while(cur_obj < block_end){ + object_fix_ref_slots((Partial_Reveal_Object*)cur_obj); + cur_obj = (unsigned int)cur_obj + vm_object_size((Partial_Reveal_Object*)cur_obj); + } + return; +} + +inline void block_fix_ref_after_marking(Block_Header* curr_block) +{ + void* start_pos; + Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &start_pos); + + while( p_obj ){ + assert( obj_is_marked_in_vt(p_obj)); + obj_unmark_in_vt(p_obj); + object_fix_ref_slots(p_obj); + p_obj = block_get_next_marked_object(curr_block, &start_pos); + } + return; +} + +inline void block_fix_ref_after_repointing(Block_Header* curr_block) +{ + void* start_pos; + Partial_Reveal_Object* p_obj = block_get_first_marked_obj_after_prefetch(curr_block, &start_pos); + + while( p_obj ){ + assert( obj_is_marked_in_vt(p_obj)); + object_fix_ref_slots(p_obj); + p_obj = block_get_next_marked_obj_after_prefetch(curr_block, &start_pos); + } + return; +} + + +#endif /* #ifndef _FIX_REPOINTED_REFS_H_ */ Index: vm/gc_gen/src/common/gc_block.h =================================================================== --- vm/gc_gen/src/common/gc_block.h (revision 493420) +++ vm/gc_gen/src/common/gc_block.h (working copy) @@ -21,8 +21,10 @@ #ifndef _BLOCK_H_ #define _BLOCK_H_ -#include "../common/gc_common.h" +#include "gc_common.h" +#define SYSTEM_ALLOC_UNIT 0x10000 + #define GC_BLOCK_SHIFT_COUNT 15 #define GC_BLOCK_SIZE_BYTES (1 << GC_BLOCK_SHIFT_COUNT) @@ -33,17 +35,22 @@ BLOCK_USED = 0x4, BLOCK_IN_COMPACT = 0x8, BLOCK_COMPACTED 
= 0x10, - BLOCK_TARGET = 0x20 + BLOCK_TARGET = 0x20, + BLOCK_DEST = 0x40 }; typedef struct Block_Header { void* base; void* free; void* ceiling; + void* new_free; /* used only during compaction */ unsigned int block_idx; volatile unsigned int status; + volatile unsigned int dest_counter; + Partial_Reveal_Object* src; + Partial_Reveal_Object* next_src; Block_Header* next; - unsigned int mark_table[1]; /* entry num == MARKBIT_TABLE_SIZE_WORDS */ + unsigned int table[1]; /* entry num == OFFSET_TABLE_SIZE_WORDS */ }Block_Header; typedef union Block{ @@ -51,17 +58,22 @@ unsigned char raw_bytes[GC_BLOCK_SIZE_BYTES]; }Block; -#define GC_BLOCK_HEADER_VARS_SIZE_BYTES (unsigned int)&(((Block_Header*)0)->mark_table) +#define GC_BLOCK_HEADER_VARS_SIZE_BYTES (unsigned int)&(((Block_Header*)0)->table) -/* BlockSize - MarkbitTable*32 = HeaderVars + MarkbitTable - => MarkbitTable = (BlockSize - HeaderVars)/33 */ -#define MARKBIT_TABLE_COMPUTE_DIVISOR 33 -/* +1 to round up*/ -#define MARKBIT_TABLE_COMPUTED_SIZE_BYTE ((GC_BLOCK_SIZE_BYTES-GC_BLOCK_HEADER_VARS_SIZE_BYTES)/MARKBIT_TABLE_COMPUTE_DIVISOR + 1) -#define MARKBIT_TABLE_SIZE_BYTES ((MARKBIT_TABLE_COMPUTED_SIZE_BYTE + MASK_OF_BYTES_PER_WORD)&~MASK_OF_BYTES_PER_WORD) -#define MARKBIT_TABLE_SIZE_WORDS (MARKBIT_TABLE_SIZE_BYTES >> BIT_SHIFT_TO_BYTES_PER_WORD) +#define SECTOR_SIZE_SHIFT_COUNT 8 +#define SECTOR_SIZE_BYTES (1 << SECTOR_SIZE_SHIFT_COUNT) +#define SECTOR_SIZE_WORDS (SECTOR_SIZE_BYTES >> BIT_SHIFT_TO_BYTES_PER_WORD) +/* one offset_table word maps to one SECTOR_SIZE_WORDS sector */ -#define GC_BLOCK_HEADER_SIZE_BYTES (MARKBIT_TABLE_SIZE_BYTES + GC_BLOCK_HEADER_VARS_SIZE_BYTES) +/* BlockSize - OffsetTableSize*SECTOR_SIZE_WORDS = HeaderVarsSize + OffsetTableSize + => OffsetTableSize = (BlockSize - HeaderVars)/(SECTOR_SIZE_WORDS+1) */ +#define OFFSET_TABLE_COMPUTE_DIVISOR (SECTOR_SIZE_WORDS + 1) +#define OFFSET_TABLE_COMPUTED_SIZE_BYTE ((GC_BLOCK_SIZE_BYTES-GC_BLOCK_HEADER_VARS_SIZE_BYTES)/OFFSET_TABLE_COMPUTE_DIVISOR + 1) +#define OFFSET_TABLE_SIZE_BYTES ((OFFSET_TABLE_COMPUTED_SIZE_BYTE + MASK_OF_BYTES_PER_WORD)&~MASK_OF_BYTES_PER_WORD) +#define OFFSET_TABLE_SIZE_WORDS (OFFSET_TABLE_SIZE_BYTES >> BIT_SHIFT_TO_BYTES_PER_WORD) +#define OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) (ADDRESS_OFFSET_IN_BLOCK_BODY(p_obj) >> SECTOR_SIZE_SHIFT_COUNT) + +#define GC_BLOCK_HEADER_SIZE_BYTES (OFFSET_TABLE_SIZE_BYTES + GC_BLOCK_HEADER_VARS_SIZE_BYTES) #define GC_BLOCK_BODY_SIZE_BYTES (GC_BLOCK_SIZE_BYTES - GC_BLOCK_HEADER_SIZE_BYTES) #define GC_BLOCK_BODY(block) ((void*)((unsigned int)(block) + GC_BLOCK_HEADER_SIZE_BYTES)) #define GC_BLOCK_END(block) ((void*)((unsigned int)(block) + GC_BLOCK_SIZE_BYTES)) @@ -75,122 +87,168 @@ #define ADDRESS_OFFSET_TO_BLOCK_HEADER(addr) ((unsigned int)((unsigned int)addr&GC_BLOCK_LOW_MASK)) #define ADDRESS_OFFSET_IN_BLOCK_BODY(addr) ((unsigned int)(ADDRESS_OFFSET_TO_BLOCK_HEADER(addr)- GC_BLOCK_HEADER_SIZE_BYTES)) -#define OBJECT_BIT_INDEX_TO_MARKBIT_TABLE(p_obj) (ADDRESS_OFFSET_IN_BLOCK_BODY(p_obj) >> 2) -#define OBJECT_WORD_INDEX_TO_MARKBIT_TABLE(p_obj) (OBJECT_BIT_INDEX_TO_MARKBIT_TABLE(p_obj) >> BIT_SHIFT_TO_BITS_PER_WORD) -#define OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj) (OBJECT_BIT_INDEX_TO_MARKBIT_TABLE(p_obj) & BIT_MASK_TO_BITS_PER_WORD) +inline void block_init(Block_Header* block) +{ + block->free = (void*)((unsigned int)block + GC_BLOCK_HEADER_SIZE_BYTES); + block->ceiling = (void*)((unsigned int)block + GC_BLOCK_SIZE_BYTES); + block->base = block->free; + block->new_free = block->free; + block->status = BLOCK_FREE; + 
block->dest_counter = 0; + block->src = NULL; + block->next_src = NULL; +} -inline Partial_Reveal_Object* block_get_first_marked_object(Block_Header* block, unsigned int* mark_bit_idx) +inline Partial_Reveal_Object *obj_end(Partial_Reveal_Object *obj) { - unsigned int* mark_table = block->mark_table; - unsigned int* table_end = mark_table + MARKBIT_TABLE_SIZE_WORDS; + return (Partial_Reveal_Object *)((unsigned int)obj + vm_object_size(obj)); +} + +inline Partial_Reveal_Object *next_marked_obj_in_block(Partial_Reveal_Object *cur_obj, Partial_Reveal_Object *block_end) +{ + while(cur_obj < block_end){ + if( obj_is_marked_in_vt(cur_obj)) + return cur_obj; + cur_obj = obj_end(cur_obj); + } - unsigned j=0; - unsigned int k=0; - while( (mark_table + j) < table_end){ - unsigned int markbits = *(mark_table+j); - if(!markbits){ j++; continue; } - while(k<32){ - if( !(markbits& (1<mark_table; - unsigned int* table_end = mark_table + MARKBIT_TABLE_SIZE_WORDS; - unsigned int bit_index = *mark_bit_idx; + Partial_Reveal_Object* cur_obj = (Partial_Reveal_Object*)block->base; + Partial_Reveal_Object* block_end = (Partial_Reveal_Object*)block->free; + + Partial_Reveal_Object* first_marked_obj = next_marked_obj_in_block(cur_obj, block_end); + if(!first_marked_obj) + return NULL; - unsigned int j = bit_index >> BIT_SHIFT_TO_BITS_PER_WORD; - unsigned int k = (bit_index & BIT_MASK_TO_BITS_PER_WORD) + 1; - - while( (mark_table + j) < table_end){ - unsigned int markbits = *(mark_table+j); - if(!markbits){ j++; continue; } - while(k<32){ - if( !(markbits& (1<mark_table; - memset(mark_table, 0, MARKBIT_TABLE_SIZE_BYTES); - return; + Partial_Reveal_Object* cur_obj = *(Partial_Reveal_Object**)start_pos; + Partial_Reveal_Object* block_end = (Partial_Reveal_Object*)block->free; + + Partial_Reveal_Object* next_marked_obj = next_marked_obj_in_block(cur_obj, block_end); + if(!next_marked_obj) + return NULL; + + *start_pos = obj_end(next_marked_obj); + + return next_marked_obj; } -inline void block_clear_markbits(Block_Header* block) +inline Partial_Reveal_Object *block_get_first_marked_obj_prefetch_next(Block_Header *block, void **start_pos) { - unsigned int* mark_table = block->mark_table; - unsigned int* table_end = mark_table + MARKBIT_TABLE_SIZE_WORDS; + Partial_Reveal_Object *cur_obj = (Partial_Reveal_Object *)block->base; + Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->free; - unsigned j=0; - while( (mark_table + j) < table_end){ - unsigned int markbits = *(mark_table+j); - if(!markbits){ j++; continue; } - unsigned int k=0; - while(k<32){ - if( !(markbits& (1<= block_end) + return first_marked_obj; + + Partial_Reveal_Object *next_marked_obj = next_marked_obj_in_block(next_obj, block_end); + + if(next_marked_obj){ + if(next_marked_obj != next_obj) + set_obj_info(next_obj, (Obj_Info_Type)next_marked_obj); + } else { + set_obj_info(next_obj, 0); + } + + return first_marked_obj; +} - block_clear_mark_table(block); - return; +inline Partial_Reveal_Object *block_get_first_marked_obj_after_prefetch(Block_Header *block, void **start_pos) +{ + return block_get_first_marked_object(block, start_pos); } -typedef struct Blocked_Space { - /* <-- first couple of fields are overloadded as Space */ - void* heap_start; - void* heap_end; - unsigned int reserved_heap_size; - unsigned int committed_heap_size; - unsigned int num_collections; - GC* gc; - Boolean move_object; - Boolean (*mark_object_func)(Space* space, Partial_Reveal_Object* p_obj); - /* END of Space --> */ +inline Partial_Reveal_Object 
*block_get_next_marked_obj_prefetch_next(Block_Header *block, void **start_pos) +{ + Partial_Reveal_Object *cur_obj = *(Partial_Reveal_Object **)start_pos; + Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->free; - Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ + if(cur_obj >= block_end) + return NULL; - /* FIXME:: the block indices should be replaced with block header addresses */ - unsigned int first_block_idx; - unsigned int ceiling_block_idx; - volatile unsigned int free_block_idx; + Partial_Reveal_Object *cur_marked_obj; - unsigned int num_used_blocks; - unsigned int num_managed_blocks; - unsigned int num_total_blocks; - /* END of Blocked_Space --> */ -}Blocked_Space; + if(obj_is_marked_in_vt(cur_obj)) + cur_marked_obj = cur_obj; + else + cur_marked_obj = (Partial_Reveal_Object *)get_obj_info_raw(cur_obj); + + if(!cur_marked_obj) + return NULL; + + Partial_Reveal_Object *next_obj = obj_end(cur_marked_obj); + *start_pos = next_obj; + + if(next_obj >= block_end) + return cur_marked_obj; + + Partial_Reveal_Object *next_marked_obj = next_marked_obj_in_block(next_obj, block_end); + + if(next_marked_obj){ + if(next_marked_obj != next_obj) + set_obj_info(next_obj, (Obj_Info_Type)next_marked_obj); + } else { + set_obj_info(next_obj, 0); + } + + return cur_marked_obj; +} +inline Partial_Reveal_Object *block_get_next_marked_obj_after_prefetch(Block_Header *block, void **start_pos) +{ + Partial_Reveal_Object *cur_obj = *(Partial_Reveal_Object **)start_pos; + Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->free; + + if(cur_obj >= block_end) + return NULL; + + Partial_Reveal_Object *cur_marked_obj; + + if(obj_is_marked_in_vt(cur_obj) || obj_is_fw_in_oi(cur_obj)) + cur_marked_obj = cur_obj; + else + cur_marked_obj = (Partial_Reveal_Object *)get_obj_info_raw(cur_obj); + + if(!cur_marked_obj) + return NULL; + + Partial_Reveal_Object *next_obj = obj_end(cur_marked_obj); + *start_pos = next_obj; + + return cur_marked_obj; +} + +inline Partial_Reveal_Object * obj_get_fw_in_table(Partial_Reveal_Object *p_obj) +{ + /* only for inter-sector compaction */ + unsigned int index = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj); + Block_Header *curr_block = GC_BLOCK_HEADER(p_obj); + return (Partial_Reveal_Object *)(((unsigned int)p_obj) - curr_block->table[index]); +} + +inline void block_clear_table(Block_Header* block) +{ + unsigned int* table = block->table; + memset(table, 0, OFFSET_TABLE_SIZE_BYTES); + return; +} + + #endif //#ifndef _BLOCK_H_ Index: vm/gc_gen/src/common/gc_common.cpp =================================================================== --- vm/gc_gen/src/common/gc_common.cpp (revision 493420) +++ vm/gc_gen/src/common/gc_common.cpp (working copy) @@ -21,15 +21,26 @@ #include "gc_common.h" #include "gc_metadata.h" #include "../thread/mutator.h" -#include "../verify/verify_live_heap.h" #include "../finalizer_weakref/finalizer_weakref.h" +#include "../gen/gen.h" +#include "../common/space_tuner.h" -extern Boolean NEED_BARRIER; -extern unsigned int NUM_COLLECTORS; +unsigned int Cur_Mark_Bit = 0x1; +unsigned int Cur_Forward_Bit = 0x2; + extern Boolean GC_VERIFY; + extern unsigned int NOS_SIZE; -extern Boolean NOS_PARTIAL_FORWARD; +extern unsigned int MIN_NOS_SIZE; +extern Boolean FORCE_FULL_COMPACT; +extern Boolean MINOR_ALGORITHM; +extern Boolean MAJOR_ALGORITHM; + +extern unsigned int NUM_COLLECTORS; +extern unsigned int MINOR_COLLECTORS; +extern unsigned int MAJOR_COLLECTORS; + unsigned int HEAP_SIZE_DEFAULT = 256 * MB; unsigned int 
min_heap_size_bytes = 32 * MB; unsigned int max_heap_size_bytes = 0; @@ -105,7 +116,7 @@ return res; } -void gc_parse_options() +void gc_parse_options(GC* gc) { unsigned int max_heap_size = HEAP_SIZE_DEFAULT; unsigned int min_heap_size = min_heap_size_bytes; @@ -138,62 +149,127 @@ NOS_SIZE = get_size_property("gc.nos_size"); } + if (is_property_set("gc.min_nos_size", VM_PROPERTIES) == 1) { + MIN_NOS_SIZE = get_size_property("gc.min_nos_size"); + } + if (is_property_set("gc.num_collectors", VM_PROPERTIES) == 1) { unsigned int num = get_int_property("gc.num_collectors"); NUM_COLLECTORS = (num==0)? NUM_COLLECTORS:num; } - if (is_property_set("gc.gen_mode", VM_PROPERTIES) == 1) { - NEED_BARRIER = get_boolean_property("gc.gen_mode"); + /* GC algorithm decision */ + /* Step 1: */ + char* minor_algo = NULL; + char* major_algo = NULL; + + if (is_property_set("gc.minor_algorithm", VM_PROPERTIES) == 1) { + minor_algo = get_property("gc.minor_algorithm", VM_PROPERTIES); } + + if (is_property_set("gc.major_algorithm", VM_PROPERTIES) == 1) { + major_algo = get_property("gc.major_algorithm", VM_PROPERTIES); + } + + gc_decide_collection_algorithm((GC_Gen*)gc, minor_algo, major_algo); + gc->generate_barrier = gc_is_gen_mode(); + if( minor_algo) destroy_property_value(minor_algo); + if( major_algo) destroy_property_value(major_algo); + + /* Step 2: */ + /* NOTE:: this has to stay after above!! */ + if (is_property_set("gc.force_major_collect", VM_PROPERTIES) == 1) { + FORCE_FULL_COMPACT = get_boolean_property("gc.force_major_collect"); + if(FORCE_FULL_COMPACT){ + gc_disable_gen_mode(); + gc->generate_barrier = FALSE; + } + } + + /* Step 3: */ + /* NOTE:: this has to stay after above!! */ + if (is_property_set("gc.generate_barrier", VM_PROPERTIES) == 1) { + Boolean generate_barrier = get_boolean_property("gc.generate_barrier"); + gc->generate_barrier = generate_barrier || gc->generate_barrier; + } + if (is_property_set("gc.nos_partial_forward", VM_PROPERTIES) == 1) { NOS_PARTIAL_FORWARD = get_boolean_property("gc.nos_partial_forward"); } + + if (is_property_set("gc.minor_collectors", VM_PROPERTIES) == 1) { + MINOR_COLLECTORS = get_int_property("gc.minor_collectors"); + } + if (is_property_set("gc.major_collectors", VM_PROPERTIES) == 1) { + MAJOR_COLLECTORS = get_int_property("gc.major_collectors"); + } + + if (is_property_set("gc.ignore_finref", VM_PROPERTIES) == 1) { + IGNORE_FINREF = get_boolean_property("gc.ignore_finref"); + } + if (is_property_set("gc.verify", VM_PROPERTIES) == 1) { GC_VERIFY = get_boolean_property("gc.verify"); } - return; + return; } -struct GC_Gen; -void gc_gen_reclaim_heap(GC_Gen* gc); -unsigned int gc_decide_collection_kind(GC_Gen* gc, unsigned int gc_cause); +void gc_copy_interior_pointer_table_to_rootset(); void gc_reclaim_heap(GC* gc, unsigned int gc_cause) { - gc->num_collections++; + /* FIXME:: before mutators suspended, the ops below should be very careful + to avoid racing with mutators. */ + gc->num_collections++; - gc->collect_kind = gc_decide_collection_kind((GC_Gen*)gc, gc_cause); - //gc->collect_kind = MAJOR_COLLECTION; + gc_decide_collection_kind((GC_Gen*)gc, gc_cause); + + //For_LOS_extend! + gc_space_tune(gc, gc_cause); + + +#ifdef MARK_BIT_FLIPPING + if(gc->collect_kind == MINOR_COLLECTION) + mark_bit_flip(); +#endif + gc_metadata_verify(gc, TRUE); - gc_finalizer_weakref_metadata_verify((GC*)gc, TRUE); +#ifndef BUILD_IN_REFERENT + gc_finref_metadata_verify((GC*)gc, TRUE); +#endif /* Stop the threads and collect the roots. 
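The three numbered steps in gc_parse_options above are deliberately order-sensitive: the chosen algorithm seeds gen mode and the barrier flag, a forced major collection then clears both, and an explicit gc.generate_barrier can finally turn the barrier back on by itself. A condensed sketch of that precedence, with plain booleans standing in for the real property lookups:

#include <cstdio>

struct Options { bool gen_minor_algo, force_major, want_barrier; };

// Mirrors the ordering constraints noted in gc_parse_options (sketch only).
static void decide_barrier(const Options& o, bool& gen_mode, bool& barrier)
{
    gen_mode = o.gen_minor_algo;          // step 1: algorithm decides gen mode
    barrier  = gen_mode;                  //         gen mode implies a write barrier

    if (o.force_major) {                  // step 2: forced major collection
        gen_mode = false;                 //         switches gen mode and barrier off
        barrier  = false;
    }

    barrier = o.want_barrier || barrier;  // step 3: barrier can be forced on alone
}

int main()
{
    bool gen, bar;
    decide_barrier({true, true, true}, gen, bar);
    printf("gen=%d barrier=%d\n", gen, bar);   // gen=0 barrier=1
    return 0;
}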
*/ gc_reset_rootset(gc); vm_enumerate_root_set_all_threads(); + gc_copy_interior_pointer_table_to_rootset(); gc_set_rootset(gc); - gc_set_objects_with_finalizer(gc); - - if(verify_live_heap) gc_verify_heap(gc, TRUE); + /* this has to be done after all mutators are suspended */ + gc_reset_mutator_context(gc); - gc_gen_reclaim_heap((GC_Gen*)gc); + if(!IGNORE_FINREF ) + gc_set_obj_with_fin(gc); - if(verify_live_heap) gc_verify_heap(gc, FALSE); - + gc_gen_reclaim_heap((GC_Gen*)gc); + gc_metadata_verify(gc, FALSE); - gc_finalizer_weakref_metadata_verify(gc, FALSE); + + if(gc_is_gen_mode()) + gc_prepare_mutator_remset(gc); - gc_reset_finalizer_weakref_metadata(gc); - gc_reset_mutator_context(gc); + if(!IGNORE_FINREF ){ + gc_reset_finref_metadata(gc); + gc_activate_finref_threads((GC*)gc); + } + + //For_LOS_extend! + gc_space_tuner_reset(gc); - gc_activate_finalizer_weakref_threads((GC*)gc); vm_resume_threads_after(); - return; } Index: vm/gc_gen/src/common/gc_common.h =================================================================== --- vm/gc_gen/src/common/gc_common.h (revision 493420) +++ vm/gc_gen/src/common/gc_common.h (working copy) @@ -21,9 +21,6 @@ #ifndef _GC_COMMON_H_ #define _GC_COMMON_H_ -#include -#include - #include "port_vmem.h" #include "platform_lowlevel.h" @@ -37,17 +34,18 @@ #include "gc_for_class.h" #include "gc_platform.h" +#include "../gen/gc_for_barrier.h" + #define null 0 -#define MB 1048576 -#define KB 1024 +#define KB (1<<10) +#define MB (1<<20) #define BYTES_PER_WORD 4 #define BITS_PER_BYTE 8 #define BITS_PER_WORD 32 #define MASK_OF_BYTES_PER_WORD (BYTES_PER_WORD-1) /* 0x11 */ -#define WORD_SIZE_ROUND_UP(addr) (((unsigned int)addr+MASK_OF_BYTES_PER_WORD)& ~MASK_OF_BYTES_PER_WORD) #define BIT_SHIFT_TO_BYTES_PER_WORD 2 /* 2 */ #define BIT_SHIFT_TO_BITS_PER_BYTE 3 @@ -60,13 +58,28 @@ typedef void (*TaskType)(void*); -typedef std::map ObjectMap; +enum Collection_Algorithm{ + COLLECTION_ALGOR_NIL, + + /*minor nongen collection*/ + MINOR_NONGEN_FORWARD_POOL, + + /* minor gen collection */ + MINOR_GEN_FORWARD_POOL, + + /* major collection */ + MAJOR_COMPACT_SLIDE, + +}; enum Collection_Kind { MINOR_COLLECTION, - MAJOR_COLLECTION + MAJOR_COLLECTION, + FALLBACK_COLLECTION }; +extern Boolean IS_FALLBACK_COMPACTION; /* only for mark/fw bits debugging purpose */ + enum GC_CAUSE{ GC_CAUSE_NIL, GC_CAUSE_NOS_IS_FULL, @@ -74,28 +87,31 @@ GC_CAUSE_RUNTIME_FORCE_GC }; -inline unsigned int vm_object_size(Partial_Reveal_Object *obj) -{ - Boolean arrayp = object_is_array (obj); - if (arrayp) { - return vm_vector_size(obj_get_class_handle(obj), vector_get_length((Vector_Handle)obj)); - } else { - return nonarray_object_size(obj); - } -} - inline POINTER_SIZE_INT round_up_to_size(POINTER_SIZE_INT size, int block_size) { return (size + block_size - 1) & ~(block_size - 1); } inline POINTER_SIZE_INT round_down_to_size(POINTER_SIZE_INT size, int block_size) { return size & ~(block_size - 1); } -inline Boolean obj_is_in_gc_heap(Partial_Reveal_Object *p_obj) +/****************************************/ +/* Return a pointer to the ref field offset array. */ +inline int* object_ref_iterator_init(Partial_Reveal_Object *obj) { - return p_obj >= gc_heap_base_address() && p_obj < gc_heap_ceiling_address(); + GC_VTable_Info *gcvt = obj_get_gcvt(obj); + return gcvt->gc_ref_offset_array; } -/* Return a pointer to the ref field offset array. 
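Both the original init_object_scanner interface and the object_ref_iterator_* helpers in this header walk a zero-terminated array of byte offsets to an object's reference fields. A stand-alone sketch of that scanning pattern, using char* arithmetic and a hypothetical visit callback instead of the Partial_Reveal_Object types:

#include <cstdio>

// Visit every reference slot of an object whose layout is described by a
// zero-terminated array of field offsets (in bytes), as the gcvt stores it.
static void scan_ref_slots(void* obj, const int* ref_offsets,
                           void (*visit)(void** slot))
{
    for (const int* off = ref_offsets; *off != 0; ++off) {   // 0 marks the end
        void** slot = (void**)((char*)obj + *off);
        visit(slot);
    }
}

static void print_slot(void** slot) { printf("slot %p -> %p\n", (void*)slot, *slot); }

int main()
{
    char obj[32] = {0};              // stand-in object body
    int offsets[] = { 8, 16, 0 };    // two assumed reference fields, then the terminator
    scan_ref_slots(obj, offsets, print_slot);
    return 0;
}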
*/ +inline Partial_Reveal_Object** object_ref_iterator_get(int* iterator, Partial_Reveal_Object* obj) +{ + return (Partial_Reveal_Object**)((int)obj + *iterator); +} + +inline int* object_ref_iterator_next(int* iterator) +{ + return iterator+1; +} + +/* original design */ inline int *init_object_scanner (Partial_Reveal_Object *obj) { GC_VTable_Info *gcvt = obj_get_gcvt(obj); @@ -106,104 +122,151 @@ { return (*offset == 0)? NULL: (void*)((Byte*) obj + *offset); } inline int *offset_next_ref (int *offset) -{ return (int *)((Byte *)offset + sizeof (int)); } +{ return offset + 1; } -Boolean obj_is_forwarded_in_vt(Partial_Reveal_Object *obj); +/****************************************/ + inline Boolean obj_is_marked_in_vt(Partial_Reveal_Object *obj) -{ return ((POINTER_SIZE_INT)obj->vt_raw & MARK_BIT_MASK); } +{ return ((POINTER_SIZE_INT)obj_get_vt_raw(obj) & CONST_MARK_BIT); } -inline void obj_mark_in_vt(Partial_Reveal_Object *obj) -{ obj->vt_raw = (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw | MARK_BIT_MASK); - assert(!obj_is_forwarded_in_vt(obj)); +inline Boolean obj_mark_in_vt(Partial_Reveal_Object *obj) +{ + Partial_Reveal_VTable* vt = obj_get_vt_raw(obj); + if((unsigned int)vt & CONST_MARK_BIT) return FALSE; + obj_set_vt(obj, (unsigned int)vt | CONST_MARK_BIT); + return TRUE; } inline void obj_unmark_in_vt(Partial_Reveal_Object *obj) -{ - assert(!obj_is_forwarded_in_vt(obj)); - assert(obj_is_marked_in_vt(obj)); - obj->vt_raw = (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw & ~MARK_BIT_MASK); +{ + Partial_Reveal_VTable* vt = obj_get_vt_raw(obj); + obj_set_vt(obj, (unsigned int)vt & ~CONST_MARK_BIT); } -inline void obj_set_forward_in_vt(Partial_Reveal_Object *obj) -{ - assert(!obj_is_marked_in_vt(obj)); - obj->vt_raw = (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw | FORWARDING_BIT_MASK); +inline Boolean obj_is_marked_or_fw_in_oi(Partial_Reveal_Object *obj) +{ return get_obj_info_raw(obj) & DUAL_MARKBITS; } + + +inline void obj_clear_dual_bits_in_oi(Partial_Reveal_Object *obj) +{ + Obj_Info_Type info = get_obj_info_raw(obj); + set_obj_info(obj, (unsigned int)info & DUAL_MARKBITS_MASK); } -inline Boolean obj_is_forwarded_in_vt(Partial_Reveal_Object *obj) -{ return (POINTER_SIZE_INT)obj->vt_raw & FORWARDING_BIT_MASK; } +/****************************************/ +#ifndef MARK_BIT_FLIPPING -inline void obj_clear_forward_in_vt(Partial_Reveal_Object *obj) +inline Partial_Reveal_Object *obj_get_fw_in_oi(Partial_Reveal_Object *obj) { - assert(obj_is_forwarded_in_vt(obj) && !obj_is_marked_in_vt(obj)); - obj->vt_raw = (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw & ~FORWARDING_BIT_MASK); + assert(get_obj_info_raw(obj) & CONST_FORWARD_BIT); + return (Partial_Reveal_Object*) (get_obj_info_raw(obj) & ~CONST_FORWARD_BIT); } -inline void obj_set_forwarding_pointer_in_vt(Partial_Reveal_Object *obj, void *dest) -{ - assert(!obj_is_marked_in_vt(obj)); - obj->vt_raw = (Partial_Reveal_VTable *)((POINTER_SIZE_INT)dest | FORWARDING_BIT_MASK); +inline Boolean obj_is_fw_in_oi(Partial_Reveal_Object *obj) +{ return (get_obj_info_raw(obj) & CONST_FORWARD_BIT); } + +inline void obj_set_fw_in_oi(Partial_Reveal_Object *obj,void *dest) +{ + assert(!(get_obj_info_raw(obj) & CONST_FORWARD_BIT)); + set_obj_info(obj,(Obj_Info_Type)dest | CONST_FORWARD_BIT); } -inline Partial_Reveal_Object *obj_get_forwarding_pointer_in_vt(Partial_Reveal_Object *obj) -{ - assert(obj_is_forwarded_in_vt(obj) && !obj_is_marked_in_vt(obj)); - return (Partial_Reveal_Object *)obj_get_vt(obj); + +inline 
Boolean obj_is_marked_in_oi(Partial_Reveal_Object *obj) +{ return ( get_obj_info_raw(obj) & CONST_MARK_BIT ); } + +inline Boolean obj_mark_in_oi(Partial_Reveal_Object *obj) +{ + Obj_Info_Type info = get_obj_info_raw(obj); + if ( info & CONST_MARK_BIT ) return FALSE; + + set_obj_info(obj, info|CONST_MARK_BIT); + return TRUE; } -inline Partial_Reveal_Object *get_forwarding_pointer_in_obj_info(Partial_Reveal_Object *obj) -{ - assert(get_obj_info(obj) & FORWARDING_BIT_MASK); - return (Partial_Reveal_Object*) (get_obj_info(obj) & ~FORWARDING_BIT_MASK); +inline void obj_unmark_in_oi(Partial_Reveal_Object *obj) +{ + Obj_Info_Type info = get_obj_info_raw(obj); + info = info & ~CONST_MARK_BIT; + set_obj_info(obj, info); + return; } -inline Boolean obj_is_forwarded_in_obj_info(Partial_Reveal_Object *obj) +/* ********************************** */ +#else /* ifndef MARK_BIT_FLIPPING */ + +inline void mark_bit_flip() +{ + FLIP_FORWARD_BIT = FLIP_MARK_BIT; + FLIP_MARK_BIT ^= DUAL_MARKBITS; +} + +inline Partial_Reveal_Object *obj_get_fw_in_oi(Partial_Reveal_Object *obj) { - return (get_obj_info(obj) & FORWARDING_BIT_MASK); + assert(get_obj_info_raw(obj) & FLIP_FORWARD_BIT); + return (Partial_Reveal_Object*) get_obj_info(obj); } -inline void set_forwarding_pointer_in_obj_info(Partial_Reveal_Object *obj,void *dest) -{ set_obj_info(obj,(Obj_Info_Type)dest | FORWARDING_BIT_MASK); } +inline Boolean obj_is_fw_in_oi(Partial_Reveal_Object *obj) +{ return (get_obj_info_raw(obj) & FLIP_FORWARD_BIT); } -struct GC; -/* all Spaces inherit this Space structure */ -typedef struct Space{ - void* heap_start; - void* heap_end; - unsigned int reserved_heap_size; - unsigned int committed_heap_size; - unsigned int num_collections; - GC* gc; - Boolean move_object; - Boolean (*mark_object_func)(Space* space, Partial_Reveal_Object* p_obj); -}Space; +inline void obj_set_fw_in_oi(Partial_Reveal_Object *obj, void *dest) +{ + assert(IS_FALLBACK_COMPACTION || (!(get_obj_info_raw(obj) & FLIP_FORWARD_BIT))); + /* This assert should always exist except it's fall back compaction. In fall-back compaction + an object can be marked in last time minor collection, which is exactly this time's fw bit, + because the failed minor collection flipped the bits. */ -inline unsigned int space_committed_size(Space* space){ return space->committed_heap_size;} -inline void* space_heap_start(Space* space){ return space->heap_start; } -inline void* space_heap_end(Space* space){ return space->heap_end; } + /* It's important to clear the FLIP_FORWARD_BIT before collection ends, since it is the same as + next minor cycle's FLIP_MARK_BIT. And if next cycle is major, it is also confusing + as FLIP_FORWARD_BIT. (The bits are flipped only in minor collection). 
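The MARK_BIT_FLIPPING scheme here keeps two low bits in obj_info and swaps their roles at each minor collection, so last cycle's mark bit becomes this cycle's forward bit and stale marks never need an explicit clearing pass. A compact sketch of the flip and of marking against the current bit (simplified types, not the real accessors):

#include <cstdio>

typedef unsigned int Obj_Info_Type;

static const Obj_Info_Type DUAL_MARKBITS = 0x3;
static Obj_Info_Type cur_mark_bit    = 0x1;   // plays the role of FLIP_MARK_BIT
static Obj_Info_Type cur_forward_bit = 0x2;   // plays the role of FLIP_FORWARD_BIT

// Called at the start of a minor collection: the old mark bit is reused as
// the forward bit, and the other bit becomes the new mark bit.
static void mark_bit_flip()
{
    cur_forward_bit = cur_mark_bit;
    cur_mark_bit   ^= DUAL_MARKBITS;
}

// Returns true only for the first marker of an object in this cycle.
static bool obj_mark(Obj_Info_Type& info)
{
    if (info & cur_mark_bit) return false;           // already marked this cycle
    info = (info & ~DUAL_MARKBITS) | cur_mark_bit;   // drop stale bits, set current mark
    return true;
}

int main()
{
    Obj_Info_Type info = 0;
    obj_mark(info);                                // marked with 0x1
    mark_bit_flip();                               // 0x1 now means "forwarded"
    printf("stale mark still looks marked? %d\n", (int)((info & cur_mark_bit) != 0)); // 0
    return 0;
}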
*/ + set_obj_info(obj,(Obj_Info_Type)dest | FLIP_FORWARD_BIT); +} -inline Boolean address_belongs_to_space(void* addr, Space* space) +inline Boolean obj_mark_in_oi(Partial_Reveal_Object* p_obj) { - return (addr >= space_heap_start(space) && addr < space_heap_end(space)); + Obj_Info_Type info = get_obj_info_raw(p_obj); + assert((info & DUAL_MARKBITS ) != DUAL_MARKBITS); + + if( info & FLIP_MARK_BIT ) return FALSE; + + info = info & DUAL_MARKBITS_MASK; + set_obj_info(p_obj, info|FLIP_MARK_BIT); + return TRUE; } -inline Boolean obj_belongs_to_space(Partial_Reveal_Object *p_obj, Space* space) +inline Boolean obj_unmark_in_oi(Partial_Reveal_Object* p_obj) { - return address_belongs_to_space((Partial_Reveal_Object*)p_obj, space); + Obj_Info_Type info = get_obj_info_raw(p_obj); + info = info & DUAL_MARKBITS_MASK; + set_obj_info(p_obj, info); + return TRUE; } +inline Boolean obj_is_marked_in_oi(Partial_Reveal_Object* p_obj) +{ + Obj_Info_Type info = get_obj_info_raw(p_obj); + return (info & FLIP_MARK_BIT); +} + +#endif /* MARK_BIT_FLIPPING */ + /* all GCs inherit this GC structure */ struct Mutator; struct Collector; struct GC_Metadata; -struct Finalizer_Weakref_Metadata; +struct Finref_Metadata; struct Vector_Block; +struct Space_Tuner; + typedef struct GC{ void* heap_start; void* heap_end; unsigned int reserved_heap_size; unsigned int committed_heap_size; unsigned int num_collections; + int64 time_collections; + float survive_ratio; /* mutation related info */ Mutator *mutator_list; @@ -217,19 +280,29 @@ /* metadata is the pool for rootset, tracestack, etc. */ GC_Metadata* metadata; - Finalizer_Weakref_Metadata *finalizer_weakref_metadata; + Finref_Metadata *finref_metadata; + unsigned int collect_kind; /* MAJOR or MINOR */ + unsigned int last_collect_kind; + Boolean collect_result; /* succeed or fail */ + + Boolean generate_barrier; + /* FIXME:: this is wrong! 
root_set belongs to mutator */ Vector_Block* root_set; - /* mem info */ - apr_pool_t *aux_pool; - port_vmem_t *allocated_memory; + //For_LOS_extend + Space_Tuner* tuner; }GC; -void mark_scan_heap(Collector* collector); +void mark_scan_pool(Collector* collector); +inline void mark_scan_heap(Collector* collector) +{ + mark_scan_pool(collector); +} + inline void* gc_heap_base(GC* gc){ return gc->heap_start; } inline void* gc_heap_ceiling(GC* gc){ return gc->heap_end; } inline Boolean address_belongs_to_gc_heap(void* addr, GC* gc) @@ -237,7 +310,37 @@ return (addr >= gc_heap_base(gc) && addr < gc_heap_ceiling(gc)); } -void gc_parse_options(); +void gc_parse_options(GC* gc); void gc_reclaim_heap(GC* gc, unsigned int gc_cause); +/* generational GC related */ + +extern Boolean NOS_PARTIAL_FORWARD; + +//#define STATIC_NOS_MAPPING + +#ifdef STATIC_NOS_MAPPING + + //#define NOS_BOUNDARY ((void*)0x2ea20000) //this is for 512M + #define NOS_BOUNDARY ((void*)0x40000000) //this is for 256M + + #define nos_boundary NOS_BOUNDARY + +#else /* STATIC_NOS_MAPPING */ + + extern void* nos_boundary; + +#endif /* STATIC_NOS_MAPPING */ + +inline Boolean addr_belongs_to_nos(void* addr) +{ return addr >= nos_boundary; } + +inline Boolean obj_belongs_to_nos(Partial_Reveal_Object* p_obj) +{ return addr_belongs_to_nos(p_obj); } + +extern void* los_boundary; + +inline Boolean obj_is_moved(Partial_Reveal_Object* p_obj) +{ return p_obj >= los_boundary; } + #endif //_GC_COMMON_H_ Index: vm/gc_gen/src/common/gc_for_class.cpp =================================================================== --- vm/gc_gen/src/common/gc_for_class.cpp (revision 493420) +++ vm/gc_gen/src/common/gc_for_class.cpp (working copy) @@ -19,7 +19,7 @@ */ #include "gc_common.h" -#include "../finalizer_weakref/finalizer_weakref_metadata.h" +#include "../finalizer_weakref/finalizer_weakref.h" /* Setter functions for the gc class property field. 
*/ void gc_set_prop_alignment_mask (GC_VTable_Info *gcvt, unsigned int the_mask) @@ -42,9 +42,9 @@ { gcvt->gc_class_properties |= CL_PROP_FINALIZABLE_MASK; } -void gc_set_prop_reference(Partial_Reveal_VTable *vt, WeakReferenceType type) +void gc_set_prop_reference(GC_VTable_Info *gcvt, WeakReferenceType type) { - vtable_get_gcvt(vt)->gc_class_properties |= (unsigned int)type << CL_PROP_REFERENCE_TYPE_SHIFT; + gcvt->gc_class_properties |= (unsigned int)type << CL_PROP_REFERENCE_TYPE_SHIFT; } @@ -61,8 +61,9 @@ return 0; } -static int *build_ref_offset_array(Class_Handle ch, GC_VTable_Info *gcvt, WeakReferenceType type) +static unsigned int class_num_ref_fields(Class_Handle ch) { + WeakReferenceType is_reference = class_is_reference(ch); unsigned num_ref_fields = 0; unsigned num_fields = class_num_instance_fields_recursive(ch); @@ -74,8 +75,8 @@ } } - int skip = -1; // not skip any reference - if (type != NOT_REFERENCE) { +#ifndef BUILD_IN_REFERENT + if (is_reference != NOT_REFERENCE) { int offset = class_get_referent_offset(ch); unsigned int gc_referent_offset = get_gc_referent_offset(); if (gc_referent_offset == 0) { @@ -84,28 +85,26 @@ assert(gc_referent_offset == offset); } - skip = offset; // skip global referent offset num_ref_fields--; } - - if( num_ref_fields ) - gcvt->gc_object_has_ref_field = true; - else - return NULL; - - /* add a null-termination slot */ - unsigned int size = (num_ref_fields+1) * sizeof (unsigned int); +#endif - /* alloc from gcvt pool */ - int *result = (int*) STD_MALLOC(size); - assert(result); + return num_ref_fields; +} - int *new_ref_array = result; - for(idx = 0; idx < num_fields; idx++) { +static void build_ref_offset_array(Class_Handle ch, GC_VTable_Info *gcvt) +{ + unsigned num_fields = class_num_instance_fields_recursive(ch); + WeakReferenceType is_reference = class_is_reference(ch); + unsigned int gc_referent_offset = get_gc_referent_offset(); + + int *new_ref_array = gcvt->gc_ref_offset_array; + int *result = new_ref_array; + for(unsigned int idx = 0; idx < num_fields; idx++) { Field_Handle fh = class_get_instance_field_recursive(ch, idx); if(field_is_reference(fh)) { int offset = field_get_offset(fh); - if (offset == skip) continue; + if(is_reference && offset == gc_referent_offset) continue; *new_ref_array = field_get_offset(fh); new_ref_array++; } @@ -114,15 +113,12 @@ /* ref array is NULL-terminated */ *new_ref_array = 0; - gcvt->gc_number_of_ref_fields = num_ref_fields; - + unsigned int num_ref_fields = gcvt->gc_number_of_ref_fields; /* offsets were built with idx, may not be in order. Let's sort it anyway. FIXME: verify_live_heap depends on ordered offset array. 
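build_ref_offset_array above fills the gcvt's inline offset array, skips the referent field when references are handled by the finref code, appends the 0 terminator, and sorts the offsets because field-iteration order is not guaranteed to be ascending. A minimal sketch of that build step over a hypothetical list of raw field offsets:

#include <cstdio>
#include <cstdlib>

static int intcompare(const void* a, const void* b)
{
    return *(const int*)a - *(const int*)b;
}

// Copy the reference-field offsets (skipping 'referent_offset'), 0-terminate
// the array, and sort it ascending, as build_ref_offset_array does.
static unsigned build_ref_offsets(const int* raw, unsigned raw_num,
                                  int referent_offset, int* out)
{
    unsigned n = 0;
    for (unsigned i = 0; i < raw_num; i++) {
        if (raw[i] == referent_offset) continue;   // referent handled separately
        out[n++] = raw[i];
    }
    out[n] = 0;                                    // terminator expected by the scanners
    qsort(out, n, sizeof(int), intcompare);
    return n;
}

int main()
{
    int raw[] = { 24, 8, 16 };     // assumed raw field offsets, 16 is the referent
    int offsets[4];
    unsigned n = build_ref_offsets(raw, 3, 16, offsets);
    for (unsigned i = 0; i <= n; i++) printf("%d ", offsets[i]);   // prints: 8 24 0
    printf("\n");
    return 0;
}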
*/ - qsort(result, num_ref_fields, sizeof(*result), intcompare); - - gcvt->gc_ref_offset_array = result; + qsort(gcvt->gc_ref_offset_array, num_ref_fields, sizeof(int), intcompare); - return new_ref_array; + return; } void gc_class_prepared (Class_Handle ch, VTable_Handle vth) @@ -132,48 +128,70 @@ assert(vth); Partial_Reveal_VTable *vt = (Partial_Reveal_VTable *)vth; + + unsigned int num_ref_fields = class_num_ref_fields(ch); + unsigned int gcvt_size = sizeof(GC_VTable_Info); + if(num_ref_fields){ + gcvt_size += num_ref_fields * sizeof(unsigned int); + } - /* FIXME: gcvts are too random is memory */ - gcvt = (GC_VTable_Info *) STD_MALLOC(sizeof(GC_VTable_Info)); + gcvt_size = (gcvt_size + GCVT_ALIGN_MASK) & ~GCVT_ALIGN_MASK; + gcvt = (GC_VTable_Info*) malloc(gcvt_size); assert(gcvt); - vtable_set_gcvt(vt, gcvt); - memset((void *)gcvt, 0, sizeof(GC_VTable_Info)); + assert(!((unsigned int)gcvt % GCVT_ALIGNMENT)); + + memset((void *)gcvt, 0, gcvt_size); gcvt->gc_clss = ch; gcvt->gc_class_properties = 0; - gcvt->gc_object_has_ref_field = false; - gc_set_prop_alignment_mask(gcvt, class_get_alignment(ch)); + if(num_ref_fields){ + gcvt->gc_number_of_ref_fields = num_ref_fields; + /* Build the offset array */ + build_ref_offset_array(ch, gcvt); + } + if(class_is_array(ch)) { Class_Handle array_element_class = class_get_array_element_class(ch); gc_set_prop_array(gcvt); - gcvt->gc_array_element_size = class_element_size(ch); + + gcvt->array_elem_size = class_element_size(ch); unsigned int the_offset = vector_first_element_offset_unboxed(array_element_class); - gcvt->gc_array_first_element_offset = the_offset; + gcvt->array_first_elem_offset = the_offset; if (class_is_non_ref_array (ch)) { gc_set_prop_non_ref_array(gcvt); }else{ - gcvt->gc_object_has_ref_field = true; + gcvt->gc_number_of_ref_fields = 1; } } - + if (class_is_finalizable(ch)) { gc_set_prop_finalizable(gcvt); } WeakReferenceType type = class_is_reference(ch); - gc_set_prop_reference(vt, type); + gc_set_prop_reference(gcvt, type); unsigned int size = class_get_boxed_data_size(ch); gcvt->gc_allocated_size = size; - /* Build the offset array */ - build_ref_offset_array(ch, gcvt, type); - gcvt->gc_class_name = class_get_name(ch); assert (gcvt->gc_class_name); + /* these should be set last to use the gcvt pointer */ + if(gcvt->gc_number_of_ref_fields) + gcvt = (GC_VTable_Info*)((unsigned int)gcvt | GC_CLASS_FLAG_REFS); + + if(class_is_array(ch)) + gcvt = (GC_VTable_Info*)((unsigned int)gcvt | GC_CLASS_FLAG_ARRAY); + + if(class_is_finalizable(ch)) + gcvt = (GC_VTable_Info*)((unsigned int)gcvt | GC_CLASS_FLAG_FINALIZER); + + vtable_set_gcvt(vt, gcvt); + + return; } /* gc_class_prepared */ Index: vm/gc_gen/src/common/gc_for_class.h =================================================================== --- vm/gc_gen/src/common/gc_for_class.h (revision 493420) +++ vm/gc_gen/src/common/gc_for_class.h (working copy) @@ -22,32 +22,74 @@ #define _GC_TYPES_H_ #include "open/types.h" +#include "gc_platform.h" -#define FORWARDING_BIT_MASK 0x1 -#define MARK_BIT_MASK 0x2 +/* CONST_MARK_BIT is used in mark_scan in vt, no matter MARK_BIT_FLIPPING used or not. + MARK_BIT_FLIPPING is used in oi for marking and forwarding in non-gen nursery forwarding + (the marking is for those objects not in nos.) + For gen mode, we can use or not use MARK_BIT_FLIPPING, because we never mark any object not + in nos. And for live objects in nos, its bits are reset when forwared. So there is no need + to use a lower-performance bit flipping in gen mode. 
+ When MARK_BIT_FLIPPING is defined, all configurations are working. + If it is not defined, we can't run one configuration: non-gen-mode nos-trace-forwarding. We have + to run nos-mark-forwarding/copying which has an extra pass to reset the mark bit. + + Important invariants: + 1. We never put forwarding pointer in vt. + 2. Forwarding pointer only exists during collection. No obj has fw (or fw_bit) in oi during execution. + 3. During app execution, no obj has mark_bit set without MARK_BIT_FLIPPING defined. + +*/ +#define CONST_MARK_BIT 0x1 +#define DUAL_MARKBITS 0x3 +#define DUAL_MARKBITS_MASK (~DUAL_MARKBITS) + +#define MARK_BIT_FLIPPING + +#ifdef MARK_BIT_FLIPPING + + extern unsigned int Cur_Mark_Bit; + extern unsigned int Cur_Forward_Bit; + #define FLIP_MARK_BIT Cur_Mark_Bit + #define FLIP_FORWARD_BIT Cur_Forward_Bit + + #define FORWARD_BIT FLIP_FORWARD_BIT + +#else /* #ifdef MARK_BIT_FLIPPING*/ + + #define CONST_FORWARD_BIT 0x2 + #define FORWARD_BIT CONST_FORWARD_BIT + +#endif /* else MARK_BIT_FLIPPING */ + typedef void *Thread_Handle; + +#define GC_CLASS_FLAG_FINALIZER 1 +#define GC_CLASS_FLAG_ARRAY 2 +#define GC_CLASS_FLAG_REFS 4 +#define GC_CLASS_IS_REF_ARRAY (GC_CLASS_FLAG_ARRAY|GC_CLASS_FLAG_REFS) +#define GC_CLASS_FLAGS_MASK (~(GC_CLASS_IS_REF_ARRAY|GC_CLASS_FLAG_FINALIZER)) + +#define GC_OBJECT_ALIGN_MASK (GC_OBJECT_ALIGNMENT-1) +#define GCVT_ALIGNMENT 8 +#define GCVT_ALIGN_MASK (GCVT_ALIGNMENT-1) + typedef POINTER_SIZE_INT Obj_Info_Type; typedef struct GC_VTable_Info { - unsigned int gc_object_has_ref_field; + unsigned int gc_number_of_ref_fields; uint32 gc_class_properties; // This is the same as class_properties in VM's VTable. - unsigned int instance_data_size; - - // Offset from the top by CLASS_ALLOCATED_SIZE_OFFSET - // The number of bytes allocated for this object. It is the same as - // instance_data_size with the constraint bit cleared. This includes - // the OBJECT_HEADER_SIZE as well as the OBJECT_VTABLE_POINTER_SIZE unsigned int gc_allocated_size; - unsigned int gc_array_element_size; + unsigned int array_elem_size; // This is the offset from the start of the object to the first element in the // array. It isn't a constant since we pad double words. - int gc_array_first_element_offset; + int array_first_elem_offset; // The GC needs access to the class name for debugging and for collecting information // about the allocation behavior of certain classes. Store the name of the class here. @@ -55,11 +97,8 @@ Class_Handle gc_clss; // This array holds an array of offsets to the pointer fields in - // an instance of this class, including the weak referent field. - // It would be nice if this - // was located immediately prior to the vtable, since that would - // eliminate a dereference. 
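Because the gcvt is allocated with 8-byte alignment, gc_class_prepared can fold the finalizer/array/refs properties into the three low bits of the stored gcvt pointer, and hot-path tests such as object_has_ref_field become a single mask of the raw pointer. A small sketch of that tagging, with a simplified GcvtInfo struct standing in for GC_VTable_Info:

#include <cstdio>
#include <cstdint>

enum { FLAG_FINALIZER = 1, FLAG_ARRAY = 2, FLAG_REFS = 4, FLAGS_MASK = 7 };

struct GcvtInfo { unsigned num_ref_fields; };      // simplified stand-in

// Tag the low bits of an 8-byte-aligned pointer with the class properties.
static GcvtInfo* gcvt_tag(GcvtInfo* gcvt, unsigned flags)
{
    return (GcvtInfo*)((uintptr_t)gcvt | flags);
}

static GcvtInfo* gcvt_untag(GcvtInfo* tagged)       // like vtable_get_gcvt
{
    return (GcvtInfo*)((uintptr_t)tagged & ~(uintptr_t)FLAGS_MASK);
}

static bool has_ref_fields(GcvtInfo* tagged)        // like object_has_ref_field
{
    return ((uintptr_t)tagged & FLAG_REFS) != 0;
}

int main()
{
    alignas(8) static GcvtInfo gcvt_storage = { 3 };   // alignment keeps low bits free

    GcvtInfo* tagged = gcvt_tag(&gcvt_storage, FLAG_REFS | FLAG_FINALIZER);
    printf("refs=%d array=%d fields=%u\n",
           (int)has_ref_fields(tagged),
           (int)(((uintptr_t)tagged & FLAG_ARRAY) != 0),
           gcvt_untag(tagged)->num_ref_fields);
    return 0;
}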
- int *gc_ref_offset_array; + // an instance of this class, including or not the weak referent field depending on compilation option + int gc_ref_offset_array[1]; } GC_VTable_Info; @@ -72,33 +111,60 @@ Obj_Info_Type obj_info; } Partial_Reveal_Object; +typedef struct Partial_Reveal_Array { + Partial_Reveal_VTable *vt_raw; + Obj_Info_Type obj_info; + unsigned int array_len; +} Partial_Reveal_Array; + +inline Obj_Info_Type get_obj_info_raw(Partial_Reveal_Object *obj) +{ assert(obj); return obj->obj_info; } + +#ifndef MARK_BIT_FLIPPING + inline Obj_Info_Type get_obj_info(Partial_Reveal_Object *obj) -{ return obj->obj_info; } +{ assert(obj); return obj->obj_info & ~CONST_MARK_BIT; } +#else + +inline Obj_Info_Type get_obj_info(Partial_Reveal_Object *obj) +{ assert(obj); return obj->obj_info & DUAL_MARKBITS_MASK; } + +#endif /* MARK_BIT_FLIPPING */ + inline void set_obj_info(Partial_Reveal_Object *obj, Obj_Info_Type new_obj_info) -{ obj->obj_info = new_obj_info; } +{ assert(obj); obj->obj_info = new_obj_info; } inline Obj_Info_Type *get_obj_info_addr(Partial_Reveal_Object *obj) -{ return &obj->obj_info; } +{ assert(obj); return &obj->obj_info; } -inline Partial_Reveal_VTable *obj_get_vtraw(Partial_Reveal_Object *obj) -{ return obj->vt_raw; } +inline Partial_Reveal_VTable *obj_get_vt_raw(Partial_Reveal_Object *obj) +{ assert(obj && obj->vt_raw); return obj->vt_raw; } -inline Partial_Reveal_VTable **obj_get_vtraw_addr(Partial_Reveal_Object *obj) -{ return &obj->vt_raw; } +inline Partial_Reveal_VTable **obj_get_vt_addr(Partial_Reveal_Object *obj) +{ assert(obj && obj->vt_raw); return &obj->vt_raw; } inline Partial_Reveal_VTable *obj_get_vt(Partial_Reveal_Object *obj) -{ return (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw & ~(FORWARDING_BIT_MASK | MARK_BIT_MASK)); } +{ assert(obj && obj->vt_raw); return (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw & ~CONST_MARK_BIT); } inline void obj_set_vt(Partial_Reveal_Object *obj, Allocation_Handle ah) -{ obj->vt_raw = (Partial_Reveal_VTable *)ah; } +{ assert(obj && ah); obj->vt_raw = (Partial_Reveal_VTable *)ah; } +inline GC_VTable_Info *vtable_get_gcvt_raw(Partial_Reveal_VTable *vt) +{ assert(vt && vt->gcvt); return vt->gcvt; } + inline GC_VTable_Info *vtable_get_gcvt(Partial_Reveal_VTable *vt) -{ return vt->gcvt; } +{ assert(vt && vt->gcvt); return (GC_VTable_Info*)((unsigned int)vt->gcvt & GC_CLASS_FLAGS_MASK); } inline void vtable_set_gcvt(Partial_Reveal_VTable *vt, GC_VTable_Info *new_gcvt) -{ vt->gcvt = new_gcvt; } +{ assert(vt && new_gcvt); vt->gcvt = new_gcvt; } +inline GC_VTable_Info *obj_get_gcvt_raw(Partial_Reveal_Object *obj) +{ + Partial_Reveal_VTable *vt = obj_get_vt(obj); + return vtable_get_gcvt_raw(vt); +} + inline GC_VTable_Info *obj_get_gcvt(Partial_Reveal_Object *obj) { Partial_Reveal_VTable *vt = obj_get_vt(obj); @@ -107,27 +173,32 @@ inline Boolean object_has_ref_field(Partial_Reveal_Object *obj) { - GC_VTable_Info *gcvt = obj_get_gcvt(obj); - return gcvt->gc_object_has_ref_field; + GC_VTable_Info *gcvt = obj_get_gcvt_raw(obj); + return (unsigned int)gcvt & GC_CLASS_FLAG_REFS; } -inline Boolean object_ref_field_num(Partial_Reveal_Object *obj) +inline Boolean object_has_ref_field_before_scan(Partial_Reveal_Object *obj) { + Partial_Reveal_VTable *vt = obj_get_vt_raw(obj); + GC_VTable_Info *gcvt = vtable_get_gcvt_raw(vt); + return (unsigned int)gcvt & GC_CLASS_FLAG_REFS; +} + +inline unsigned int object_ref_field_num(Partial_Reveal_Object *obj) +{ GC_VTable_Info *gcvt = obj_get_gcvt(obj); return 
gcvt->gc_number_of_ref_fields; } inline Boolean object_is_array(Partial_Reveal_Object *obj) { - GC_VTable_Info *gcvt = obj_get_gcvt(obj); - return (gcvt->gc_class_properties & CL_PROP_ARRAY_MASK); - + GC_VTable_Info *gcvt = obj_get_gcvt_raw(obj); + return ((unsigned int)gcvt & GC_CLASS_FLAG_ARRAY); } inline Boolean obj_is_primitive_array(Partial_Reveal_Object *obj) { - GC_VTable_Info *gcvt = obj_get_gcvt(obj); - return (gcvt->gc_class_properties & CL_PROP_NON_REF_ARRAY_MASK); + return object_is_array(obj) && !object_has_ref_field(obj); } inline Class_Handle obj_get_class_handle(Partial_Reveal_Object *obj) @@ -142,6 +213,25 @@ return gcvt->gc_allocated_size; } +inline unsigned int array_first_element_offset(Partial_Reveal_Array *obj) +{ + GC_VTable_Info *gcvt = obj_get_gcvt((Partial_Reveal_Object*)obj); + return gcvt->array_first_elem_offset; +} + +inline unsigned int array_object_size(Partial_Reveal_Object *obj) +{ + GC_VTable_Info *gcvt = obj_get_gcvt(obj); + int array_len = ((Partial_Reveal_Array*)obj)->array_len; + return (gcvt->array_first_elem_offset + gcvt->array_elem_size * array_len + GC_OBJECT_ALIGN_MASK) & (~GC_OBJECT_ALIGN_MASK); +} + +inline unsigned int vm_object_size(Partial_Reveal_Object *obj) +{ + Boolean is_array = object_is_array(obj); + return is_array? array_object_size(obj) : nonarray_object_size(obj); +} + #define CL_PROP_REFERENCE_TYPE_SHIFT 16 #define CL_PROP_REFERENCE_TYPE_MASK 0x00030000 @@ -153,8 +243,8 @@ inline Boolean type_has_finalizer(Partial_Reveal_VTable *vt) { - GC_VTable_Info *gcvt = vtable_get_gcvt(vt); - return gcvt->gc_class_properties & CL_PROP_FINALIZABLE_MASK; + GC_VTable_Info *gcvt = vtable_get_gcvt_raw(vt); + return (unsigned int)gcvt & GC_CLASS_FLAG_FINALIZER; } #endif //#ifndef _GC_TYPES_H_ Index: vm/gc_gen/src/common/gc_for_vm.cpp =================================================================== --- vm/gc_gen/src/common/gc_for_vm.cpp (revision 493420) +++ vm/gc_gen/src/common/gc_for_vm.cpp (working copy) @@ -31,21 +31,27 @@ void gc_tls_init(); +Boolean gc_requires_barriers() +{ return p_global_gc->generate_barrier; } + void gc_init() -{ - gc_parse_options(); - +{ assert(p_global_gc == NULL); GC* gc = (GC*)STD_MALLOC(sizeof(GC_Gen)); assert(gc); memset(gc, 0, sizeof(GC)); p_global_gc = gc; + + gc_parse_options(gc); + gc_tls_init(); gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes); gc_metadata_initialize(gc); /* root set and mark stack */ - gc_finalizer_weakref_metadata_initialize(gc); +#ifndef BUILD_IN_REFERENT + gc_finref_metadata_initialize(gc); +#endif collector_initialize(gc); gc_init_heap_verification(gc); @@ -57,7 +63,9 @@ GC* gc = p_global_gc; gc_gen_destruct((GC_Gen*)gc); gc_metadata_destruct(gc); /* root set and mark stack */ - gc_finalizer_weakref_metadata_destruct(gc); +#ifndef BUILD_IN_REFERENT + gc_finref_metadata_destruct(gc); +#endif collector_destruct(gc); if( verify_live_heap ){ @@ -73,10 +81,15 @@ void gc_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) { Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)ref; - if (*p_ref == NULL) return; - assert( !obj_is_marked_in_vt(*p_ref)); - assert( !obj_is_forwarded_in_vt(*p_ref) && !obj_is_forwarded_in_obj_info(*p_ref)); - assert( obj_is_in_gc_heap(*p_ref)); + Partial_Reveal_Object* p_obj = *p_ref; + if (p_obj == NULL) return; + assert( !obj_is_marked_in_vt(p_obj)); + /* for Minor_collection, it's possible for p_obj be forwarded in non-gen mark-forward GC. + The forward bit is actually last cycle's mark bit. 
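array_object_size above, like allocArray on the Java side, derives an array's footprint as the first-element offset plus element size times length, rounded up to the object alignment. A worked sketch of that arithmetic with illustrative numbers (8-byte header before the length word, 4-byte object alignment):

#include <cstdio>

// Round 'size' up to a power-of-two 'align'.
static unsigned align_up(unsigned size, unsigned align)
{
    return (size + align - 1) & ~(align - 1);
}

// Array footprint = offset of the first element + the elements, aligned.
static unsigned array_object_size(unsigned first_elem_offset,
                                  unsigned elem_size, unsigned len,
                                  unsigned obj_align)
{
    return align_up(first_elem_offset + elem_size * len, obj_align);
}

int main()
{
    // e.g. byte[10]: elements start at offset 8 + 4 = 12, so 12 + 10 = 22 -> 24
    printf("%u\n", array_object_size(12, 1, 10, 4));
    // e.g. long[3]: 8-byte elements start at offset 8 + 8 = 16, so 16 + 24 = 40
    printf("%u\n", array_object_size(16, 8, 3, 4));
    return 0;
}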
+ For Major collection, it's possible for p_obj be marked in last cycle. Since we don't + flip the bit for major collection, we may find it's marked there. + So we can't do assert about oi except we really want. */ + assert( address_belongs_to_gc_heap(p_obj, p_global_gc)); gc_rootset_add_entry(p_global_gc, p_ref); } @@ -116,7 +129,7 @@ /* java heap size.*/ int64 gc_total_memory() { - return (int64)((POINTER_SIZE_INT)gc_heap_ceiling(p_global_gc) - (POINTER_SIZE_INT)gc_heap_base(p_global_gc)); + return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc)); } void gc_vm_initialized() @@ -137,10 +150,14 @@ unsigned int gc_time_since_last_gc() { assert(0); return 0; } +//int32 gc_get_hashcode(Managed_Object_Handle p_object) +//{ return 0; } + void gc_finalize_on_exit() { - process_objects_with_finalizer_on_exit(p_global_gc); + if(!IGNORE_FINREF ) + put_all_fin_on_exit(p_global_gc); } /* for future use Index: vm/gc_gen/src/common/gc_metadata.cpp =================================================================== --- vm/gc_gen/src/common/gc_metadata.cpp (revision 493420) +++ vm/gc_gen/src/common/gc_metadata.cpp (working copy) @@ -19,17 +19,16 @@ */ #include "gc_metadata.h" -#include "../thread/mutator.h" -#include "../thread/collector.h" #include "interior_pointer.h" #include "../finalizer_weakref/finalizer_weakref.h" +#include "gc_block.h" -#define GC_METADATA_SIZE_BYTES 48*MB +#define GC_METADATA_SIZE_BYTES (1*MB) +#define GC_METADATA_EXTEND_SIZE_BYTES (1*MB) -#define METADATA_BLOCK_SIZE_BIT_SHIFT 12 -#define METADATA_BLOCK_SIZE_BYTES (1<> METADATA_BLOCK_SIZE_BIT_SHIFT; + unsigned int num_blocks = GC_METADATA_SIZE_BYTES/METADATA_BLOCK_SIZE_BYTES; for(i=0; i> 2; + unsigned num_tasks = num_blocks >> 1; gc_metadata.free_task_pool = sync_pool_create(); for(i=0; i> 1) + num_tasks; gc_metadata.free_set_pool = sync_pool_create(); /* initialize free rootset pool so that mutators can use them */ - for(; icollector_remset_pool); sync_pool_destruct(metadata->collector_repset_pool); - STD_FREE(metadata->heap_start); + for(unsigned int i=0; inum_alloc_segs; i++){ + assert(metadata->segments[i]); + STD_FREE(metadata->segments[i]); + } + gc->metadata = NULL; } +Vector_Block* gc_metadata_extend(Pool* pool) +{ + GC_Metadata *metadata = &gc_metadata; + lock(metadata->alloc_lock); + Vector_Block* block = pool_get_entry(pool); + if( block ){ + unlock(metadata->alloc_lock); + return block; + } + + unsigned int num_alloced = metadata->num_alloc_segs; + if(num_alloced == GC_METADATA_SEGMENT_NUM){ + printf("Run out GC metadata, please give it more segments!\n"); + exit(0); + } + unsigned int seg_size = GC_METADATA_EXTEND_SIZE_BYTES + METADATA_BLOCK_SIZE_BYTES; + void *new_segment = STD_MALLOC(seg_size); + memset(new_segment, 0, seg_size); + metadata->segments[num_alloced] = new_segment; + new_segment = (void*)round_up_to_size((unsigned int)new_segment, METADATA_BLOCK_SIZE_BYTES); + metadata->num_alloc_segs = num_alloced + 1; + + unsigned int num_blocks = GC_METADATA_EXTEND_SIZE_BYTES/METADATA_BLOCK_SIZE_BYTES; + + unsigned int i=0; + for(i=0; ialloc_lock); + + return block; +} + +extern Boolean IS_MOVE_COMPACT; + +static void gc_update_repointed_sets(GC* gc, Pool* pool) +{ + GC_Metadata* metadata = gc->metadata; + + /* NOTE:: this is destructive to the root sets. 
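gc_metadata_extend above grows the metadata area by allocating one extra block's worth of bytes, rounding the segment start up to the metadata block size, and carving the aligned region into fixed-size blocks for the free pool. A reduced sketch of that carve step; the block and segment sizes are example values, and printf stands in for vector_block_init and pool_put_entry:

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cstdint>

static uintptr_t round_up(uintptr_t addr, uintptr_t unit)
{
    return (addr + unit - 1) & ~(unit - 1);
}

int main()
{
    const size_t BLOCK  = 4096;                   // metadata block size (example)
    const size_t EXTEND = 16 * BLOCK;             // usable bytes to add per extension

    // Over-allocate by one block so the first block can be aligned up.
    void* segment = malloc(EXTEND + BLOCK);
    memset(segment, 0, EXTEND + BLOCK);

    uintptr_t start = round_up((uintptr_t)segment, BLOCK);
    for (size_t i = 0; i < EXTEND / BLOCK; i++) {
        void* block = (void*)(start + i * BLOCK); // real code inits the block and pools it
        printf("block %zu at %p\n", i, block);
    }

    free(segment);   // the real code keeps the raw segment pointer for later destruct
    return 0;
}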
*/ + pool_iterator_init(pool); + Vector_Block* root_set = pool_iterator_next(pool); + + while(root_set){ + unsigned int* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(root_set,iter); + + Partial_Reveal_Object* p_obj = *p_ref; + if(IS_MOVE_COMPACT){ + if(obj_is_moved(p_obj)) + *p_ref = obj_get_fw_in_table(p_obj); + } else { + if( // obj_is_fw_in_oi(p_obj) && //NOTE:: we removed the minor_copy algorithm at the moment, so we don't need this check + obj_is_moved(p_obj)){ + /* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens. + * Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time. + * In major collection condition obj_is_fw_in_oi(p_obj) can be omitted, + * for whose which can be scanned in MOS & NOS must have been set fw bit in oi. + */ + assert(address_belongs_to_gc_heap(obj_get_fw_in_oi(p_obj), gc)); + *p_ref = obj_get_fw_in_oi(p_obj); + } + } + } + root_set = pool_iterator_next(pool); + } + + return; +} + +void gc_fix_rootset(Collector* collector) +{ + GC* gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + /* generational MINOR_COLLECTION doesn't need rootset update, but need reset */ + if( gc->collect_kind != MINOR_COLLECTION ) /* MINOR but not forwarding */ + gc_update_repointed_sets(gc, metadata->gc_rootset_pool); + + gc_set_pool_clear(metadata->gc_rootset_pool); + +#ifndef BUILD_IN_REFERENT + gc_update_finref_repointed_refs(gc); +#endif + + update_rootset_interior_pointer(); + /* it was pointing to the last root_set entry in gc_rootset_pool (before rem_sets). */ + gc->root_set = NULL; + + return; +} + void gc_set_rootset(GC* gc) { GC_Metadata* metadata = gc->metadata; @@ -105,9 +222,15 @@ /* put back last rootset block */ pool_put_entry(gc_rootset_pool, gc->root_set); - gc->root_set = NULL; - if(!gc_requires_barriers()) return; + /* we only reset gc->root_set here for non gen mode, because we need it to remember the border + between root_set and rem_set in gc_rootset_pool for gen mode. This is useful when a minor + gen collection falls back to compaction, we can clear all the blocks in + gc_rootset_pool after the entry pointed by gc->root_set. So we clear this value + only after we know we are not going to fallback. 
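gc_update_repointed_sets above visits every recorded reference slot and, when its target has been moved, rewrites the slot with the forwarding address (taken from the offset table under move-compact, or from obj_info otherwise). A stripped-down sketch of that fix-up loop over a flat slot array, with a hypothetical std::map standing in for both forwarding lookups:

#include <cstdio>
#include <cstddef>
#include <map>

// Hypothetical forwarding table: old object address -> new address.
static std::map<void*, void*> forwarding;

static bool  obj_is_moved(void* obj) { return forwarding.count(obj) != 0; }
static void* obj_get_fw(void* obj)   { return forwarding[obj]; }

// Rewrite every slot whose target has been relocated (sketch of the root fix-up).
static void fix_slots(void** slots[], size_t num)
{
    for (size_t i = 0; i < num; i++) {
        void** p_ref = slots[i];
        void*  p_obj = *p_ref;
        if (p_obj && obj_is_moved(p_obj))
            *p_ref = obj_get_fw(p_obj);   // repoint the root at the new copy
    }
}

int main()
{
    int old_obj, new_obj;
    void* root = &old_obj;
    forwarding[&old_obj] = &new_obj;

    void** roots[] = { &root };
    fix_slots(roots, 1);
    printf("root now points to new copy? %d\n", (int)(root == &new_obj));   // 1
    return 0;
}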
*/ + // gc->root_set = NULL; + + if(!gc_is_gen_mode()) return; /* put back last remset block of each mutator */ Mutator *mutator = gc->mutator_list; @@ -115,7 +238,7 @@ pool_put_entry(mutator_remset_pool, mutator->rem_set); mutator->rem_set = NULL; mutator = mutator->next; - } + } /* put back last remset block of each collector (saved in last collection) */ unsigned int num_active_collectors = gc->num_active_collectors; @@ -128,7 +251,7 @@ collector->rem_set = NULL; } - if( gc->collect_kind == MAJOR_COLLECTION ){ + if( gc->collect_kind != MINOR_COLLECTION ){ /* all the remsets are useless now */ /* clean and put back mutator remsets */ root_set = pool_get_entry( mutator_remset_pool ); @@ -167,136 +290,36 @@ } -void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_ref) +void gc_reset_rootset(GC* gc) { - assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); - - Vector_Block* root_set = mutator->rem_set; - vector_block_add_entry(root_set, (unsigned int)p_ref); + assert(pool_is_empty(gc_metadata.gc_rootset_pool)); + assert(gc->root_set == NULL); + gc->root_set = free_set_pool_get_entry(&gc_metadata); - if( !vector_block_is_full(root_set)) return; - - pool_put_entry(gc_metadata.mutator_remset_pool, root_set); - mutator->rem_set = pool_get_entry(gc_metadata.free_set_pool); - assert(mutator->rem_set); -} + assert(vector_block_is_empty(gc->root_set)); + return; +} -void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref) +void gc_clear_remset(GC* gc) { -// assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); + assert(gc->root_set != NULL); - Vector_Block* root_set = collector->rep_set; - vector_block_add_entry(root_set, (unsigned int)p_ref); - - if( !vector_block_is_full(root_set)) return; - - pool_put_entry(gc_metadata.collector_repset_pool, root_set); - collector->rep_set = pool_get_entry(gc_metadata.free_set_pool); - assert(collector->rep_set); -} - -void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref) -{ - assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); - - Vector_Block* root_set = collector->rem_set; - vector_block_add_entry(root_set, (unsigned int)p_ref); - - if( !vector_block_is_full(root_set)) return; - - pool_put_entry(gc_metadata.collector_remset_pool, root_set); - collector->rem_set = pool_get_entry(gc_metadata.free_set_pool); - assert(collector->rem_set); -} - -void collector_tracestack_push(Collector* collector, void* p_task) -{ - /* we don't have assert as others because p_task is a p_obj for marking, - or a p_ref for trace forwarding. 
The latter can be a root set pointer */ - Vector_Block* trace_task = (Vector_Block*)collector->trace_stack; - vector_stack_push(trace_task, (unsigned int)p_task); - - if( !vector_stack_is_full(trace_task)) return; - - pool_put_entry(gc_metadata.mark_task_pool, trace_task); - collector->trace_stack = pool_get_entry(gc_metadata.free_task_pool); - assert(collector->trace_stack); -} - -void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref) -{ - assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address()); - - Vector_Block* root_set = gc->root_set; - vector_block_add_entry(root_set, (unsigned int)p_ref); - - if( !vector_block_is_full(root_set)) return; - - pool_put_entry(gc_metadata.gc_rootset_pool, root_set); - gc->root_set = pool_get_entry(gc_metadata.free_set_pool); - assert(gc->root_set); -} - - -static void gc_update_repointed_sets(GC* gc, Pool* pool) -{ - GC_Metadata* metadata = gc->metadata; - - /* NOTE:: this is destructive to the root sets. */ - Vector_Block* root_set = pool_get_entry(pool); - - while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); - while(!vector_block_iterator_end(root_set,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; - iter = vector_block_iterator_advance(root_set,iter); - - Partial_Reveal_Object* p_obj = *p_ref; - /* For repset, this check is unnecessary, since all slots are repointed; otherwise - they will not be recorded. For root set, it is possible to point to LOS or other - non-moved space. */ -#ifdef _DEBUG - if( pool != metadata->gc_rootset_pool) - assert(obj_is_forwarded_in_obj_info(p_obj)); - else -#endif - if(!obj_is_forwarded_in_obj_info(p_obj)) continue; - *p_ref = get_forwarding_pointer_in_obj_info(p_obj); - } - vector_block_clear(root_set); - pool_put_entry(metadata->free_set_pool, root_set); - root_set = pool_get_entry(pool); - } - - return; -} - -void gc_update_repointed_refs(Collector* collector) -{ - GC* gc = collector->gc; - GC_Metadata* metadata = gc->metadata; - - /* generational MINOR_COLLECTION doesn't need rootset update */ - if( !gc_requires_barriers() || gc->collect_kind == MAJOR_COLLECTION ){ - gc_update_repointed_sets(gc, metadata->gc_rootset_pool); - gc_update_repointed_sets(gc, metadata->collector_repset_pool); + Pool* pool = gc_metadata.gc_rootset_pool; + Vector_Block* rem_set = pool_get_entry(pool); + while(rem_set != gc->root_set){ + vector_block_clear(rem_set); + pool_put_entry(gc_metadata.free_set_pool, rem_set); + rem_set = pool_get_entry(pool); } - - gc_update_finalizer_weakref_repointed_refs(gc); - update_rootset_interior_pointer(); + + assert(rem_set == gc->root_set); + /* put back root set */ + pool_put_entry(pool, rem_set); return; -} +} -void gc_reset_rootset(GC* gc) -{ - assert(pool_is_empty(gc_metadata.gc_rootset_pool)); - gc->root_set = pool_get_entry(gc_metadata.free_set_pool); - - assert(vector_block_is_empty(gc->root_set)); - return; -} - +extern Boolean verify_live_heap; void gc_metadata_verify(GC* gc, Boolean is_before_gc) { GC_Metadata* metadata = gc->metadata; @@ -304,13 +327,18 @@ assert(pool_is_empty(metadata->collector_repset_pool)); assert(pool_is_empty(metadata->mark_task_pool)); - if(!is_before_gc || !gc_requires_barriers()) + if(!is_before_gc || !gc_is_gen_mode()) assert(pool_is_empty(metadata->mutator_remset_pool)); - if(!gc_requires_barriers()){ + if(!gc_is_gen_mode()){ /* FIXME:: even for gen gc, it should be empty if NOS is forwarding_all */ assert(pool_is_empty(metadata->collector_remset_pool)); } + + if(verify_live_heap ){ 
+ unsigned int free_pool_size = pool_size(metadata->free_set_pool); + printf("===========%s, free_pool_size = %d =============\n", is_before_gc?"before GC":"after GC", free_pool_size); + } return; } Index: vm/gc_gen/src/common/gc_metadata.h =================================================================== --- vm/gc_gen/src/common/gc_metadata.h (revision 493420) +++ vm/gc_gen/src/common/gc_metadata.h (working copy) @@ -23,11 +23,16 @@ #include "gc_common.h" #include "../utils/vector_block.h" #include "../utils/sync_pool.h" +#include "../thread/collector.h" +#include "../thread/mutator.h" -typedef struct GC_Metadata{ - void* heap_start; - void* heap_end; - +#define GC_METADATA_SEGMENT_NUM 128 + +typedef struct GC_Metadata{ + void *segments[GC_METADATA_SEGMENT_NUM]; /* address array of malloced segments for free pool */ + unsigned int num_alloc_segs; /* next available position in pool_segments array */ + SpinLock alloc_lock; + Pool* free_task_pool; /* list of free buffers for mark tasks */ Pool* mark_task_pool; /* list of mark tasks */ @@ -40,20 +45,131 @@ }GC_Metadata; +extern GC_Metadata gc_metadata; + void gc_metadata_initialize(GC* gc); void gc_metadata_destruct(GC* gc); void gc_metadata_verify(GC* gc, Boolean is_before_gc); void gc_set_rootset(GC* gc); void gc_reset_rootset(GC* gc); -void gc_update_repointed_refs(Collector* collector); +void gc_fix_rootset(Collector* collector); -void collector_tracestack_push(Collector* collector, void* p_task); +void gc_clear_remset(GC* gc); +inline void gc_task_pool_clear(Pool* task_pool) +{ + Vector_Block* task = pool_get_entry(task_pool); + while(task){ + vector_stack_clear(task); + pool_put_entry(gc_metadata.free_task_pool, task); + task = pool_get_entry(task_pool); + } + return; +} -void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_slot); -void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot); -void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_slot); +inline void gc_set_pool_clear(Pool* set_pool) +{ + Vector_Block* set = pool_get_entry(set_pool); + while(set){ + vector_block_clear(set); + pool_put_entry(gc_metadata.free_set_pool, set); + set = pool_get_entry(set_pool); + } + return; +} -void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot); +Vector_Block* gc_metadata_extend(Pool* pool); +inline Vector_Block *free_set_pool_get_entry(GC_Metadata *metadata) +{ + Vector_Block *block = pool_get_entry(metadata->free_set_pool); + + while(!block) + block = gc_metadata_extend(metadata->free_set_pool); + + assert(vector_block_is_empty(block)); + return block; +} + +inline Vector_Block *free_task_pool_get_entry(GC_Metadata *metadata) +{ + Vector_Block *block = pool_get_entry(metadata->free_task_pool); + + while(!block) + block = gc_metadata_extend(metadata->free_task_pool); + + assert(vector_stack_is_empty(block)); + return block; +} + +inline void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_ref) +{ + assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); + + Vector_Block* root_set = mutator->rem_set; + vector_block_add_entry(root_set, (unsigned int)p_ref); + + if( !vector_block_is_full(root_set)) return; + + pool_put_entry(gc_metadata.mutator_remset_pool, root_set); + mutator->rem_set = free_set_pool_get_entry(&gc_metadata); + assert(mutator->rem_set); +} + +inline void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref) +{ +// assert( p_ref >= gc_heap_base_address() && p_ref < 
gc_heap_ceiling_address()); + + Vector_Block* root_set = collector->rep_set; + vector_block_add_entry(root_set, (unsigned int)p_ref); + + if( !vector_block_is_full(root_set)) return; + + pool_put_entry(gc_metadata.collector_repset_pool, root_set); + collector->rep_set = free_set_pool_get_entry(&gc_metadata); + assert(collector->rep_set); +} + +inline void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref) +{ + //assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); + + Vector_Block* root_set = collector->rem_set; + vector_block_add_entry(root_set, (unsigned int)p_ref); + + if( !vector_block_is_full(root_set)) return; + + pool_put_entry(gc_metadata.collector_remset_pool, root_set); + collector->rem_set = free_set_pool_get_entry(&gc_metadata); + assert(collector->rem_set); +} + +inline void collector_tracestack_push(Collector* collector, void* p_task) +{ + /* we don't have assert as others because p_task is a p_obj for marking, + or a p_ref for trace forwarding. The latter can be a root set pointer */ + Vector_Block* trace_task = (Vector_Block*)collector->trace_stack; + vector_stack_push(trace_task, (unsigned int)p_task); + + if( !vector_stack_is_full(trace_task)) return; + + pool_put_entry(gc_metadata.mark_task_pool, trace_task); + collector->trace_stack = free_task_pool_get_entry(&gc_metadata); + assert(collector->trace_stack); +} + +inline void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref) +{ + assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address()); + + Vector_Block* root_set = gc->root_set; + vector_block_add_entry(root_set, (unsigned int)p_ref); + + if( !vector_block_is_full(root_set)) return; + + pool_put_entry(gc_metadata.gc_rootset_pool, root_set); + gc->root_set = free_set_pool_get_entry(&gc_metadata); + assert(gc->root_set); +} + #endif /* #ifndef _GC_METADATA_H_ */ Index: vm/gc_gen/src/common/gc_platform.h =================================================================== --- vm/gc_gen/src/common/gc_platform.h (revision 493420) +++ vm/gc_gen/src/common/gc_platform.h (working copy) @@ -21,12 +21,26 @@ #ifndef _GC_PLATFORM_H_ #define _GC_PLATFORM_H_ +#include +#include #include #include #include + +#ifndef _DEBUG + +//#define RELEASE_DEBUG + +#ifdef RELEASE_DEBUG +#undef assert +#define assert(x) do{ if(!(x)) __asm{int 3}}while(0) +#endif + +#endif //_DEBUG + #define USEC_PER_SEC INT64_C(1000000) #define VmThreadHandle void* @@ -49,19 +63,13 @@ } inline int vm_create_event(VmEventHandle* event) -{ - return hysem_create(event, 0, 1); -} +{ return hysem_create(event, 0, 1); } inline void vm_thread_yield() -{ - hythread_yield(); -} +{ hythread_yield(); } inline void* vm_thread_local() -{ - return hythread_self(); -} +{ return hythread_self(); } inline int vm_create_thread(int (*func)(void*), void *data) { @@ -80,35 +88,128 @@ inline uint32 atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t swap, - apr_uint32_t cmp) { - return (uint32)apr_atomic_cas32(mem, swap, cmp); + apr_uint32_t cmp) +{ return (uint32)apr_atomic_cas32(mem, swap, cmp); } + +inline uint32 atomic_inc32(volatile apr_uint32_t *mem) +{ return (uint32)apr_atomic_inc32(mem); } + +inline uint32 atomic_dec32(volatile apr_uint32_t *mem) +{ return (uint32)apr_atomic_dec32(mem); } + +inline uint32 atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val) +{ return (uint32)apr_atomic_add32(mem, val); } + +inline Boolean pool_create(apr_pool_t **newpool, apr_pool_t *parent) +{ return (Boolean)apr_pool_create(newpool, parent);} + +inline 
void pool_destroy(apr_pool_t *p) +{ apr_pool_destroy(p); } + +#ifndef _WIN32 +#include +#endif + +inline void *vm_map_mem(void* start, unsigned int size) +{ + void* address; +#ifdef _WIN32 + address = VirtualAlloc(start, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); +#else + address = mmap(start, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if(address == MAP_FAILED) address = NULL; + +#endif /* ifdef _WIN32 else */ + + return address; } -inline uint32 atomic_inc32(volatile apr_uint32_t *mem){ - return (uint32)apr_atomic_inc32(mem); +inline Boolean vm_unmap_mem(void* start, unsigned int size) +{ + unsigned int result; +#ifdef _WIN32 + result = VirtualFree(start, 0, MEM_RELEASE); +#else + result = munmap(start, size); + if(result == -1) result = 0; + +#endif /* ifdef _WIN32 else */ + + return result; } -inline uint32 atomic_dec32(volatile apr_uint32_t *mem){ - return (uint32)apr_atomic_dec32(mem); +inline void *vm_alloc_mem(void* start, unsigned int size) +{ + void* address; +#ifdef _WIN32 + address = VirtualAlloc(start, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); +#else + address = mmap(start, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if(address == MAP_FAILED) address = NULL; + +#endif /* ifdef _WIN32 else */ + + return address; } -inline uint32 atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val) { - return (uint32)apr_atomic_add32(mem, val); +inline Boolean vm_free_mem(void* start, unsigned int size) +{ + return vm_unmap_mem(start, size); } -inline Boolean pool_create(apr_pool_t **newpool, apr_pool_t *parent) { - return (Boolean)apr_pool_create(newpool, parent); +inline void *vm_reserve_mem(void* start, unsigned int size) +{ + void* address; +#ifdef _WIN32 + address = VirtualAlloc(start, size, MEM_RESERVE, PAGE_READWRITE); +#else + address = mmap(start, size, PROT_READ|PROT_WRITE, FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if(address == MAP_FAILED) address = NULL; + +#endif /* ifdef _WIN32 else */ + + return address; } -inline void pool_destroy(apr_pool_t *p) { - apr_pool_destroy(p); +inline void *vm_commit_mem(void* start, unsigned int size) +{ + void* address; +#ifdef _WIN32 + address = VirtualAlloc(start, size, MEM_COMMIT, PAGE_READWRITE); +#else + +#endif /* ifdef _WIN32 else */ + + return address; } +inline Boolean vm_decommit_mem(void* start, unsigned int size) +{ + unsigned int result; +#ifdef _WIN32 + result = VirtualFree(start, size, MEM_DECOMMIT); +#else + +#endif /* ifdef _WIN32 else */ -inline int64 time_now() { - return apr_time_now(); + return result; } +inline int64 time_now() +{ return apr_time_now(); } + +inline void string_to_upper(char* s) +{ + while(*s){ + *s = toupper(*s); + s++; + } +} + +#ifdef PLATFORM_POSIX +#define max(x, y) ((x)>(y)?(x):(y)) +#endif + typedef volatile unsigned int SpinLock; enum Lock_State{ Index: vm/gc_gen/src/common/gc_space.h =================================================================== --- vm/gc_gen/src/common/gc_space.h (revision 0) +++ vm/gc_gen/src/common/gc_space.h (revision 0) @@ -0,0 +1,163 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#ifndef _GC_SPACE_H_ +#define _GC_SPACE_H_ + +#include "gc_block.h" + +struct GC; +/* all Spaces inherit this Space structure */ +typedef struct Space{ + void* heap_start; + void* heap_end; + unsigned int reserved_heap_size; + unsigned int committed_heap_size; + unsigned int num_collections; + int64 time_collections; + float survive_ratio; + unsigned int collect_algorithm; + GC* gc; + Boolean move_object; +}Space; + +inline unsigned int space_committed_size(Space* space){ return space->committed_heap_size;} +inline void* space_heap_start(Space* space){ return space->heap_start; } +inline void* space_heap_end(Space* space){ return space->heap_end; } + +inline Boolean address_belongs_to_space(void* addr, Space* space) +{ + return (addr >= space_heap_start(space) && addr < space_heap_end(space)); +} + +inline Boolean obj_belongs_to_space(Partial_Reveal_Object *p_obj, Space* space) +{ + return address_belongs_to_space((Partial_Reveal_Object*)p_obj, space); +} + + +typedef struct Blocked_Space { + /* <-- first couple of fields are overloadded as Space */ + void* heap_start; + void* heap_end; + unsigned int reserved_heap_size; + unsigned int committed_heap_size; + unsigned int num_collections; + int64 time_collections; + float survive_ratio; + unsigned int collect_algorithm; + GC* gc; + Boolean move_object; + /* END of Space --> */ + + Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ + + /* FIXME:: the block indices should be replaced with block header addresses */ + unsigned int first_block_idx; + unsigned int ceiling_block_idx; + volatile unsigned int free_block_idx; + + unsigned int num_used_blocks; + unsigned int num_managed_blocks; + unsigned int num_total_blocks; + /* END of Blocked_Space --> */ +}Blocked_Space; + +inline Boolean space_has_free_block(Blocked_Space* space){ return space->free_block_idx <= space->ceiling_block_idx; } +inline unsigned int space_free_memory_size(Blocked_Space* space){ return GC_BLOCK_SIZE_BYTES * (space->ceiling_block_idx - space->free_block_idx + 1); } +inline Boolean space_used_memory_size(Blocked_Space* space){ return GC_BLOCK_SIZE_BYTES * (space->free_block_idx - space->first_block_idx); } + +inline void space_init_blocks(Blocked_Space* space) +{ + Block* blocks = (Block*)space->heap_start; + Block_Header* last_block = (Block_Header*)blocks; + unsigned int start_idx = space->first_block_idx; + for(unsigned int i=0; i < space->num_managed_blocks; i++){ + Block_Header* block = (Block_Header*)&(blocks[i]); + block_init(block); + block->block_idx = i + start_idx; + last_block->next = block; + last_block = block; + } + last_block->next = NULL; + space->blocks = blocks; + + return; +} + + +inline void blocked_space_shrink(Blocked_Space* space, unsigned int changed_size) +{ + unsigned int block_dec_count = changed_size >> GC_BLOCK_SHIFT_COUNT; + void* new_base = (void*)&(space->blocks[space->num_managed_blocks - block_dec_count]); + + void* decommit_base = (void*)round_down_to_size((unsigned int)new_base, SYSTEM_ALLOC_UNIT); + + assert( 
((Block_Header*)decommit_base)->block_idx >= space->free_block_idx); + + void* old_end = (void*)&space->blocks[space->num_managed_blocks]; + unsigned int decommit_size = (unsigned int)old_end - (unsigned int)decommit_base; + assert(decommit_size && !(decommit_size%GC_BLOCK_SIZE_BYTES)); + + Boolean result = vm_decommit_mem(decommit_base, decommit_size); + assert(result == TRUE); + + space->committed_heap_size = (unsigned int)decommit_base - (unsigned int)space->heap_start; + space->num_managed_blocks = space->committed_heap_size >> GC_BLOCK_SHIFT_COUNT; + + Block_Header* new_last_block = (Block_Header*)&space->blocks[space->num_managed_blocks - 1]; + space->ceiling_block_idx = new_last_block->block_idx; + new_last_block->next = NULL; +} + +inline void blocked_space_extend(Blocked_Space* space, unsigned int changed_size) +{ + unsigned int block_inc_count = changed_size >> GC_BLOCK_SHIFT_COUNT; + + void* old_base = (void*)&space->blocks[space->num_managed_blocks]; + void* commit_base = (void*)round_down_to_size((unsigned int)old_base, SYSTEM_ALLOC_UNIT); + unsigned int block_diff_count = ((unsigned int)old_base - (unsigned int)commit_base) >> GC_BLOCK_SHIFT_COUNT; + block_inc_count += block_diff_count; + + unsigned int commit_size = block_inc_count << GC_BLOCK_SHIFT_COUNT; + void* result = vm_commit_mem(commit_base, commit_size); + assert(result == commit_base); + + void* new_end = (void*)((unsigned int)commit_base + commit_size); + space->committed_heap_size = (unsigned int)new_end - (unsigned int)space->heap_start; + + /* init the grown blocks */ + Block_Header* block = (Block_Header*)commit_base; + Block_Header* last_block = (Block_Header*)((Block*)block -1); + unsigned int start_idx = last_block->block_idx + 1; + unsigned int i; + for(i=0; block < new_end; i++){ + block_init(block); + block->block_idx = start_idx + i; + last_block->next = block; + last_block = block; + block = (Block_Header*)((Block*)block + 1); + } + last_block->next = NULL; + space->ceiling_block_idx = last_block->block_idx; + space->num_managed_blocks = space->committed_heap_size >> GC_BLOCK_SHIFT_COUNT; +} + +#endif //#ifndef _GC_SPACE_H_ Index: vm/gc_gen/src/common/interior_pointer.cpp =================================================================== --- vm/gc_gen/src/common/interior_pointer.cpp (revision 493420) +++ vm/gc_gen/src/common/interior_pointer.cpp (working copy) @@ -27,6 +27,7 @@ typedef struct slot_offset_entry_struct{ void** slot; unsigned int offset; + Partial_Reveal_Object *base; } slot_offset_entry; static std::vector interior_pointer_set; @@ -49,10 +50,19 @@ slot_offset_entry* push_back_entry = (slot_offset_entry*)&interior_pointer_set[interior_pointer_num_count++]; push_back_entry->offset = offset; push_back_entry->slot = slot; - *slot = p_obj; - gc_add_root_set_entry((Managed_Object_Handle*)slot, is_pinned); + push_back_entry->base = p_obj; } +void gc_copy_interior_pointer_table_to_rootset() +{ + unsigned int i; + for( i = 0; ibase)), FALSE); + } +} + void update_rootset_interior_pointer() { unsigned int i; @@ -60,12 +70,10 @@ { slot_offset_entry* entry_traverser = (slot_offset_entry*)&interior_pointer_set[i]; void** root_slot = entry_traverser->slot; - Partial_Reveal_Object* root_base = (Partial_Reveal_Object*)*root_slot;//entry_traverser->base; + Partial_Reveal_Object* root_base = (Partial_Reveal_Object*)entry_traverser->base; unsigned int root_offset = entry_traverser->offset; void *new_slot_contents = (void *)((Byte*)root_base + root_offset); *root_slot = new_slot_contents; } - 
interior_pointer_set.clear(); - assert(interior_pointer_set.size()==0); interior_pointer_num_count = 0; } Index: vm/gc_gen/src/common/interior_pointer.h =================================================================== --- vm/gc_gen/src/common/interior_pointer.h (revision 493420) +++ vm/gc_gen/src/common/interior_pointer.h (working copy) @@ -24,6 +24,7 @@ #include "gc_common.h" void add_root_set_entry_interior_pointer(void **slot, int offset, Boolean is_pinned); +void gc_copy_interior_pointer_table_to_rootset(); void update_rootset_interior_pointer(); #endif //INTERIOR_POINTER_H Index: vm/gc_gen/src/common/mark_scan.cpp =================================================================== --- vm/gc_gen/src/common/mark_scan.cpp (revision 493420) +++ vm/gc_gen/src/common/mark_scan.cpp (working copy) @@ -1,219 +0,0 @@ -/* - * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @author Xiao-Feng Li, 2006/10/05 - */ - -#include "gc_metadata.h" -#include "../thread/collector.h" -#include "../gen/gen.h" - -#include "../finalizer_weakref/finalizer_weakref.h" - -static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref) -{ - Partial_Reveal_Object* p_obj = *p_ref; - if(p_obj==NULL) return; - - Space* obj_space = space_of_addr(collector->gc, p_obj); - - /* if obj to be moved, its ref slot needs remembering for later update */ - if(obj_space->move_object) - collector_repset_add_entry(collector, p_ref); - - if(obj_space->mark_object_func(obj_space, p_obj)) - collector_tracestack_push(collector, p_obj); - - return; -} - -static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) -{ - if( !object_has_ref_field(p_obj) ) return; - - /* scan array object */ - if (object_is_array(p_obj)) { - Partial_Reveal_Object* array = p_obj; - assert(!obj_is_primitive_array(array)); - - int32 array_length = vector_get_length((Vector_Handle) array); - for (int i = 0; i < array_length; i++) { - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i); - scan_slot(collector, p_ref); - } - return; - } - - /* scan non-array object */ - int *offset_scanner = init_object_scanner(p_obj); - while (true) { - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj); - if (p_ref == NULL) break; /* terminating ref slot */ - - scan_slot(collector, p_ref); - offset_scanner = offset_next_ref(offset_scanner); - } - - scan_weak_reference(collector, p_obj, scan_slot); - - return; -} - - -static void trace_object(Collector* collector, Partial_Reveal_Object *p_obj) -{ - scan_object(collector, p_obj); - - Vector_Block* trace_stack = collector->trace_stack; - while( !vector_stack_is_empty(trace_stack)){ - p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack); - scan_object(collector, p_obj); - trace_stack = collector->trace_stack; - } - - return; -} - -/* for marking phase termination detection */ -static 
volatile unsigned int num_finished_collectors = 0; - -/* NOTE:: Only marking in object header is idempotent */ -void mark_scan_heap(Collector* collector) -{ - GC* gc = collector->gc; - GC_Metadata* metadata = gc->metadata; - - /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */ - unsigned int num_active_collectors = gc->num_active_collectors; - atomic_cas32( &num_finished_collectors, 0, num_active_collectors); - - collector->trace_stack = pool_get_entry(metadata->free_task_pool); - - Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); - - /* first step: copy all root objects to mark tasks. - FIXME:: can be done sequentially before coming here to eliminate atomic ops */ - while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); - while(!vector_block_iterator_end(root_set,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; - iter = vector_block_iterator_advance(root_set,iter); - - Partial_Reveal_Object* p_obj = *p_ref; - /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */ - assert( (gc->collect_kind==MINOR_COLLECTION && !gc_requires_barriers()) || (gc->collect_kind==MAJOR_COLLECTION) && (p_obj!= NULL)); - if(p_obj==NULL) continue; - /* we have to mark the object before put it into marktask, because - it is possible to have two slots containing a same object. They will - be scanned twice and their ref slots will be recorded twice. Problem - occurs after the ref slot is updated first time with new position - and the second time the value is the ref slot is the old position as expected. - This can be worked around if we want. - */ - Space* space = space_of_addr(gc, p_obj); - if( !space->mark_object_func(space, p_obj) ) continue; - - collector_tracestack_push(collector, p_obj); - } - root_set = pool_iterator_next(metadata->gc_rootset_pool); - } - /* put back the last trace_stack task */ - pool_put_entry(metadata->mark_task_pool, collector->trace_stack); - - /* second step: iterate over the mark tasks and scan objects */ - /* get a task buf for the mark stack */ - collector->trace_stack = pool_get_entry(metadata->free_task_pool); - -retry: - Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool); - - while(mark_task){ - unsigned int* iter = vector_block_iterator_init(mark_task); - while(!vector_block_iterator_end(mark_task,iter)){ - Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter; - iter = vector_block_iterator_advance(mark_task,iter); - - /* FIXME:: we should not let mark_task empty during working, , other may want to steal it. - degenerate my stack into mark_task, and grab another mark_task */ - trace_object(collector, p_obj); - } - /* run out one task, put back to the pool and grab another task */ - vector_stack_clear(mark_task); - pool_put_entry(metadata->free_task_pool, mark_task); - mark_task = pool_get_entry(metadata->mark_task_pool); - } - - /* termination detection. This is also a barrier. - NOTE:: We can simply spin waiting for num_finished_collectors, because each - generated new task would surely be processed by its generating collector eventually. - So code below is only for load balance optimization. 
*/ - atomic_inc32(&num_finished_collectors); - while(num_finished_collectors != num_active_collectors){ - if( !pool_is_empty(metadata->mark_task_pool)){ - atomic_dec32(&num_finished_collectors); - goto retry; - } - } - - /* put back the last mark stack to the free pool */ - mark_task = (Vector_Block*)collector->trace_stack; - vector_stack_clear(mark_task); - pool_put_entry(metadata->free_task_pool, mark_task); - collector->trace_stack = NULL; - - /* put back last repointed refs set recorded during marking */ - pool_put_entry(metadata->collector_repset_pool, collector->rep_set); - collector->rep_set = NULL; - - return; -} - -void resurrect_obj_tree_after_mark(Collector *collector, Partial_Reveal_Object *p_obj) -{ - GC *gc = collector->gc; - GC_Metadata* metadata = gc->metadata; - - Space* space = space_of_addr(gc, p_obj); -// if(!space->mark_object_func(space, p_obj)) { assert(0); } - space->mark_object_func(space, p_obj); - collector->trace_stack = pool_get_entry(metadata->free_task_pool); - collector_tracestack_push(collector, p_obj); - pool_put_entry(metadata->mark_task_pool, collector->trace_stack); - -//collector->rep_set = pool_get_entry(metadata->free_set_pool); /* has got collector->rep_set in caller */ - collector->trace_stack = pool_get_entry(metadata->free_task_pool); - Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool); - while(mark_task){ - unsigned int* iter = vector_block_iterator_init(mark_task); - while(!vector_block_iterator_end(mark_task,iter)){ - Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter; - trace_object(collector, p_obj); - iter = vector_block_iterator_advance(mark_task, iter); - } - /* run out one task, put back to the pool and grab another task */ - vector_stack_clear(mark_task); - pool_put_entry(metadata->free_task_pool, mark_task); - mark_task = pool_get_entry(metadata->mark_task_pool); - } - - mark_task = (Vector_Block*)collector->trace_stack; - vector_stack_clear(mark_task); - pool_put_entry(metadata->free_task_pool, mark_task); - collector->trace_stack = NULL; -//pool_put_entry(metadata->collector_repset_pool, collector->rep_set); /* has got collector->rep_set in caller */ -//collector->rep_set = NULL; /* has got collector->rep_set in caller */ -} Index: vm/gc_gen/src/common/mark_scan_pool.cpp =================================================================== --- vm/gc_gen/src/common/mark_scan_pool.cpp (revision 0) +++ vm/gc_gen/src/common/mark_scan_pool.cpp (revision 0) @@ -0,0 +1,224 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include "gc_metadata.h" +#include "../thread/collector.h" +#include "../gen/gen.h" +#include "../finalizer_weakref/finalizer_weakref.h" + +static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref) +{ + Partial_Reveal_Object* p_obj = *p_ref; + if(p_obj==NULL) return; + + if(obj_mark_in_vt(p_obj)) + collector_tracestack_push(collector, p_obj); + + return; +} + + +static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +{ + if( !object_has_ref_field(p_obj) ) return; + + Partial_Reveal_Object **p_ref; + + if (object_is_array(p_obj)) { /* scan array object */ + + Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj; + unsigned int array_length = array->array_len; + + p_ref = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array)); + + for (unsigned int i = 0; i < array_length; i++) { + scan_slot(collector, p_ref+i); + } + + }else{ /* scan non-array object */ + + unsigned int num_refs = object_ref_field_num(p_obj); + + int* ref_iterator = object_ref_iterator_init(p_obj); + + for(unsigned int i=0; itrace_stack; + while( !vector_stack_is_empty(trace_stack)){ + p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack); + scan_object(collector, p_obj); + trace_stack = collector->trace_stack; + } + + return; +} + +/* for marking phase termination detection */ +static volatile unsigned int num_finished_collectors = 0; + +/* NOTE:: Only marking in object header is idempotent. + Originally, we have to mark the object before put it into markstack, to + guarantee there is only one occurrance of an object in markstack. This is to + guarantee there is only one occurrance of a repointed ref slot in repset (they + are put to the set when the object is scanned). If the same object is put to + markstack twice, they will be scanned twice and their ref slots will be recorded twice. + Problem occurs when the ref slot is updated first time with new position, + the second time the value in the ref slot is not the old position as expected. + It needs to read the original obj header for forwarding pointer. With the new value, + it will read something nonsense since the obj is not moved yet. + This can be worked around if we want. + To do this we have to use atomic instruction for marking, which is undesirable. + So we abondoned this design. We no longer use the repset to remember repointed slots +*/ + +void mark_scan_pool(Collector* collector) +{ + GC* gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */ + unsigned int num_active_collectors = gc->num_active_collectors; + atomic_cas32( &num_finished_collectors, 0, num_active_collectors); + + collector->trace_stack = free_task_pool_get_entry(metadata); + + Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); + + /* first step: copy all root objects to mark tasks. 
+ FIXME:: can be done sequentially before coming here to eliminate atomic ops */ + while(root_set){ + unsigned int* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(root_set,iter); + + Partial_Reveal_Object* p_obj = *p_ref; + /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */ + assert(p_obj!=NULL); + /* we have to mark the object before put it into marktask, because + it is possible to have two slots containing a same object. They will + be scanned twice and their ref slots will be recorded twice. Problem + occurs after the ref slot is updated first time with new position + and the second time the value is the ref slot is the old position as expected. + This can be worked around if we want. + */ + if(obj_mark_in_vt(p_obj)) + collector_tracestack_push(collector, p_obj); + + } + root_set = pool_iterator_next(metadata->gc_rootset_pool); + } + /* put back the last trace_stack task */ + pool_put_entry(metadata->mark_task_pool, collector->trace_stack); + + /* second step: iterate over the mark tasks and scan objects */ + /* get a task buf for the mark stack */ + collector->trace_stack = free_task_pool_get_entry(metadata); + +retry: + Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool); + + while(mark_task){ + unsigned int* iter = vector_block_iterator_init(mark_task); + while(!vector_block_iterator_end(mark_task,iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter; + iter = vector_block_iterator_advance(mark_task,iter); + + /* FIXME:: we should not let mark_task empty during working, , other may want to steal it. + degenerate my stack into mark_task, and grab another mark_task */ + trace_object(collector, p_obj); + } + /* run out one task, put back to the pool and grab another task */ + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + mark_task = pool_get_entry(metadata->mark_task_pool); + } + + /* termination detection. This is also a barrier. + NOTE:: We can simply spin waiting for num_finished_collectors, because each + generated new task would surely be processed by its generating collector eventually. + So code below is only for load balance optimization. 
*/ + atomic_inc32(&num_finished_collectors); + while(num_finished_collectors != num_active_collectors){ + if( !pool_is_empty(metadata->mark_task_pool)){ + atomic_dec32(&num_finished_collectors); + goto retry; + } + } + + /* put back the last mark stack to the free pool */ + mark_task = (Vector_Block*)collector->trace_stack; + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + collector->trace_stack = NULL; + + return; +} + +/* this is to resurrect p_obj and its decedants for some reason, here for finalizables */ +void resurrect_obj_tree_after_mark(Collector *collector, Partial_Reveal_Object *p_obj) +{ + GC *gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + obj_mark_in_vt(p_obj); + collector->trace_stack = free_task_pool_get_entry(metadata); + collector_tracestack_push(collector, p_obj); + pool_put_entry(metadata->mark_task_pool, collector->trace_stack); + +//collector->rep_set = free_set_pool_get_entry(metadata); /* has got collector->rep_set in caller */ + collector->trace_stack = free_task_pool_get_entry(metadata); + Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool); + while(mark_task){ + unsigned int* iter = vector_block_iterator_init(mark_task); + while(!vector_block_iterator_end(mark_task,iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter; + trace_object(collector, p_obj); + iter = vector_block_iterator_advance(mark_task, iter); + } + /* run out one task, put back to the pool and grab another task */ + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + mark_task = pool_get_entry(metadata->mark_task_pool); + } + + mark_task = (Vector_Block*)collector->trace_stack; + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + collector->trace_stack = NULL; +//pool_put_entry(metadata->collector_repset_pool, collector->rep_set); /* has got collector->rep_set in caller */ +//collector->rep_set = NULL; /* has got collector->rep_set in caller */ +} Index: vm/gc_gen/src/common/space_tuner.cpp =================================================================== --- vm/gc_gen/src/common/space_tuner.cpp (revision 0) +++ vm/gc_gen/src/common/space_tuner.cpp (revision 0) @@ -0,0 +1,72 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include "space_tuner.h" + +#define GC_LOS_MIN_VARY_SIZE ( 2 * 1024 * 1024 ) + + +struct GC_Gen; +Space* gc_get_mos(GC_Gen* gc); +Space* gc_get_nos(GC_Gen* gc); + +void gc_space_tune(GC* gc, unsigned int cause) +{ + if((gc->collect_kind == MINOR_COLLECTION) || (cause != GC_CAUSE_LOS_IS_FULL) ) + return; + + Space_Tuner* tuner = gc->tuner; + tuner->kind = TRANS_FROM_MOS_TO_LOS; + + Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); + Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); + + unsigned int mos_free_block_nr = (mspace->ceiling_block_idx - mspace->free_block_idx + 1); + unsigned int nos_used_block_nr = fspace->free_block_idx - fspace->first_block_idx; + unsigned int mos_wast_block_nr = mos_free_block_nr - nos_used_block_nr; + unsigned int min_vary_block_nr = (GC_LOS_MIN_VARY_SIZE >> GC_BLOCK_SHIFT_COUNT); + if( mos_wast_block_nr > min_vary_block_nr ){ + tuner->tuning_size = min_vary_block_nr << GC_BLOCK_SHIFT_COUNT; + }else{ + tuner->tuning_size = mos_wast_block_nr << GC_BLOCK_SHIFT_COUNT; + } + + if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING; + + return; +} + +void gc_space_tuner_reset(GC* gc) +{ + if(gc->collect_kind != MINOR_COLLECTION){ + Space_Tuner* tuner = gc->tuner; + memset(tuner, 0, sizeof(Space_Tuner)); + } +} + +void gc_space_tuner_initialize(GC* gc) +{ + Space_Tuner* tuner = (Space_Tuner*)STD_MALLOC(sizeof(Space_Tuner)); + assert(tuner); + memset(tuner, 0, sizeof(Space_Tuner)); + tuner->kind = TRANS_NOTHING; + tuner->tuning_size = 0; + gc->tuner = tuner; +} Index: vm/gc_gen/src/common/space_tuner.h =================================================================== --- vm/gc_gen/src/common/space_tuner.h (revision 0) +++ vm/gc_gen/src/common/space_tuner.h (revision 0) @@ -0,0 +1,45 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#ifndef _SPACE_TUNER_H_ +#define _SPACE_TUNER_H_ + +#include "gc_common.h" +#include "gc_space.h" + +//For_LOS_extend +enum Transform_Kind { + TRANS_NOTHING = 0, + TRANS_FROM_LOS_TO_MOS = 0x1, + TRANS_FROM_MOS_TO_LOS = 0x2, +}; + +typedef struct Space_Tuner{ + /*fixme: Now we use static value of GC_LOS_MIN_VARY_SIZE. 
*/ + unsigned int tuning_threshold; + Transform_Kind kind; + unsigned int tuning_size; +}Space_Tuner; + +void gc_space_tune(GC* gc, unsigned int cause); +void gc_space_tuner_reset(GC* gc); +void gc_space_tuner_initialize(GC* gc); + +#endif /* _SPACE_TUNER_H_ */ Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp =================================================================== --- vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (revision 493420) +++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (working copy) @@ -27,22 +27,24 @@ #include "../mark_sweep/lspace.h" #include "../gen/gen.h" -/* reset objects_with_finalizer vector block of each mutator */ -void mutator_reset_objects_with_finalizer(Mutator *mutator) +Boolean IGNORE_FINREF = TRUE; + +/* reset obj_with_fin vector block of each mutator */ +void mutator_reset_obj_with_fin(Mutator *mutator) { - mutator->objects_with_finalizer = finalizer_weakref_get_free_block(); + mutator->obj_with_fin = finref_get_free_block(); } -void gc_set_objects_with_finalizer(GC *gc) +void gc_set_obj_with_fin(GC *gc) { - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; - Pool *objects_with_finalizer_pool = metadata->objects_with_finalizer_pool; + Finref_Metadata *metadata = gc->finref_metadata; + Pool *obj_with_fin_pool = metadata->obj_with_fin_pool; - /* put back last objects_with_finalizer block of each mutator */ + /* put back last obj_with_fin block of each mutator */ Mutator *mutator = gc->mutator_list; while(mutator){ - pool_put_entry(objects_with_finalizer_pool, mutator->objects_with_finalizer); - mutator->objects_with_finalizer = NULL; + pool_put_entry(obj_with_fin_pool, mutator->obj_with_fin); + mutator->obj_with_fin = NULL; mutator = mutator->next; } return; @@ -51,23 +53,23 @@ /* reset weak references vetctor block of each collector */ void collector_reset_weakref_sets(Collector *collector) { - collector->softref_set = finalizer_weakref_get_free_block(); - collector->weakref_set = finalizer_weakref_get_free_block(); - collector->phanref_set= finalizer_weakref_get_free_block(); + collector->softref_set = finref_get_free_block(); + collector->weakref_set = finref_get_free_block(); + collector->phanref_set= finref_get_free_block(); } -static void gc_set_weakref_sets(GC *gc) +void gc_set_weakref_sets(GC *gc) { - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; + Finref_Metadata *metadata = gc->finref_metadata; /* put back last weak references block of each collector */ unsigned int num_active_collectors = gc->num_active_collectors; for(unsigned int i = 0; i < num_active_collectors; i++) { Collector* collector = gc->collectors[i]; - pool_put_entry(metadata->softref_set_pool, collector->softref_set); - pool_put_entry(metadata->weakref_set_pool, collector->weakref_set); - pool_put_entry(metadata->phanref_set_pool, collector->phanref_set); + pool_put_entry(metadata->softref_pool, collector->softref_set); + pool_put_entry(metadata->weakref_pool, collector->weakref_set); + pool_put_entry(metadata->phanref_pool, collector->phanref_set); collector->softref_set = NULL; collector->weakref_set= NULL; collector->phanref_set= NULL; @@ -76,26 +78,14 @@ } -extern Boolean obj_is_dead_in_minor_forward_collection(Collector *collector, Partial_Reveal_Object *p_obj); -static inline Boolean obj_is_dead_in_minor_copy_collection(Collector *collector, Partial_Reveal_Object *p_obj) +extern Boolean obj_is_dead_in_minor_forward_gc(Collector *collector, Partial_Reveal_Object *p_obj); +static inline Boolean 
obj_is_dead_in_minor_copy_gc(Collector *collector, Partial_Reveal_Object *p_obj) { - GC *gc = collector->gc; - Lspace *los = ((GC_Gen *)gc)->los; - - if(space_of_addr(gc, p_obj) != (Space *)los) - return !obj_is_marked_in_vt(p_obj); - else - return !lspace_object_is_marked(los, p_obj); + return !obj_is_marked_in_vt(p_obj); } -static inline Boolean obj_is_dead_in_major_collection(Collector *collector, Partial_Reveal_Object *p_obj) +static inline Boolean obj_is_dead_in_major_gc(Collector *collector, Partial_Reveal_Object *p_obj) { - GC *gc = collector->gc; - Lspace *los = ((GC_Gen *)gc)->los; - - if(space_of_addr(gc, p_obj) != (Space *)los) - return !obj_is_marked_in_vt(p_obj); - else - return !lspace_object_is_marked(los, p_obj); + return !obj_is_marked_in_vt(p_obj); } // clear the two least significant bits of p_obj first static inline Boolean obj_is_dead(Collector *collector, Partial_Reveal_Object *p_obj) @@ -104,17 +94,17 @@ assert(p_obj); if(gc->collect_kind == MINOR_COLLECTION){ - if( gc_requires_barriers()) - return obj_is_dead_in_minor_forward_collection(collector, p_obj); + if( gc_is_gen_mode()) + return obj_is_dead_in_minor_forward_gc(collector, p_obj); else - return obj_is_dead_in_minor_copy_collection(collector, p_obj); + return obj_is_dead_in_minor_copy_gc(collector, p_obj); } else { - return obj_is_dead_in_major_collection(collector, p_obj); + return obj_is_dead_in_major_gc(collector, p_obj); } } -static inline Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Space *space) +static inline Boolean fspace_obj_to_be_forwarded(Partial_Reveal_Object *p_obj, Space *space) { if(!obj_belongs_to_space(p_obj, (Space*)space)) return FALSE; return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); @@ -124,8 +114,8 @@ assert(!obj_is_dead(collector, p_obj)); GC *gc = collector->gc; - if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION) - return fspace_object_to_be_forwarded(p_obj, collector->collect_space); + if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION) + return fspace_obj_to_be_forwarded(p_obj, collector->collect_space); Space *space = space_of_addr(gc, p_obj); return space->move_object; @@ -134,11 +124,11 @@ extern void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref); extern void resurrect_obj_tree_after_mark(Collector *collector, Partial_Reveal_Object *p_obj); -static inline void resurrect_obj_tree_in_minor_copy_collection(Collector *collector, Partial_Reveal_Object *p_obj) +static inline void resurrect_obj_tree_in_minor_copy_gc(Collector *collector, Partial_Reveal_Object *p_obj) { resurrect_obj_tree_after_mark(collector, p_obj); } -static inline void resurrect_obj_tree_in_major_collection(Collector *collector, Partial_Reveal_Object *p_obj) +static inline void resurrect_obj_tree_in_major_gc(Collector *collector, Partial_Reveal_Object *p_obj) { resurrect_obj_tree_after_mark(collector, p_obj); } @@ -148,23 +138,23 @@ { GC *gc = collector->gc; - if(!gc_requires_barriers() || !(gc->collect_kind == MINOR_COLLECTION)) + if(!gc_is_gen_mode() || !(gc->collect_kind == MINOR_COLLECTION)) collector_repset_add_entry(collector, p_ref); if(!obj_is_dead(collector, *p_ref)){ - if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION && obj_need_move(collector, *p_ref)) - *p_ref = obj_get_forwarding_pointer_in_vt(*p_ref); + if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION && obj_need_move(collector, *p_ref)) + *p_ref = obj_get_fw_in_oi(*p_ref); return; } 
Partial_Reveal_Object* p_obj = *p_ref; assert(p_obj); if(gc->collect_kind == MINOR_COLLECTION){ - if( gc_requires_barriers()) + if( gc_is_gen_mode()) resurrect_obj_tree_after_trace(collector, p_ref); else - resurrect_obj_tree_in_minor_copy_collection(collector, p_obj); + resurrect_obj_tree_in_minor_copy_gc(collector, p_obj); } else { - resurrect_obj_tree_in_major_collection(collector, p_obj); + resurrect_obj_tree_in_major_gc(collector, p_obj); } } @@ -175,27 +165,27 @@ GC *gc = collector->gc; assert(!collector->rep_set); - if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION) + if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION) return; - collector->rep_set = pool_get_entry(gc->metadata->free_set_pool); + collector->rep_set = free_set_pool_get_entry(gc->metadata); } /* called after loop of resurrect_obj_tree() */ static inline void collector_put_repset(Collector *collector) { GC *gc = collector->gc; - if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION) + if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION) return; pool_put_entry(gc->metadata->collector_repset_pool, collector->rep_set); collector->rep_set = NULL; } -void finalizer_weakref_repset_add_entry_from_pool(Collector *collector, Pool *pool) +static void finref_add_repset_from_pool(Collector *collector, Pool *pool) { GC *gc = collector->gc; - finalizer_weakref_reset_repset(gc); + finref_reset_repset(gc); pool_iterator_init(pool); while(Vector_Block *block = pool_iterator_next(pool)){ @@ -206,32 +196,33 @@ iter = vector_block_iterator_advance(block, iter); if(*p_ref && obj_need_move(collector, *p_ref)) - finalizer_weakref_repset_add_entry(gc, p_ref); + finref_repset_add_entry(gc, p_ref); } } - finalizer_weakref_put_repset(gc); + finref_put_repset(gc); } -static void process_objects_with_finalizer(Collector *collector) +static void identify_finalizable_objects(Collector *collector) { GC *gc = collector->gc; - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; - Pool *objects_with_finalizer_pool = metadata->objects_with_finalizer_pool; - Pool *finalizable_objects_pool = metadata->finalizable_objects_pool; + Finref_Metadata *metadata = gc->finref_metadata; + Pool *obj_with_fin_pool = metadata->obj_with_fin_pool; + Pool *finalizable_obj_pool = metadata->finalizable_obj_pool; gc_reset_finalizable_objects(gc); - pool_iterator_init(objects_with_finalizer_pool); - while(Vector_Block *block = pool_iterator_next(objects_with_finalizer_pool)){ + pool_iterator_init(obj_with_fin_pool); + while(Vector_Block *block = pool_iterator_next(obj_with_fin_pool)){ unsigned int block_has_ref = 0; unsigned int *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter; + Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; + Partial_Reveal_Object *p_obj = *p_ref; if(!p_obj) continue; if(obj_is_dead(collector, p_obj)){ - gc_finalizable_objects_add_entry(gc, p_obj); - *iter = NULL; + gc_add_finalizable_obj(gc, p_obj); + *p_ref = NULL; } else { ++block_has_ref; } @@ -241,10 +232,10 @@ } gc_put_finalizable_objects(gc); - collector_reset_repset(collector); - if(!finalizable_objects_pool_is_empty(gc)){ - pool_iterator_init(finalizable_objects_pool); - while(Vector_Block *block = pool_iterator_next(finalizable_objects_pool)){ + if(!finalizable_obj_pool_is_empty(gc)){ + collector_reset_repset(collector); + pool_iterator_init(finalizable_obj_pool); 
+ while(Vector_Block *block = pool_iterator_next(finalizable_obj_pool)){ unsigned int *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ assert(*iter); @@ -253,20 +244,20 @@ } } metadata->pending_finalizers = TRUE; + collector_put_repset(collector); } - collector_put_repset(collector); - finalizer_weakref_repset_add_entry_from_pool(collector, objects_with_finalizer_pool); + finref_add_repset_from_pool(collector, obj_with_fin_pool); /* fianlizable objects have been added to collector repset pool */ - //finalizer_weakref_repset_add_entry_from_pool(collector, finalizable_objects_pool); + //finref_add_repset_from_pool(collector, finalizable_obj_pool); } -static void post_process_finalizable_objects(GC *gc) +static void put_finalizable_obj_to_vm(GC *gc) { - Pool *finalizable_objects_pool = gc->finalizer_weakref_metadata->finalizable_objects_pool; - Pool *free_pool = gc->finalizer_weakref_metadata->free_pool; + Pool *finalizable_obj_pool = gc->finref_metadata->finalizable_obj_pool; + Pool *free_pool = gc->finref_metadata->free_pool; - while(Vector_Block *block = pool_get_entry(finalizable_objects_pool)){ + while(Vector_Block *block = pool_get_entry(finalizable_obj_pool)){ unsigned int *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ assert(*iter); @@ -279,106 +270,149 @@ } } -static void process_soft_references(Collector *collector) +static void update_referent_ignore_finref(Collector *collector, Pool *pool) { GC *gc = collector->gc; + + while(Vector_Block *block = pool_get_entry(pool)){ + unsigned int *iter = vector_block_iterator_init(block); + for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ + Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; + Partial_Reveal_Object *p_obj = *p_ref; + assert(p_obj); + Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); + Partial_Reveal_Object *p_referent = *p_referent_field; + + if(!p_referent){ // referent field has been cleared + *p_ref = NULL; + continue; + } + if(!obj_is_dead(collector, p_referent)){ // referent is alive + if(obj_need_move(collector, p_referent)) + finref_repset_add_entry(gc, p_referent_field); + *p_ref = NULL; + continue; + } + *p_referent_field = NULL; /* referent is softly reachable: clear the referent field */ + } + } +} + +void update_ref_ignore_finref(Collector *collector) +{ + GC *gc = collector->gc; + Finref_Metadata *metadata = gc->finref_metadata; + + finref_reset_repset(gc); + update_referent_ignore_finref(collector, metadata->softref_pool); + update_referent_ignore_finref(collector, metadata->weakref_pool); + update_referent_ignore_finref(collector, metadata->phanref_pool); + finref_put_repset(gc); +} + +static void identify_dead_softrefs(Collector *collector) +{ + GC *gc = collector->gc; if(gc->collect_kind == MINOR_COLLECTION){ - assert(softref_set_pool_is_empty(gc)); + assert(softref_pool_is_empty(gc)); return; } - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; - Pool *softref_set_pool = metadata->softref_set_pool; + Finref_Metadata *metadata = gc->finref_metadata; + Pool *softref_pool = metadata->softref_pool; - finalizer_weakref_reset_repset(gc); - pool_iterator_init(softref_set_pool); - while(Vector_Block *block = pool_iterator_next(softref_set_pool)){ + finref_reset_repset(gc); + pool_iterator_init(softref_pool); + while(Vector_Block *block = pool_iterator_next(softref_pool)){ unsigned int *iter = vector_block_iterator_init(block); 
for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter; + Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; + Partial_Reveal_Object *p_obj = *p_ref; assert(p_obj); Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); Partial_Reveal_Object *p_referent = *p_referent_field; if(!p_referent){ // referent field has been cleared - *iter = NULL; + *p_ref = NULL; continue; } if(!obj_is_dead(collector, p_referent)){ // referent is alive if(obj_need_move(collector, p_referent)) - finalizer_weakref_repset_add_entry(gc, p_referent_field); - *iter = NULL; + finref_repset_add_entry(gc, p_referent_field); + *p_ref = NULL; continue; } *p_referent_field = NULL; /* referent is softly reachable: clear the referent field */ } } - finalizer_weakref_put_repset(gc); + finref_put_repset(gc); - finalizer_weakref_repset_add_entry_from_pool(collector, softref_set_pool); + finref_add_repset_from_pool(collector, softref_pool); return; } -static void process_weak_references(Collector *collector) +static void identify_dead_weakrefs(Collector *collector) { GC *gc = collector->gc; - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; - Pool *weakref_set_pool = metadata->weakref_set_pool; + Finref_Metadata *metadata = gc->finref_metadata; + Pool *weakref_pool = metadata->weakref_pool; - finalizer_weakref_reset_repset(gc); - pool_iterator_init(weakref_set_pool); - while(Vector_Block *block = pool_iterator_next(weakref_set_pool)){ + finref_reset_repset(gc); + pool_iterator_init(weakref_pool); + while(Vector_Block *block = pool_iterator_next(weakref_pool)){ unsigned int *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter; + Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; + Partial_Reveal_Object *p_obj = *p_ref; assert(p_obj); Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); Partial_Reveal_Object *p_referent = *p_referent_field; if(!p_referent){ // referent field has been cleared - *iter = NULL; + *p_ref = NULL; continue; } if(!obj_is_dead(collector, p_referent)){ // referent is alive if(obj_need_move(collector, p_referent)) - finalizer_weakref_repset_add_entry(gc, p_referent_field); - *iter = NULL; + finref_repset_add_entry(gc, p_referent_field); + *p_ref = NULL; continue; } *p_referent_field = NULL; /* referent is weakly reachable: clear the referent field */ } } - finalizer_weakref_put_repset(gc); + finref_put_repset(gc); - finalizer_weakref_repset_add_entry_from_pool(collector, weakref_set_pool); + finref_add_repset_from_pool(collector, weakref_pool); return; } -static void process_phantom_references(Collector *collector) +static void identify_dead_phanrefs(Collector *collector) { GC *gc = collector->gc; - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; - Pool *phanref_set_pool = metadata->phanref_set_pool; + Finref_Metadata *metadata = gc->finref_metadata; + Pool *phanref_pool = metadata->phanref_pool; - finalizer_weakref_reset_repset(gc); + finref_reset_repset(gc); // collector_reset_repset(collector); - pool_iterator_init(phanref_set_pool); - while(Vector_Block *block = pool_iterator_next(phanref_set_pool)){ + pool_iterator_init(phanref_pool); + while(Vector_Block *block = pool_iterator_next(phanref_pool)){ unsigned int *iter = 
vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter; + Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; + Partial_Reveal_Object *p_obj = *p_ref; assert(p_obj); Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); Partial_Reveal_Object *p_referent = *p_referent_field; if(!p_referent){ // referent field has been cleared - *iter = NULL; + *p_ref = NULL; continue; } if(!obj_is_dead(collector, p_referent)){ // referent is alive if(obj_need_move(collector, p_referent)) - finalizer_weakref_repset_add_entry(gc, p_referent_field); - *iter = NULL; + finref_repset_add_entry(gc, p_referent_field); + *p_ref = NULL; continue; } *p_referent_field = NULL; @@ -392,15 +426,15 @@ } } // collector_put_repset(collector); - finalizer_weakref_put_repset(gc); + finref_put_repset(gc); - finalizer_weakref_repset_add_entry_from_pool(collector, phanref_set_pool); + finref_add_repset_from_pool(collector, phanref_pool); return; } -static inline void post_process_special_reference_pool(GC *gc, Pool *reference_pool) +static inline void put_dead_refs_to_vm(GC *gc, Pool *reference_pool) { - Pool *free_pool = gc->finalizer_weakref_metadata->free_pool; + Pool *free_pool = gc->finref_metadata->free_pool; while(Vector_Block *block = pool_get_entry(reference_pool)){ unsigned int *iter = vector_block_iterator_init(block); @@ -415,48 +449,48 @@ } } -static void post_process_special_references(GC *gc) +static void put_dead_weak_refs_to_vm(GC *gc) { - if(softref_set_pool_is_empty(gc) - && weakref_set_pool_is_empty(gc) - && phanref_set_pool_is_empty(gc)){ - gc_clear_special_reference_pools(gc); + if(softref_pool_is_empty(gc) + && weakref_pool_is_empty(gc) + && phanref_pool_is_empty(gc)){ + gc_clear_weakref_pools(gc); return; } - gc->finalizer_weakref_metadata->pending_weak_references = TRUE; + gc->finref_metadata->pending_weakrefs = TRUE; - Pool *softref_set_pool = gc->finalizer_weakref_metadata->softref_set_pool; - Pool *weakref_set_pool = gc->finalizer_weakref_metadata->weakref_set_pool; - Pool *phanref_set_pool = gc->finalizer_weakref_metadata->phanref_set_pool; - Pool *free_pool = gc->finalizer_weakref_metadata->free_pool; + Pool *softref_pool = gc->finref_metadata->softref_pool; + Pool *weakref_pool = gc->finref_metadata->weakref_pool; + Pool *phanref_pool = gc->finref_metadata->phanref_pool; + Pool *free_pool = gc->finref_metadata->free_pool; - post_process_special_reference_pool(gc, softref_set_pool); - post_process_special_reference_pool(gc, weakref_set_pool); - post_process_special_reference_pool(gc, phanref_set_pool); + put_dead_refs_to_vm(gc, softref_pool); + put_dead_refs_to_vm(gc, weakref_pool); + put_dead_refs_to_vm(gc, phanref_pool); } -void collector_process_finalizer_weakref(Collector *collector) +void collector_identify_finref(Collector *collector) { GC *gc = collector->gc; gc_set_weakref_sets(gc); - process_soft_references(collector); - process_weak_references(collector); - process_objects_with_finalizer(collector); - process_phantom_references(collector); + identify_dead_softrefs(collector); + identify_dead_weakrefs(collector); + identify_finalizable_objects(collector); + identify_dead_phanrefs(collector); } -void gc_post_process_finalizer_weakref(GC *gc) +void gc_put_finref_to_vm(GC *gc) { - post_process_special_references(gc); - post_process_finalizable_objects(gc); + put_dead_weak_refs_to_vm(gc); + put_finalizable_obj_to_vm(gc); } 
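/* Editor's sketch, not part of the patch: a minimal, self-contained model of the
 * per-reference decision that identify_dead_softrefs/identify_dead_weakrefs/
 * identify_dead_phanrefs above all share. A cleared referent is simply dropped;
 * a live referent only needs its slot recorded in the repointed-reference set
 * (finref_repset_add_entry in the patch) when the referent will move; a dead
 * referent has its referent field cleared so the reference object can later be
 * handed back to the VM by gc_put_finref_to_vm. The types and predicates below
 * are simplified stand-ins, not the real Partial_Reveal_Object/obj_is_dead/
 * obj_need_move.
 */
#include <cstdio>

struct Obj { bool dead; bool moves; };

enum RefAction { DROP_REF, RECORD_REPOINTED_SLOT, CLEAR_AND_ENQUEUE };

static RefAction process_referent(Obj** referent_slot)
{
    Obj* referent = *referent_slot;
    if (!referent)                       /* referent field already cleared */
        return DROP_REF;
    if (!referent->dead)                 /* referent is alive */
        return referent->moves ? RECORD_REPOINTED_SLOT : DROP_REF;
    *referent_slot = nullptr;            /* dead: clear the referent field */
    return CLEAR_AND_ENQUEUE;            /* reference goes back to the VM for enqueueing */
}

int main()
{
    Obj live_moving = {false, true}, dead = {true, false};
    Obj* slots[] = { &live_moving, &dead, nullptr };
    for (Obj*& s : slots)
        printf("action=%d\n", process_referent(&s));
    return 0;
}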
-void process_objects_with_finalizer_on_exit(GC *gc) +void put_all_fin_on_exit(GC *gc) { - Pool *objects_with_finalizer_pool = gc->finalizer_weakref_metadata->objects_with_finalizer_pool; - Pool *free_pool = gc->finalizer_weakref_metadata->free_pool; + Pool *obj_with_fin_pool = gc->finref_metadata->obj_with_fin_pool; + Pool *free_pool = gc->finref_metadata->free_pool; vm_gc_lock_enum(); /* FIXME: holding gc lock is not enough, perhaps there are mutators that are allocating objects with finalizer @@ -465,9 +499,9 @@ * allocating mem and adding the objects with finalizer to the pool */ lock(gc->mutator_list_lock); - gc_set_objects_with_finalizer(gc); + gc_set_obj_with_fin(gc); unlock(gc->mutator_list_lock); - while(Vector_Block *block = pool_get_entry(objects_with_finalizer_pool)){ + while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){ unsigned int *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter; @@ -481,9 +515,9 @@ vm_gc_unlock_enum(); } -void gc_update_finalizer_weakref_repointed_refs(GC* gc) +void gc_update_finref_repointed_refs(GC* gc) { - Finalizer_Weakref_Metadata* metadata = gc->finalizer_weakref_metadata; + Finref_Metadata* metadata = gc->finref_metadata; Pool *repset_pool = metadata->repset_pool; /* NOTE:: this is destructive to the root sets. */ @@ -499,17 +533,10 @@ /* For repset, this check is unnecessary, since all slots are repointed; otherwise they will not be recorded. For root set, it is possible to point to LOS or other non-moved space. */ -#ifdef _DEBUG - if( !gc_requires_barriers() || gc->collect_kind == MAJOR_COLLECTION ){ - assert(obj_is_forwarded_in_obj_info(p_obj)); - } else - assert(obj_is_forwarded_in_vt(p_obj)); -#endif Partial_Reveal_Object* p_target_obj; - if( !gc_requires_barriers() || gc->collect_kind == MAJOR_COLLECTION ) - p_target_obj = get_forwarding_pointer_in_obj_info(p_obj); - else - p_target_obj = obj_get_forwarding_pointer_in_vt(p_obj); + assert(obj_is_fw_in_oi(p_obj)); + p_target_obj = obj_get_fw_in_oi(p_obj); + *p_ref = p_target_obj; } vector_block_clear(root_set); @@ -520,13 +547,13 @@ return; } -void gc_activate_finalizer_weakref_threads(GC *gc) +void gc_activate_finref_threads(GC *gc) { - Finalizer_Weakref_Metadata* metadata = gc->finalizer_weakref_metadata; + Finref_Metadata* metadata = gc->finref_metadata; - if(metadata->pending_finalizers || metadata->pending_weak_references){ + if(metadata->pending_finalizers || metadata->pending_weakrefs){ metadata->pending_finalizers = FALSE; - metadata->pending_weak_references = FALSE; + metadata->pending_weakrefs = FALSE; vm_hint_finalize(); } } Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h =================================================================== --- vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (revision 493420) +++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (working copy) @@ -18,12 +18,16 @@ * @author Li-Gang Wang, 2006/11/30 */ -#ifndef _FINALIZER_WEAKREF_H_ -#define _FINALIZER_WEAKREF_H_ +#ifndef _FINREF_H_ +#define _FINREF_H_ +#define BUILD_IN_REFERENT + #include "finalizer_weakref_metadata.h" #include "../thread/collector.h" +extern Boolean IGNORE_FINREF; + /* Phantom status: for future use * #define PHANTOM_REF_ENQUEUE_STATUS_MASK 0x3 * #define PHANTOM_REF_ENQUEUED_MASK 0x1 @@ -64,13 +68,13 @@ if(collect_kind==MINOR_COLLECTION) scan_slot(collector, p_referent_field); else - collector_softref_set_add_entry(collector, p_obj); + 
collector_add_softref(collector, p_obj); break; case WEAK_REFERENCE : - collector_weakref_set_add_entry(collector, p_obj); + collector_add_weakref(collector, p_obj); break; case PHANTOM_REFERENCE : - collector_phanref_set_add_entry(collector, p_obj); + collector_add_phanref(collector, p_obj); break; default : assert(0); @@ -79,15 +83,17 @@ } -extern void mutator_reset_objects_with_finalizer(Mutator *mutator); -extern void gc_set_objects_with_finalizer(GC *gc); +extern void mutator_reset_obj_with_fin(Mutator *mutator); +extern void gc_set_obj_with_fin(GC *gc); extern void collector_reset_weakref_sets(Collector *collector); -extern void collector_process_finalizer_weakref(Collector *collector); -extern void gc_post_process_finalizer_weakref(GC *gc); -extern void process_objects_with_finalizer_on_exit(GC *gc); +extern void gc_set_weakref_sets(GC *gc); +extern void update_ref_ignore_finref(Collector *collector); +extern void collector_identify_finref(Collector *collector); +extern void gc_put_finref_to_vm(GC *gc); +extern void put_all_fin_on_exit(GC *gc); -extern void gc_update_finalizer_weakref_repointed_refs(GC* gc); -extern void gc_activate_finalizer_weakref_threads(GC *gc); +extern void gc_update_finref_repointed_refs(GC* gc); +extern void gc_activate_finref_threads(GC *gc); -#endif // _FINALIZER_WEAKREF_H_ +#endif // _FINREF_H_ Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp =================================================================== --- vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (revision 493420) +++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (working copy) @@ -28,168 +28,168 @@ #define METADATA_BLOCK_SIZE_BIT_SHIFT 10 #define METADATA_BLOCK_SIZE_BYTES (1<> METADATA_BLOCK_SIZE_BIT_SHIFT; for(unsigned int i=0; ifinalizer_weakref_metadata = &finalizer_weakref_metadata; + gc->finref_metadata = &finref_metadata; return; } -void gc_finalizer_weakref_metadata_destruct(GC *gc) +void gc_finref_metadata_destruct(GC *gc) { - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; + Finref_Metadata *metadata = gc->finref_metadata; sync_pool_destruct(metadata->free_pool); - sync_pool_destruct(metadata->objects_with_finalizer_pool); - sync_pool_destruct(metadata->finalizable_objects_pool); - sync_pool_destruct(metadata->softref_set_pool); - sync_pool_destruct(metadata->weakref_set_pool); - sync_pool_destruct(metadata->phanref_set_pool); + sync_pool_destruct(metadata->obj_with_fin_pool); + sync_pool_destruct(metadata->finalizable_obj_pool); + sync_pool_destruct(metadata->softref_pool); + sync_pool_destruct(metadata->weakref_pool); + sync_pool_destruct(metadata->phanref_pool); sync_pool_destruct(metadata->repset_pool); - metadata->finalizable_objects = NULL; + metadata->finalizable_obj_set = NULL; metadata->repset = NULL; - for(unsigned int i=0; inext_segment_pos; i++){ + for(unsigned int i=0; inum_alloc_segs; i++){ assert(metadata->pool_segments[i]); STD_FREE(metadata->pool_segments[i]); } - gc->finalizer_weakref_metadata = NULL; + gc->finref_metadata = NULL; } -void gc_finalizer_weakref_metadata_verify(GC *gc, Boolean is_before_gc) +void gc_finref_metadata_verify(GC *gc, Boolean is_before_gc) { - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; + Finref_Metadata *metadata = gc->finref_metadata; - assert(pool_is_empty(metadata->finalizable_objects_pool)); - assert(pool_is_empty(metadata->softref_set_pool)); - assert(pool_is_empty(metadata->weakref_set_pool)); - 
assert(pool_is_empty(metadata->phanref_set_pool)); + assert(pool_is_empty(metadata->finalizable_obj_pool)); + assert(pool_is_empty(metadata->softref_pool)); + assert(pool_is_empty(metadata->weakref_pool)); + assert(pool_is_empty(metadata->phanref_pool)); assert(pool_is_empty(metadata->repset_pool)); - assert(metadata->finalizable_objects == NULL); + assert(metadata->finalizable_obj_set == NULL); assert(metadata->repset == NULL); return; } -void gc_reset_finalizer_weakref_metadata(GC *gc) +void gc_reset_finref_metadata(GC *gc) { - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; - Pool *objects_with_finalizer_pool = metadata->objects_with_finalizer_pool; - Pool *finalizable_objects_pool = metadata->finalizable_objects_pool; + Finref_Metadata *metadata = gc->finref_metadata; + Pool *obj_with_fin_pool = metadata->obj_with_fin_pool; + Pool *finalizable_obj_pool = metadata->finalizable_obj_pool; - assert(pool_is_empty(finalizable_objects_pool)); - assert(pool_is_empty(metadata->softref_set_pool)); - assert(pool_is_empty(metadata->weakref_set_pool)); - assert(pool_is_empty(metadata->phanref_set_pool)); + assert(pool_is_empty(finalizable_obj_pool)); + assert(pool_is_empty(metadata->softref_pool)); + assert(pool_is_empty(metadata->weakref_pool)); + assert(pool_is_empty(metadata->phanref_pool)); assert(pool_is_empty(metadata->repset_pool)); - assert(metadata->finalizable_objects == NULL); + assert(metadata->finalizable_obj_set == NULL); assert(metadata->repset == NULL); - while(Vector_Block *block = pool_get_entry(objects_with_finalizer_pool)){ + while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){ unsigned int *iter = vector_block_iterator_init(block); if(vector_block_iterator_end(block, iter)){ vector_block_clear(block); pool_put_entry(metadata->free_pool, block); } else { - pool_put_entry(finalizable_objects_pool, block); + pool_put_entry(finalizable_obj_pool, block); } } - assert(pool_is_empty(objects_with_finalizer_pool)); - metadata->objects_with_finalizer_pool = finalizable_objects_pool; - metadata->finalizable_objects_pool = objects_with_finalizer_pool; + assert(pool_is_empty(obj_with_fin_pool)); + metadata->obj_with_fin_pool = finalizable_obj_pool; + metadata->finalizable_obj_pool = obj_with_fin_pool; } -/* called when there is no Vector_Block in finalizer_weakref_metadata->free_pool +/* called when there is no Vector_Block in finref_metadata->free_pool * extend the pool by a pool segment */ -static void gc_finalizer_weakref_metadata_extend(void) +static void finref_metadata_extend(void) { - Finalizer_Weakref_Metadata metadata = finalizer_weakref_metadata; + Finref_Metadata *metadata = &finref_metadata; - unsigned int segment_pos = metadata.next_segment_pos; - while(segment_pos < POOL_SEGMENT_NUM){ - unsigned int next_segment_pos = segment_pos + 1; - unsigned int temp = (unsigned int)atomic_cas32((volatile unsigned int *)&metadata.next_segment_pos, next_segment_pos, segment_pos); - if(temp == segment_pos) + unsigned int pos = metadata->num_alloc_segs; + while(pos < POOL_SEGMENT_NUM){ + unsigned int next_pos = pos + 1; + unsigned int temp = (unsigned int)atomic_cas32((volatile unsigned int *)&metadata->num_alloc_segs, next_pos, pos); + if(temp == pos) break; - segment_pos = metadata.next_segment_pos; + pos = metadata->num_alloc_segs; } - if(segment_pos > POOL_SEGMENT_NUM) + if(pos > POOL_SEGMENT_NUM) return; void *pool_segment = STD_MALLOC(POOL_SEGMENT_SIZE_BYTES); memset(pool_segment, 0, POOL_SEGMENT_SIZE_BYTES); - metadata.pool_segments[segment_pos] = 
pool_segment; + metadata->pool_segments[pos] = pool_segment; unsigned int num_blocks = POOL_SEGMENT_SIZE_BYTES >> METADATA_BLOCK_SIZE_BIT_SHIFT; for(unsigned int i=0; ifree_pool, (void *)block); } return; } -Vector_Block *finalizer_weakref_get_free_block(void) +Vector_Block *finref_get_free_block(void) { Vector_Block *block; - while(!(block = pool_get_entry(finalizer_weakref_metadata.free_pool))) - gc_finalizer_weakref_metadata_extend(); + while(!(block = pool_get_entry(finref_metadata.free_pool))) + finref_metadata_extend(); return block; } -/* called when GC completes and there is no Vector_Block in the last five pools of gc->finalizer_weakref_metadata +/* called when GC completes and there is no Vector_Block in the last five pools of gc->finref_metadata * shrink the free pool by half */ -void gc_finalizer_weakref_metadata_shrink(GC *gc) +void finref_metadata_shrink(GC *gc) { } -static inline void finalizer_weakref_metadata_general_add_entry(Vector_Block* &vector_block_in_use, Pool *pool, Partial_Reveal_Object *ref) +static inline void finref_metadata_add_entry(Vector_Block* &vector_block_in_use, Pool *pool, Partial_Reveal_Object *ref) { assert(vector_block_in_use); assert(ref); @@ -200,41 +200,41 @@ if(!vector_block_is_full(block)) return; pool_put_entry(pool, block); - vector_block_in_use = finalizer_weakref_get_free_block(); + vector_block_in_use = finref_get_free_block(); } -void mutator_finalizer_add_entry(Mutator *mutator, Partial_Reveal_Object *ref) +void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref) { - finalizer_weakref_metadata_general_add_entry(mutator->objects_with_finalizer, finalizer_weakref_metadata.objects_with_finalizer_pool, ref); + finref_metadata_add_entry(mutator->obj_with_fin, finref_metadata.obj_with_fin_pool, ref); } -void gc_finalizable_objects_add_entry(GC *gc, Partial_Reveal_Object *ref) +void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref) { - finalizer_weakref_metadata_general_add_entry(finalizer_weakref_metadata.finalizable_objects, finalizer_weakref_metadata.finalizable_objects_pool, ref); + finref_metadata_add_entry(finref_metadata.finalizable_obj_set, finref_metadata.finalizable_obj_pool, ref); } -void collector_softref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref) +void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref) { - finalizer_weakref_metadata_general_add_entry(collector->softref_set, finalizer_weakref_metadata.softref_set_pool, ref); + finref_metadata_add_entry(collector->softref_set, finref_metadata.softref_pool, ref); } -void collector_weakref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref) +void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref) { - finalizer_weakref_metadata_general_add_entry(collector->weakref_set, finalizer_weakref_metadata.weakref_set_pool, ref); + finref_metadata_add_entry(collector->weakref_set, finref_metadata.weakref_pool, ref); } -void collector_phanref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref) +void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref) { - finalizer_weakref_metadata_general_add_entry(collector->phanref_set, finalizer_weakref_metadata.phanref_set_pool, ref); + finref_metadata_add_entry(collector->phanref_set, finref_metadata.phanref_pool, ref); } -void finalizer_weakref_repset_add_entry(GC *gc, Partial_Reveal_Object **p_ref) +void finref_repset_add_entry(GC *gc, Partial_Reveal_Object **p_ref) { assert(*p_ref); - 
finalizer_weakref_metadata_general_add_entry(finalizer_weakref_metadata.repset, finalizer_weakref_metadata.repset_pool, (Partial_Reveal_Object *)p_ref); + finref_metadata_add_entry(finref_metadata.repset, finref_metadata.repset_pool, (Partial_Reveal_Object *)p_ref); } -static inline Boolean pool_has_no_reference(Pool *pool) +static inline Boolean pool_has_no_ref(Pool *pool) { if(pool_is_empty(pool)) return TRUE; @@ -250,48 +250,48 @@ return TRUE; } -Boolean objects_with_finalizer_pool_is_empty(GC *gc) +Boolean obj_with_fin_pool_is_empty(GC *gc) { - return pool_has_no_reference(gc->finalizer_weakref_metadata->objects_with_finalizer_pool); + return pool_has_no_ref(gc->finref_metadata->obj_with_fin_pool); } -Boolean finalizable_objects_pool_is_empty(GC *gc) +Boolean finalizable_obj_pool_is_empty(GC *gc) { - return pool_has_no_reference(gc->finalizer_weakref_metadata->finalizable_objects_pool); + return pool_has_no_ref(gc->finref_metadata->finalizable_obj_pool); } -Boolean softref_set_pool_is_empty(GC *gc) +Boolean softref_pool_is_empty(GC *gc) { - return pool_has_no_reference(gc->finalizer_weakref_metadata->softref_set_pool); + return pool_has_no_ref(gc->finref_metadata->softref_pool); } -Boolean weakref_set_pool_is_empty(GC *gc) +Boolean weakref_pool_is_empty(GC *gc) { - return pool_has_no_reference(gc->finalizer_weakref_metadata->weakref_set_pool); + return pool_has_no_ref(gc->finref_metadata->weakref_pool); } -Boolean phanref_set_pool_is_empty(GC *gc) +Boolean phanref_pool_is_empty(GC *gc) { - return pool_has_no_reference(gc->finalizer_weakref_metadata->phanref_set_pool); + return pool_has_no_ref(gc->finref_metadata->phanref_pool); } -Boolean finalizer_weakref_repset_pool_is_empty(GC *gc) +Boolean finref_repset_pool_is_empty(GC *gc) { - return pool_has_no_reference(gc->finalizer_weakref_metadata->repset_pool); + return pool_has_no_ref(gc->finref_metadata->repset_pool); } -static inline void finalizer_weakref_metadata_clear_pool(Pool *pool) +static inline void finref_metadata_clear_pool(Pool *pool) { while(Vector_Block* block = pool_get_entry(pool)) { vector_block_clear(block); - pool_put_entry(finalizer_weakref_metadata.free_pool, block); + pool_put_entry(finref_metadata.free_pool, block); } } -void gc_clear_special_reference_pools(GC *gc) +void gc_clear_weakref_pools(GC *gc) { - finalizer_weakref_metadata_clear_pool(gc->finalizer_weakref_metadata->softref_set_pool); - finalizer_weakref_metadata_clear_pool(gc->finalizer_weakref_metadata->weakref_set_pool); - finalizer_weakref_metadata_clear_pool(gc->finalizer_weakref_metadata->phanref_set_pool); + finref_metadata_clear_pool(gc->finref_metadata->softref_pool); + finref_metadata_clear_pool(gc->finref_metadata->weakref_pool); + finref_metadata_clear_pool(gc->finref_metadata->phanref_pool); } Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h =================================================================== --- vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (revision 493420) +++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (working copy) @@ -18,8 +18,8 @@ * @author Li-Gang Wang, 2006/11/29 */ -#ifndef _FINALIZER_WEAKREF_METADATA_H_ -#define _FINALIZER_WEAKREF_METADATA_H_ +#ifndef _FINREF_METADATA_H_ +#define _FINREF_METADATA_H_ #include "../common/gc_common.h" #include "../utils/vector_block.h" @@ -27,90 +27,89 @@ #define POOL_SEGMENT_NUM 256 -typedef struct Finalizer_Weakref_Metadata{ +typedef struct Finref_Metadata{ void *pool_segments[POOL_SEGMENT_NUM]; // malloced free pool segments' 
addresses array - unsigned int next_segment_pos; // next available position in pool_segments array + unsigned int num_alloc_segs; // next available position in pool_segments array Pool *free_pool; // list of free buffers for the five pools below - Pool *objects_with_finalizer_pool; // list of objects that have finalizer; + Pool *obj_with_fin_pool; // list of objects that have finalizer; // these objects are added in when they are allocated - Pool *finalizable_objects_pool; // temporary buffer for finalizable objects identified during one single GC + Pool *finalizable_obj_pool; // temporary buffer for finalizable objects identified during one single GC - Pool *softref_set_pool; // temporary buffer for soft references identified during one single GC - Pool *weakref_set_pool; // temporary buffer for weak references identified during one single GC - Pool *phanref_set_pool; // temporary buffer for phantom references identified during one single GC + Pool *softref_pool; // temporary buffer for soft references identified during one single GC + Pool *weakref_pool; // temporary buffer for weak references identified during one single GC + Pool *phanref_pool; // temporary buffer for phantom references identified during one single GC Pool *repset_pool; // repointed reference slot sets - Vector_Block *finalizable_objects; // buffer for finalizable_objects_pool + Vector_Block *finalizable_obj_set; // buffer for finalizable_objects_pool Vector_Block *repset; // buffer for repset_pool Boolean pending_finalizers; // there are objects waiting to be finalized - Boolean pending_weak_references; // there are weak references waiting to be enqueued + Boolean pending_weakrefs; // there are weak references waiting to be enqueued unsigned int gc_referent_offset; // the referent field's offset in Reference Class -}Finalizer_Weakref_Metadata; +}Finref_Metadata; extern unsigned int get_gc_referent_offset(void); extern void set_gc_referent_offset(unsigned int offset); -extern void gc_finalizer_weakref_metadata_initialize(GC *gc); -extern void gc_finalizer_weakref_metadata_destruct(GC *gc); -extern void gc_finalizer_weakref_metadata_verify(GC *gc, Boolean is_before_gc); -extern void gc_reset_finalizer_weakref_metadata(GC *gc); -extern Vector_Block *finalizer_weakref_get_free_block(void); -extern void gc_finalizer_weakref_metadata_shrink(GC *gc); +extern void gc_finref_metadata_initialize(GC *gc); +extern void gc_finref_metadata_destruct(GC *gc); +extern void gc_finref_metadata_verify(GC *gc, Boolean is_before_gc); +extern void gc_reset_finref_metadata(GC *gc); +extern Vector_Block *finref_get_free_block(void); -extern void mutator_finalizer_add_entry(Mutator *mutator, Partial_Reveal_Object *ref); -extern void gc_finalizable_objects_add_entry(GC *gc, Partial_Reveal_Object *ref); -extern void collector_softref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref); -extern void collector_weakref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref); -extern void collector_phanref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref); -extern void finalizer_weakref_repset_add_entry(GC *gc, Partial_Reveal_Object **ref); +extern void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref); +extern void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref); +extern void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref); +extern void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref); +extern void collector_add_phanref(Collector 
*collector, Partial_Reveal_Object *ref); +extern void finref_repset_add_entry(GC *gc, Partial_Reveal_Object **ref); -extern Boolean objects_with_finalizer_pool_is_empty(GC *gc); -extern Boolean finalizable_objects_pool_is_empty(GC *gc); -extern Boolean softref_set_pool_is_empty(GC *gc); -extern Boolean weakref_set_pool_is_empty(GC *gc); -extern Boolean phanref_set_pool_is_empty(GC *gc); -extern Boolean finalizer_weakref_repset_pool_is_empty(GC *gc); +extern Boolean obj_with_fin_pool_is_empty(GC *gc); +extern Boolean finalizable_obj_pool_is_empty(GC *gc); +extern Boolean softref_pool_is_empty(GC *gc); +extern Boolean weakref_pool_is_empty(GC *gc); +extern Boolean phanref_pool_is_empty(GC *gc); +extern Boolean finref_repset_pool_is_empty(GC *gc); -extern void gc_clear_special_reference_pools(GC *gc); +extern void gc_clear_weakref_pools(GC *gc); /* called before loop of recording finalizable objects */ inline void gc_reset_finalizable_objects(GC *gc) { - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; + Finref_Metadata *metadata = gc->finref_metadata; - assert(!metadata->finalizable_objects); - metadata->finalizable_objects = pool_get_entry(metadata->free_pool); + assert(!metadata->finalizable_obj_set); + metadata->finalizable_obj_set = pool_get_entry(metadata->free_pool); } /* called after loop of recording finalizable objects */ inline void gc_put_finalizable_objects(GC *gc) { - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; + Finref_Metadata *metadata = gc->finref_metadata; - pool_put_entry(metadata->finalizable_objects_pool, metadata->finalizable_objects); - metadata->finalizable_objects = NULL; + pool_put_entry(metadata->finalizable_obj_pool, metadata->finalizable_obj_set); + metadata->finalizable_obj_set = NULL; } /* called before loop of recording repointed reference */ -inline void finalizer_weakref_reset_repset(GC *gc) +inline void finref_reset_repset(GC *gc) { - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; + Finref_Metadata *metadata = gc->finref_metadata; assert(!metadata->repset); metadata->repset = pool_get_entry(metadata->free_pool); } /* called after loop of recording repointed reference */ -inline void finalizer_weakref_put_repset(GC *gc) +inline void finref_put_repset(GC *gc) { - Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata; + Finref_Metadata *metadata = gc->finref_metadata; pool_put_entry(metadata->repset_pool, metadata->repset); metadata->repset = NULL; } -#endif // _FINALIZER_WEAKREF_METADATA_H_ +#endif // _FINREF_METADATA_H_ Index: vm/gc_gen/src/gen/gc_for_barrier.cpp =================================================================== --- vm/gc_gen/src/gen/gc_for_barrier.cpp (revision 493420) +++ vm/gc_gen/src/gen/gc_for_barrier.cpp (working copy) @@ -19,34 +19,33 @@ */ #include "../gen/gen.h" - #include "../thread/mutator.h" +#include "gc_for_barrier.h" /* All the write barrier interfaces need cleanup */ -Boolean NEED_BARRIER = TRUE; +Boolean gen_mode; -Boolean gc_requires_barriers() -{ return NEED_BARRIER; } - /* The implementations are only temporary */ static void gc_slot_write_barrier(Managed_Object_Handle *p_slot, Managed_Object_Handle p_target) { - Mutator *mutator = (Mutator *)gc_get_tls(); - GC_Gen* gc = (GC_Gen*)mutator->gc; - if( address_belongs_to_nursery((void *)p_target, gc) && - !address_belongs_to_nursery((void *)p_slot, gc)) - { + if(p_target >= nos_boundary && p_slot < nos_boundary){ + + Mutator *mutator = (Mutator *)gc_get_tls(); + assert( 
addr_belongs_to_nos(p_target) && !addr_belongs_to_nos(p_slot)); + mutator_remset_add_entry(mutator, (Partial_Reveal_Object**)p_slot); } + return; } static void gc_object_write_barrier(Managed_Object_Handle p_object) { + + if( addr_belongs_to_nos(p_object)) return; + Mutator *mutator = (Mutator *)gc_get_tls(); - GC_Gen* gc = (GC_Gen*)mutator->gc; - if( address_belongs_to_nursery((void *)p_object, gc)) return; Partial_Reveal_Object **p_slot; /* scan array object */ @@ -57,7 +56,7 @@ int32 array_length = vector_get_length((Vector_Handle) array); for (int i = 0; i < array_length; i++) { p_slot = (Partial_Reveal_Object **)vector_get_element_address_ref((Vector_Handle) array, i); - if( *p_slot != NULL && address_belongs_to_nursery((void *)*p_slot, gc)){ + if( *p_slot != NULL && addr_belongs_to_nos(*p_slot)){ mutator_remset_add_entry(mutator, p_slot); } } @@ -70,7 +69,7 @@ while (true) { p_slot = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj); if (p_slot == NULL) break; - if( address_belongs_to_nursery((void *)*p_slot, gc)){ + if( addr_belongs_to_nos(*p_slot)){ mutator_remset_add_entry(mutator, p_slot); } offset_scanner = offset_next_ref(offset_scanner); @@ -81,7 +80,7 @@ void gc_heap_wrote_object (Managed_Object_Handle p_obj_written) { - if( !NEED_BARRIER ) return; + if( !gc_is_gen_mode() ) return; if( object_has_ref_field((Partial_Reveal_Object*)p_obj_written)){ /* for array copy and object clone */ gc_object_write_barrier(p_obj_written); @@ -97,7 +96,7 @@ { *p_slot = p_target; - if( !NEED_BARRIER ) return; + if( !gc_is_gen_mode() ) return; gc_slot_write_barrier(p_slot, p_target); } Index: vm/gc_gen/src/gen/gc_for_barrier.h =================================================================== --- vm/gc_gen/src/gen/gc_for_barrier.h (revision 0) +++ vm/gc_gen/src/gen/gc_for_barrier.h (revision 0) @@ -0,0 +1,50 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#ifndef _GC_FOR_BARRIER_H_ +#define _GC_FOR_BARRIER_H_ + +#include "../jni/java_support.h" + +extern Boolean gen_mode; + +inline Boolean gc_is_gen_mode() +{ return gen_mode; } + +inline void gc_enable_gen_mode() +{ + gen_mode = TRUE; + HelperClass_set_GenMode(TRUE); +} + +inline void gc_disable_gen_mode() +{ + gen_mode = FALSE; + HelperClass_set_GenMode(FALSE); +} + +inline void gc_set_gen_mode(Boolean status) +{ + gen_mode = status; + HelperClass_set_GenMode(status); +} + +#endif /* _GC_FOR_BARRIER_H_ */ + Index: vm/gc_gen/src/gen/gen.cpp =================================================================== --- vm/gc_gen/src/gen/gen.cpp (revision 493420) +++ vm/gc_gen/src/gen/gen.cpp (working copy) @@ -21,12 +21,26 @@ #include "port_sysinfo.h" #include "gen.h" +#include "../finalizer_weakref/finalizer_weakref.h" +#include "../verify/verify_live_heap.h" +#include "../common/space_tuner.h" /* fspace size limit is not interesting. 
only for manual tuning purpose */ -unsigned int min_nos_size_bytes = 2 * MB; -unsigned int max_nos_size_bytes = 64 * MB; +unsigned int min_nos_size_bytes = 16 * MB; +unsigned int max_nos_size_bytes = 256 * MB; unsigned int NOS_SIZE = 0; +unsigned int MIN_NOS_SIZE = 0; +unsigned int MAX_NOS_SIZE = 0; +static unsigned int MINOR_ALGO = 0; +static unsigned int MAJOR_ALGO = 0; + +#ifndef STATIC_NOS_MAPPING +void* nos_boundary; +#endif + +#define RESERVE_BOTTOM ((void*)0x1000000) + static void gc_gen_get_system_info(GC_Gen *gc_gen) { gc_gen->_machine_page_size_bytes = port_vmem_page_sizes()[0]; @@ -36,73 +50,133 @@ void gc_gen_initialize(GC_Gen *gc_gen, unsigned int min_heap_size, unsigned int max_heap_size) { assert(gc_gen); + + /*Give GC a hint of gc survive ratio.*/ + gc_gen->survive_ratio = 0.2f; + + /*fixme: max_heap_size should not beyond 448 MB*/ + max_heap_size = round_down_to_size(max_heap_size, SPACE_ALLOC_UNIT); + min_heap_size = round_up_to_size(min_heap_size, SPACE_ALLOC_UNIT); assert(max_heap_size <= max_heap_size_bytes); + assert(max_heap_size > min_heap_size_bytes); - min_heap_size = round_up_to_size(min_heap_size, GC_BLOCK_SIZE_BYTES); - max_heap_size = round_up_to_size(max_heap_size, GC_BLOCK_SIZE_BYTES); - gc_gen_get_system_info(gc_gen); + min_nos_size_bytes *= gc_gen->_num_processors; + + if( MIN_NOS_SIZE ) min_nos_size_bytes = MIN_NOS_SIZE; - void *reserved_base = NULL; + unsigned int los_size = max_heap_size >> 7; + if(los_size < GC_MIN_LOS_SIZE) + los_size = GC_MIN_LOS_SIZE; + + los_size = round_down_to_size(los_size, SPACE_ALLOC_UNIT); - /* allocate memory for gc_gen */ - gc_gen->allocated_memory = NULL; - pool_create(&gc_gen->aux_pool, 0); + /* let's compute and reserve the space for committing */ - apr_status_t status = port_vmem_reserve(&gc_gen->allocated_memory, - &reserved_base, max_heap_size, - PORT_VMEM_MODE_READ | PORT_VMEM_MODE_WRITE, - gc_gen->_machine_page_size_bytes, gc_gen->aux_pool); + /* heuristic nos + mos + LOS = max, and nos*ratio = mos */ + unsigned int nos_reserve_size, nos_commit_size; + unsigned int mos_reserve_size, mos_commit_size; + unsigned int los_mos_size; - while(APR_SUCCESS != status){ - max_heap_size -= gc_gen->_machine_page_size_bytes; - status = port_vmem_reserve(&gc_gen->allocated_memory, - &reserved_base, max_heap_size, - PORT_VMEM_MODE_READ | PORT_VMEM_MODE_WRITE, - gc_gen->_machine_page_size_bytes, gc_gen->aux_pool); + + if(NOS_SIZE){ + los_mos_size = max_heap_size - NOS_SIZE; + mos_reserve_size = los_mos_size - los_size; + + nos_commit_size = NOS_SIZE; + nos_reserve_size = NOS_SIZE; + + }else{ + los_mos_size = max_heap_size; + mos_reserve_size = los_mos_size - los_size; + nos_commit_size = (unsigned int)(((float)(max_heap_size - los_size))/(1.0f + gc_gen->survive_ratio)); + nos_reserve_size = mos_reserve_size; } - assert(max_heap_size > min_heap_size_bytes); - gc_gen->reserved_heap_size = max_heap_size; + + nos_commit_size = round_down_to_size(nos_commit_size, SPACE_ALLOC_UNIT); + mos_commit_size = max_heap_size - los_size - nos_commit_size; + + /* allocate memory for gc_gen */ + void* reserved_base; + void* reserved_end; + void* nos_base; + +#ifdef STATIC_NOS_MAPPING + + assert((unsigned int)nos_boundary%SPACE_ALLOC_UNIT == 0); + nos_base = vm_reserve_mem(nos_boundary, nos_reserve_size); + if( nos_base != nos_boundary ){ + printf("Static NOS mapping: Can't reserve memory at %x for size %x for NOS.\n", nos_boundary, nos_reserve_size); + printf("Please not use static NOS mapping by undefining STATIC_NOS_MAPPING, or adjusting 
NOS_BOUNDARY value.\n"); + exit(0); + } + reserved_end = (void*)((unsigned int)nos_base + nos_reserve_size); + + void* los_mos_base = (void*)((unsigned int)nos_base - los_mos_size); + assert(!((unsigned int)los_mos_base%SPACE_ALLOC_UNIT)); + reserved_base = vm_reserve_mem(los_mos_base, los_mos_size); + while( !reserved_base || reserved_base >= nos_base){ + los_mos_base = (void*)((unsigned int)los_mos_base - SPACE_ALLOC_UNIT); + if(los_mos_base < RESERVE_BOTTOM){ + printf("Static NOS mapping: Can't allocate memory at address %x for specified size %x for MOS", reserved_base, los_mos_size); + exit(0); + } + reserved_base = vm_reserve_mem(los_mos_base, los_mos_size); + } + +#else /* STATIC_NOS_MAPPING */ + + reserved_base = vm_reserve_mem(0, max_heap_size); + while( !reserved_base ){ + printf("Non-static NOS mapping: Can't allocate memory at address %x for specified size %x", reserved_base, max_heap_size); + exit(0); + } + reserved_end = (void*)((unsigned int)reserved_base + max_heap_size); + + /* compute first time nos_boundary */ + nos_base = (void*)((unsigned int)reserved_base + mos_commit_size + los_size); + /* init nos_boundary if NOS is not statically mapped */ + nos_boundary = nos_base; + +#endif /* STATIC_NOS_MAPPING else */ + + gc_gen->reserved_heap_size = los_size + nos_reserve_size + mos_reserve_size; gc_gen->heap_start = reserved_base; - gc_gen->heap_end = (void*)((unsigned int)reserved_base + max_heap_size); + gc_gen->heap_end = reserved_end; gc_gen->blocks = (Block*)reserved_base; gc_gen->num_collections = 0; - - /* heuristic nos + mos + LOS */ - unsigned int los_size = max_heap_size >> 2; + gc_gen->time_collections = 0; + gc_gen->force_major_collect = FALSE; + gc_los_initialize(gc_gen, reserved_base, los_size); - unsigned int mos_size = max_heap_size >> 1; reserved_base = (void*)((unsigned int)reserved_base + los_size); - gc_mos_initialize(gc_gen, reserved_base, mos_size); - - unsigned int nos_size; - if(NOS_SIZE){ - assert( NOS_SIZE>=min_nos_size_bytes && NOS_SIZE<=max_nos_size_bytes); - nos_size = NOS_SIZE; - }else - nos_size = max_heap_size >> 4; - - if(nos_size < min_nos_size_bytes ) nos_size = min_nos_size_bytes; - if(nos_size > max_nos_size_bytes ) nos_size = max_nos_size_bytes; - - reserved_base = (void*)((unsigned int)reserved_base + mos_size); - gc_nos_initialize(gc_gen, reserved_base, nos_size); + gc_mos_initialize(gc_gen, reserved_base, mos_reserve_size, mos_commit_size); + gc_nos_initialize(gc_gen, nos_base, nos_reserve_size, nos_commit_size); + /* connect mos and nos, so that they can be compacted as one space */ Blocked_Space* mos = (Blocked_Space*)gc_get_mos(gc_gen); Blocked_Space* nos = (Blocked_Space*)gc_get_nos(gc_gen); Block_Header* mos_last_block = (Block_Header*)&mos->blocks[mos->num_managed_blocks-1]; Block_Header* nos_first_block = (Block_Header*)&nos->blocks[0]; mos_last_block->next = nos_first_block; - assert(space_heap_end((Space*)mos) == space_heap_start((Space*)nos)); + + nos->collect_algorithm = MINOR_ALGO; + mos->collect_algorithm = MAJOR_ALGO; + + /*Give GC a hint of space survive ratio.*/ + nos->survive_ratio = gc_gen->survive_ratio; + mos->survive_ratio = gc_gen->survive_ratio; + gc_space_tuner_initialize((GC*)gc_gen); gc_gen->committed_heap_size = space_committed_size((Space*)gc_gen->nos) + space_committed_size((Space*)gc_gen->mos) + space_committed_size((Space*)gc_gen->los); - set_native_finalizer_thread_flag(TRUE); - set_native_ref_enqueue_thread_flag(TRUE); + + set_native_finalizer_thread_flag(!IGNORE_FINREF); + 
set_native_ref_enqueue_thread_flag(!IGNORE_FINREF); return; } @@ -118,6 +192,14 @@ gc_los_destruct(gc_gen); gc_gen->los = NULL; + Space* nos = (Space*)gc_gen->nos; + Space* mos = (Space*)gc_gen->mos; + Space* los = (Space*)gc_gen->los; + + vm_unmap_mem(nos->heap_start, space_committed_size(nos)); + vm_unmap_mem(mos->heap_start, space_committed_size(mos)); + vm_unmap_mem(los->heap_start, space_committed_size(los)); + return; } @@ -132,45 +214,135 @@ void gc_set_los(GC_Gen* gc, Space* los){ gc->los = (Lspace*)los;} unsigned int gc_get_processor_num(GC_Gen* gc){ return gc->_num_processors;} + static Boolean major_collection_needed(GC_Gen* gc) { - return mspace_free_memory_size(gc->mos) < fspace_used_memory_size(gc->nos); + return space_used_memory_size((Blocked_Space*)gc->nos)*gc->survive_ratio > (space_free_memory_size((Blocked_Space*)gc->mos)); } -unsigned int gc_decide_collection_kind(GC_Gen* gc, unsigned int cause) +Boolean FORCE_FULL_COMPACT = FALSE; + +void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause) { - if(major_collection_needed(gc) || cause== GC_CAUSE_LOS_IS_FULL) - return MAJOR_COLLECTION; + /* this is for debugging. */ + gc->last_collect_kind = gc->collect_kind; + + if(gc->force_major_collect || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT) + gc->collect_kind = MAJOR_COLLECTION; + else + gc->collect_kind = MINOR_COLLECTION; + + return; +} + +void gc_decide_collection_algorithm(GC_Gen* gc, char* minor_algo, char* major_algo) +{ + if(!minor_algo){ + MINOR_ALGO = MINOR_NONGEN_FORWARD_POOL; + gc_disable_gen_mode(); + + }else{ + string_to_upper(minor_algo); + + if(!strcmp(minor_algo, "MINOR_NONGEN_FORWARD_POOL")){ + MINOR_ALGO = MINOR_NONGEN_FORWARD_POOL; + gc_disable_gen_mode(); + + }else if(!strcmp(minor_algo, "MINOR_GEN_FORWARD_POOL")){ + MINOR_ALGO = MINOR_GEN_FORWARD_POOL; + gc_enable_gen_mode(); - return MINOR_COLLECTION; + }else{ + printf("\nGC algorithm setting incorrect. Will use default value.\n"); + + } + } + + if(!major_algo){ + MAJOR_ALGO= MAJOR_COMPACT_SLIDE; + + }else{ + string_to_upper(major_algo); + + if(!strcmp(major_algo, "MAJOR_COMPACT_SLIDE")){ + MAJOR_ALGO= MAJOR_COMPACT_SLIDE; + + }else{ + printf("\nGC algorithm setting incorrect. Will use default algorithm.\n"); + + } + } + + return; + } +Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it. */ + void gc_gen_reclaim_heap(GC_Gen* gc) -{ +{ + if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE); + + int64 start_time = time_now(); + + Blocked_Space* fspace = (Blocked_Space*)gc->nos; + Blocked_Space* mspace = (Blocked_Space*)gc->mos; + mspace->num_used_blocks = mspace->free_block_idx - mspace->first_block_idx; + fspace->num_used_blocks = fspace->free_block_idx - fspace->first_block_idx; + + gc->collect_result = TRUE; + if(gc->collect_kind == MINOR_COLLECTION){ - if( gc_requires_barriers()) /* normal gen gc nos collection */ - fspace_collection(gc->nos); - else{ /* copy nos to mos for non-gen gc */ - /* we don't move mos objects in MINOR_COLLECTION. This is true for both - gen or non-gen collections, but only meaningful for non-gen GC, because - non-gen GC need mark the heap in order to find the refs from mos/los to nos. - This can save lots of reloc table space for slots having ref pointing to mos. - For gen GC, MINOR_COLLECTION doesn't really mark the heap. 
It has remsets that - have all the refs from mos/los to nos, which are actually the same thing as reloc table */ - gc->mos->move_object = FALSE; - fspace_collection(gc->nos); - gc->mos->move_object = TRUE; + /* FIXME:: move_object is only useful for nongen_slide_copy */ + gc->mos->move_object = FALSE; + + fspace_collection(gc->nos); + + gc->mos->move_object = TRUE; + - /* these are only needed for non-gen MINOR_COLLECTION, because - both mos and los will be collected (and reset) in MAJOR_COLLECTION */ - reset_mspace_after_copy_nursery(gc->mos); - reset_lspace_after_copy_nursery(gc->los); - } }else{ + /* process mos and nos together in one compaction */ mspace_collection(gc->mos); /* fspace collection is included */ lspace_collection(gc->los); + } + + if(gc->collect_result == FALSE && gc->collect_kind == MINOR_COLLECTION){ + + if(gc_is_gen_mode()) + gc_clear_remset((GC*)gc); + + /* runout mspace in minor collection */ + assert(mspace->free_block_idx == mspace->ceiling_block_idx + 1); + mspace->num_used_blocks = mspace->num_managed_blocks; + + IS_FALLBACK_COMPACTION = TRUE; + + gc_reset_collect_result((GC*)gc); + gc->collect_kind = FALLBACK_COLLECTION; + + mspace_collection(gc->mos); /* fspace collection is included */ + lspace_collection(gc->los); + + IS_FALLBACK_COMPACTION = FALSE; + + } + if( gc->collect_result == FALSE){ + printf("Out of Memory!\n"); + assert(0); + exit(0); + } + + int64 pause_time = time_now() - start_time; + + gc->time_collections += pause_time; + + if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE); + + gc_gen_adapt(gc, pause_time); + return; } Index: vm/gc_gen/src/gen/gen.h =================================================================== --- vm/gc_gen/src/gen/gen.h (revision 493420) +++ vm/gc_gen/src/gen/gen.h (working copy) @@ -21,13 +21,17 @@ #ifndef _GC_GEN_H_ #define _GC_GEN_H_ +extern unsigned int NOS_SIZE; + #include "../common/gc_common.h" #include "../thread/gc_thread.h" #include "../trace_forward/fspace.h" #include "../mark_compact/mspace.h" #include "../mark_sweep/lspace.h" #include "../finalizer_weakref/finalizer_weakref_metadata.h" - + +#define SPACE_ALLOC_UNIT ( ( GC_BLOCK_SIZE_BYTES > SYSTEM_ALLOC_UNIT) ? GC_BLOCK_SIZE_BYTES : SYSTEM_ALLOC_UNIT) + enum Write_Barrier_Kind{ WRITE_BARRIER_NIL, WRITE_BARRIER_SLOT, @@ -52,6 +56,8 @@ unsigned int reserved_heap_size; unsigned int committed_heap_size; unsigned int num_collections; + int64 time_collections; + float survive_ratio; /* mutation related info */ Mutator *mutator_list; @@ -65,21 +71,28 @@ /* metadata is the pool for rootset, markstack, etc. */ GC_Metadata* metadata; - Finalizer_Weakref_Metadata *finalizer_weakref_metadata; + Finref_Metadata *finref_metadata; + unsigned int collect_kind; /* MAJOR or MINOR */ + unsigned int last_collect_kind; + Boolean collect_result; /* succeed or fail */ + + Boolean generate_barrier; + /* FIXME:: this is wrong! 
root_set belongs to mutator */ Vector_Block* root_set; - /* mem info */ - apr_pool_t *aux_pool; - port_vmem_t *allocated_memory; + //For_LOS_extend + Space_Tuner* tuner; /* END of GC --> */ Block* blocks; Fspace *nos; Mspace *mos; Lspace *los; - + + Boolean force_major_collect; + /* system info */ unsigned int _machine_page_size_bytes; unsigned int _num_processors; @@ -92,20 +105,25 @@ void gc_gen_destruct(GC_Gen *gc); inline unsigned int gc_gen_free_memory_size(GC_Gen* gc) -{ return fspace_free_memory_size(gc->nos) + - mspace_free_memory_size(gc->mos) + - lspace_free_memory_size(gc->los); } - +{ return space_free_memory_size((Blocked_Space*)gc->nos) + + space_free_memory_size((Blocked_Space*)gc->mos) + + lspace_free_memory_size(gc->los); } + +inline unsigned int gc_gen_total_memory_size(GC_Gen* gc) +{ return space_committed_size((Space*)gc->nos) + + space_committed_size((Space*)gc->mos) + + lspace_committed_size(gc->los); } + ///////////////////////////////////////////////////////////////////////////////////////// -inline void gc_nos_initialize(GC_Gen* gc, void* start, unsigned int nos_size) -{ fspace_initialize((GC*)gc, start, nos_size); } +inline void gc_nos_initialize(GC_Gen* gc, void* start, unsigned int nos_size, unsigned int commit_size) +{ fspace_initialize((GC*)gc, start, nos_size, commit_size); } inline void gc_nos_destruct(GC_Gen* gc) { fspace_destruct(gc->nos); } -inline void gc_mos_initialize(GC_Gen* gc, void* start, unsigned int mos_size) -{ mspace_initialize((GC*)gc, start, mos_size); } +inline void gc_mos_initialize(GC_Gen* gc, void* start, unsigned int mos_size, unsigned int commit_size) +{ mspace_initialize((GC*)gc, start, mos_size, commit_size); } inline void gc_mos_destruct(GC_Gen* gc) { mspace_destruct(gc->mos); } @@ -116,12 +134,6 @@ inline void gc_los_destruct(GC_Gen* gc) { lspace_destruct(gc->los); } -inline Boolean address_belongs_to_nursery(void* addr, GC_Gen* gc) -{ return address_belongs_to_space(addr, (Space*)gc->nos); } - -extern void* nos_boundary; -extern void* los_boundary; - inline Space* space_of_addr(GC* gc, void* addr) { assert(address_belongs_to_gc_heap(addr, gc)); @@ -141,7 +153,11 @@ void gc_set_los(GC_Gen* gc, Space* los); unsigned int gc_get_processor_num(GC_Gen* gc); -unsigned int gc_decide_collection_kind(GC_Gen* gc, unsigned int cause); +void gc_decide_collection_algorithm(GC_Gen* gc, char* minor_algo, char* major_algo); +void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause); + +void gc_gen_adapt(GC_Gen* gc, int64 pause_time); + void gc_gen_reclaim_heap(GC_Gen* gc); #endif /* ifndef _GC_GEN_H_ */ Index: vm/gc_gen/src/gen/gen_adapt.cpp =================================================================== --- vm/gc_gen/src/gen/gen_adapt.cpp (revision 0) +++ vm/gc_gen/src/gen/gen_adapt.cpp (revision 0) @@ -0,0 +1,270 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include "gen.h" + +#define NOS_COPY_RESERVE_DELTA (GC_BLOCK_SIZE_BYTES<<5) + +#include + +static float Tslow = 0.0f; +static unsigned int SMax = 0; +static unsigned int last_total_free_size = 0; + +static float mini_free_ratio(float k, float m) +{ + /*fixme: the check should be proved!*/ + if(m < 0.005f) m = 0.005f; + if(k > 100.f) k = 100.f; + + float b = - (2 + 2 * k * m); + float c = k * m * m + 2 * m + 1; + float D = b * b - 4 * c; + if (D <= 0) { + //printf("output 0.8f from k: %5.3f, m: %5.3f\n", k, m); + return 0.8f; + } + float pm = sqrt (D) / 2 ; + float base = - b / 2 ; + float res = base - pm; + if (res > 1.f) res = 0.8f; + + /*fixme: the check should be proved!*/ + if (res < 0.0f) res = 0.8f; + + //printf("output %5.3f from k: %5.3f, m: %5.3f\n", res, k, m); + return res; +} + +#define GC_MOS_MIN_EXTRA_REMAIN_SIZE (36*1024*1024) +static void gc_decide_next_collect(GC_Gen* gc, int64 pause_time) +{ + Blocked_Space* fspace = (Blocked_Space*)gc->nos; + Blocked_Space* mspace = (Blocked_Space*)gc->mos; + + float survive_ratio = 0; + + unsigned int mos_free_size = space_free_memory_size(mspace); + unsigned int nos_free_size = space_free_memory_size(fspace); + unsigned int total_free_size = mos_free_size + nos_free_size; + + if(gc->collect_kind != MINOR_COLLECTION) + { + mspace->time_collections += pause_time; + + Tslow = (float)pause_time; + SMax = total_free_size; + gc->force_major_collect = FALSE; + + unsigned int major_survive_size = space_committed_size((Space*)mspace) - mos_free_size; + survive_ratio = (float)major_survive_size/(float)gc_gen_total_memory_size(gc); + mspace->survive_ratio = survive_ratio; + + }else{ + /*Give a hint to mini_free_ratio. */ + if(gc->num_collections == 1){ + /*fixme: This is only set for tuning the first warehouse!*/ + Tslow = pause_time / gc->survive_ratio; + SMax = (unsigned int)((float)gc->committed_heap_size * ( 1 - gc->survive_ratio )); + last_total_free_size = gc->committed_heap_size - gc->los->committed_heap_size; + } + + fspace->time_collections += pause_time; + unsigned int free_size_threshold; + + unsigned int minor_survive_size = last_total_free_size - total_free_size; + + float k = Tslow * fspace->num_collections/fspace->time_collections; + float m = ((float)minor_survive_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE )); + float free_ratio_threshold = mini_free_ratio(k, m); + free_size_threshold = (unsigned int)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE ); + + if ((mos_free_size + nos_free_size)< free_size_threshold) { + gc->force_major_collect = TRUE; + } + + survive_ratio = (float)minor_survive_size/(float)space_committed_size((Space*)fspace); + fspace->survive_ratio = survive_ratio; + } + + gc->survive_ratio = (gc->survive_ratio + survive_ratio)/2.0f; + + last_total_free_size = total_free_size; + + return; +} + + +Boolean gc_compute_new_space_size(GC_Gen* gc, unsigned int* mos_size, unsigned int* nos_size) +{ + Blocked_Space* fspace = (Blocked_Space*)gc->nos; + Blocked_Space* mspace = (Blocked_Space*)gc->mos; + Blocked_Space* lspace = (Blocked_Space*)gc->los; + + unsigned int new_nos_size; + unsigned int new_mos_size; + + unsigned int curr_nos_size = space_committed_size((Space*)fspace); + unsigned int used_mos_size = space_used_memory_size(mspace); + unsigned int free_mos_size = space_committed_size((Space*)mspace) - used_mos_size; + + unsigned int total_size; + +#ifdef STATIC_NOS_MAPPING + total_size = 
max_heap_size_bytes - lspace->committed_heap_size; +#else + total_size = (unsigned int)gc->heap_end - (unsigned int)mspace->heap_start; +#endif + + /* check if curr nos size is too small to shrink */ + /* + if(curr_nos_size <= min_nos_size_bytes){ + //after major, should not allow this size + assert(gc->collect_kind == MINOR_COLLECTION); + return FALSE; + } + */ + + unsigned int total_free = total_size - used_mos_size; + /* predict NOS + NOS*ratio = total_free_size */ + int nos_reserve_size; + nos_reserve_size = (int)(((float)total_free)/(1.0f + fspace->survive_ratio)); + new_nos_size = round_down_to_size((unsigned int)nos_reserve_size, SPACE_ALLOC_UNIT); +#ifdef STATIC_NOS_MAPPING + if(new_nos_size > fspace->reserved_heap_size) new_nos_size = fspace->reserved_heap_size; +#endif + if(new_nos_size > GC_MOS_MIN_EXTRA_REMAIN_SIZE) new_nos_size -= GC_MOS_MIN_EXTRA_REMAIN_SIZE ; + + new_mos_size = total_size - new_nos_size; +#ifdef STATIC_NOS_MAPPING + if(new_mos_size > mspace->reserved_heap_size) new_mos_size = mspace->reserved_heap_size; +#endif + assert(new_nos_size + new_mos_size == total_size); + *nos_size = new_nos_size; + *mos_size = new_mos_size; + return TRUE;; +} + +#ifndef STATIC_NOS_MAPPING + +void gc_gen_adapt(GC_Gen* gc, int64 pause_time) +{ + gc_decide_next_collect(gc, pause_time); + + if(NOS_SIZE) return; + + Blocked_Space* fspace = (Blocked_Space*)gc->nos; + Blocked_Space* mspace = (Blocked_Space*)gc->mos; + + unsigned int new_nos_size; + unsigned int new_mos_size; + + Boolean result = gc_compute_new_space_size(gc, &new_mos_size, &new_nos_size); + + if(!result) return; + + unsigned int curr_nos_size = space_committed_size((Space*)fspace); + + if( abs((int)new_nos_size - (int)curr_nos_size) < NOS_COPY_RESERVE_DELTA ) + return; + + /* below are ajustment */ + + nos_boundary = (void*)((unsigned int)gc->heap_end - new_nos_size); + + fspace->heap_start = nos_boundary; + fspace->blocks = (Block*)nos_boundary; + fspace->committed_heap_size = new_nos_size; + fspace->num_managed_blocks = new_nos_size >> GC_BLOCK_SHIFT_COUNT; + fspace->num_total_blocks = fspace->num_managed_blocks; + fspace->first_block_idx = ((Block_Header*)nos_boundary)->block_idx; + fspace->free_block_idx = fspace->first_block_idx; + + mspace->heap_end = nos_boundary; + mspace->committed_heap_size = new_mos_size; + mspace->num_managed_blocks = new_mos_size >> GC_BLOCK_SHIFT_COUNT; + mspace->num_total_blocks = mspace->num_managed_blocks; + mspace->ceiling_block_idx = ((Block_Header*)nos_boundary)->block_idx - 1; + + Block_Header* mos_last_block = (Block_Header*)&mspace->blocks[mspace->num_managed_blocks-1]; + assert(mspace->ceiling_block_idx == mos_last_block->block_idx); + Block_Header* nos_first_block = (Block_Header*)&fspace->blocks[0]; + /* this is redundant: mos_last_block->next = nos_first_block; */ + + HelperClass_set_NosBoundary(nos_boundary); + + return; +} + +#else /* ifndef STATIC_NOS_MAPPING */ + +void gc_gen_adapt(GC_Gen* gc, int64 pause_time) +{ + gc_decide_next_collect(gc, pause_time); + + if(NOS_SIZE) return; + + unsigned int new_nos_size; + unsigned int new_mos_size; + + Boolean result = gc_compute_new_space_size(gc, &new_mos_size, &new_nos_size); + + if(!result) return; + + Blocked_Space* fspace = (Blocked_Space*)gc->nos; + Blocked_Space* mspace = (Blocked_Space*)gc->mos; + + unsigned int curr_nos_size = space_committed_size((Space*)fspace); + + if( abs((int)new_nos_size - (int)curr_nos_size) < NOS_COPY_RESERVE_DELTA ) + return; + + unsigned int used_mos_size = 
space_used_memory_size((Blocked_Space*)mspace); + unsigned int free_mos_size = space_free_memory_size((Blocked_Space*)mspace); + + unsigned int new_free_mos_size = new_mos_size - used_mos_size; + + unsigned int curr_mos_end = (unsigned int)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; + unsigned int mos_border = (unsigned int)mspace->heap_end; + if( curr_mos_end + new_free_mos_size > mos_border){ + /* we can't let mos cross border */ + new_free_mos_size = mos_border - curr_mos_end; + } + + if(new_nos_size < curr_nos_size){ + /* lets shrink nos */ + assert(new_free_mos_size > free_mos_size); + blocked_space_shrink((Blocked_Space*)fspace, curr_nos_size - new_nos_size); + blocked_space_extend((Blocked_Space*)mspace, new_free_mos_size - free_mos_size); + }else if(new_nos_size > curr_nos_size){ + /* lets grow nos */ + assert(new_free_mos_size < free_mos_size); + blocked_space_shrink((Blocked_Space*)mspace, free_mos_size - new_free_mos_size); + blocked_space_extend((Blocked_Space*)fspace, new_nos_size - curr_nos_size); + } + + Block_Header* mos_last_block = (Block_Header*)&mspace->blocks[mspace->num_managed_blocks-1]; + Block_Header* nos_first_block = (Block_Header*)&fspace->blocks[0]; + mos_last_block->next = nos_first_block; + + return; +} + +#endif /* STATIC_NOS_MAPPING */ Index: vm/gc_gen/src/jni/helper.cpp =================================================================== --- vm/gc_gen/src/jni/helper.cpp (revision 493420) +++ vm/gc_gen/src/jni/helper.cpp (working copy) @@ -1,22 +0,0 @@ -#include -#include -#include "../thread/gc_thread.h" - - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Class: org_apache_harmony_drlvm_gc_gen_GCHelper - * Method: TLSFreeOffset - * Signature: ()I - */ -JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_TLSGCOffset(JNIEnv *e, jclass c) -{ - return (jint)tls_gc_offset; -} - -#ifdef __cplusplus -} -#endif Index: vm/gc_gen/src/jni/java_natives.cpp =================================================================== --- vm/gc_gen/src/jni/java_natives.cpp (revision 0) +++ vm/gc_gen/src/jni/java_natives.cpp (revision 0) @@ -0,0 +1,45 @@ +#include +#include +#include "open/vm_util.h" +#include "environment.h" +#include "../thread/gc_thread.h" +#include "../gen/gen.h" +#include "java_support.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Class: org_apache_harmony_drlvm_gc_gen_GCHelper + * Method: TLSFreeOffset + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_TLSGCOffset(JNIEnv *e, jclass c) +{ + return (jint)tls_gc_offset; +} + +JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getNosBoundary(JNIEnv *e, jclass c) +{ + return (jint)nos_boundary; +} + +JNIEXPORT jboolean JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getGenMode(JNIEnv *e, jclass c) +{ + return (jboolean)gc_is_gen_mode(); +} + +JNIEXPORT void JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_helperCallback(JNIEnv *e, jclass c) +{ + java_helper_inlined = TRUE; + + unsigned int obj = *(unsigned int*)c; + + Class_Handle *vm_class_ptr = (Class_Handle *)(obj + VM_Global_State::loader_env->vm_class_offset); + GCHelper_clss = *vm_class_ptr; +} + +#ifdef __cplusplus +} +#endif Index: vm/gc_gen/src/jni/java_support.cpp =================================================================== --- vm/gc_gen/src/jni/java_support.cpp (revision 0) +++ vm/gc_gen/src/jni/java_support.cpp (revision 0) @@ -0,0 +1,80 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, 
as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include +#include +#include "jit_intf.h" +#include "java_support.h" + +Class_Handle GCHelper_clss; +Boolean java_helper_inlined; + +void HelperClass_set_GenMode(Boolean status) +{ + if(!java_helper_inlined) return; + + unsigned int nfields = class_number_fields(GCHelper_clss); + unsigned int i; + for(i=0; iFindClass("GCHelper"); + jfieldID gen_mode = jni_env->GetStaticFieldID(GCHelper, "GEN_MODE", "Z"); + assert(gen_mode); + + jni_env->SetStaticBooleanField(GCHelper, gen_mode, status?JNI_TRUE:JNI_FALSE); + + hythread_suspend_disable(); +*/ + return; +} + + +void HelperClass_set_NosBoundary(void* boundary) +{ + if(!java_helper_inlined) return; + + unsigned int nfields = class_number_fields(GCHelper_clss); + unsigned int i + for(i=0; itrace_stack; + while( !vector_stack_is_empty(trace_stack)){ + p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack); + scan_object(collector, p_ref); + trace_stack = collector->trace_stack; + } + + return; +} + +/* for marking phase termination detection */ +static volatile unsigned int num_finished_collectors = 0; + +void fallback_mark_scan_heap(Collector* collector) +{ + GC* gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + assert(gc->collect_kind == FALLBACK_COLLECTION); + + /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */ + unsigned int num_active_collectors = gc->num_active_collectors; + atomic_cas32( &num_finished_collectors, 0, num_active_collectors); + + collector->trace_stack = free_task_pool_get_entry(metadata); + + Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); + + /* first step: copy all root objects to mark tasks. 
+      FIXME:: this can be done sequentially before coming here, to eliminate the atomic ops */
+  while(root_set){
+    unsigned int* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+
+      Partial_Reveal_Object* p_obj = *p_ref;
+      /* a root ref can't be NULL (a remset may have NULL ref entries, but this function is only used for major collections) */
+      assert(p_obj != NULL);
+
+      collector_tracestack_push(collector, p_ref);
+
+    }
+    root_set = pool_iterator_next(metadata->gc_rootset_pool);
+  }
+  /* put back the last trace_stack task */
+  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+
+  /* second step: iterate over the mark tasks and scan objects */
+  /* get a task buffer for the mark stack */
+  collector->trace_stack = free_task_pool_get_entry(metadata);
+
+retry:
+  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
+
+  while(mark_task){
+    unsigned int* iter = vector_block_iterator_init(mark_task);
+    while(!vector_block_iterator_end(mark_task,iter)){
+      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object **)*iter;
+      iter = vector_block_iterator_advance(mark_task,iter);
+
+      /* FIXME:: we should not let mark_task become empty while we are working on it; other collectors may want to steal from it.
+         Better to degenerate my stack into mark_task and grab another mark_task. */
+      trace_object(collector, p_ref);
+    }
+    /* this task is exhausted; put it back to the free pool and grab another one */
+    vector_stack_clear(mark_task);
+    pool_put_entry(metadata->free_task_pool, mark_task);
+    mark_task = pool_get_entry(metadata->mark_task_pool);
+  }
+
+  /* termination detection. This also serves as a barrier.
+     NOTE:: we can simply spin waiting on num_finished_collectors, because each
+     newly generated task is guaranteed to be processed eventually by the collector that generated it.
+     So the code below is only a load-balance optimization.
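+
+     To make the hand-off explicit, the barrier can be read as the following
+     sketch (same variables as in this function; an illustrative restatement,
+     not extra code to apply):
+
+       atomic_inc32(&num_finished_collectors);          // claim "I am done"
+       while(num_finished_collectors != num_active_collectors){
+         if( !pool_is_empty(metadata->mark_task_pool)){ // new work appeared
+           atomic_dec32(&num_finished_collectors);      // withdraw the claim
+           goto retry;                                  // rejoin the scan loop
+         }
+       }
+
+     A collector leaves the spin loop only when every collector has claimed
+     completion while the shared mark_task_pool stayed empty, so no marking
+     task can be lost.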
*/ + atomic_inc32(&num_finished_collectors); + while(num_finished_collectors != num_active_collectors){ + if( !pool_is_empty(metadata->mark_task_pool)){ + atomic_dec32(&num_finished_collectors); + goto retry; + } + } + + /* put back the last mark stack to the free pool */ + mark_task = (Vector_Block*)collector->trace_stack; + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + collector->trace_stack = NULL; + + return; +} Index: vm/gc_gen/src/mark_compact/mspace.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace.cpp (revision 493420) +++ vm/gc_gen/src/mark_compact/mspace.cpp (working copy) @@ -25,31 +25,11 @@ return; } -static void mspace_init_blocks(Mspace* mspace) -{ - Block* blocks = (Block*)mspace->heap_start; - Block_Header* last_block = (Block_Header*)blocks; - unsigned int start_idx = mspace->first_block_idx; - for(unsigned int i=0; i < mspace->num_managed_blocks; i++){ - Block_Header* block = (Block_Header*)&(blocks[i]); - block->free = (void*)((unsigned int)block + GC_BLOCK_HEADER_SIZE_BYTES); - block->ceiling = (void*)((unsigned int)block + GC_BLOCK_SIZE_BYTES); - block->base = block->free; - block->block_idx = i + start_idx; - block->status = BLOCK_FREE; - last_block->next = block; - last_block = block; - } - last_block->next = NULL; - mspace->blocks = blocks; - - return; -} - struct GC_Gen; extern void gc_set_mos(GC_Gen* gc, Space* space); -extern Space* gc_set_nos(GC_Gen* gc); -void mspace_initialize(GC* gc, void* start, unsigned int mspace_size) +extern Space* gc_get_nos(GC_Gen* gc); + +void mspace_initialize(GC* gc, void* start, unsigned int mspace_size, unsigned int commit_size) { Mspace* mspace = (Mspace*)STD_MALLOC( sizeof(Mspace)); assert(mspace); @@ -59,14 +39,14 @@ mspace->num_total_blocks = mspace_size >> GC_BLOCK_SHIFT_COUNT; void* reserved_base = start; - int status = port_vmem_commit(&reserved_base, mspace_size, gc->allocated_memory); - assert(status == APR_SUCCESS && reserved_base == start); + /* commit mspace mem */ + vm_commit_mem(reserved_base, commit_size); + memset(reserved_base, 0, commit_size); - memset(reserved_base, 0, mspace_size); - mspace->committed_heap_size = mspace_size; + mspace->committed_heap_size = commit_size; mspace->heap_start = reserved_base; - mspace->heap_end = (void *)((unsigned int)reserved_base + mspace->reserved_heap_size); - mspace->num_managed_blocks = mspace_size >> GC_BLOCK_SHIFT_COUNT; + mspace->heap_end = (void *)((unsigned int)reserved_base + mspace_size); + mspace->num_managed_blocks = commit_size >> GC_BLOCK_SHIFT_COUNT; mspace->first_block_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, reserved_base); mspace->ceiling_block_idx = mspace->first_block_idx + mspace->num_managed_blocks - 1; @@ -74,10 +54,12 @@ mspace->num_used_blocks = 0; mspace->free_block_idx = mspace->first_block_idx; - mspace_init_blocks(mspace); - - mspace->mark_object_func = mspace_mark_object; + space_init_blocks((Blocked_Space*)mspace); + mspace->num_collections = 0; + mspace->time_collections = 0; + mspace->survive_ratio = 0.2f; + mspace->move_object = TRUE; mspace->gc = gc; gc_set_mos((GC_Gen*)gc, (Space*)mspace); @@ -90,42 +72,78 @@ { //FIXME:: when map the to-half, the decommission start address should change mspace_destruct_blocks(mspace); - port_vmem_decommit(mspace->heap_start, mspace->committed_heap_size, mspace->gc->allocated_memory); STD_FREE(mspace); } - /* for non-gen MINOR_COLLECTION, mspace has both obj and marktable to be cleared, - because the marking 
phase will mark them, but then never touch them - - FIXME:: the marking choice between header and mark table has to be decided. - Obj header marking has advantage of idempotent, while table marking can prefetch - If we choose only one, we will not have the two version clearings: one after - MAJOR_COLLECTION, one after non-gen MINOR_COLLECTION */ - -void reset_mspace_after_copy_nursery(Mspace* mspace) -{ - /* for major collection we do nothing, the reset is done there */ - assert( mspace->gc->collect_kind == MINOR_COLLECTION ); +void mspace_block_iterator_init_free(Mspace* mspace) +{ + mspace->block_iterator = (Block_Header*)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; +} - unsigned int new_num_used = mspace->free_block_idx - mspace->first_block_idx; - unsigned int old_num_used = mspace->num_used_blocks; +//For_LOS_extend +#include "../common/space_tuner.h" +void mspace_block_iterator_init(Mspace* mspace) +{ + GC* gc = mspace->gc; + if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS){ + unsigned int tuning_blocks = ((mspace->gc)->tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT); + mspace->block_iterator = (Block_Header*)&(mspace->blocks[tuning_blocks]); + return; + } + + mspace->block_iterator = (Block_Header*)mspace->blocks; + return; +} - /* At the moment, for MINOR_COLLECTION, only non-gen collection does copying. - The generational version does forwarding */ - assert( !gc_requires_barriers()); + +Block_Header* mspace_block_iterator_get(Mspace* mspace) +{ + return (Block_Header*)mspace->block_iterator; +} + +Block_Header* mspace_block_iterator_next(Mspace* mspace) +{ + Block_Header* cur_block = (Block_Header*)mspace->block_iterator; - Block* blocks = mspace->blocks; - for(unsigned int i=0; i < old_num_used; i++){ - Block_Header* block = (Block_Header*)&(blocks[i]); - block_clear_markbits(block); + while(cur_block != NULL){ + Block_Header* next_block = cur_block->next; + + Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)&mspace->block_iterator, next_block, cur_block); + if(temp != cur_block){ + cur_block = (Block_Header*)mspace->block_iterator; + continue; + } + return cur_block; } + /* run out space blocks */ + return NULL; +} - for(unsigned int i=old_num_used; i < new_num_used; i++){ - Block_Header* block = (Block_Header*)&(blocks[i]); - block->status = BLOCK_USED; +#include "../common/fix_repointed_refs.h" + +void mspace_fix_after_copy_nursery(Collector* collector, Mspace* mspace) +{ + //the first block is not set yet + Block_Header* curr_block = mspace_block_iterator_next(mspace); + unsigned int first_block_idx = mspace->first_block_idx; + unsigned int old_num_used = mspace->num_used_blocks; + unsigned int old_free_idx = first_block_idx + old_num_used; + unsigned int new_free_idx = mspace->free_block_idx; + + /* for NOS copy, we are sure about the last block for fixing */ + Block_Header* space_end = (Block_Header*)&mspace->blocks[new_free_idx-first_block_idx]; + + while( curr_block < space_end){ + assert(curr_block->status == BLOCK_USED); + if( curr_block->block_idx < old_free_idx) + /* for blocks used before nos copy */ + block_fix_ref_after_marking(curr_block); + + else /* for blocks used for nos copy */ + block_fix_ref_after_copying(curr_block); + + curr_block = mspace_block_iterator_next(mspace); } - - mspace->num_used_blocks = new_num_used; - return; + + return; } - Index: vm/gc_gen/src/mark_compact/mspace.h =================================================================== --- vm/gc_gen/src/mark_compact/mspace.h (revision 493420) +++ 
vm/gc_gen/src/mark_compact/mspace.h (working copy) @@ -21,7 +21,6 @@ #ifndef _MSC_SPACE_H_ #define _MSC_SPACE_H_ -#include "../common/gc_block.h" #include "../thread/gc_thread.h" /* Mark-compaction space is orgnized into blocks*/ @@ -32,9 +31,11 @@ unsigned int reserved_heap_size; unsigned int committed_heap_size; unsigned int num_collections; + int64 time_collections; + float survive_ratio; + unsigned int collect_algorithm; GC* gc; Boolean move_object; - Boolean (*mark_object_func)(Mspace* space, Partial_Reveal_Object* p_obj); /* END of Space --> */ Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ @@ -48,24 +49,22 @@ unsigned int num_managed_blocks; unsigned int num_total_blocks; /* END of Blocked_Space --> */ - + + volatile Block_Header* block_iterator; + }Mspace; -void mspace_initialize(GC* gc, void* reserved_base, unsigned int mspace_size); +void mspace_initialize(GC* gc, void* reserved_base, unsigned int mspace_size, unsigned int commit_size); void mspace_destruct(Mspace* mspace); -inline Boolean mspace_has_free_block(Mspace* mspace){ return mspace->free_block_idx <= mspace->ceiling_block_idx; } -inline unsigned int mspace_free_memory_size(Mspace* mspace){ return GC_BLOCK_SIZE_BYTES * (mspace->ceiling_block_idx - mspace->free_block_idx + 1); } -inline Boolean mspace_used_memory_size(Mspace* mspace){ return GC_BLOCK_SIZE_BYTES * mspace->num_used_blocks; } - void* mspace_alloc(unsigned size, Allocator *allocator); void mspace_collection(Mspace* mspace); -void reset_mspace_after_copy_nursery(Mspace* mspace); +void mspace_block_iterator_init(Mspace* mspace); +void mspace_block_iterator_init_free(Mspace* mspace); +Block_Header* mspace_block_iterator_next(Mspace* mspace); +Block_Header* mspace_block_iterator_get(Mspace* mspace); +void mspace_fix_after_copy_nursery(Collector* collector, Mspace* mspace); -Boolean mspace_mark_object(Mspace* mspace, Partial_Reveal_Object *p_obj); -void mspace_save_reloc(Mspace* mspace, Partial_Reveal_Object** p_ref); -void mspace_update_reloc(Mspace* mspace); - #endif //#ifdef _MSC_SPACE_H_ Index: vm/gc_gen/src/mark_compact/mspace_alloc.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_alloc.cpp (revision 493420) +++ vm/gc_gen/src/mark_compact/mspace_alloc.cpp (working copy) @@ -22,13 +22,7 @@ static Boolean mspace_alloc_block(Mspace* mspace, Allocator* allocator) { - Block_Header* alloc_block = (Block_Header* )allocator->alloc_block; - /* put back the used block */ - if(alloc_block != NULL){ /* it is NULL at first time */ - assert(alloc_block->status == BLOCK_IN_USE); - alloc_block->status = BLOCK_USED; - alloc_block->free = allocator->free; - } + alloc_context_reset(allocator); /* now try to get a new block */ unsigned int old_free_idx = mspace->free_block_idx; @@ -41,27 +35,36 @@ continue; } /* ok, got one */ - alloc_block = (Block_Header*)&(mspace->blocks[allocated_idx - mspace->first_block_idx]); + Block_Header* alloc_block = (Block_Header*)&(mspace->blocks[allocated_idx - mspace->first_block_idx]); assert(alloc_block->status == BLOCK_FREE); alloc_block->status = BLOCK_IN_USE; - mspace->num_used_blocks++; - memset(alloc_block->free, 0, GC_BLOCK_BODY_SIZE_BYTES); /* set allocation context */ - allocator->free = alloc_block->free; + void* new_free = alloc_block->free; + allocator->free = new_free; + +#ifndef ALLOC_ZEROING + allocator->ceiling = alloc_block->ceiling; + memset(new_free, 0, GC_BLOCK_BODY_SIZE_BYTES); + +#else + + /* the first-time zeroing area includes block 
header, to make subsequent allocs page aligned */ + unsigned int zeroing_size = ZEROING_SIZE - GC_BLOCK_HEADER_SIZE_BYTES; + allocator->ceiling = (void*)((unsigned int)new_free + zeroing_size); + memset(new_free, 0, zeroing_size); + +#endif /* #ifndef ALLOC_ZEROING */ + + allocator->end = alloc_block->ceiling; allocator->alloc_block = (Block*)alloc_block; return TRUE; } - /* if Mspace is used for mutator allocation, here a collection should be triggered. - else if this is only for collector allocation, when code goes here, it means - Mspace is not enough to hold Nursery live objects, so the invoker of this routine - should throw out-of-memory exception. - But because in our design, we don't do any Mspace allocation during collection, this - path should never be reached. That's why we assert(0) here. */ - assert(0); + /* Mspace is out, a collection should be triggered. It can be caused by mutator allocation + And it can be caused by collector allocation during nos forwarding. */ return FALSE; } @@ -84,7 +87,7 @@ /* grab a new block */ Boolean ok = mspace_alloc_block(mspace, allocator); - assert(ok); + if(!ok) return NULL; p_return = thread_local_alloc(size, allocator); assert(p_return); Index: vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (revision 493420) +++ vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (working copy) @@ -15,21 +15,44 @@ */ /** - * @author Xiao-Feng Li, 2006/10/05 + * @author Xiao-Feng Li, 2006/12/12 */ -#include "mspace.h" -#include "../thread/collector.h" -#include "../trace_forward/fspace.h" -#include "../finalizer_weakref/finalizer_weakref.h" +#include "mspace_collect_compact.h" +Boolean IS_MOVE_COMPACT; + struct GC_Gen; Space* gc_get_nos(GC_Gen* gc); -Space* gc_get_mos(GC_Gen* gc); -Space* gc_get_los(GC_Gen* gc); -static void reset_mspace_after_compaction(Mspace* mspace) +static volatile Block_Header* next_block_for_compact; +static volatile Block_Header* next_block_for_target; + +void update_mspace_info_for_los_extension(Mspace *mspace) { + Space_Tuner *tuner = mspace->gc->tuner; + + if(tuner->kind != TRANS_FROM_MOS_TO_LOS) + return; + + unsigned int tune_size = tuner->tuning_size; + unsigned int tune_blocks = tune_size >> GC_BLOCK_SHIFT_COUNT; + + mspace->blocks = &mspace->blocks[tune_blocks]; + mspace->heap_start = mspace->blocks; + mspace->committed_heap_size -= tune_size; + mspace->reserved_heap_size -= tune_size; + mspace->first_block_idx += tune_blocks; + mspace->num_managed_blocks -= tune_blocks; + mspace->num_total_blocks -= tune_blocks; + if(mspace->num_used_blocks > tune_blocks) + mspace->num_used_blocks -= tune_blocks; + else + mspace->num_used_blocks = 0; +} + +void mspace_reset_after_compaction(Mspace* mspace) +{ unsigned int old_num_used = mspace->num_used_blocks; unsigned int new_num_used = mspace->free_block_idx - mspace->first_block_idx; unsigned int num_used = old_num_used>new_num_used? 
old_num_used:new_num_used; @@ -38,8 +61,12 @@ unsigned int i; for(i=0; i < num_used; i++){ Block_Header* block = (Block_Header*)&(blocks[i]); - block_clear_mark_table(block); block->status = BLOCK_USED; + block->free = block->new_free; + block->new_free = block->base; + block->src = NULL; + block->next_src = NULL; + assert(!block->dest_counter); if(i >= new_num_used){ block->status = BLOCK_FREE; @@ -51,15 +78,16 @@ /* we should clear the remaining blocks which are set to be BLOCK_COMPACTED or BLOCK_TARGET */ for(; i < mspace->num_managed_blocks; i++){ Block_Header* block = (Block_Header*)&(blocks[i]); - assert(block->status& (BLOCK_COMPACTED|BLOCK_TARGET)); + assert(block->status& (BLOCK_COMPACTED|BLOCK_TARGET|BLOCK_DEST)); block->status = BLOCK_FREE; + block->src = NULL; + block->next_src = NULL; + block->free = GC_BLOCK_BODY(block); + assert(!block->dest_counter); } } -static volatile Block_Header* next_block_for_compact; -static volatile Block_Header* next_block_for_target; - -static void gc_reset_block_for_collectors(GC* gc, Mspace* mspace) +void gc_reset_block_for_collectors(GC* gc, Mspace* mspace) { unsigned int free_blk_idx = mspace->first_block_idx; for(unsigned int i=0; inum_active_collectors; i++){ @@ -70,49 +98,62 @@ collector->cur_target_block = NULL; collector->cur_compact_block = NULL; } - mspace->free_block_idx = free_blk_idx+1; + mspace->free_block_idx = free_blk_idx+1; return; } -static void gc_init_block_for_collectors(GC* gc, Mspace* mspace) +void gc_init_block_for_collectors(GC* gc, Mspace* mspace) { unsigned int i; Block_Header* block; - for(i=0; inum_active_collectors; i++){ - Collector* collector = gc->collectors[i]; + Space_Tuner* tuner = gc->tuner; + /*Needn't change LOS size.*/ + if(tuner->kind == TRANS_NOTHING){ + for(i=0; inum_active_collectors; i++){ + Collector* collector = gc->collectors[i]; + block = (Block_Header*)&mspace->blocks[i]; + collector->cur_target_block = block; + collector->cur_compact_block = block; + block->status = BLOCK_TARGET; + } + block = (Block_Header*)&mspace->blocks[i]; - collector->cur_target_block = block; - collector->cur_compact_block = block; - block->status = BLOCK_TARGET; + next_block_for_target = block; + next_block_for_compact = block; + return; } - - block = (Block_Header*)&mspace->blocks[i]; - next_block_for_target = block; - next_block_for_compact = block; - return; + //For_LOS_extend + else if(tuner->kind == TRANS_FROM_MOS_TO_LOS) + { + Blocked_Space* nos = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); + Block_Header* nos_last_block = (Block_Header*)&nos->blocks[nos->num_managed_blocks-1]; + Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0]; + unsigned int trans_blocks = (tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT); + nos_last_block->next = mos_first_block; + ((Block_Header*)&(mspace->blocks[trans_blocks - 1]))->next = NULL; + + for(i=0; i< gc->num_active_collectors; i++){ + Collector* collector = gc->collectors[i]; + block = (Block_Header*)&mspace->blocks[i + trans_blocks]; + collector->cur_target_block = block; + collector->cur_compact_block = block; + block->status = BLOCK_TARGET; + } + + block = (Block_Header*)&mspace->blocks[i+trans_blocks]; + next_block_for_target = block; + next_block_for_compact = block; + return; + } } -static Boolean gc_collection_result(GC* gc) -{ - Boolean result = TRUE; - for(unsigned i=0; inum_active_collectors; i++){ - Collector* collector = gc->collectors[i]; - result &= collector->result; - } - return result; -} - -static Block_Header* mspace_get_first_compact_block(Mspace* 
mspace) +Block_Header* mspace_get_first_compact_block(Mspace* mspace) { return (Block_Header*)mspace->blocks; } -static Block_Header* mspace_get_first_target_block(Mspace* mspace) +Block_Header* mspace_get_first_target_block(Mspace* mspace) { return (Block_Header*)mspace->blocks; } - -static Block_Header* mspace_get_next_compact_block1(Mspace* mspace, Block_Header* block) -{ return block->next; } - -static Block_Header* mspace_get_next_compact_block(Collector* collector, Mspace* mspace) +Block_Header* mspace_get_next_compact_block(Collector* collector, Mspace* mspace) { /* firstly put back the compacted block. If it's not BLOCK_TARGET, it will be set to BLOCK_COMPACTED */ unsigned int block_status = collector->cur_compact_block->status; @@ -142,7 +183,7 @@ return NULL; } -static Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace) +Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace) { Block_Header* cur_target_block = (Block_Header*)next_block_for_target; @@ -167,7 +208,8 @@ /* nos is higher than mos, we cant use nos block for compaction target */ Block_Header* mspace_heap_end = (Block_Header*)space_heap_end((Space*)mspace); while( cur_target_block < mspace_heap_end ){ - assert( cur_target_block <= collector->cur_compact_block); + //For_LOS_extend + //assert( cur_target_block <= collector->cur_compact_block); Block_Header* next_target_block = cur_target_block->next; volatile unsigned int* p_block_status = &cur_target_block->status; unsigned int block_status = cur_target_block->status; @@ -199,195 +241,48 @@ return NULL; } -Boolean mspace_mark_object(Mspace* mspace, Partial_Reveal_Object *p_obj) -{ -#ifdef _DEBUG - if( obj_is_marked_in_vt(p_obj)) return FALSE; -#endif - - obj_mark_in_vt(p_obj); - - unsigned int obj_word_index = OBJECT_WORD_INDEX_TO_MARKBIT_TABLE(p_obj); - unsigned int obj_offset_in_word = OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj); - - unsigned int *p_word = &(GC_BLOCK_HEADER(p_obj)->mark_table[obj_word_index]); - unsigned int word_mask = (1<cur_compact_block; - Block_Header* dest_block = collector->cur_target_block; - - void* dest_addr = GC_BLOCK_BODY(dest_block); - - while( curr_block ){ - unsigned int mark_bit_idx; - Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &mark_bit_idx); - - while( p_obj ){ - assert( obj_is_marked_in_vt(p_obj)); - - unsigned int obj_size = vm_object_size(p_obj); - - if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){ - dest_block->free = dest_addr; - dest_block = mspace_get_next_target_block(collector, mspace); - if(dest_block == NULL){ - collector->result = FALSE; - return; - } - - dest_addr = GC_BLOCK_BODY(dest_block); - } - assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block)); - - Obj_Info_Type obj_info = get_obj_info(p_obj); - if( obj_info != 0 ) { - collector->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info)); - } - - set_forwarding_pointer_in_obj_info(p_obj, dest_addr); - - /* FIXME: should use alloc to handle alignment requirement */ - dest_addr = (void *) WORD_SIZE_ROUND_UP((unsigned int) dest_addr + obj_size); - p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx); - - } - curr_block = mspace_get_next_compact_block(collector, mspace); - } - - return; -} - -#include "../verify/verify_live_heap.h" - -static void mspace_sliding_compact(Collector* collector, Mspace* mspace) +void mspace_collection(Mspace* mspace) { - Block_Header* curr_block = 
mspace_get_first_compact_block(mspace); - - while( curr_block ){ - unsigned int mark_bit_idx; - Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &mark_bit_idx); - - while( p_obj ){ - assert( obj_is_marked_in_vt(p_obj)); - obj_unmark_in_vt(p_obj); - - unsigned int obj_size = vm_object_size(p_obj); - Partial_Reveal_Object *p_target_obj = get_forwarding_pointer_in_obj_info(p_obj); - if( p_obj != p_target_obj){ - memmove(p_target_obj, p_obj, obj_size); + // printf("Major Collection "); - if (verify_live_heap) - /* we forwarded it, we need remember it for verification */ - event_collector_move_obj(p_obj, p_target_obj, collector); - } - - set_obj_info(p_target_obj, 0); - - p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx); - } - - curr_block = mspace_get_next_compact_block1(mspace, curr_block); - } + mspace->num_collections++; - return; -} + GC* gc = mspace->gc; -void gc_update_repointed_refs(Collector* collector); + /* init the pool before starting multiple collectors */ -static volatile unsigned int num_marking_collectors = 0; -static volatile unsigned int num_installing_collectors = 0; + pool_iterator_init(gc->metadata->gc_rootset_pool); -static void mark_compact_mspace(Collector* collector) -{ - GC* gc = collector->gc; - Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc); - Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc); + /* dual mark bits will consume two bits in obj info, that makes current + header hashbits only 5 bits. That's not enough. We implement on-demend + hash field allocation in obj during moving. move_compact doesn't support it. + Dual mark bits is used for MINOR_NONGEN_FORWARD algorithm */ - /* Pass 1: mark all live objects in heap, and save all the slots that - have references that are going to be repointed */ - unsigned int num_active_collectors = gc->num_active_collectors; - - /* Pass 1: mark all live objects in heap, and save all the slots that - have references that are going to be repointed */ - unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); - - mark_scan_heap(collector); - - old_num = atomic_inc32(&num_marking_collectors); - if( ++old_num == num_active_collectors ){ - /* last collector's world here */ - /* prepare for next phase */ - gc_init_block_for_collectors(gc, mspace); + //For_LOS_extend + if(gc->tuner->kind != TRANS_NOTHING){ + // printf("for LOS extention"); + collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); - collector_process_finalizer_weakref(collector); - - /* let other collectors go */ - num_marking_collectors++; - } - - while(num_marking_collectors != num_active_collectors + 1); - - /* Pass 2: assign target addresses for all to-be-moved objects */ - atomic_cas32( &num_installing_collectors, 0, num_active_collectors+1); + }else if (gc->collect_kind == FALLBACK_COLLECTION){ + // printf("for Fallback"); + collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); - mspace_compute_object_target(collector, mspace); - - old_num = atomic_inc32(&num_installing_collectors); - if( ++old_num == num_active_collectors ){ - /* single thread world */ - if(!gc_collection_result(gc)){ - printf("Out of Memory!\n"); - assert(0); /* mos is out. 
FIXME:: throw exception */ + }else{ + + switch(mspace->collect_algorithm){ + case MAJOR_COMPACT_SLIDE: + collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); + break; + + default: + printf("\nThe speficied major collection algorithm doesn't exist!\n"); + exit(0); + break; } - gc_reset_block_for_collectors(gc, mspace); - num_installing_collectors++; - } - - while(num_installing_collectors != num_active_collectors + 1); - /* FIXME:: temporary. let only one thread go forward */ - if( collector->thread_handle != 0 ) return; - - /* Pass 3: update all references whose objects are to be moved */ - gc_update_repointed_refs(collector); - - gc_post_process_finalizer_weakref(gc); - - /* Pass 4: do the compaction and reset blocks */ - next_block_for_compact = mspace_get_first_compact_block(mspace); - mspace_sliding_compact(collector, mspace); - /* FIXME:: should be collector_restore_obj_info(collector) */ - gc_restore_obj_info(gc); + } - reset_mspace_after_compaction(mspace); - reset_fspace_for_allocation(fspace); - - return; -} - -void mspace_collection(Mspace* mspace) -{ - mspace->num_collections++; - - GC* gc = mspace->gc; - - pool_iterator_init(gc->metadata->gc_rootset_pool); - - collector_execute_task(gc, (TaskType)mark_compact_mspace, (Space*)mspace); - + // printf("...end.\n"); return; } + Index: vm/gc_gen/src/mark_compact/mspace_collect_compact.h =================================================================== --- vm/gc_gen/src/mark_compact/mspace_collect_compact.h (revision 0) +++ vm/gc_gen/src/mark_compact/mspace_collect_compact.h (revision 0) @@ -0,0 +1,49 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @author Xiao-Feng Li, 2006/12/12 + */ + +#ifndef _MSPACE_COLLECT_COMPACT_H_ +#define _MSPACE_COLLECT_COMPACT_H_ + +#include "mspace.h" +#include "../thread/collector.h" +#include "../common/space_tuner.h" + +void gc_reset_block_for_collectors(GC* gc, Mspace* mspace); +void gc_init_block_for_collectors(GC* gc, Mspace* mspace); + +void update_mspace_info_for_los_extension(Mspace* mspace); +void mspace_reset_after_compaction(Mspace* mspace); + +Block_Header* mspace_get_first_compact_block(Mspace* mspace); +Block_Header* mspace_get_first_target_block(Mspace* mspace); +Block_Header* mspace_get_next_compact_block(Collector* collector, Mspace* mspace); +Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace); + +void slide_compact_mspace(Collector* collector); +void move_compact_mspace(Collector* collector); + +void fallback_mark_scan_heap(Collector* collector); + +void mspace_extend_compact(Collector *collector); + +extern Boolean IS_MOVE_COMPACT; + +#endif /* _MSPACE_COLLECT_COMPACT_H_ */ + Index: vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (revision 0) +++ vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (revision 0) @@ -0,0 +1,305 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include "mspace_collect_compact.h" +#include "../trace_forward/fspace.h" +#include "../mark_sweep/lspace.h" +#include "../finalizer_weakref/finalizer_weakref.h" +#include "../gen/gen.h" +#include "../common/fix_repointed_refs.h" +#include "../common/interior_pointer.h" + +#define SPACE_ALLOC_UNIT ( ( GC_BLOCK_SIZE_BYTES > SYSTEM_ALLOC_UNIT) ? 
GC_BLOCK_SIZE_BYTES : SYSTEM_ALLOC_UNIT) + +static volatile unsigned int mem_changed_size; +static volatile Block *mos_first_new_block = NULL; +static volatile Block *nos_first_free_block = NULL; +static volatile Block *first_block_to_move = NULL; + +static void set_first_and_end_block_to_move(Collector *collector) +{ + GC_Gen *gc_gen = (GC_Gen *)collector->gc; + Mspace *mspace = gc_gen->mos; + Fspace *fspace = gc_gen->nos; + + assert(mem_changed_size % SPACE_ALLOC_UNIT); + + unsigned int mos_added_block_num = mem_changed_size >> GC_BLOCK_SHIFT_COUNT; // block number needing moving + nos_first_free_block = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; + first_block_to_move = nos_first_free_block - mos_added_block_num; + if(first_block_to_move < (Block *)space_heap_start((Space *)fspace)) + first_block_to_move = (Block *)space_heap_start((Space *)fspace); +} + +static unsigned int fspace_shrink(Fspace *fspace, Block *nos_first_free_block) +{ + void *committed_nos_end = (void *)((unsigned int)space_heap_start((Space *)fspace) + fspace->committed_heap_size); + + unsigned int nos_used_size = (unsigned int)nos_first_free_block - (unsigned int)fspace->heap_start; + unsigned int nos_free_size = (unsigned int)committed_nos_end - (unsigned int)nos_first_free_block; + unsigned int decommit_size = (nos_used_size <= nos_free_size) ? nos_used_size : nos_free_size; + assert(decommit_size); + + void *decommit_base = (void *)((unsigned int)committed_nos_end - decommit_size); + decommit_base = (void *)round_down_to_size((unsigned int)decommit_base, SPACE_ALLOC_UNIT); + if(decommit_base < (void *)nos_first_free_block) + decommit_base = (void *)((unsigned int)decommit_base + SPACE_ALLOC_UNIT); + decommit_size = (unsigned int)committed_nos_end - (unsigned int)decommit_base; + assert(decommit_size && !(decommit_size % SPACE_ALLOC_UNIT)); + + Boolean result = vm_decommit_mem(decommit_base, decommit_size); + assert(result == TRUE); + + fspace->committed_heap_size = (unsigned int)decommit_base - (unsigned int)fspace->heap_start; + fspace->num_managed_blocks = fspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT; + + Block_Header *new_last_block = (Block_Header *)&fspace->blocks[fspace->num_managed_blocks - 1]; + fspace->ceiling_block_idx = new_last_block->block_idx; + new_last_block->next = NULL; + + return decommit_size; +} + +static Block *mspace_extend(Mspace *mspace, Fspace *fspace, unsigned int commit_size) +{ + assert(commit_size && !(commit_size % SPACE_ALLOC_UNIT)); + + void *committed_mos_end = (void *)((unsigned int)space_heap_start((Space *)mspace) + mspace->committed_heap_size); + void *commit_base = committed_mos_end; + assert((unsigned int)committed_mos_end % SPACE_ALLOC_UNIT); + + void *result = vm_commit_mem(commit_base, commit_size); + assert(result == commit_base); + + void *new_end = (void *)((unsigned int)commit_base + commit_size); + mspace->committed_heap_size = (unsigned int)new_end - (unsigned int)mspace->heap_start; + + /* init the grown blocks */ + Block_Header *block = (Block_Header *)commit_base; + Block_Header *last_block = (Block_Header *)((Block *)block -1); + unsigned int start_idx = last_block->block_idx + 1; + unsigned int i; + for(i=0; block < (Block_Header *)new_end; i++){ + block_init(block); + block->block_idx = start_idx + i; + last_block->next = block; + last_block = block; + block = (Block_Header *)((Block *)block + 1); + } + last_block->next = (Block_Header *)space_heap_start((Space *)fspace); + mspace->ceiling_block_idx = last_block->block_idx; + 
mspace->num_managed_blocks = mspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT; + + return (Block *)commit_base; +} + +static void mspace_block_iter_init_for_extension(Mspace *mspace, Block_Header *start_block) +{ + mspace->block_iterator = start_block; +} + +static Block_Header *mspace_block_iter_next_for_extension(Mspace *mspace, Block_Header *end_block) +{ + Block_Header *cur_block = (Block_Header *)mspace->block_iterator; + + while(cur_block && cur_block < end_block){ + Block_Header *next_block = cur_block->next; + + Block_Header *temp = (Block_Header *)atomic_casptr((volatile void **)&mspace->block_iterator, next_block, cur_block); + if(temp != cur_block){ + cur_block = (Block_Header*)mspace->block_iterator; + continue; + } + return cur_block; + } + /* run out space blocks */ + return NULL; +} + +static void mspace_recompute_object_target(Collector *collector) +{ + GC_Gen *gc_gen = (GC_Gen *)collector->gc; + Mspace *mspace = gc_gen->mos; + Fspace *fspace = gc_gen->nos; + + unsigned int block_diff = first_block_to_move - mos_first_new_block; + unsigned int addr_diff = block_diff << GC_BLOCK_SHIFT_COUNT; + + assert(!collector->rem_set); + collector->rem_set = free_set_pool_get_entry(collector->gc->metadata); + Block_Header *dest_block; + while(Block_Header *block = mspace_block_iter_next_for_extension(mspace, (Block_Header *)nos_first_free_block)){ + Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)block->base; + Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->new_free; // new_free or free depends on whether reset is done or not + unsigned int dest_addr; + + while(p_obj < block_end){ + dest_addr = (unsigned int)p_obj - addr_diff; + Obj_Info_Type obj_info = get_obj_info(p_obj); + if(obj_info != 0){ + collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr); + collector_remset_add_entry(collector, (Partial_Reveal_Object **)obj_info); + } + obj_set_fw_in_oi(p_obj, (void *)dest_addr); + p_obj = obj_end(p_obj); + } + dest_block = (Block_Header *)((Block *)block - block_diff); + dest_block->new_free = (void *)dest_addr; + } + pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set); + collector->rem_set = NULL; + + if(first_block_to_move == (Block *)space_heap_start((Space *)fspace)){ + unsigned int free_block_idx = dest_block->block_idx + 1; + unsigned int cur_free_idx = (unsigned int)mspace->free_block_idx; + while(free_block_idx > cur_free_idx){ + atomic_cas32(&mspace->free_block_idx, free_block_idx, cur_free_idx); + cur_free_idx = (unsigned int)mspace->free_block_idx; + } + } else { + mspace->free_block_idx = ((Block_Header *)first_block_to_move)->block_idx; + } +} + + +static void mspace_refix_repointed_refs(Collector *collector, Mspace *mspace) +{ + Block_Header *mspace_first_free_block = (Block_Header *)&mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; + + while(Block_Header *block = mspace_block_iter_next_for_extension(mspace, mspace_first_free_block)){ + Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)block->base; + Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->new_free; // new_free or free depends on whether reset is done or not + while(p_obj < block_end){ + object_fix_ref_slots(p_obj); + p_obj = obj_end(p_obj); + } + } +} + +static void move_compacted_blocks_to_mspace(Collector *collector) +{ + GC_Gen *gc_gen = (GC_Gen *)collector->gc; + Mspace *mspace = gc_gen->mos; + Fspace *fspace = gc_gen->nos; + + while(Block_Header *block = 
mspace_block_iter_next_for_extension(mspace, (Block_Header *)nos_first_free_block)){ + Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)block->base; + Block_Header *dest_block = GC_BLOCK_HEADER(obj_get_fw_in_oi(p_obj)); + void *src_base = (void *)p_obj; + void *block_end = block->new_free; // new_free or free depends on whether reset is done or not + unsigned int size = (unsigned int)block_end - (unsigned int)src_base; + memmove(dest_block->base, src_base, size); + } +} + +static volatile unsigned int num_space_changing_collectors = 0; +static volatile unsigned int num_recomputing_collectors = 0; +static volatile unsigned int num_refixing_collectors = 0; +static volatile unsigned int num_moving_collectors = 0; +static volatile unsigned int num_restoring_collectors = 0; + +void mspace_extend_compact(Collector *collector) +{ + GC_Gen *gc_gen = (GC_Gen *)collector->gc; + Mspace *mspace = gc_gen->mos; + Fspace *fspace = gc_gen->nos; + Lspace *lspace = gc_gen->los; + + unsigned int num_active_collectors = gc_gen->num_active_collectors; + unsigned int old_num; + + assert(gc_gen->tuner->kind == TRANS_NOTHING); + + Block *nos_first_block = fspace->blocks; + Block *nos_first_free_block = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; + assert(nos_first_free_block > nos_first_block); + + while(nos_first_free_block > nos_first_block){ + + atomic_cas32( &num_space_changing_collectors, 0, num_active_collectors + 1); + old_num = atomic_inc32(&num_space_changing_collectors); + if( old_num == 0 ){ + mem_changed_size = fspace_shrink(fspace, nos_first_free_block); + mos_first_new_block = mspace_extend(mspace, fspace, mem_changed_size); + + set_first_and_end_block_to_move(collector); + mspace_block_iter_init_for_extension(mspace, (Block_Header *)first_block_to_move); + + num_restoring_collectors++; + } + while(num_restoring_collectors != num_active_collectors + 1); + + + atomic_cas32( &num_recomputing_collectors, 0, num_active_collectors+1); + + mspace_recompute_object_target(collector); + + old_num = atomic_inc32(&num_recomputing_collectors); + if( ++old_num == num_active_collectors ){ + /* init the iterator: prepare for refixing */ + mspace_block_iter_init_for_extension(mspace, (Block_Header *)mspace->blocks); + num_recomputing_collectors++; + } + while(num_recomputing_collectors != num_active_collectors + 1); + + + atomic_cas32( &num_refixing_collectors, 0, num_active_collectors+1); + + mspace_refix_repointed_refs(collector, mspace); + + old_num = atomic_inc32(&num_refixing_collectors); + if( ++old_num == num_active_collectors ){ + /* init the iterator: prepare for refixing */ + lspace_fix_repointed_refs(collector, lspace); + gc_fix_rootset(collector); + + mspace_block_iter_init_for_extension(mspace, (Block_Header *)first_block_to_move); + + num_refixing_collectors++; + } + while(num_refixing_collectors != num_active_collectors + 1); + + + atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1); + + move_compacted_blocks_to_mspace(collector); + + old_num = atomic_inc32(&num_moving_collectors); + if( ++old_num == num_active_collectors ){ + /* init the iterator: prepare for refixing */ + lspace_fix_repointed_refs(collector, lspace); + gc_fix_rootset(collector); + + num_moving_collectors++; + } + while(num_moving_collectors != num_active_collectors + 1); + + + atomic_cas32( &num_restoring_collectors, 0, num_active_collectors); + + collector_restore_obj_info(collector); + + atomic_inc32(&num_restoring_collectors); + while(num_restoring_collectors != num_active_collectors); 
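+
+    /* Reader's note (illustrative summary, not part of the patch): one
+     * iteration of this loop proceeds in phases, each separated by the same
+     * barrier idiom used throughout this function:
+     *   1. a single collector shrinks NOS (fspace_shrink) and extends MOS
+     *      (mspace_extend) by the same amount, then publishes the block range
+     *      that has to slide down (first_block_to_move .. nos_first_free_block);
+     *   2. all collectors recompute forwarding targets for objects in that
+     *      range (mspace_recompute_object_target);
+     *   3. all collectors re-fix repointed refs in MOS (mspace_refix_repointed_refs),
+     *      and the last one also fixes LOS refs and the root set;
+     *   4. all collectors memmove the compacted blocks into the newly
+     *      committed MOS blocks (move_compacted_blocks_to_mspace), then
+     *      restore the saved obj_info headers (collector_restore_obj_info).
+     * The barrier idiom itself, with phase_counter and do_phase_work() as
+     * placeholders for the per-phase counters and work functions used above, is:
+     *
+     *   atomic_cas32(&phase_counter, 0, num_active_collectors + 1);  // reset once
+     *   do_phase_work(collector);                                    // every collector
+     *   old_num = atomic_inc32(&phase_counter);
+     *   if( ++old_num == num_active_collectors ){
+     *     ... single-threaded epilogue ...
+     *     phase_counter++;                                           // release the others
+     *   }
+     *   while(phase_counter != num_active_collectors + 1);
+     */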
+ + + nos_first_free_block = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; + } +} Index: vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (revision 0) +++ vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (revision 0) @@ -0,0 +1,581 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include "mspace_collect_compact.h" +#include "../trace_forward/fspace.h" +#include "../mark_sweep/lspace.h" +#include "../finalizer_weakref/finalizer_weakref.h" + +//#define VERIFY_SLIDING_COMPACT + +struct GC_Gen; +Space* gc_get_nos(GC_Gen* gc); +Space* gc_get_mos(GC_Gen* gc); +Space* gc_get_los(GC_Gen* gc); + +#ifdef VERIFY_SLIDING_COMPACT +typedef struct { + unsigned int addr; + unsigned int dest_counter; + unsigned int collector; + Block_Header *src_list[1021]; +} Block_Verify_Info; +static Block_Verify_Info block_info[32*1024][2]; +#endif + +static volatile Block_Header *last_block_for_dest; + +static void mspace_compute_object_target(Collector* collector, Mspace* mspace) +{ + Block_Header *curr_block = collector->cur_compact_block; + Block_Header *dest_block = collector->cur_target_block; + void *dest_addr = dest_block->base; + Block_Header *last_src; + +#ifdef VERIFY_SLIDING_COMPACT + block_info[(Block*)dest_block-mspace->blocks][0].collector = (unsigned int)collector->thread_handle + 1; +#endif + + assert(!collector->rem_set); + collector->rem_set = free_set_pool_get_entry(collector->gc->metadata); + + while( curr_block ){ + void* start_pos; + Partial_Reveal_Object *first_obj = block_get_first_marked_obj_prefetch_next(curr_block, &start_pos); + if(first_obj){ + ++curr_block->dest_counter; + if(!dest_block->src) + dest_block->src = first_obj; + else + last_src->next_src = first_obj; + last_src = curr_block; + } + Partial_Reveal_Object* p_obj = first_obj; + + while( p_obj ){ + assert( obj_is_marked_in_vt(p_obj)); + + unsigned int obj_size = (unsigned int)start_pos - (unsigned int)p_obj; + + if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){ + dest_block->new_free = dest_addr; + dest_block = mspace_get_next_target_block(collector, mspace); + if(dest_block == NULL){ + collector->result = FALSE; + return; + } + dest_addr = dest_block->base; + dest_block->src = p_obj; + last_src = curr_block; + if(p_obj != first_obj) + ++curr_block->dest_counter; + +#ifdef VERIFY_SLIDING_COMPACT + block_info[(Block*)dest_block-mspace->blocks][0].collector = (unsigned int)collector->thread_handle + 1; +#endif + } + assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block)); + + Obj_Info_Type obj_info = get_obj_info(p_obj); + + if( obj_info != 0 ) { + collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr); + collector_remset_add_entry(collector, 
(Partial_Reveal_Object **)obj_info); + } + + obj_set_fw_in_oi(p_obj, dest_addr); + + /* FIXME: should use alloc to handle alignment requirement */ + dest_addr = (void *)((unsigned int) dest_addr + obj_size); + p_obj = block_get_next_marked_obj_prefetch_next(curr_block, &start_pos); + } + + curr_block = mspace_get_next_compact_block(collector, mspace); + + } + + pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set); + collector->rem_set = NULL; + dest_block->new_free = dest_addr; + + Block_Header *cur_last_dest = (Block_Header *)last_block_for_dest; + while(dest_block > last_block_for_dest){ + atomic_casptr((volatile void **)&last_block_for_dest, dest_block, cur_last_dest); + cur_last_dest = (Block_Header *)last_block_for_dest; + } + + return; +} + +#include "../common/fix_repointed_refs.h" + +static void mspace_fix_repointed_refs(Collector* collector, Mspace* mspace) +{ + Block_Header* curr_block = mspace_block_iterator_next(mspace); + + /* for MAJOR_COLLECTION, we must iterate over all compact blocks */ + while( curr_block){ + block_fix_ref_after_repointing(curr_block); + curr_block = mspace_block_iterator_next(mspace); + } + + return; +} + +typedef struct{ + volatile Block_Header *block; + SpinLock lock; +} Cur_Dest_Block; + +static Cur_Dest_Block current_dest_block; +static volatile Block_Header *next_block_for_dest; + +static inline Block_Header *set_next_block_for_dest(Mspace *mspace) +{ + assert(!next_block_for_dest); + + Block_Header *block = mspace_block_iterator_get(mspace); + + if(block->status != BLOCK_DEST) + return block; + + while(block->status == BLOCK_DEST) + block = block->next; + next_block_for_dest = block; + return block; +} + +#define DEST_NOT_EMPTY ((Block_Header *)0xFF) + +static Block_Header *get_next_dest_block(Mspace *mspace) +{ + Block_Header *cur_dest_block; + + if(next_block_for_dest){ + cur_dest_block = (Block_Header*)next_block_for_dest; + while(cur_dest_block->status == BLOCK_DEST){ + cur_dest_block = cur_dest_block->next; + } + next_block_for_dest = cur_dest_block; + } else { + cur_dest_block = set_next_block_for_dest(mspace); + } + +// printf("Getting next dest block:\n"); +// printf("next_block_for_dest: %d\n\n", next_block_for_dest ? 
next_block_for_dest->block_idx : 0); + + unsigned int total_dest_counter = 0; + Block_Header *last_dest_block = (Block_Header *)last_block_for_dest; + for(; cur_dest_block <= last_dest_block; cur_dest_block = cur_dest_block->next){ + if(cur_dest_block->status == BLOCK_DEST){ +// printf("idx: %d DEST ", cur_dest_block->block_idx); + continue; + } + if(cur_dest_block->dest_counter == 0 && cur_dest_block->src){ +// printf("idx: %d DEST FOUND!\n\n", cur_dest_block->block_idx); + cur_dest_block->status = BLOCK_DEST; + return cur_dest_block; + } else if(cur_dest_block->dest_counter == 1 && GC_BLOCK_HEADER(cur_dest_block->src) == cur_dest_block){ +// printf("idx: %d NON_DEST FOUND!\n\n", cur_dest_block->block_idx); + return cur_dest_block; + } else if(cur_dest_block->dest_counter == 0 && !cur_dest_block->src){ +// printf("idx: %d NO_SRC ", cur_dest_block->block_idx); + cur_dest_block->status = BLOCK_DEST; + } else { +// printf("OTHER "); + total_dest_counter += cur_dest_block->dest_counter; + } + } + + if(total_dest_counter){ +// printf("\nNeed refind!\n\n"); + return DEST_NOT_EMPTY; + } + return NULL; +} + +static Block_Header *check_dest_block(Mspace *mspace) +{ + Block_Header *cur_dest_block; + + if(next_block_for_dest){ + cur_dest_block = (Block_Header*)next_block_for_dest; + while(cur_dest_block->status == BLOCK_DEST){ + cur_dest_block = cur_dest_block->next; + } + } else { + cur_dest_block = set_next_block_for_dest(mspace); + } + + unsigned int total_dest_counter = 0; + Block_Header *last_dest_block = (Block_Header *)last_block_for_dest; + for(; cur_dest_block < last_dest_block; cur_dest_block = cur_dest_block->next){ + if(cur_dest_block->status == BLOCK_DEST) + continue; + if(cur_dest_block->dest_counter == 0 && cur_dest_block->src){ + return cur_dest_block; + } else if(cur_dest_block->dest_counter == 1 && GC_BLOCK_HEADER(cur_dest_block->src) == cur_dest_block){ + return cur_dest_block; + } else if(cur_dest_block->dest_counter == 0 && !cur_dest_block->src){ + cur_dest_block->status = BLOCK_DEST; + } else { + total_dest_counter += cur_dest_block->dest_counter; + } + } + + if(total_dest_counter) return DEST_NOT_EMPTY; + return NULL; +} + +static inline Partial_Reveal_Object *get_next_first_src_obj(Mspace *mspace) +{ + Partial_Reveal_Object *first_src_obj; + + while(TRUE){ + lock(current_dest_block.lock); + Block_Header *next_dest_block = (Block_Header *)current_dest_block.block; + + if (!next_dest_block || !(first_src_obj = next_dest_block->src)){ + next_dest_block = get_next_dest_block(mspace); + if(!next_dest_block){ + unlock(current_dest_block.lock); + return NULL; + } else if(next_dest_block == DEST_NOT_EMPTY){ + unlock(current_dest_block.lock); + while(check_dest_block(mspace)==DEST_NOT_EMPTY); + continue; + } + first_src_obj = next_dest_block->src; + if(next_dest_block->status == BLOCK_DEST){ + assert(!next_dest_block->dest_counter); + current_dest_block.block = next_dest_block; + } + } + + Partial_Reveal_Object *next_src_obj = GC_BLOCK_HEADER(first_src_obj)->next_src; + if(next_src_obj && GC_BLOCK_HEADER(get_obj_info_raw(next_src_obj)) != next_dest_block){ + next_src_obj = NULL; + } + next_dest_block->src = next_src_obj; + unlock(current_dest_block.lock); + return first_src_obj; + } +} + +static inline void gc_init_block_for_sliding_compact(GC *gc, Mspace *mspace) +{ + /* initialize related static variables */ + next_block_for_dest = NULL; + current_dest_block.block = NULL; + current_dest_block.lock = FREE_LOCK; + mspace_block_iterator_init(mspace); + + return; +} + + +#include 
"../verify/verify_live_heap.h" +extern unsigned int mspace_free_block_idx; + +static void mspace_sliding_compact(Collector* collector, Mspace* mspace) +{ + void *start_pos; + Block_Header *nos_fw_start_block = (Block_Header *)&mspace->blocks[mspace_free_block_idx - mspace->first_block_idx]; + Boolean is_fallback = (collector->gc->collect_kind == FALLBACK_COLLECTION); + + while(Partial_Reveal_Object *p_obj = get_next_first_src_obj(mspace)){ + Block_Header *src_block = GC_BLOCK_HEADER(p_obj); + assert(src_block->dest_counter); + + Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj); + Block_Header *dest_block = GC_BLOCK_HEADER(p_target_obj); + + /* We don't set start_pos as p_obj in case that memmove of this obj may overlap itself. + * In that case we can't get the correct vt and obj_info. + */ + start_pos = obj_end(p_obj); + + do { + assert(obj_is_marked_in_vt(p_obj)); + obj_unmark_in_vt(p_obj); + + unsigned int obj_size = (unsigned int)start_pos - (unsigned int)p_obj; + if(p_obj != p_target_obj){ + memmove(p_target_obj, p_obj, obj_size); + + if(verify_live_heap){ + /* we forwarded it, we need remember it for verification */ + if(is_fallback && src_block>=nos_fw_start_block && obj_belongs_to_space(p_obj, (Space*)mspace)) + event_collector_move_obj(p_obj, p_target_obj, collector); + else + event_collector_move_obj(p_obj, p_target_obj, collector); + } + } + set_obj_info(p_target_obj, 0); + + p_obj = block_get_next_marked_obj_after_prefetch(src_block, &start_pos); + if(!p_obj) + break; + p_target_obj = obj_get_fw_in_oi(p_obj); + + } while(GC_BLOCK_HEADER(p_target_obj) == dest_block); + +#ifdef VERIFY_SLIDING_COMPACT + printf("dest_block: %x src_block: %x collector: %x\n", (unsigned int)dest_block, (unsigned int)src_block, (unsigned int)collector->thread_handle); +#endif + + atomic_dec32(&src_block->dest_counter); + } + +#ifdef VERIFY_SLIDING_COMPACT + static unsigned int fax = 0; + fax++; + printf("\n\n\nCollector %d Sliding compact ends! 
%d \n\n\n", (unsigned int)collector->thread_handle, fax); +#endif + +} + +#ifdef VERIFY_SLIDING_COMPACT + +static void verify_sliding_compact(Mspace *mspace, Boolean before) +{ + unsigned int i, j, k; + Block_Header *header; + + if(before) + j = 0; + else + j = 1; + + for(i = 0, header = (Block_Header *)mspace->blocks; + header; + header=header->next, ++i) + { + block_info[i][j].addr = (unsigned int)header; + block_info[i][j].dest_counter = header->dest_counter; + if(header->src){ + Partial_Reveal_Object *src_obj = header->src; + k = 0; + printf("\nHeader: %x %x Collector: %x ", (unsigned int)header, block_info[i][j].dest_counter, block_info[i][j].collector); + Block_Header *dest_header = GC_BLOCK_HEADER(obj_get_fw_in_oi(src_obj)); + while(dest_header == header){ + block_info[i][j].src_list[k] = dest_header; + Block_Header *src_header = GC_BLOCK_HEADER(src_obj); + printf("%x %x ", (unsigned int)src_header, src_header->dest_counter); + src_obj = src_header->next_src; + if(!src_obj) + break; + dest_header = GC_BLOCK_HEADER(obj_get_fw_in_oi(src_obj)); + if(++k >= 1021) + assert(0); + } + } + } + + if(!before){ + for(i = 0, header = (Block_Header *)mspace->blocks; + header; + header=header->next, ++i) + { + Boolean correct = TRUE; + if(block_info[i][0].addr != block_info[i][1].addr) + correct = FALSE; + if(block_info[i][0].dest_counter != block_info[i][1].dest_counter) + correct = FALSE; + for(k = 0; k < 1021; k++){ + if(block_info[i][0].src_list[k] != block_info[i][1].src_list[k]){ + correct = FALSE; + break; + } + } + if(!correct) + printf("header: %x %x dest_counter: %x %x src: %x %x", + block_info[i][0].addr, block_info[i][1].addr, + block_info[i][0].dest_counter, block_info[i][1].dest_counter, + block_info[i][0].src_list[k], block_info[i][1].src_list[k]); + } + + unsigned int *array = (unsigned int *)block_info; + memset(array, 0, 1024*32*1024*2); + } +} +#endif + +/* +#define OI_RESTORING_THRESHOLD 8 +static volatile Boolean parallel_oi_restoring; +unsigned int mspace_saved_obj_info_size(GC*gc){ return pool_size(gc->metadata->collector_remset_pool);} +*/ + +static volatile unsigned int num_marking_collectors = 0; +static volatile unsigned int num_repointing_collectors = 0; +static volatile unsigned int num_fixing_collectors = 0; +static volatile unsigned int num_moving_collectors = 0; +static volatile unsigned int num_restoring_collectors = 0; +static volatile unsigned int num_extending_collectors = 0; + +//For_LOS_extend +void mspace_restore_block_chain(Mspace* mspace) +{ + GC* gc = mspace->gc; + Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc); + if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS) { + Block_Header* fspace_last_block = (Block_Header*)&fspace->blocks[fspace->num_managed_blocks - 1]; + fspace_last_block->next = NULL; + } +} + +void slide_compact_mspace(Collector* collector) +{ + GC* gc = collector->gc; + Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc); + Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc); + Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc); + + unsigned int num_active_collectors = gc->num_active_collectors; + + /* Pass 1: ************************************************** + mark all live objects in heap, and save all the slots that + have references that are going to be repointed */ + unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); + + if(gc->collect_kind != FALLBACK_COLLECTION) + mark_scan_heap(collector); + else + fallback_mark_scan_heap(collector); + + old_num = atomic_inc32(&num_marking_collectors); + if( 
++old_num == num_active_collectors ){ + /* last collector's world here */ + /* prepare for next phase */ + gc_init_block_for_collectors(gc, mspace); + + if(!IGNORE_FINREF ) + collector_identify_finref(collector); +#ifndef BUILD_IN_REFERENT + else { + gc_set_weakref_sets(gc); + update_ref_ignore_finref(collector); + } +#endif + + last_block_for_dest = NULL; + + /* let other collectors go */ + num_marking_collectors++; + } + while(num_marking_collectors != num_active_collectors + 1); + + /* Pass 2: ************************************************** + assign target addresses for all to-be-moved objects */ + atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1); + + mspace_compute_object_target(collector, mspace); + + old_num = atomic_inc32(&num_repointing_collectors); + if( ++old_num == num_active_collectors ){ + /* single thread world */ + gc->collect_result = gc_collection_result(gc); + if(!gc->collect_result){ + num_repointing_collectors++; + assert(0); // Now we should not be out of mem here. mspace_extend_compact() is backing up for this case. + return; + } + + gc_reset_block_for_collectors(gc, mspace); + mspace_block_iterator_init(mspace); + num_repointing_collectors++; + } + while(num_repointing_collectors != num_active_collectors + 1); + if(!gc->collect_result) return; + + /* Pass 3: ************************************************** + update all references whose objects are to be moved */ + old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1); + + mspace_fix_repointed_refs(collector, mspace); + + old_num = atomic_inc32(&num_fixing_collectors); + if( ++old_num == num_active_collectors ){ + /* last collector's world here */ + lspace_fix_repointed_refs(collector, lspace); + gc_fix_rootset(collector); + + if(!IGNORE_FINREF ) + gc_put_finref_to_vm(gc); + +#ifdef VERIFY_SLIDING_COMPACT + verify_sliding_compact(mspace, TRUE); +#endif + + gc_init_block_for_sliding_compact(gc, mspace); + num_fixing_collectors++; + } + while(num_fixing_collectors != num_active_collectors + 1); + + /* Pass 4: ************************************************** + move objects */ + atomic_cas32( &num_moving_collectors, 0, num_active_collectors); + + mspace_sliding_compact(collector, mspace); + + atomic_inc32(&num_moving_collectors); + while(num_moving_collectors != num_active_collectors); + + /* Pass 5: ************************************************** + restore obj_info */ + atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1); + + collector_restore_obj_info(collector); + + old_num = atomic_inc32(&num_restoring_collectors); + if( ++old_num == num_active_collectors ){ + update_mspace_info_for_los_extension(mspace); + + num_restoring_collectors++; + } + while(num_restoring_collectors != num_active_collectors + 1); + + /* Dealing with out of memory in mspace */ + if(mspace->free_block_idx > fspace->first_block_idx){ + atomic_cas32( &num_extending_collectors, 0, num_active_collectors); + + mspace_extend_compact(collector); + + atomic_inc32(&num_extending_collectors); + while(num_extending_collectors != num_active_collectors); + } + + if( collector->thread_handle != 0 ) + return; + + /* Leftover: ************************************************** + */ + + mspace_reset_after_compaction(mspace); + fspace_reset_for_allocation(fspace); + + //For_LOS_extend + mspace_restore_block_chain(mspace); + + gc_set_pool_clear(gc->metadata->gc_rootset_pool); + + return; +} Index: vm/gc_gen/src/mark_sweep/free_area_pool.cpp 
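Illustrative sketch (not part of the patch): the idiom repeated between the passes of slide_compact_mspace() above, an atomic_cas32 that resets a per-pass counter, the parallel work, an atomic_inc32, and a spin until the counter passes num_active_collectors, is a phase barrier in which the last collector to arrive runs the single-threaded preparation for the next pass. The stand-alone restatement below uses C11 atomics and invented names (phase_barrier, do_parallel_work, do_serial_prep); the patch keeps a separate counter per pass (num_marking_collectors, num_repointing_collectors, and so on) because a counter cannot be reset for reuse while stragglers may still be spinning on it.

#include <stdatomic.h>

/* One counter per pass; see the distinct num_*_collectors counters above. */
static void phase_barrier(atomic_uint *count, unsigned num_active,
                          void (*do_parallel_work)(void),
                          void (*do_serial_prep)(void))
{
    /* The first collector to arrive resets the counter; for the others the
       CAS fails because the counter has already dropped below num_active + 1. */
    unsigned expected = num_active + 1;
    atomic_compare_exchange_strong(count, &expected, 0);

    do_parallel_work();

    /* The last collector to finish runs the single-threaded preparation,
       then releases the others by pushing the counter to num_active + 1. */
    unsigned before = atomic_fetch_add(count, 1);
    if (before + 1 == num_active) {
        do_serial_prep();
        atomic_fetch_add(count, 1);
    }
    while (atomic_load(count) != num_active + 1)
        ;   /* spin until the last collector has released this pass */
}
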
=================================================================== --- vm/gc_gen/src/mark_sweep/free_area_pool.cpp (revision 493420) +++ vm/gc_gen/src/mark_sweep/free_area_pool.cpp (working copy) @@ -23,12 +23,13 @@ void free_area_pool_init(Free_Area_Pool* pool) { for(unsigned int i = 0; i < NUM_FREE_LIST; i ++){ - Bidir_List* list = &pool->sized_area_list[i]; + Bidir_List* list = (Bidir_List*)(&pool->sized_area_list[i]); list->next = list->prev = list; + ((Lockable_Bidir_List*)list)->lock = 0; + ((Lockable_Bidir_List*)list)->zero = 0; } memset((void*)pool->list_bit_flag, 0, NUM_FLAG_WORDS << BIT_SHIFT_TO_BYTES_PER_WORD); - pool->free_pool_lock = 0; return; } @@ -51,7 +52,7 @@ if(index == NUM_FREE_LIST) return NULL; - Bidir_List* list = &pool->sized_area_list[index]; + Bidir_List* list = (Bidir_List*)&pool->sized_area_list[index]; Free_Area* area = (Free_Area*)list->next; if(index != MAX_LIST_INDEX) Index: vm/gc_gen/src/mark_sweep/free_area_pool.h =================================================================== --- vm/gc_gen/src/mark_sweep/free_area_pool.h (revision 493420) +++ vm/gc_gen/src/mark_sweep/free_area_pool.h (working copy) @@ -31,8 +31,18 @@ #define NUM_FREE_LIST 128 +typedef struct Lockable_Bidir_List{ + /* <-- First couple of fields overloadded as Bidir_List */ + unsigned int zero; + Bidir_List* next; + Bidir_List* prev; + /* END of Bidir_List --> */ + unsigned int lock; +}Lockable_Bidir_List; + typedef struct Free_Area{ /* <-- First couple of fields overloadded as Bidir_List */ + unsigned int zero; Bidir_List* next; Bidir_List* prev; /* END of Bidir_List --> */ @@ -44,10 +54,13 @@ { assert(ADDRESS_IS_KB_ALIGNED(start)); assert(ADDRESS_IS_KB_ALIGNED(size)); - + + //memset(start, 0, size); + if( size < GC_OBJ_SIZE_THRESHOLD) return NULL; Free_Area* area = (Free_Area*)start; - memset(area, 0, size); + area->zero = 0; + area->next = area->prev = (Bidir_List*)area; area->size = size; return area; } @@ -55,10 +68,9 @@ #define NUM_FLAG_WORDS (NUM_FREE_LIST >> BIT_SHIFT_TO_BITS_PER_WORD) typedef struct Free_Area_Pool{ - Bidir_List sized_area_list[NUM_FREE_LIST]; + Lockable_Bidir_List sized_area_list[NUM_FREE_LIST]; /* each list corresponds to one bit in below vector */ unsigned int list_bit_flag[NUM_FLAG_WORDS]; - volatile unsigned int free_pool_lock; }Free_Area_Pool; #define MAX_LIST_INDEX (NUM_FREE_LIST - 1) @@ -93,7 +105,7 @@ assert( free_area->size >= GC_OBJ_SIZE_THRESHOLD); unsigned int index = pool_list_index_with_size(free_area->size); - bidir_list_add_item(&(pool->sized_area_list[index]), (Bidir_List*)free_area); + bidir_list_add_item((Bidir_List*)&(pool->sized_area_list[index]), (Bidir_List*)free_area); /* set bit flag of the list */ pool_list_set_flag(pool, index); @@ -106,7 +118,7 @@ bidir_list_remove_item((Bidir_List*)free_area); /* set bit flag of the list */ - Bidir_List* list = &(pool->sized_area_list[index]); + Bidir_List* list = (Bidir_List*)&(pool->sized_area_list[index]); if(list->next == list){ pool_list_clear_flag(pool, index); } Index: vm/gc_gen/src/mark_sweep/lspace.cpp =================================================================== --- vm/gc_gen/src/mark_sweep/lspace.cpp (revision 493420) +++ vm/gc_gen/src/mark_sweep/lspace.cpp (working copy) @@ -30,22 +30,17 @@ assert(lspace); memset(lspace, 0, sizeof(Lspace)); + /* commit mspace mem */ void* reserved_base = start; unsigned int committed_size = lspace_size; - int status = port_vmem_commit(&reserved_base, committed_size, gc->allocated_memory); - assert(status == APR_SUCCESS && reserved_base == start); 
+ vm_commit_mem(reserved_base, lspace_size); + memset(reserved_base, 0, lspace_size); - memset(reserved_base, 0, committed_size); lspace->committed_heap_size = committed_size; - lspace->reserved_heap_size = lspace_size - committed_size; + lspace->reserved_heap_size = committed_size; lspace->heap_start = reserved_base; lspace->heap_end = (void *)((unsigned int)reserved_base + committed_size); - /*Treat with mark bit table*/ - unsigned int num_words = LSPACE_SIZE_TO_MARKTABLE_SIZE_WORDS(lspace_size); - lspace->mark_table = (unsigned int*)STD_MALLOC( num_words*BYTES_PER_WORD ); - memset(lspace->mark_table, 0, num_words*BYTES_PER_WORD); - lspace->mark_object_func = lspace_mark_object; lspace->move_object = FALSE; lspace->gc = gc; @@ -56,6 +51,10 @@ initial_fa->size = lspace->committed_heap_size; free_pool_add_area(lspace->free_pool, initial_fa); + lspace->num_collections = 0; + lspace->time_collections = 0; + lspace->survive_ratio = 0.5f; + gc_set_los((GC_Gen*)gc, (Space*)lspace); los_boundary = lspace->heap_end; @@ -64,47 +63,42 @@ void lspace_destruct(Lspace* lspace) { - //FIXME:: decommit lspace space - STD_FREE(lspace->mark_table); STD_FREE(lspace); lspace = NULL; return; } -Boolean lspace_mark_object(Lspace* lspace, Partial_Reveal_Object* p_obj) -{ - assert( obj_belongs_to_space(p_obj, (Space*)lspace)); - unsigned int word_index = OBJECT_WORD_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj); - unsigned int bit_offset_in_word = OBJECT_WORD_OFFSET_IN_LSPACE_MARKBIT_TABLE(lspace, p_obj); +#include "../common/fix_repointed_refs.h" - unsigned int* p_word = &(lspace->mark_table[word_index]); - unsigned int word_mask = (1<committed_heap_size); - memset(lspace->mark_table, 0, marktable_size); - return; + unsigned int start_pos = 0; + Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &start_pos); + while( p_obj){ + assert(obj_is_marked_in_vt(p_obj)); + object_fix_ref_slots(p_obj); + p_obj = lspace_get_next_marked_object(lspace, &start_pos); + } } void lspace_collection(Lspace* lspace) { /* heap is marked already, we need only sweep here. 
*/ + lspace->num_collections ++; + lspace_reset_after_collection(lspace); lspace_sweep(lspace); - unsigned int marktable_size = LSPACE_SIZE_TO_MARKTABLE_SIZE_BYTES(lspace->committed_heap_size); - memset(lspace->mark_table, 0, marktable_size); return; } Index: vm/gc_gen/src/mark_sweep/lspace.h =================================================================== --- vm/gc_gen/src/mark_sweep/lspace.h (revision 493420) +++ vm/gc_gen/src/mark_sweep/lspace.h (working copy) @@ -25,6 +25,9 @@ #include "../thread/gc_thread.h" #include "free_area_pool.h" +#define GC_MIN_LOS_SIZE ( 4 * 1024 * 1024) + + typedef struct Lspace{ /* <-- first couple of fields are overloadded as Space */ void* heap_start; @@ -32,104 +35,63 @@ unsigned int reserved_heap_size; unsigned int committed_heap_size; unsigned int num_collections; + int64 time_collections; + float survive_ratio; GC* gc; Boolean move_object; - Boolean (*mark_object_func)(Lspace* space, Partial_Reveal_Object* p_obj); /* END of Space --> */ // void* alloc_free; Free_Area_Pool* free_pool; - unsigned int* mark_table; - }Lspace; void lspace_initialize(GC* gc, void* reserved_base, unsigned int lspace_size); void lspace_destruct(Lspace* lspace); Managed_Object_Handle lspace_alloc(unsigned int size, Allocator* allocator); void lspace_sweep(Lspace* lspace); +void lspace_reset_after_collection(Lspace* lspace); void lspace_collection(Lspace* lspace); inline unsigned int lspace_free_memory_size(Lspace* lspace){ /* FIXME:: */ return 0; } +inline unsigned int lspace_committed_size(Lspace* lspace){ return lspace->committed_heap_size; } +inline Partial_Reveal_Object* lspace_get_next_marked_object( Lspace* lspace, unsigned int* iterate_index) +{ + unsigned int next_area_start = (unsigned int)lspace->heap_start + (*iterate_index) * KB; + BOOLEAN reach_heap_end = 0; -#define LSPACE_SIZE_TO_MARKTABLE_SIZE_BITS(space_size) (((space_size) >> BIT_SHIFT_TO_KILO)+1) -#define LSPACE_SIZE_TO_MARKTABLE_SIZE_BYTES(space_size) ((LSPACE_SIZE_TO_MARKTABLE_SIZE_BITS(space_size)>> BIT_SHIFT_TO_BITS_PER_BYTE)+1) -#define LSPACE_SIZE_TO_MARKTABLE_SIZE_WORDS(space_size) ((LSPACE_SIZE_TO_MARKTABLE_SIZE_BYTES(space_size)>> BIT_SHIFT_TO_BYTES_PER_WORD)+1) + while(!reach_heap_end){ + //FIXME: This while shoudl be if, try it! + while(!*((unsigned int *)next_area_start)){ + next_area_start += ((Free_Area*)next_area_start)->size; + } + if(next_area_start < (unsigned int)lspace->heap_end){ + //If there is a living object at this addr, return it, and update iterate_index + if(obj_is_marked_in_vt((Partial_Reveal_Object*)next_area_start)){ + unsigned int obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)); + *iterate_index = (next_area_start + obj_size - (unsigned int)lspace->heap_start) >> BIT_SHIFT_TO_KILO; + return (Partial_Reveal_Object*)next_area_start; + //If this is a dead object, go on to find a living one. 
+ }else{ + unsigned int obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)); + next_area_start += obj_size; + } + }else{ + reach_heap_end = 1; + } + } + return NULL; -/* The assumption is the offset below is always aligned at word size, because both numbers are aligned */ -#define ADDRESS_OFFSET_IN_LSPACE_BODY(lspace, p_obj) ((unsigned int)p_obj - (unsigned int)space_heap_start((Space*)lspace)) -#define OBJECT_BIT_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj) (ADDRESS_OFFSET_IN_LSPACE_BODY(lspace, p_obj) >> BIT_SHIFT_TO_KILO) -#define OBJECT_WORD_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj) (OBJECT_BIT_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj) >> BIT_SHIFT_TO_BITS_PER_WORD) -#define OBJECT_WORD_OFFSET_IN_LSPACE_MARKBIT_TABLE(lspace, p_obj) (OBJECT_BIT_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj) & BIT_MASK_TO_BITS_PER_WORD) - -inline Boolean lspace_object_is_marked(Lspace* lspace, Partial_Reveal_Object* p_obj) -{ - assert( obj_belongs_to_space(p_obj, (Space*)lspace)); - unsigned int word_index = OBJECT_WORD_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj); - unsigned int bit_offset_in_word = OBJECT_WORD_OFFSET_IN_LSPACE_MARKBIT_TABLE(lspace, p_obj); - - unsigned int markbits = lspace->mark_table[word_index]; - return markbits & (1<mark_table; - unsigned int* table_end = mark_table + LSPACE_SIZE_TO_MARKTABLE_SIZE_WORDS(lspace->committed_heap_size); - - unsigned j=0; - unsigned int k=0; - while( (mark_table + j) < table_end){ - unsigned int markbits = *(mark_table+j); - if(!markbits){ j++; continue; } - while(k<32){ - if( !(markbits& (1<heap_start + kilo_bytes_index * KB); - *mark_bit_idx = kilo_bytes_index; - return p_obj; - } - j++; - k=0; - } - *mark_bit_idx = 0; - return NULL; + return lspace_get_next_marked_object(lspace, mark_bit_idx); } +void lspace_fix_after_copy_nursery(Collector* collector, Lspace* lspace); -inline Partial_Reveal_Object* lspace_get_next_marked_object(Lspace* lspace, unsigned int* mark_bit_idx) -{ - unsigned int* mark_table = lspace->mark_table; - unsigned int* table_end = mark_table + LSPACE_SIZE_TO_MARKTABLE_SIZE_WORDS(lspace->committed_heap_size); - unsigned int bit_index = *mark_bit_idx; - - unsigned int j = bit_index >> BIT_SHIFT_TO_BITS_PER_WORD; - unsigned int k = (bit_index & BIT_MASK_TO_BITS_PER_WORD) + 1; - - while( (mark_table + j) < table_end){ - unsigned int markbits = *(mark_table+j); - if(!markbits){ j++; continue; } - while(k<32){ - if( !(markbits& (1<heap_start + kilo_byte_index * KB); - *mark_bit_idx = kilo_byte_index; - return p_obj; - } - j++; - k=0; - } - - *mark_bit_idx = 0; - return NULL; +void lspace_fix_repointed_refs(Collector* collector, Lspace* lspace); -} - -Boolean lspace_mark_object(Lspace* lspace, Partial_Reveal_Object* p_obj); - -void reset_lspace_after_copy_nursery(Lspace* lspace); - #endif /*_LSPACE_H_ */ Index: vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp =================================================================== --- vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp (revision 493420) +++ vm/gc_gen/src/mark_sweep/lspace_alloc_collect.cpp (working copy) @@ -19,64 +19,223 @@ */ #include "lspace.h" -struct GC_Gen; -Space* gc_get_los(GC_Gen* gc); +#include "../gen/gen.h" +#include "../common/space_tuner.h" -void* lspace_alloc(unsigned int size, Allocator *allocator) +inline void free_pool_lock_nr_list(Free_Area_Pool* pool, unsigned int list_index){ + Lockable_Bidir_List* list_head = &pool->sized_area_list[list_index]; + while (apr_atomic_casptr( + (volatile void **) &(list_head->lock), + 
(void *) 1, (void *) 0) + != (void *) 0) { + while (list_head->lock == 1) { + ; + } + } + +} +inline void free_pool_unlock_nr_list(Free_Area_Pool* pool, unsigned int list_index){ + ((Lockable_Bidir_List*)(&pool->sized_area_list[list_index]))->lock = 0; +} +inline unsigned int free_pool_nr_list_is_empty(Free_Area_Pool* pool, unsigned int list_index){ + Bidir_List* head = (Bidir_List*)(&pool->sized_area_list[list_index]); + return (head->next == head); +} +inline void* free_pool_former_lists_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int list_hint, unsigned int size) { - vm_gc_lock_enum(); - unsigned int try_count = 0; - Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc); - Free_Area_Pool* pool = lspace->free_pool; - Free_Area* free_area; - Free_Area* remain_area; - void* p_return = NULL; + Free_Area* free_area; + void* p_result; + unsigned int remain_size; + unsigned int alloc_size = ALIGN_UP_TO_KILO(size); + unsigned int new_list_nr = 0; + Lockable_Bidir_List* head = &pool->sized_area_list[list_hint]; - while( try_count < 2 ){ - free_area = free_pool_find_size_area(pool, size); - - /*Got one free area!*/ - if(free_area != NULL){ - assert(free_area->size >= size); - free_pool_remove_area(pool, free_area); - p_return = (void*)free_area; - unsigned int old_size = free_area->size; - memset(p_return, 0, sizeof(Free_Area)); + assert(list_hint < MAX_LIST_INDEX); - /* we need put the remaining area back if it size is ok.*/ - void* new_start = (Free_Area*)ALIGN_UP_TO_KILO(((unsigned int)free_area + size)); - unsigned int alloc_size = (unsigned int)new_start - (unsigned int)free_area; - unsigned int new_size = old_size - alloc_size; - - remain_area = free_area_new(new_start, new_size); - if( remain_area ) - free_pool_add_area(pool, remain_area); + free_pool_lock_nr_list(pool, list_hint); + /*Other LOS allocation may race with this one, so check list status here.*/ + if(free_pool_nr_list_is_empty(pool, list_hint)){ + free_pool_unlock_nr_list(pool, list_hint); + return NULL; + } - vm_gc_unlock_enum(); - return p_return; + free_area = (Free_Area*)(head->next); + remain_size = free_area->size - alloc_size; + if( remain_size >= GC_OBJ_SIZE_THRESHOLD){ + new_list_nr = pool_list_index_with_size(remain_size); + p_result = (void*)((unsigned int)free_area + remain_size); + if(new_list_nr == list_hint){ + free_area->size = remain_size; + free_pool_unlock_nr_list(pool, list_hint); + return p_result; + }else{ + free_pool_remove_area(pool, free_area); + free_pool_unlock_nr_list(pool, list_hint); + free_area->size = remain_size; + free_pool_lock_nr_list(pool, new_list_nr); + free_pool_add_area(pool, free_area); + free_pool_unlock_nr_list(pool, new_list_nr); + return p_result; + } } + else if(remain_size >= 0) + { + free_pool_remove_area(pool, free_area); + free_pool_unlock_nr_list(pool, list_hint); + p_result = (void*)((unsigned int)free_area + remain_size); + if(remain_size > 0){ + assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD)); + free_area->size = remain_size; + } + return p_result; + } + /*We never get here, because if the list head is not NULL, it definitely satisfy the request. 
*/ + assert(0); + return NULL; +} - if(try_count++ == 0) - gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL); +inline void* free_pool_last_list_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int size) +{ + void* p_result; + int remain_size = 0; + unsigned int alloc_size = ALIGN_UP_TO_KILO(size); + Free_Area* free_area = NULL; + Free_Area* new_area = NULL; + unsigned int new_list_nr = 0; + Lockable_Bidir_List* head = &(pool->sized_area_list[MAX_LIST_INDEX]); + + free_pool_lock_nr_list(pool, MAX_LIST_INDEX ); + /*The last list is empty.*/ + if(free_pool_nr_list_is_empty(pool, MAX_LIST_INDEX)){ + free_pool_unlock_nr_list(pool, MAX_LIST_INDEX ); + return NULL; + } + + free_area = (Free_Area*)(head->next); + while( free_area != (Free_Area*)head ){ + remain_size = free_area->size - alloc_size; + if( remain_size >= GC_OBJ_SIZE_THRESHOLD){ + new_list_nr = pool_list_index_with_size(remain_size); + p_result = (void*)((unsigned int)free_area + remain_size); + if(new_list_nr == MAX_LIST_INDEX){ + free_area->size = remain_size; + free_pool_unlock_nr_list(pool, MAX_LIST_INDEX); + return p_result; + }else{ + free_pool_remove_area(pool, free_area); + free_pool_unlock_nr_list(pool, MAX_LIST_INDEX); + free_area->size = remain_size; + free_pool_lock_nr_list(pool, new_list_nr); + free_pool_add_area(pool, free_area); + free_pool_unlock_nr_list(pool, new_list_nr); + return p_result; + } + } + else if(remain_size >= 0) + { + free_pool_remove_area(pool, free_area); + free_pool_unlock_nr_list(pool, MAX_LIST_INDEX); + p_result = (void*)((unsigned int)free_area + remain_size); + if(remain_size > 0){ + assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD)); + free_area->size = remain_size; + } + return p_result; + } + else free_area = (Free_Area*)free_area->next; + } + /*No adequate area in the last list*/ + free_pool_unlock_nr_list(pool, MAX_LIST_INDEX ); + return NULL; +} - } +void* lspace_alloc(unsigned int size, Allocator *allocator) +{ + unsigned int try_count = 0; + void* p_result = NULL; + unsigned int list_hint = 0; + unsigned int alloc_size = ALIGN_UP_TO_KILO(size); + Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc); + Free_Area_Pool* pool = lspace->free_pool; - vm_gc_unlock_enum(); - return NULL; + while( try_count < 2 ){ + list_hint = pool_list_index_with_size(alloc_size); + list_hint = pool_list_get_next_flag(pool, list_hint); + while((!p_result) && (list_hint <= MAX_LIST_INDEX)){ + /*List hint is not the last list, so look for it in former lists.*/ + if(list_hint < MAX_LIST_INDEX){ + p_result = free_pool_former_lists_atomic_take_area_piece(pool, list_hint, alloc_size); + if(p_result){ + memset(p_result, 0, size); + return p_result; + }else{ + list_hint ++; + list_hint = pool_list_get_next_flag(pool, list_hint); + continue; + } + } + /*List hint is the last list, so look for it in the last list.*/ + else + { + p_result = free_pool_last_list_atomic_take_area_piece(pool, alloc_size); + if(p_result){ + memset(p_result, 0, size); + return p_result; + } + else break; + } + } + /*Failled, no adequate area found in all lists, so GC at first, then get another try.*/ + if(try_count == 0){ + vm_gc_lock_enum(); + gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL); + vm_gc_unlock_enum(); + try_count ++; + }else{ + try_count ++; + } + } + return NULL; } +void lspace_reset_after_collection(Lspace* lspace) +{ + GC* gc = lspace->gc; + Space_Tuner* tuner = gc->tuner; + unsigned int trans_size = tuner->tuning_size; + assert(!(trans_size%GC_BLOCK_SIZE_BYTES)); + //For_LOS_extend + 
if(tuner->kind == TRANS_FROM_MOS_TO_LOS){ + void* origin_end = lspace->heap_end; + lspace->heap_end = (void*)(((GC_Gen*)gc)->mos->blocks); + + Free_Area* trans_fa = free_area_new(origin_end, trans_size); + free_pool_add_area(lspace->free_pool, trans_fa); + lspace->committed_heap_size += trans_size; + lspace->reserved_heap_size += trans_size; + } + los_boundary = lspace->heap_end; +} + void lspace_sweep(Lspace* lspace) { /* reset the pool first because its info is useless now. */ free_area_pool_reset(lspace->free_pool); - unsigned int mark_bit_idx, cur_size; + unsigned int mark_bit_idx = 0, cur_size = 0; void *cur_area_start, *cur_area_end; Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start; Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object(lspace, &mark_bit_idx); + if(p_next_obj){ + obj_unmark_in_vt(p_next_obj); + /* we need this because, in hybrid situation of gen_mode and non_gen_mode, LOS will only be marked + in non_gen_mode, and not reset in gen_mode. When it switches back from gen_mode to non_gen_mode, + the last time marked object is thought to be already marked and not scanned for this cycle. */ + obj_clear_dual_bits_in_oi(p_next_obj); + } cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj); cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj); @@ -92,6 +251,10 @@ p_prev_obj = p_next_obj; p_next_obj = lspace_get_next_marked_object(lspace, &mark_bit_idx); + if(p_next_obj){ + obj_unmark_in_vt(p_next_obj); + obj_clear_dual_bits_in_oi(p_next_obj); + } cur_area_start = (void*)ALIGN_UP_TO_KILO((unsigned int)p_prev_obj + vm_object_size(p_prev_obj)); cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj); @@ -106,6 +269,8 @@ if( cur_area ) free_pool_add_area(lspace->free_pool, cur_area); + mark_bit_idx = 0; + assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx)); return; } Index: vm/gc_gen/src/thread/collector.cpp =================================================================== --- vm/gc_gen/src/thread/collector.cpp (revision 493420) +++ vm/gc_gen/src/thread/collector.cpp (working copy) @@ -24,31 +24,29 @@ #include "../mark_compact/mspace.h" #include "../finalizer_weakref/finalizer_weakref.h" +unsigned int MINOR_COLLECTORS = 0; +unsigned int MAJOR_COLLECTORS = 0; -static void collector_restore_obj_info(Collector* collector) +void collector_restore_obj_info(Collector* collector) { - ObjectMap* objmap = collector->obj_info_map; - ObjectMap::iterator obj_iter; - for( obj_iter=objmap->begin(); obj_iter!=objmap->end(); obj_iter++){ - Partial_Reveal_Object* p_target_obj = obj_iter->first; - Obj_Info_Type obj_info = obj_iter->second; - set_obj_info(p_target_obj, obj_info); + Pool *remset_pool = collector->gc->metadata->collector_remset_pool; + Pool *free_pool = collector->gc->metadata->free_set_pool; + assert(!collector->rem_set); + + while(Vector_Block *oi_block = pool_get_entry(remset_pool)){ + unsigned int *iter = vector_block_iterator_init(oi_block); + while(!vector_block_iterator_end(oi_block, iter)){ + Partial_Reveal_Object *p_target_obj = (Partial_Reveal_Object *)*iter; + iter = vector_block_iterator_advance(oi_block, iter); + Obj_Info_Type obj_info = (Obj_Info_Type)*iter; + iter = vector_block_iterator_advance(oi_block, iter); + set_obj_info(p_target_obj, obj_info); + } + vector_block_clear(oi_block); + pool_put_entry(free_pool, oi_block); } - objmap->clear(); - return; } -void gc_restore_obj_info(GC* gc) -{ - for(unsigned int i=0; inum_active_collectors; i++) - { - Collector* collector = gc->collectors[i]; - 
collector_restore_obj_info(collector); - } - return; - -} - static void collector_reset_thread(Collector *collector) { collector->task_func = NULL; @@ -57,22 +55,25 @@ vm_reset_event(collector->task_assigned_event); vm_reset_event(collector->task_finished_event); */ - - alloc_context_reset((Allocator*)collector); - + GC_Metadata* metadata = collector->gc->metadata; +/* TO_REMOVE + assert(collector->rep_set==NULL); - if( !gc_requires_barriers() || collector->gc->collect_kind != MINOR_COLLECTION){ - collector->rep_set = pool_get_entry(metadata->free_set_pool); + if( !gc_is_gen_mode() || collector->gc->collect_kind != MINOR_COLLECTION){ + collector->rep_set = free_set_pool_get_entry(metadata); } +*/ - if(gc_requires_barriers()){ + if(gc_is_gen_mode() && collector->gc->collect_kind==MINOR_COLLECTION && NOS_PARTIAL_FORWARD){ assert(collector->rem_set==NULL); - collector->rem_set = pool_get_entry(metadata->free_set_pool); + collector->rem_set = free_set_pool_get_entry(metadata); } +#ifndef BUILD_IN_REFERENT collector_reset_weakref_sets(collector); +#endif collector->result = TRUE; return; @@ -101,7 +102,14 @@ static void assign_collector_with_task(GC* gc, TaskType task_func, Space* space) { /* FIXME:: to adaptively identify the num_collectors_to_activate */ - gc->num_active_collectors = gc->num_collectors; + if( MINOR_COLLECTORS && gc->collect_kind == MINOR_COLLECTION){ + gc->num_active_collectors = MINOR_COLLECTORS; + }else if ( MAJOR_COLLECTORS && gc->collect_kind != MINOR_COLLECTION){ + gc->num_active_collectors = MAJOR_COLLECTORS; + }else{ + gc->num_active_collectors = gc->num_collectors; + } + for(unsigned int i=0; inum_active_collectors; i++) { Collector* collector = gc->collectors[i]; @@ -121,7 +129,7 @@ { Collector* collector = gc->collectors[i]; wait_collector_to_finish(collector); - } + } return; } @@ -139,7 +147,9 @@ if(task_func == NULL) return 1; task_func(collector); - + + alloc_context_reset((Allocator*)collector); + collector_notify_work_done(collector); } @@ -148,7 +158,6 @@ static void collector_init_thread(Collector *collector) { - collector->obj_info_map = new ObjectMap(); collector->rem_set = NULL; collector->rep_set = NULL; @@ -193,14 +202,14 @@ struct GC_Gen; unsigned int gc_get_processor_num(GC_Gen*); + void collector_initialize(GC* gc) { //FIXME:: - unsigned int nthreads = gc_get_processor_num((GC_Gen*)gc); + unsigned int num_processors = gc_get_processor_num((GC_Gen*)gc); - nthreads = (NUM_COLLECTORS==0)?nthreads:NUM_COLLECTORS; + unsigned int nthreads = max( max( MAJOR_COLLECTORS, MINOR_COLLECTORS), max(NUM_COLLECTORS, num_processors)); - gc->num_collectors = nthreads; unsigned int size = sizeof(Collector *) * nthreads; gc->collectors = (Collector **) STD_MALLOC(size); memset(gc->collectors, 0, size); @@ -218,6 +227,8 @@ gc->collectors[i] = collector; } + gc->num_collectors = NUM_COLLECTORS? 
NUM_COLLECTORS:num_processors; + return; } Index: vm/gc_gen/src/thread/collector.h =================================================================== --- vm/gc_gen/src/thread/collector.h (revision 493420) +++ vm/gc_gen/src/thread/collector.h (working copy) @@ -21,13 +21,15 @@ #ifndef _COLLECTOR_H_ #define _COLLECTOR_H_ -#include "../common/gc_common.h" +#include "../common/gc_space.h" struct Block_Header; +struct Stealable_Stack; typedef struct Collector{ /* <-- first couple of fields are overloaded as Allocator */ void *free; void *ceiling; + void *end; void *alloc_block; Space* alloc_space; GC* gc; @@ -52,9 +54,6 @@ Block_Header* cur_compact_block; Block_Header* cur_target_block; - /* during compaction, save non-zero obj_info who's overwritten by forwarding pointer */ - ObjectMap* obj_info_map; - void(*task_func)(void*) ; /* current task */ unsigned int result; @@ -67,9 +66,29 @@ void collector_execute_task(GC* gc, TaskType task_func, Space* space); -Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj); +void collector_restore_obj_info(Collector* collector); -void gc_restore_obj_info(GC* gc); +inline Boolean gc_collection_result(GC* gc) +{ + Boolean result = TRUE; + for(unsigned i=0; inum_active_collectors; i++){ + Collector* collector = gc->collectors[i]; + result &= collector->result; + } + return result; +} +inline void gc_reset_collect_result(GC* gc) +{ + for(unsigned i=0; inum_active_collectors; i++){ + Collector* collector = gc->collectors[i]; + collector->result = TRUE; + } + + gc->collect_result = TRUE; + return; +} + + #endif //#ifndef _COLLECTOR_H_ Index: vm/gc_gen/src/thread/collector_alloc.cpp =================================================================== --- vm/gc_gen/src/thread/collector_alloc.cpp (revision 493420) +++ vm/gc_gen/src/thread/collector_alloc.cpp (working copy) @@ -1,59 +0,0 @@ -/* - * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @author Xiao-Feng Li, 2006/10/05 - */ - -#include "gc_thread.h" - -void* mos_alloc(unsigned size, Allocator *allocator); - -/* NOS forward obj to MOS in MINOR_COLLECTION */ -Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj) -{ - Partial_Reveal_VTable* vt = obj_get_vtraw(p_obj); - - /* forwarded by somebody else */ - if ((unsigned int)vt & FORWARDING_BIT_MASK){ - assert(!obj_is_marked_in_vt(p_obj)); - return NULL; - } - - /* otherwise, try to alloc it. mos should always has enough space to hold nos during collection */ - unsigned int size = vm_object_size(p_obj); - Partial_Reveal_Object* p_targ_obj = (Partial_Reveal_Object*)mos_alloc(size, (Allocator*)collector); - assert(p_targ_obj); - - /* else, take the obj by setting the forwarding flag atomically - we don't put a simple bit in vt because we need compute obj size later. 
*/ - if ((unsigned int)vt != atomic_cas32((unsigned int*)obj_get_vtraw_addr(p_obj), ((unsigned int)p_targ_obj|FORWARDING_BIT_MASK), (unsigned int)vt)) { - /* forwarded by other, we need unalloc the allocated obj. We may waste some space if the allocation switched - block. The remaining part of the switched block cannot be revivied for next allocation of - object that has smaller size than this one. */ - assert( obj_is_forwarded_in_vt(p_obj) && !obj_is_marked_in_vt(p_obj)); - thread_local_unalloc(size, (Allocator*)collector); - return NULL; - } - - /* we forwarded the object */ - memcpy(p_targ_obj, p_obj, size); - /* because p_obj has forwarding pointer in its vt, we set it seperately here */ - obj_set_vt(p_targ_obj, (Allocation_Handle)vt); - - return p_targ_obj; - -} Index: vm/gc_gen/src/thread/collector_alloc.h =================================================================== --- vm/gc_gen/src/thread/collector_alloc.h (revision 0) +++ vm/gc_gen/src/thread/collector_alloc.h (revision 0) @@ -0,0 +1,80 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#ifndef _COLLECTOR_ALLOC_H_ +#define _COLLECTOR_ALLOC_H_ + +#include "gc_thread.h" + +void* mos_alloc(unsigned size, Allocator *allocator); + +/* NOS forward obj to MOS in MINOR_COLLECTION */ +inline Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj) +{ + Obj_Info_Type oi = get_obj_info_raw(p_obj); + + /* forwarded by somebody else */ + if ((unsigned int)oi & FORWARD_BIT){ + return NULL; + } + + /* otherwise, try to alloc it. mos should always has enough space to hold nos during collection */ + unsigned int size = vm_object_size(p_obj); + + Partial_Reveal_Object* p_targ_obj = thread_local_alloc(size, (Allocator*)collector); + if(!p_targ_obj) + p_targ_obj = (Partial_Reveal_Object*)mos_alloc(size, (Allocator*)collector); + + if(p_targ_obj == NULL){ + /* failed to forward an obj */ + collector->result = FALSE; + return NULL; + } + + /* else, take the obj by setting the forwarding flag atomically + we don't put a simple bit in vt because we need compute obj size later. */ + if ((unsigned int)oi != atomic_cas32((unsigned int*)get_obj_info_addr(p_obj), ((unsigned int)p_targ_obj|FORWARD_BIT), (unsigned int)oi)) { + /* forwarded by other, we need unalloc the allocated obj. We may waste some space if the allocation switched + block. The remaining part of the switched block cannot be revivied for next allocation of + object that has smaller size than this one. */ + assert( obj_is_fw_in_oi(p_obj)); + thread_local_unalloc(size, (Allocator*)collector); + return NULL; + } + + /* we forwarded the object */ + memcpy(p_targ_obj, p_obj, size); + + /* we need clear the bit to give major collection a clean status. 
*/ + if(gc_is_gen_mode()) + set_obj_info(p_targ_obj, oi&DUAL_MARKBITS_MASK); + +#ifdef MARK_BIT_FLIPPING + /* we need set MARK_BIT to indicate this object is processed for nongen forwarding */ + else + set_obj_info(p_targ_obj, oi|FLIP_MARK_BIT); + +#endif + + return p_targ_obj; + +} + +#endif /* _COLLECTOR_ALLOC_H_ */ Index: vm/gc_gen/src/thread/gc_thread.h =================================================================== --- vm/gc_gen/src/thread/gc_thread.h (revision 493420) +++ vm/gc_gen/src/thread/gc_thread.h (working copy) @@ -21,9 +21,12 @@ #ifndef _GC_THREAD_H_ #define _GC_THREAD_H_ -#include "../common/gc_block.h" +#include "../common/gc_space.h" #include "../common/gc_metadata.h" +#define ALLOC_ZEROING +#define ZEROING_SIZE 2*KB + extern unsigned int tls_gc_offset; inline void* gc_get_tls() @@ -42,6 +45,7 @@ typedef struct Allocator{ void *free; void *ceiling; + void* end; Block *alloc_block; Space* alloc_space; GC *gc; @@ -50,31 +54,76 @@ inline void thread_local_unalloc(unsigned int size, Allocator* allocator) { - void* free = allocator->free; - allocator->free = (void*)((unsigned int)free - size); - return; + void* free = allocator->free; + allocator->free = (void*)((unsigned int)free - size); + return; } +#ifdef ALLOC_ZEROING + +inline Partial_Reveal_Object* thread_local_alloc_zeroing(unsigned int size, Allocator* allocator) +{ + unsigned int free = (unsigned int)allocator->free; + unsigned int ceiling = (unsigned int)allocator->ceiling; + + unsigned int new_free = free + size; + + unsigned int block_ceiling = (unsigned int)allocator->end; + if( new_free > block_ceiling) + return NULL; + + unsigned int new_ceiling; + new_ceiling = new_free + ZEROING_SIZE; + if( new_ceiling > block_ceiling ) + new_ceiling = block_ceiling; + + allocator->ceiling = (void*)new_ceiling; + allocator->free = (void*)new_free; + memset((void*)ceiling, 0, new_ceiling - ceiling); + return (Partial_Reveal_Object*)free; + +} + +#endif /* ALLOC_ZEROING */ + inline Partial_Reveal_Object* thread_local_alloc(unsigned int size, Allocator* allocator) { - void* free = allocator->free; - void* ceiling = allocator->ceiling; + unsigned int free = (unsigned int)allocator->free; + unsigned int ceiling = (unsigned int)allocator->ceiling; + + unsigned int new_free = free + size; - void* new_free = (void*)((unsigned int)free + size); - - if (new_free <= ceiling){ - allocator->free= new_free; - return (Partial_Reveal_Object*)free; - } + if (new_free <= ceiling){ + allocator->free= (void*)new_free; + return (Partial_Reveal_Object*)free; + } - return NULL; +#ifndef ALLOC_ZEROING + + return NULL; + +#else + + return thread_local_alloc_zeroing(size, allocator); + +#endif /* #ifndef ALLOC_ZEROING */ + } inline void alloc_context_reset(Allocator* allocator) { - allocator->free = NULL; - allocator->ceiling = NULL; - allocator->alloc_block = NULL; + Block_Header* block = (Block_Header*)allocator->alloc_block; + /* it can be NULL if GC happens before the mutator resumes, or called by collector */ + if( block != NULL ){ + assert(block->status == BLOCK_IN_USE); + block->free = allocator->free; + block->status = BLOCK_USED; + allocator->alloc_block = NULL; + } + + allocator->free = NULL; + allocator->ceiling = NULL; + allocator->end = NULL; return; } Index: vm/gc_gen/src/thread/mutator.cpp =================================================================== --- vm/gc_gen/src/thread/mutator.cpp (revision 493420) +++ vm/gc_gen/src/thread/mutator.cpp (working copy) @@ -28,18 +28,19 @@ { /* FIXME:: make sure gc_info is cleared */ Mutator 
*mutator = (Mutator *)STD_MALLOC(sizeof(Mutator)); - mutator->free = NULL; - mutator->ceiling = NULL; - mutator->alloc_block = NULL; + memset(mutator, 0, sizeof(Mutator)); mutator->alloc_space = gc_get_nos((GC_Gen*)gc); mutator->gc = gc; - if(gc_requires_barriers()){ - mutator->rem_set = pool_get_entry(gc->metadata->free_set_pool); + if(gc_is_gen_mode()){ + mutator->rem_set = free_set_pool_get_entry(gc->metadata); assert(vector_block_is_empty(mutator->rem_set)); } - mutator->objects_with_finalizer = finalizer_weakref_get_free_block(); + if(!IGNORE_FINREF ) + mutator->obj_with_fin = finref_get_free_block(); + else + mutator->obj_with_fin = NULL; lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv @@ -60,14 +61,16 @@ Mutator *mutator = (Mutator *)gc_get_tls(); - if(gc_requires_barriers()){ /* put back the remset when a mutator exits */ + alloc_context_reset((Allocator*)mutator); + + if(gc_is_gen_mode()){ /* put back the remset when a mutator exits */ pool_put_entry(gc->metadata->mutator_remset_pool, mutator->rem_set); mutator->rem_set = NULL; } - if(mutator->objects_with_finalizer){ - pool_put_entry(gc->finalizer_weakref_metadata->objects_with_finalizer_pool, mutator->objects_with_finalizer); - mutator->objects_with_finalizer = NULL; + if(mutator->obj_with_fin){ + pool_put_entry(gc->finref_metadata->obj_with_fin_pool, mutator->obj_with_fin); + mutator->obj_with_fin = NULL; } lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv @@ -96,11 +99,21 @@ { Mutator *mutator = gc->mutator_list; while (mutator) { - mutator->rem_set = pool_get_entry(gc->metadata->free_set_pool); alloc_context_reset((Allocator*)mutator); - mutator_reset_objects_with_finalizer(mutator); mutator = mutator->next; } return; } +void gc_prepare_mutator_remset(GC* gc) +{ + Mutator *mutator = gc->mutator_list; + while (mutator) { + mutator->rem_set = free_set_pool_get_entry(gc->metadata); + if(!IGNORE_FINREF ) + mutator_reset_obj_with_fin(mutator); + mutator = mutator->next; + } + return; +} + Index: vm/gc_gen/src/thread/mutator.h =================================================================== --- vm/gc_gen/src/thread/mutator.h (revision 493420) +++ vm/gc_gen/src/thread/mutator.h (working copy) @@ -21,13 +21,14 @@ #ifndef _MUTATOR_H_ #define _MUTATOR_H_ -#include "../common/gc_common.h" +#include "../common/gc_space.h" /* Mutator thread local information for GC */ typedef struct Mutator { /* <-- first couple of fields are overloaded as Allocator */ - void* free; - void* ceiling; + void* free; + void* ceiling; + void* end; void* alloc_block; Space* alloc_space; GC* gc; @@ -35,7 +36,7 @@ /* END of Allocator --> */ Vector_Block* rem_set; - Vector_Block* objects_with_finalizer; + Vector_Block* obj_with_fin; Mutator* next; /* The gc info area associated with the next active thread. */ } Mutator; @@ -44,5 +45,6 @@ void mutator_reset(GC *gc); void gc_reset_mutator_context(GC* gc); +void gc_prepare_mutator_remset(GC* gc); #endif /*ifndef _MUTATOR_H_ */ Index: vm/gc_gen/src/thread/mutator_alloc.cpp =================================================================== --- vm/gc_gen/src/thread/mutator_alloc.cpp (revision 493420) +++ vm/gc_gen/src/thread/mutator_alloc.cpp (working copy) @@ -22,12 +22,8 @@ #include "../gen/gen.h" -#include "../finalizer_weakref/finalizer_weakref_metadata.h" +#include "../finalizer_weakref/finalizer_weakref.h" -/* classloader sometimes sets the bit for finalizible objects (?) 
*/ -inline unsigned int get_instance_data_size (unsigned int encoded_size) -{ return (encoded_size & NEXT_TO_HIGH_BIT_CLEAR_MASK); } - Managed_Object_Handle gc_alloc(unsigned size, Allocation_Handle ah, void *unused_gc_tls) { Managed_Object_Handle p_obj = NULL; @@ -36,22 +32,20 @@ assert((size % GC_OBJECT_ALIGNMENT) == 0); assert(ah); - /* FIXME:: this is outdated actually */ - size = get_instance_data_size(size); - - Mutator* mutator = (Mutator*)gc_get_tls(); - + Allocator* allocator = (Allocator*)gc_get_tls(); + if ( size > GC_OBJ_SIZE_THRESHOLD ) - p_obj = (Managed_Object_Handle)los_alloc(size, (Allocator*)mutator); - else - p_obj = (Managed_Object_Handle)nos_alloc(size, (Allocator*)mutator); + p_obj = (Managed_Object_Handle)los_alloc(size, allocator); + else{ + p_obj = (Managed_Object_Handle)nos_alloc(size, allocator); + } if( p_obj == NULL ) return NULL; obj_set_vt((Partial_Reveal_Object*)p_obj, ah); - if(type_has_finalizer((Partial_Reveal_VTable *)ah)) - mutator_finalizer_add_entry(mutator, (Partial_Reveal_Object*)p_obj); + if(!IGNORE_FINREF && type_has_finalizer((Partial_Reveal_VTable *)ah)) + mutator_add_finalizer((Mutator*)allocator, (Partial_Reveal_Object*)p_obj); return (Managed_Object_Handle)p_obj; } Index: vm/gc_gen/src/trace_forward/fspace.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace.cpp (revision 493420) +++ vm/gc_gen/src/trace_forward/fspace.cpp (working copy) @@ -22,64 +22,20 @@ #include "fspace.h" -Boolean NOS_PARTIAL_FORWARD = TRUE; +Boolean NOS_PARTIAL_FORWARD = FALSE; -void* nos_boundary = null; /* this is only for speeding up write barrier */ - Boolean forward_first_half; void* object_forwarding_boundary=NULL; -Boolean fspace_mark_object(Fspace* fspace, Partial_Reveal_Object *p_obj) -{ - obj_mark_in_vt(p_obj); - - unsigned int obj_word_index = OBJECT_WORD_INDEX_TO_MARKBIT_TABLE(p_obj); - unsigned int obj_offset_in_word = OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj); - - unsigned int *p_word = &(GC_BLOCK_HEADER(p_obj)->mark_table[obj_word_index]); - unsigned int word_mask = (1<heap_start; - Block_Header* last_block = (Block_Header*)blocks; - unsigned int start_idx = fspace->first_block_idx; - for(unsigned int i=0; i < fspace->num_managed_blocks; i++){ - Block_Header* block = (Block_Header*)&(blocks[i]); - block->free = (void*)((unsigned int)block + GC_BLOCK_HEADER_SIZE_BYTES); - block->ceiling = (void*)((unsigned int)block + GC_BLOCK_SIZE_BYTES); - block->base = block->free; - block->block_idx = i + start_idx; - block->status = BLOCK_FREE; - last_block->next = block; - last_block = block; - } - last_block->next = NULL; - fspace->blocks = blocks; - - return; -} - struct GC_Gen; void gc_set_nos(GC_Gen* gc, Space* space); -void fspace_initialize(GC* gc, void* start, unsigned int fspace_size) + +void fspace_initialize(GC* gc, void* start, unsigned int fspace_size, unsigned int commit_size) { assert( (fspace_size%GC_BLOCK_SIZE_BYTES) == 0 ); Fspace* fspace = (Fspace *)STD_MALLOC(sizeof(Fspace)); @@ -90,14 +46,20 @@ fspace->num_total_blocks = fspace_size >> GC_BLOCK_SHIFT_COUNT; void* reserved_base = start; - int status = port_vmem_commit(&reserved_base, fspace_size, gc->allocated_memory); - assert(status == APR_SUCCESS && reserved_base == start); - - memset(reserved_base, 0, fspace_size); - fspace->committed_heap_size = fspace_size; + /* commit fspace mem */ + vm_commit_mem(reserved_base, commit_size); + memset(reserved_base, 0, commit_size); + + fspace->committed_heap_size = commit_size; fspace->heap_start = 
reserved_base; + +#ifdef STATIC_NOS_MAPPING fspace->heap_end = (void *)((unsigned int)reserved_base + fspace->reserved_heap_size); - fspace->num_managed_blocks = fspace_size >> GC_BLOCK_SHIFT_COUNT; +#else /* for dynamic mapping, nos->heap_end is gc->heap_end */ + fspace->heap_end = (void *)((unsigned int)reserved_base + fspace->committed_heap_size); +#endif + + fspace->num_managed_blocks = commit_size >> GC_BLOCK_SHIFT_COUNT; fspace->first_block_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, reserved_base); fspace->ceiling_block_idx = fspace->first_block_idx + fspace->num_managed_blocks - 1; @@ -105,47 +67,51 @@ fspace->num_used_blocks = 0; fspace->free_block_idx = fspace->first_block_idx; - fspace_init_blocks(fspace); + space_init_blocks((Blocked_Space*)fspace); - fspace->mark_object_func = fspace_mark_object; - fspace->move_object = TRUE; fspace->num_collections = 0; + fspace->time_collections = 0; + fspace->survive_ratio = 0.2f; + fspace->gc = gc; gc_set_nos((GC_Gen*)gc, (Space*)fspace); /* above is same as Mspace init --> */ - nos_boundary = fspace->heap_start; - forward_first_half = TRUE; + /* we always disable partial forwarding in non-gen mode. */ + if( !gc_is_gen_mode() ) + NOS_PARTIAL_FORWARD = FALSE; + if( NOS_PARTIAL_FORWARD ) object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks >>1 ]; else object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks]; - + return; } void fspace_destruct(Fspace *fspace) { fspace_destruct_blocks(fspace); - port_vmem_decommit(fspace->heap_start, fspace->committed_heap_size, fspace->gc->allocated_memory); - STD_FREE(fspace); - + STD_FREE(fspace); } -void reset_fspace_for_allocation(Fspace* fspace) +void fspace_reset_for_allocation(Fspace* fspace) { unsigned int first_idx = fspace->first_block_idx; - unsigned int marked_start_idx = 0; + unsigned int marked_start_idx = 0; //was for oi markbit reset, now useless unsigned int marked_last_idx = 0; + Boolean is_major_collection = (fspace->gc->collect_kind != MINOR_COLLECTION); + Boolean gen_mode = gc_is_gen_mode(); - if( fspace->gc->collect_kind == MAJOR_COLLECTION || - NOS_PARTIAL_FORWARD == FALSE || !gc_requires_barriers()) + if( is_major_collection || + NOS_PARTIAL_FORWARD == FALSE || !gen_mode) { fspace->free_block_idx = first_idx; fspace->ceiling_block_idx = first_idx + fspace->num_managed_blocks - 1; forward_first_half = TRUE; /* only useful for not-FORWARD_ALL*/ + }else{ if(forward_first_half){ fspace->free_block_idx = first_idx; @@ -160,7 +126,6 @@ } forward_first_half = forward_first_half^1; } - Block* blocks = fspace->blocks; unsigned int num_freed = 0; @@ -168,41 +133,55 @@ unsigned int new_last_idx = fspace->ceiling_block_idx - first_idx; for(unsigned int i = new_start_idx; i <= new_last_idx; i++){ Block_Header* block = (Block_Header*)&(blocks[i]); + block->src = NULL; + block->next_src = NULL; + assert(!block->dest_counter); if(block->status == BLOCK_FREE) continue; block->status = BLOCK_FREE; - block->free = GC_BLOCK_BODY(block); - if( !gc_requires_barriers() || fspace->gc->collect_kind == MAJOR_COLLECTION ) - block_clear_mark_table(block); + block->free = block->base; num_freed ++; } - for(unsigned int i = marked_start_idx; i <= marked_last_idx; i++){ - Block_Header* block = (Block_Header*)&(blocks[i]); - if(block->status == BLOCK_FREE) continue; - block_clear_markbits(block); - } fspace->num_used_blocks = fspace->num_used_blocks - num_freed; - + return; } void collector_execute_task(GC* gc, TaskType task_func, Space* space); +#include "../gen/gen.h" 
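Illustrative sketch (not part of the patch): the ALLOC_ZEROING fast path introduced in gc_thread.h above keeps three thread-local pointers, free, ceiling and end, and clears memory in ZEROING_SIZE steps just ahead of allocation instead of zeroing a whole block when it is handed out. The stand-alone restatement below uses invented names (Tla, tla_alloc); refilling with a new block when tla_alloc returns NULL is left to the caller, as fspace_alloc_block does in the patch.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define TLA_ZEROING_STEP (2 * 1024)   /* mirrors ZEROING_SIZE */

typedef struct {
    uintptr_t free;      /* next allocation address         */
    uintptr_t ceiling;   /* everything below this is zeroed */
    uintptr_t end;       /* hard end of the current block   */
} Tla;

static void *tla_alloc(Tla *tla, size_t size)
{
    uintptr_t result   = tla->free;
    uintptr_t new_free = result + size;

    if (new_free <= tla->ceiling) {        /* fast path: already zeroed */
        tla->free = new_free;
        return (void *)result;
    }
    if (new_free > tla->end)               /* block exhausted */
        return NULL;                       /* caller takes a new block  */

    /* Zero only a small window past the request instead of the whole block
       up front, so fresh pages are touched close to when they are used. */
    uintptr_t new_ceiling = new_free + TLA_ZEROING_STEP;
    if (new_ceiling > tla->end)
        new_ceiling = tla->end;
    memset((void *)tla->ceiling, 0, new_ceiling - tla->ceiling);

    tla->ceiling = new_ceiling;
    tla->free    = new_free;
    return (void *)result;
}
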
+unsigned int mspace_free_block_idx; + /* world is stopped when starting fspace_collection */ void fspace_collection(Fspace *fspace) { fspace->num_collections++; GC* gc = fspace->gc; + mspace_free_block_idx = ((GC_Gen*)gc)->mos->free_block_idx; + + /* we should not destruct rootset structure in case we need fall back */ + pool_iterator_init(gc->metadata->gc_rootset_pool); - if(gc_requires_barriers()){ - /* generational GC. Only trace nos */ - collector_execute_task(gc, (TaskType)trace_forward_fspace, (Space*)fspace); - }else{ - /* non-generational GC. Mark the whole heap (nos, mos, and los) */ - pool_iterator_init(gc->metadata->gc_rootset_pool); - collector_execute_task(gc, (TaskType)mark_copy_fspace, (Space*)fspace); + switch(fspace->collect_algorithm){ + +#ifdef MARK_BIT_FLIPPING + + case MINOR_NONGEN_FORWARD_POOL: + collector_execute_task(gc, (TaskType)nongen_forward_pool, (Space*)fspace); + break; + +#endif /*#ifdef MARK_BIT_FLIPPING */ + + case MINOR_GEN_FORWARD_POOL: + collector_execute_task(gc, (TaskType)gen_forward_pool, (Space*)fspace); + break; + + default: + printf("\nSpecified minor collection algorithm doesn't exist in built module!\n"); + exit(0); + break; } - + return; } Index: vm/gc_gen/src/trace_forward/fspace.h =================================================================== --- vm/gc_gen/src/trace_forward/fspace.h (revision 493420) +++ vm/gc_gen/src/trace_forward/fspace.h (working copy) @@ -22,6 +22,7 @@ #define _FROM_SPACE_H_ #include "../thread/gc_thread.h" +#include "../thread/collector_alloc.h" /* * In our Gen GC, not all live objects are copied to tspace space, the newer baby will @@ -39,9 +40,11 @@ unsigned int reserved_heap_size; unsigned int committed_heap_size; unsigned int num_collections; + int64 time_collections; + float survive_ratio; + unsigned int collect_algorithm; GC* gc; Boolean move_object; - Boolean (*mark_object_func)(Fspace* space, Partial_Reveal_Object* p_obj); /* END of Space --> */ Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ @@ -58,27 +61,32 @@ } Fspace; -void fspace_initialize(GC* gc, void* start, unsigned int fspace_size); +void fspace_initialize(GC* gc, void* start, unsigned int fspace_size, unsigned int commit_size); void fspace_destruct(Fspace *fspace); -inline Boolean fspace_has_free_block(Fspace* fspace){ return fspace->free_block_idx <= fspace->ceiling_block_idx; } -inline unsigned int fspace_free_memory_size(Fspace* fspace){ return GC_BLOCK_SIZE_BYTES * (fspace->ceiling_block_idx - fspace->free_block_idx + 1); } -inline Boolean fspace_used_memory_size(Fspace* fspace){ return GC_BLOCK_SIZE_BYTES * fspace->num_used_blocks; } +inline Boolean obj_is_dead_in_minor_forward_gc(Collector *collector, Partial_Reveal_Object *p_obj) +{ + return (!obj_is_marked_or_fw_in_oi(p_obj)) ; +} - void* fspace_alloc(unsigned size, Allocator *allocator); -Boolean fspace_mark_object(Fspace* fspace, Partial_Reveal_Object *p_obj); +void fspace_reset_for_allocation(Fspace* fspace); -void reset_fspace_for_allocation(Fspace* fspace); +/* gen mode */ +void gen_forward_pool(Collector* collector); +void gen_forward_steal(Collector* collector); +/* nongen mode */ +void nongen_slide_copy(Collector* collector); +#ifdef MARK_BIT_FLIPPING -Boolean fspace_compute_object_target(Collector* collector, Fspace* fspace); -void fspace_copy_collect(Collector* collector, Fspace* fspace); +void nongen_forward_steal(Collector* collector); +void nongen_forward_pool(Collector* collector); -void trace_forward_fspace(Collector* collector); -void 
mark_copy_fspace(Collector* collector); +#endif /* MARK_BIT_FLIPPING */ + void fspace_collection(Fspace* fspace); #endif // _FROM_SPACE_H_ Index: vm/gc_gen/src/trace_forward/fspace_alloc.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_alloc.cpp (revision 493420) +++ vm/gc_gen/src/trace_forward/fspace_alloc.cpp (working copy) @@ -22,13 +22,7 @@ static Boolean fspace_alloc_block(Fspace* fspace, Allocator* allocator) { - Block_Header* alloc_block = (Block_Header* )allocator->alloc_block; - /* put back the used block */ - if(alloc_block != NULL){ /* it is NULL at first time */ - assert(alloc_block->status == BLOCK_IN_USE); - alloc_block->status = BLOCK_USED; - alloc_block->free = allocator->free; - } + alloc_context_reset(allocator); /* now try to get a new block */ unsigned int old_free_idx = fspace->free_block_idx; @@ -41,17 +35,30 @@ continue; } /* ok, got one */ - alloc_block = (Block_Header*)&(fspace->blocks[allocated_idx - fspace->first_block_idx]); + Block_Header* alloc_block = (Block_Header*)&(fspace->blocks[allocated_idx - fspace->first_block_idx]); assert(alloc_block->status == BLOCK_FREE); alloc_block->status = BLOCK_IN_USE; - fspace->num_used_blocks++; - memset(alloc_block->free, 0, GC_BLOCK_BODY_SIZE_BYTES); /* set allocation context */ - allocator->free = alloc_block->free; + void* new_free = alloc_block->free; + allocator->free = new_free; + +#ifndef ALLOC_ZEROING + allocator->ceiling = alloc_block->ceiling; + memset(new_free, 0, GC_BLOCK_BODY_SIZE_BYTES); + +#else + /* the first-time zeroing area includes block header, to make subsequent allocs page aligned */ + unsigned int zeroing_size = ZEROING_SIZE - GC_BLOCK_HEADER_SIZE_BYTES; + allocator->ceiling = (void*)((unsigned int)new_free + zeroing_size); + memset(new_free, 0, zeroing_size); + +#endif /* #ifndef ALLOC_ZEROING */ + + allocator->end = alloc_block->ceiling; allocator->alloc_block = (Block*)alloc_block; - + return TRUE; } @@ -73,7 +80,7 @@ while( !fspace_alloc_block(fspace, allocator)){ vm_gc_lock_enum(); /* after holding lock, try if other thread collected already */ - if ( !fspace_has_free_block(fspace) ) { + if ( !space_has_free_block((Blocked_Space*)fspace) ) { gc_reclaim_heap(allocator->gc, GC_CAUSE_NOS_IS_FULL); } vm_gc_unlock_enum(); Index: vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp (revision 493420) +++ vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp (working copy) @@ -1,224 +0,0 @@ -/* - * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** - * @author Xiao-Feng Li, 2006/10/05 - */ - -#include "fspace.h" -#include "../mark_compact/mspace.h" -#include "../mark_sweep/lspace.h" -#include "../thread/collector.h" -#include "../finalizer_weakref/finalizer_weakref.h" - -static volatile Block_Header* current_copy_block; -static volatile Block_Header* current_target_block; - -static Block_Header* fspace_get_first_copy_block(Fspace* fspace) -{ return (Block_Header*)fspace->blocks; } - -static Block_Header* fspace_get_next_copy_block(Fspace* fspace) -{ - /* FIXME::FIXME:: this only works for full space copying */ - Block_Header* cur_copy_block = (Block_Header*)current_copy_block; - - while(cur_copy_block != NULL){ - Block_Header* next_copy_block = current_copy_block->next; - - Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)¤t_copy_block, next_copy_block, cur_copy_block); - if(temp == cur_copy_block) - return cur_copy_block; - - cur_copy_block = (Block_Header*)current_copy_block; - } - /* run out fspace blocks for copying */ - return NULL; -} - - -/* copying of fspace is only for MAJOR_COLLECTION or non-generational partial copy collection */ -static Block_Header* mspace_get_first_target_block_for_nos(Mspace* mspace) -{ - return (Block_Header*)&mspace->blocks[mspace->free_block_idx-mspace->first_block_idx]; -} - -static Block_Header* mspace_get_next_target_block_for_nos(Mspace* mspace) -{ - Block_Header* mspace_heap_end = (Block_Header*)space_heap_end((Space*)mspace); - Block_Header* cur_target_block = (Block_Header*)current_target_block; - Block_Header* next_target_block = current_target_block->next; - - while(cur_target_block < mspace_heap_end){ - Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)¤t_target_block, next_target_block, cur_target_block); - if(temp == cur_target_block) - return cur_target_block; - - cur_target_block = (Block_Header*)current_target_block; - next_target_block = current_target_block->next; - } - /* mos is always able to hold nos in minor collection */ - assert(0); - return NULL; -} - -struct GC_Gen; -Space* gc_get_mos(GC_Gen* gc); - -Boolean fspace_compute_object_target(Collector* collector, Fspace* fspace) -{ - Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)collector->gc); - Block_Header* dest_block = mspace_get_next_target_block_for_nos(mspace); - Block_Header* curr_block = fspace_get_next_copy_block(fspace); - - assert(dest_block->status == BLOCK_FREE); - dest_block->status = BLOCK_USED; - void* dest_addr = GC_BLOCK_BODY(dest_block); - - while( curr_block ){ - unsigned int mark_bit_idx; - Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &mark_bit_idx); - - while( p_obj ){ - assert( obj_is_marked_in_vt(p_obj)); - - unsigned int obj_size = vm_object_size(p_obj); - - if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){ - dest_block->free = dest_addr; - dest_block = mspace_get_next_target_block_for_nos(mspace); - if(dest_block == NULL) return FALSE; - assert(dest_block->status == BLOCK_FREE); - dest_block->status = BLOCK_USED; - dest_addr = GC_BLOCK_BODY(dest_block); - } - assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block)); - - Obj_Info_Type obj_info = get_obj_info(p_obj); - if( obj_info != 0 ) { - collector->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info)); - } - set_forwarding_pointer_in_obj_info(p_obj, dest_addr); - - /* FIXME: should use alloc to handle alignment requirement */ - dest_addr = (void *) WORD_SIZE_ROUND_UP((unsigned 
int) dest_addr + obj_size); - p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx); - - } - curr_block = fspace_get_next_copy_block(fspace); - } - - return TRUE; -} - -#include "../verify/verify_live_heap.h" - -void fspace_copy_collect(Collector* collector, Fspace* fspace) -{ - Block_Header* curr_block = fspace_get_next_copy_block(fspace); - - while( curr_block ){ - unsigned int mark_bit_idx; - Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &mark_bit_idx); - - while( p_obj ){ - assert( obj_is_marked_in_vt(p_obj)); - obj_unmark_in_vt(p_obj); - - unsigned int obj_size = vm_object_size(p_obj); - Partial_Reveal_Object *p_target_obj = get_forwarding_pointer_in_obj_info(p_obj); - memmove(p_target_obj, p_obj, obj_size); - - if (verify_live_heap) - /* we forwarded it, we need remember it for verification */ - event_collector_move_obj(p_obj, p_target_obj, collector); - - set_obj_info(p_target_obj, 0); - - p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx); - } - - curr_block = fspace_get_next_copy_block(fspace); - } - - return; -} - -void gc_update_repointed_refs(Collector* collector); - -static volatile unsigned int num_marking_collectors = 0; -static volatile unsigned int num_installing_collectors = 0; - -void mark_copy_fspace(Collector* collector) -{ - GC* gc = collector->gc; - Fspace* fspace = (Fspace*)collector->collect_space; - Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc); - - unsigned int num_active_collectors = gc->num_active_collectors; - - /* Pass 1: mark all live objects in heap, and save all the slots that - have references that are going to be repointed */ - atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); - - mark_scan_heap(collector); - - unsigned int old_num = atomic_inc32(&num_marking_collectors); - if( ++old_num == num_active_collectors ){ - /* world for single thread, e.g., verification of last phase, and preparation of next phase */ - current_copy_block = fspace_get_first_copy_block(fspace); - current_target_block = mspace_get_first_target_block_for_nos(mspace); - - collector_process_finalizer_weakref(collector); - - /* let other collectors go */ - num_marking_collectors++; - } - - while(num_marking_collectors != num_active_collectors + 1); - - /* Pass 2: assign each live fspace object a new location */ - atomic_cas32( &num_installing_collectors, 0, num_active_collectors+1); - - fspace_compute_object_target(collector, fspace); - - old_num = atomic_inc32(&num_installing_collectors); - if( ++old_num == num_active_collectors){ - /* nothing to do in this single thread region */ - mspace->free_block_idx = current_target_block->block_idx; - num_installing_collectors++; - } - - while(num_installing_collectors != num_active_collectors + 1); - - /* FIXME:: temporary. 
let only one thread go forward */ - if( collector->thread_handle != 0 ) return; - - gc_update_repointed_refs(collector); - - gc_post_process_finalizer_weakref(gc); - - /* FIXME:: Pass 2 and 3 can be merged into one pass */ - /* Pass 3: copy live fspace object to new location */ - current_copy_block = fspace_get_first_copy_block(fspace); - fspace_copy_collect(collector, fspace); - - /* FIXME:: should be collector_restore_obj_info(collector) */ - gc_restore_obj_info(gc); - - reset_fspace_for_allocation(fspace); - - return; -} Index: vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (revision 493420) +++ vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (working copy) @@ -1,302 +0,0 @@ - -/* - * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @author Xiao-Feng Li, 2006/10/05 - */ - -#include "fspace.h" -#include "../thread/collector.h" -#include "../common/gc_metadata.h" -#include "../finalizer_weakref/finalizer_weakref.h" - -static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace) -{ - assert(obj_belongs_to_space(p_obj, (Space*)fspace)); - return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); -} - -static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) -{ - Partial_Reveal_Object *p_obj = *p_ref; - if (p_obj == NULL) return; - - /* the slot can be in tspace or fspace, we don't care. - we care only if the reference in the slot is pointing to fspace */ - if (obj_belongs_to_space(p_obj, collector->collect_space)) - collector_tracestack_push(collector, p_ref); - - return; -} - -static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) -{ - if (!object_has_ref_field(p_obj)) return; - - void *slot; - - /* scan array object */ - if (object_is_array(p_obj)) { - Partial_Reveal_Object* array = p_obj; - assert(!obj_is_primitive_array(array)); - - int32 array_length = vector_get_length((Vector_Handle) array); - for (int i = 0; i < array_length; i++) { - slot = vector_get_element_address_ref((Vector_Handle) array, i); - scan_slot(collector, (Partial_Reveal_Object **)slot); - } - return; - } - - /* scan non-array object */ - int *offset_scanner = init_object_scanner(p_obj); - while (true) { - slot = offset_get_ref(offset_scanner, p_obj); - if (slot == NULL) break; - - scan_slot(collector, (Partial_Reveal_Object **)slot); - offset_scanner = offset_next_ref(offset_scanner); - } - - scan_weak_reference(collector, p_obj, scan_slot); - - return; -} - -/* NOTE:: At this point, p_ref can be in anywhere like root, and other spaces, but *p_ref must be in fspace, - since only slot which points to object in fspace could be added into TraceStack. - The problem is the *p_ref may be forwarded already so that, when we come here we find it's pointing to tospace. 
- We will simply return for that case. It might be forwarded due to: - 1. two difference slots containing same reference; - 2. duplicate slots in remset ( we use SSB for remset, no duplication filtering.) - The same object can be traced by the thread itself, or by other thread. -*/ - -#include "../verify/verify_live_heap.h" - -static void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) -{ - Space* space = collector->collect_space; - GC* gc = collector->gc; - Partial_Reveal_Object *p_obj = *p_ref; - - if(!obj_belongs_to_space(p_obj, space)) return; - - /* Fastpath: object has already been forwarded, update the ref slot */ - if(obj_is_forwarded_in_vt(p_obj)) { - *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); - return; - } - - /* only mark the objects that will remain in fspace */ - if(!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) { - assert(!obj_is_forwarded_in_vt(p_obj)); - /* this obj remains in fspace, remember its ref slot for next GC if p_ref is not root */ - if( !address_belongs_to_space(p_ref, space) && address_belongs_to_gc_heap(p_ref, gc)) - collector_remset_add_entry(collector, p_ref); - - if(fspace_mark_object((Fspace*)space, p_obj)) - scan_object(collector, p_obj); - - return; - } - - /* following is the logic for forwarding */ - Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj); - - /* if p_target_obj is NULL, it is forwarded by other thread. - Note: a race condition here, it might be forwarded by other, but not set the - forwarding pointer yet. We need spin here to get the forwarding pointer. - We can implement the collector_forward_object() so that the forwarding pointer - is set in the atomic instruction, which requires to roll back the mos_alloced - space. That is easy for thread local block allocation cancellation. */ - if( p_target_obj == NULL ){ - *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); - return; - } - /* otherwise, we successfully forwarded */ - *p_ref = p_target_obj; - - /* we forwarded it, we need remember it for verification. */ - if(verify_live_heap) { - event_collector_move_obj(p_obj, p_target_obj, collector); - } - - scan_object(collector, p_target_obj); - return; -} - -static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref) -{ - forward_object(collector, p_ref); - - Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack; - while( !vector_stack_is_empty(trace_stack)){ - p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack); - forward_object(collector, p_ref); - trace_stack = (Vector_Block*)collector->trace_stack; - } - - return; -} - -/* for tracing phase termination detection */ -static volatile unsigned int num_finished_collectors = 0; - -static void collector_trace_rootsets(Collector* collector) -{ - GC* gc = collector->gc; - GC_Metadata* metadata = gc->metadata; - - unsigned int num_active_collectors = gc->num_active_collectors; - atomic_cas32( &num_finished_collectors, 0, num_active_collectors); - - Space* space = collector->collect_space; - collector->trace_stack = pool_get_entry(metadata->free_task_pool); - - /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */ - Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool); - - /* first step: copy all root objects to trace tasks. 
*/ - while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); - while(!vector_block_iterator_end(root_set,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; - iter = vector_block_iterator_advance(root_set,iter); - if(*p_ref == NULL) continue; /* root ref cann't be NULL, but remset can be */ - if(obj_belongs_to_space(*p_ref, space)){ - collector_tracestack_push(collector, p_ref); - } - } - vector_block_clear(root_set); - pool_put_entry(metadata->free_set_pool, root_set); - root_set = pool_get_entry(metadata->gc_rootset_pool); - } - /* put back the last trace_stack task */ - pool_put_entry(metadata->mark_task_pool, collector->trace_stack); - - /* second step: iterate over the trace tasks and forward objects */ - collector->trace_stack = pool_get_entry(metadata->free_task_pool); - -retry: - Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool); - - while(trace_task){ - unsigned int* iter = vector_block_iterator_init(trace_task); - while(!vector_block_iterator_end(trace_task,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; - iter = vector_block_iterator_advance(trace_task,iter); - assert(*p_ref); /* a task can't be NULL, it was checked before put into the task stack */ - /* in sequential version, we only trace same object once, but we were using a local hashset for that, - which couldn't catch the repetition between multiple collectors. This is subject to more study. */ - - /* FIXME:: we should not let root_set empty during working, other may want to steal it. - degenerate my stack into root_set, and grab another stack */ - - /* a task has to belong to collected space, it was checked before put into the stack */ - trace_object(collector, p_ref); - } - vector_stack_clear(trace_task); - pool_put_entry(metadata->free_task_pool, trace_task); - trace_task = pool_get_entry(metadata->mark_task_pool); - } - - atomic_inc32(&num_finished_collectors); - while(num_finished_collectors != num_active_collectors){ - if( pool_is_empty(metadata->mark_task_pool)) continue; - /* we can't grab the task here, because of a race condition. If we grab the task, - and the pool is empty, other threads may fall to this barrier and then pass. 
*/ - atomic_dec32(&num_finished_collectors); - goto retry; - } - - /* now we are done, but each collector has a private stack that is empty */ - trace_task = (Vector_Block*)collector->trace_stack; - vector_stack_clear(trace_task); - pool_put_entry(metadata->free_task_pool, trace_task); - collector->trace_stack = NULL; - - return; -} - -void trace_forward_fspace(Collector* collector) -{ - GC* gc = collector->gc; - Fspace* space = (Fspace*)collector->collect_space; - - collector_trace_rootsets(collector); - - /* the rest work is not enough for parallelization, so let only one thread go */ - if( collector->thread_handle != 0 ) return; - - collector_process_finalizer_weakref(collector); - - gc_update_repointed_refs(collector); - - gc_post_process_finalizer_weakref(gc); - - reset_fspace_for_allocation(space); - - return; - -} - -Boolean obj_is_dead_in_minor_forward_collection(Collector *collector, Partial_Reveal_Object *p_obj) -{ - Space *space = collector->collect_space; - Boolean belong_to_nos = obj_belongs_to_space(p_obj, space); - - if(!belong_to_nos) - return FALSE; - - Boolean space_to_be_forwarded = fspace_object_to_be_forwarded(p_obj, (Fspace*)space); - Boolean forwarded = obj_is_forwarded_in_vt(p_obj); - Boolean marked = obj_is_marked_in_vt(p_obj); - - return (space_to_be_forwarded && !forwarded) || (!space_to_be_forwarded && !marked); -} - -void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref) -{ - GC *gc = collector->gc; - GC_Metadata* metadata = gc->metadata; - - collector->trace_stack = pool_get_entry(metadata->free_task_pool); - collector_tracestack_push(collector, p_ref); - pool_put_entry(metadata->mark_task_pool, collector->trace_stack); - -//collector->rep_set = pool_get_entry(metadata->free_set_pool); /* has got collector->rep_set in caller */ - collector->trace_stack = pool_get_entry(metadata->free_task_pool); - Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool); - while(trace_task){ - unsigned int* iter = vector_block_iterator_init(trace_task); - while(!vector_block_iterator_end(trace_task,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; - iter = vector_block_iterator_advance(trace_task,iter); - assert(*p_ref); - trace_object(collector, p_ref); - } - vector_stack_clear(trace_task); - pool_put_entry(metadata->free_task_pool, trace_task); - trace_task = pool_get_entry(metadata->mark_task_pool); - } - - trace_task = (Vector_Block*)collector->trace_stack; - vector_stack_clear(trace_task); - pool_put_entry(metadata->free_task_pool, trace_task); - collector->trace_stack = NULL; -} Index: vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp (revision 0) +++ vm/gc_gen/src/trace_forward/fspace_gen_forward_pool.cpp (revision 0) @@ -0,0 +1,313 @@ + +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include "fspace.h" +#include "../thread/collector.h" +#include "../common/gc_metadata.h" +#include "../finalizer_weakref/finalizer_weakref.h" + +static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace) +{ + assert(obj_belongs_to_nos(p_obj)); + return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); +} + +static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) +{ + Partial_Reveal_Object *p_obj = *p_ref; + if (p_obj == NULL) return; + + /* the slot can be in tspace or fspace, we don't care. + we care only if the reference in the slot is pointing to fspace */ + if (obj_belongs_to_nos(p_obj)) + collector_tracestack_push(collector, p_ref); + + return; +} + +static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +{ + if (!object_has_ref_field(p_obj)) return; + + void *slot; + + /* scan array object */ + if (object_is_array(p_obj)) { + Partial_Reveal_Object* array = p_obj; + assert(!obj_is_primitive_array(array)); + + int32 array_length = vector_get_length((Vector_Handle) array); + for (int i = 0; i < array_length; i++) { + slot = vector_get_element_address_ref((Vector_Handle) array, i); + scan_slot(collector, (Partial_Reveal_Object **)slot); + } + return; + } + + /* scan non-array object */ + int *offset_scanner = init_object_scanner(p_obj); + while (true) { + slot = offset_get_ref(offset_scanner, p_obj); + if (slot == NULL) break; + + scan_slot(collector, (Partial_Reveal_Object **)slot); + offset_scanner = offset_next_ref(offset_scanner); + } + +#ifndef BUILD_IN_REFERENT + scan_weak_reference(collector, p_obj, scan_slot); +#endif + + return; +} + +/* NOTE:: At this point, p_ref can be in anywhere like root, and other spaces, but *p_ref must be in fspace, + since only slot which points to object in fspace could be added into TraceStack. + The problem is the *p_ref may be forwarded already so that, when we come here we find it's pointing to tospace. + We will simply return for that case. It might be forwarded due to: + 1. two difference slots containing same reference; + 2. duplicate slots in remset ( we use SSB for remset, no duplication filtering.) + The same object can be traced by the thread itself, or by other thread. +*/ + +#include "../verify/verify_live_heap.h" + +static void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) +{ + Space* space = collector->collect_space; + GC* gc = collector->gc; + Partial_Reveal_Object *p_obj = *p_ref; + + if(!obj_belongs_to_nos(p_obj)) return; + + /* Fastpath: object has already been forwarded, update the ref slot */ + if(obj_is_fw_in_oi(p_obj)) { + *p_ref = obj_get_fw_in_oi(p_obj); + return; + } + + /* only mark the objects that will remain in fspace */ + if(NOS_PARTIAL_FORWARD && !fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) { + assert(!obj_is_fw_in_oi(p_obj)); + /* this obj remains in fspace, remember its ref slot for next GC if p_ref is not root. + we don't need remember root ref. 
Actually it's wrong to rem root ref since they change in next GC */ + if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc)) + collector_remset_add_entry(collector, p_ref); + + if(obj_mark_in_oi(p_obj)) + scan_object(collector, p_obj); + + return; + } + + /* following is the logic for forwarding */ + Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj); + + /* if p_target_obj is NULL, it is forwarded by other thread. + Note: a race condition here, it might be forwarded by other, but not set the + forwarding pointer yet. We need spin here to get the forwarding pointer. + We can implement the collector_forward_object() so that the forwarding pointer + is set in the atomic instruction, which requires to roll back the mos_alloced + space. That is easy for thread local block allocation cancellation. */ + if( p_target_obj == NULL ){ + if(collector->result == FALSE ){ + /* failed to forward, let's get back to controller. */ + vector_stack_clear(collector->trace_stack); + return; + } + + Partial_Reveal_Object *p_new_obj = obj_get_fw_in_oi(p_obj); + assert(p_new_obj); + *p_ref = p_new_obj; + return; + } + /* otherwise, we successfully forwarded */ + *p_ref = p_target_obj; + + /* we forwarded it, we need remember it for verification. */ + if(verify_live_heap) { + event_collector_move_obj(p_obj, p_target_obj, collector); + } + + scan_object(collector, p_target_obj); + return; +} + +static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref) +{ + forward_object(collector, p_ref); + + Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack; + while( !vector_stack_is_empty(trace_stack)){ + p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack); + forward_object(collector, p_ref); + trace_stack = (Vector_Block*)collector->trace_stack; + } + + return; +} + +/* for tracing phase termination detection */ +static volatile unsigned int num_finished_collectors = 0; + +static void collector_trace_rootsets(Collector* collector) +{ + GC* gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + unsigned int num_active_collectors = gc->num_active_collectors; + atomic_cas32( &num_finished_collectors, 0, num_active_collectors); + + Space* space = collector->collect_space; + collector->trace_stack = free_task_pool_get_entry(metadata); + + /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */ + Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); + + /* first step: copy all root objects to trace tasks. 
*/ + while(root_set){ + unsigned int* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(root_set,iter); + if(*p_ref == NULL) continue; /* root ref cann't be NULL, but remset can be */ + if(obj_belongs_to_nos(*p_ref)){ + collector_tracestack_push(collector, p_ref); + } + } + root_set = pool_iterator_next(metadata->gc_rootset_pool); + } + /* put back the last trace_stack task */ + pool_put_entry(metadata->mark_task_pool, collector->trace_stack); + + /* second step: iterate over the trace tasks and forward objects */ + collector->trace_stack = free_task_pool_get_entry(metadata); + +retry: + Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool); + + while(trace_task){ + unsigned int* iter = vector_block_iterator_init(trace_task); + while(!vector_block_iterator_end(trace_task,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(trace_task,iter); + assert(*p_ref); /* a task can't be NULL, it was checked before put into the task stack */ + /* in sequential version, we only trace same object once, but we were using a local hashset for that, + which couldn't catch the repetition between multiple collectors. This is subject to more study. */ + + /* FIXME:: we should not let root_set empty during working, other may want to steal it. + degenerate my stack into root_set, and grab another stack */ + + /* a task has to belong to collected space, it was checked before put into the stack */ + trace_object(collector, p_ref); + if(collector->result == FALSE) break; /* force return */ + } + vector_stack_clear(trace_task); + pool_put_entry(metadata->free_task_pool, trace_task); + if(collector->result == FALSE){ + gc_task_pool_clear(metadata->mark_task_pool); + break; /* force return */ + } + + trace_task = pool_get_entry(metadata->mark_task_pool); + } + + atomic_inc32(&num_finished_collectors); + while(num_finished_collectors != num_active_collectors){ + if( pool_is_empty(metadata->mark_task_pool)) continue; + /* we can't grab the task here, because of a race condition. If we grab the task, + and the pool is empty, other threads may fall to this barrier and then pass. 
*/ + atomic_dec32(&num_finished_collectors); + goto retry; + } + + /* now we are done, but each collector has a private stack that is empty */ + trace_task = (Vector_Block*)collector->trace_stack; + vector_stack_clear(trace_task); + pool_put_entry(metadata->free_task_pool, trace_task); + collector->trace_stack = NULL; + + return; +} + +void gen_forward_pool(Collector* collector) +{ + GC* gc = collector->gc; + Fspace* space = (Fspace*)collector->collect_space; + + collector_trace_rootsets(collector); + + /* the rest work is not enough for parallelization, so let only one thread go */ + if( collector->thread_handle != 0 ) return; + + gc->collect_result = gc_collection_result(gc); + if(!gc->collect_result) return; + + if(!IGNORE_FINREF ) + collector_identify_finref(collector); +#ifndef BUILD_IN_REFERENT + else { + gc_set_weakref_sets(gc); + update_ref_ignore_finref(collector); + } +#endif + + gc_fix_rootset(collector); + + if(!IGNORE_FINREF ) + gc_put_finref_to_vm(gc); + + fspace_reset_for_allocation(space); + + return; + +} + +void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref) +{ + GC *gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + collector->trace_stack = free_task_pool_get_entry(metadata); + collector_tracestack_push(collector, p_ref); + pool_put_entry(metadata->mark_task_pool, collector->trace_stack); + +//collector->rep_set = free_set_pool_get_entry(metadata); /* has got collector->rep_set in caller */ + collector->trace_stack = free_task_pool_get_entry(metadata); + Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool); + while(trace_task){ + unsigned int* iter = vector_block_iterator_init(trace_task); + while(!vector_block_iterator_end(trace_task,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(trace_task,iter); + assert(*p_ref); + trace_object(collector, p_ref); + } + vector_stack_clear(trace_task); + pool_put_entry(metadata->free_task_pool, trace_task); + trace_task = pool_get_entry(metadata->mark_task_pool); + } + + trace_task = (Vector_Block*)collector->trace_stack; + vector_stack_clear(trace_task); + pool_put_entry(metadata->free_task_pool, trace_task); + collector->trace_stack = NULL; +} Index: vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp (revision 0) +++ vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp (revision 0) @@ -0,0 +1,255 @@ + +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + + +#include "fspace.h" +#include "../thread/collector.h" +#include "../common/gc_metadata.h" +#include "../finalizer_weakref/finalizer_weakref.h" + +#ifdef MARK_BIT_FLIPPING + +static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) +{ + Partial_Reveal_Object *p_obj = *p_ref; + if(p_obj == NULL) return; + + collector_tracestack_push(collector, p_ref); + return; +} + +static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +{ + if (!object_has_ref_field_before_scan(p_obj)) return; + + Partial_Reveal_Object **p_ref; + + if (object_is_array(p_obj)) { /* scan array object */ + + Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj; + unsigned int array_length = array->array_len; + p_ref = (Partial_Reveal_Object**)((int)array + (int)array_first_element_offset(array)); + + for (unsigned int i = 0; i < array_length; i++) { + scan_slot(collector, p_ref+i); + } + + }else{ /* scan non-array object */ + + unsigned int num_refs = object_ref_field_num(p_obj); + int* ref_iterator = object_ref_iterator_init(p_obj); + + for(unsigned int i=0; igc; + Partial_Reveal_Object *p_obj = *p_ref; + + if(!obj_belongs_to_nos(p_obj)){ + if(obj_mark_in_oi(p_obj)) + scan_object(collector, p_obj); + return; + } + + /* following is the logic for forwarding */ + Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj); + + /* if p_target_obj is NULL, it is forwarded by other thread. + Note: a race condition here, it might be forwarded by other, but not set the + forwarding pointer yet. We need spin here to get the forwarding pointer. + We can implement the collector_forward_object() so that the forwarding pointer + is set in the atomic instruction, which requires to roll back the mos_alloced + space. That is easy for thread local block allocation cancellation. */ + if( p_target_obj == NULL ){ + if(collector->result == FALSE ){ + /* failed to forward, let's get back to controller. */ + vector_stack_clear(collector->trace_stack); + return; + } + + Partial_Reveal_Object *p_new_obj = obj_get_fw_in_oi(p_obj); + assert(p_new_obj); + *p_ref = p_new_obj; + return; + } + /* otherwise, we successfully forwarded */ + *p_ref = p_target_obj; + + /* we forwarded it, we need remember it for verification. */ + if(verify_live_heap) { + event_collector_move_obj(p_obj, p_target_obj, collector); + } + + scan_object(collector, p_target_obj); + return; +} + +static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref) +{ + forward_object(collector, p_ref); + + Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack; + while( !vector_stack_is_empty(trace_stack)){ + p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack); + forward_object(collector, p_ref); + trace_stack = (Vector_Block*)collector->trace_stack; + } + return; +} + +/* for tracing phase termination detection */ +static volatile unsigned int num_finished_collectors = 0; + +static void collector_trace_rootsets(Collector* collector) +{ + GC* gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + unsigned int num_active_collectors = gc->num_active_collectors; + atomic_cas32( &num_finished_collectors, 0, num_active_collectors); + + Space* space = collector->collect_space; + collector->trace_stack = free_task_pool_get_entry(metadata); + + /* find root slots saved by 1. active mutators, 2. exited mutators, 3. 
last cycle collectors */ + Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); + + /* first step: copy all root objects to trace tasks. */ + while(root_set){ + unsigned int* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(root_set,iter); + + Partial_Reveal_Object* p_obj = *p_ref; + assert(p_obj != NULL); /* root ref cann't be NULL, but remset can be */ + + collector_tracestack_push(collector, p_ref); + } + root_set = pool_iterator_next(metadata->gc_rootset_pool); + } + /* put back the last trace_stack task */ + pool_put_entry(metadata->mark_task_pool, collector->trace_stack); + + /* second step: iterate over the trace tasks and forward objects */ + collector->trace_stack = free_task_pool_get_entry(metadata); + +retry: + Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool); + + while(trace_task){ + unsigned int* iter = vector_block_iterator_init(trace_task); + while(!vector_block_iterator_end(trace_task,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(trace_task,iter); + trace_object(collector, p_ref); + + if(collector->result == FALSE) break; /* force return */ + + } + vector_stack_clear(trace_task); + pool_put_entry(metadata->free_task_pool, trace_task); + + if(collector->result == FALSE){ + gc_task_pool_clear(metadata->mark_task_pool); + break; /* force return */ + } + + trace_task = pool_get_entry(metadata->mark_task_pool); + } + + /* A collector comes here when seeing an empty mark_task_pool. The last collector will ensure + all the tasks are finished.*/ + + atomic_inc32(&num_finished_collectors); + while(num_finished_collectors != num_active_collectors){ + if( pool_is_empty(metadata->mark_task_pool)) continue; + /* we can't grab the task here, because of a race condition. If we grab the task, + and the pool is empty, other threads may fall to this barrier and then pass. 
*/ + atomic_dec32(&num_finished_collectors); + goto retry; + } + + /* now we are done, but each collector has a private stack that is empty */ + trace_task = (Vector_Block*)collector->trace_stack; + vector_stack_clear(trace_task); + pool_put_entry(metadata->free_task_pool, trace_task); + collector->trace_stack = NULL; + + return; +} + +void nongen_forward_pool(Collector* collector) +{ + GC* gc = collector->gc; + Fspace* space = (Fspace*)collector->collect_space; + + collector_trace_rootsets(collector); + /* the rest work is not enough for parallelization, so let only one thread go */ + if( collector->thread_handle != 0 ) return; + + gc->collect_result = gc_collection_result(gc); + if(!gc->collect_result) return; + + if(!IGNORE_FINREF ) + collector_identify_finref(collector); +#ifndef BUILD_IN_REFERENT + else { + gc_set_weakref_sets(gc); + update_ref_ignore_finref(collector); + } +#endif + + gc_fix_rootset(collector); + + if(!IGNORE_FINREF ) + gc_put_finref_to_vm(gc); + + fspace_reset_for_allocation(space); + + return; + +} + +#endif /* MARK_BIT_FLIPPING */ Index: vm/gc_gen/src/utils/bidir_list.h =================================================================== --- vm/gc_gen/src/utils/bidir_list.h (revision 493420) +++ vm/gc_gen/src/utils/bidir_list.h (working copy) @@ -22,16 +22,17 @@ #define _BIDIR_LIST_H_ typedef struct Bidir_List{ + unsigned int zero; Bidir_List* next; Bidir_List* prev; }Bidir_List; inline Bidir_List* bidir_list_add_item(Bidir_List* head, Bidir_List* item) { - item->next = head; - item->prev = head->prev; - head->prev->next = item; - head->prev = item; + item->next = head->next; + item->prev = head; + head->next->prev = item; + head->next = item; return head; } Index: vm/gc_gen/src/utils/bit_ops.h =================================================================== --- vm/gc_gen/src/utils/bit_ops.h (revision 493420) +++ vm/gc_gen/src/utils/bit_ops.h (working copy) @@ -52,7 +52,7 @@ { unsigned int bit_offset; - assert(start_idx < 128); + assert((start_idx >= 0) && (start_idx < 128)); unsigned int start_word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; unsigned int start_bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; @@ -79,7 +79,7 @@ inline void words_set_bit(unsigned int* words, unsigned int count, unsigned int start_idx) { - assert(start_idx < 128); + assert((start_idx >= 0) && (start_idx < 128)); unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; @@ -98,7 +98,7 @@ inline void words_clear_bit(unsigned int* words, unsigned int count, unsigned int start_idx) { - assert(start_idx < 128); + assert((start_idx >= 0) && (start_idx < 128)); unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; Index: vm/gc_gen/src/utils/sync_pool.h =================================================================== --- vm/gc_gen/src/utils/sync_pool.h (revision 493420) +++ vm/gc_gen/src/utils/sync_pool.h (working copy) @@ -28,7 +28,10 @@ inline Pool* sync_pool_create(){ return sync_stack_init(); } inline void sync_pool_destruct(Pool* pool){ sync_stack_destruct(pool); } -inline Boolean pool_is_empty(Pool* pool){ return stack_is_empty(pool);} +inline Boolean pool_is_empty(Pool* pool){ return sync_stack_is_empty(pool);} + +inline unsigned int pool_size(Pool* pool){ return sync_stack_size(pool); } + inline Vector_Block* pool_get_entry(Pool* pool) { Vector_Block* block = (Vector_Block*)sync_stack_pop(pool); Index: 
vm/gc_gen/src/utils/sync_stack.h =================================================================== --- vm/gc_gen/src/utils/sync_stack.h (revision 493420) +++ vm/gc_gen/src/utils/sync_stack.h (working copy) @@ -21,22 +21,44 @@ #ifndef _SYNC_STACK_H_ #define _SYNC_STACK_H_ +#include "vector_block.h" + +#define SYNC_STACK_VERSION_MASK_SHIFT 10 +#define SYNC_STACK_VERSION_MASK ((1 << SYNC_STACK_VERSION_MASK_SHIFT) - 1) + typedef struct Node{ Node* next; }Node; +/* + * ATTENTION: only for reference + * Perhaps in some platforms compilers compile this struct in a way different from what we expect + */ +typedef struct Stack_Top{ + unsigned int version: SYNC_STACK_VERSION_MASK_SHIFT; + unsigned int entry: (32-SYNC_STACK_VERSION_MASK_SHIFT); +}Stack_Top; + typedef struct Sync_Stack{ - Node* top; /* pointing to the first filled entry */ + Stack_Top top; /* pointing to the first filled entry */ Node* cur; /* pointing to the current accessed entry, only for iterator */ }Sync_Stack; +#define stack_top_get_entry(top) ((Node*)((*(unsigned int*)&(top)) & ~SYNC_STACK_VERSION_MASK)) +/* The alternative way: (Node*)(top.entry<cur = NULL; - stack->top = NULL; + unsigned int temp_top = 0; + stack->top = *(Stack_Top*)&temp_top; return stack; } @@ -48,7 +70,7 @@ inline void sync_stack_iterate_init(Sync_Stack* stack) { - stack->cur = stack->top; + stack->cur = stack_top_get_entry(stack->top); return; } @@ -62,37 +84,49 @@ return entry; } entry = stack->cur; - } + } return NULL; } inline Node* sync_stack_pop(Sync_Stack* stack) { - Node* entry = stack->top; - while( entry != NULL ){ - Node* new_entry = entry->next; - Node* temp = (Node*)atomic_casptr((volatile void**)&stack->top, new_entry, entry); - if(temp == entry){ /* got it */ - entry->next = NULL; - return entry; + Stack_Top cur_top = stack->top; + Node* top_entry = stack_top_get_entry(cur_top); + unsigned int version = stack_top_get_version(cur_top); + + while( top_entry != NULL ){ + unsigned int temp = stack_top_contruct(top_entry->next, version); + Stack_Top new_top = *(Stack_Top*)&temp; + temp = (unsigned int)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top); + if(temp == *(unsigned int*)&cur_top){ /* got it */ + top_entry->next = NULL; + return top_entry; } - entry = stack->top; + cur_top = stack->top; + top_entry = stack_top_get_entry(cur_top); + version = stack_top_get_version(cur_top); } return 0; } inline Boolean sync_stack_push(Sync_Stack* stack, Node* node) { - Node* entry = stack->top; - node->next = entry; + Stack_Top cur_top = stack->top; + node->next = stack_top_get_entry(cur_top); + unsigned int new_version = stack_top_get_next_version(cur_top); + unsigned int temp = stack_top_contruct(node, new_version); + Stack_Top new_top = *(Stack_Top*)&temp; while( TRUE ){ - Node* temp = (Node*)atomic_casptr((volatile void**)&stack->top, node, entry); - if(temp == entry){ /* got it */ + temp = (unsigned int)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top); + if(temp == *(unsigned int*)&cur_top){ /* got it */ return TRUE; } - entry = stack->top; - node->next = entry; + cur_top = stack->top; + node->next = stack_top_get_entry(cur_top); + new_version = stack_top_get_next_version(cur_top); + temp = stack_top_contruct(node, new_version); + new_top = *(Stack_Top*)&temp; } /* never comes here */ return FALSE; @@ -100,9 +134,21 @@ /* it does not matter whether this is atomic or not, because it is only invoked when there is no contention or only for rough idea */ -inline Boolean 
stack_is_empty(Sync_Stack* stack) +inline Boolean sync_stack_is_empty(Sync_Stack* stack) { - return (stack->top == NULL); + return (stack_top_get_entry(stack->top) == NULL); } +inline unsigned int sync_stack_size(Sync_Stack* stack) +{ + unsigned int entry_count = 0; + + sync_stack_iterate_init(stack); + while(sync_stack_iterate_next(stack)){ + ++entry_count; + } + + return entry_count; +} + #endif /* _SYNC_STACK_H_ */ Index: vm/gc_gen/src/utils/vector_block.h =================================================================== --- vm/gc_gen/src/utils/vector_block.h (revision 493420) +++ vm/gc_gen/src/utils/vector_block.h (working copy) @@ -26,9 +26,16 @@ unsigned int* head; /* point to the first filled entry */ unsigned int* tail; /* point to the entry after the last filled one */ unsigned int* heap_end; /* point to heap_end of the block (right after the last entry) */ - unsigned int* entries[1]; + unsigned int entries[1]; }Vector_Block; + +/* this size better be 2's power */ +#define VECTOR_BLOCK_DATA_SIZE_BYTES (2*KB) + +#define VECTOR_BLOCK_HEADER_SIZE_BYTES ((unsigned int)((Vector_Block*)0)->entries) +#define VECTOR_BLOCK_ENTRY_NUM ((VECTOR_BLOCK_DATA_SIZE_BYTES - VECTOR_BLOCK_HEADER_SIZE_BYTES) >> BIT_SHIFT_TO_BYTES_PER_WORD) + inline void vector_block_init(Vector_Block* block, unsigned int size) { block->heap_end = (unsigned int*)((unsigned int)block + size); @@ -41,15 +48,26 @@ inline unsigned int vector_block_entry_count(Vector_Block* block) { return (unsigned int)(block->tail - block->head); } +/* inline Boolean vector_block_is_full(Vector_Block* block) { return block->tail == block->heap_end; } inline Boolean vector_block_is_empty(Vector_Block* block) { return block->tail == block->head; } +*/ +inline Boolean vector_block_is_full(Vector_Block* block) +{ return (block->tail - block->entries) == VECTOR_BLOCK_ENTRY_NUM; } + +inline Boolean vector_block_is_empty(Vector_Block* block) +{ return block->tail == block->entries; } + inline void vector_block_add_entry(Vector_Block* block, unsigned int value) -{ +{ +#ifdef _DEBUG assert(value && !*(block->tail)); +#endif + *(block->tail++) = value; } @@ -88,16 +106,23 @@ #endif } +/* inline Boolean vector_stack_is_empty(Vector_Block* block) { return (block->head == block->tail); } +*/ +inline Boolean vector_stack_is_empty(Vector_Block* block) +{ return (block->head - block->entries) == VECTOR_BLOCK_ENTRY_NUM; } + inline Boolean vector_stack_is_full(Vector_Block* block) -{ return (block->head == (unsigned int*)block->entries); } +{ return (block->head == block->entries); } inline void vector_stack_push(Vector_Block* block, unsigned int value) { block->head--; +#ifdef _DEBUG assert(value && !*(block->head)); +#endif *(block->head) = value; } Index: vm/vmcore/src/kernel_classes/javasrc/org/apache/harmony/drlvm/VMHelper.java =================================================================== --- vm/vmcore/src/kernel_classes/javasrc/org/apache/harmony/drlvm/VMHelper.java (revision 493431) +++ vm/vmcore/src/kernel_classes/javasrc/org/apache/harmony/drlvm/VMHelper.java (working copy) @@ -38,8 +38,8 @@ public static void monitorExit(Object obj) {fail();} + public static void writeBarrier(Address p_objBase, Address p_objSlot, Address p_target){ fail();} - protected static void fail() {throw new RuntimeException("Not supported!");} }
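The comment block around collector_forward_object() in the new fspace_gen_forward_pool.cpp and fspace_nongen_forward_pool.cpp describes a narrow race: a losing collector can see that an object has already been claimed before the winning collector has published the forwarding pointer, so the loser must spin until the pointer appears. The sketch below illustrates only that protocol; it is not DRLVM code, the names Obj, FW_BIT and forward() are invented, and it ignores the allocation-failure path (collector->result == FALSE) that the patch also handles.

#include <atomic>
#include <cstdint>
#include <cstdio>

struct Obj {
  std::atomic<uintptr_t> header{0};  /* 0 = not claimed; low bit set = claimed for forwarding */
  int payload;
};

static const uintptr_t FW_BIT = 0x1;

static Obj* forward(Obj* from, Obj* to_space, std::atomic<int>* to_cursor) {
  uintptr_t expected = 0;
  if (from->header.compare_exchange_strong(expected, FW_BIT)) {
    /* winner: reserve a slot in to-space, copy, then publish the forwarding pointer */
    Obj* copy = &to_space[to_cursor->fetch_add(1)];
    copy->payload = from->payload;
    from->header.store((uintptr_t)copy | FW_BIT, std::memory_order_release);
    return copy;
  }
  /* loser: the winner may not have published the pointer yet, spin until it does */
  uintptr_t h;
  while (((h = from->header.load(std::memory_order_acquire)) & ~FW_BIT) == 0)
    ; /* busy-wait for the forwarding pointer */
  return (Obj*)(h & ~FW_BIT);
}

int main() {
  Obj from_space[2];
  Obj to_space[2];
  std::atomic<int> cursor{0};
  from_space[0].payload = 42;

  Obj* first  = forward(&from_space[0], to_space, &cursor);
  Obj* second = forward(&from_space[0], to_space, &cursor); /* takes the loser path */
  std::printf("%d %d same=%d\n", first->payload, second->payload, (int)(first == second));
  return 0;
}

A second call on the same object goes down the loser path and returns the copy installed by the first call, which is the behaviour the fast path in forward_object() relies on when it rewrites the reference slot.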
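Both new forward-pool files end their trace phase with the same termination barrier: a collector that finds mark_task_pool empty increments num_finished_collectors and spins, but steps back (atomic_dec32 plus goto retry) if new tasks show up before every collector has arrived. Below is a minimal, self-contained sketch of that barrier under simplifying assumptions; the pool is a mutex-guarded deque rather than the lock-free task pool, and the task type and helper names are invented.

#include <atomic>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>
#include <vector>

static std::mutex pool_lock;
static std::deque<int> task_pool;              /* stands in for metadata->mark_task_pool */
static std::atomic<unsigned> num_finished{0};  /* plays the role of num_finished_collectors */

static bool pool_pop(int* out) {
  std::lock_guard<std::mutex> g(pool_lock);
  if (task_pool.empty()) return false;
  *out = task_pool.front();
  task_pool.pop_front();
  return true;
}

static bool pool_is_empty() {
  std::lock_guard<std::mutex> g(pool_lock);
  return task_pool.empty();
}

static void collector_loop(unsigned num_collectors) {
  int task;
retry:
  while (pool_pop(&task)) {
    /* a real collector would trace/forward objects here and may push new task blocks */
  }

  /* no task visible: announce completion, but keep watching the pool */
  num_finished.fetch_add(1);
  while (num_finished.load() != num_collectors) {
    if (pool_is_empty()) continue;
    /* new work appeared: step back from the barrier and help drain it */
    num_finished.fetch_sub(1);
    goto retry;
  }
}

int main() {
  const unsigned n = 4;
  for (int i = 0; i < 1000; ++i) task_pool.push_back(i);
  std::vector<std::thread> collectors;
  for (unsigned i = 0; i < n; ++i) collectors.emplace_back(collector_loop, n);
  for (std::thread& t : collectors) t.join();
  std::printf("all %u collectors passed the barrier\n", n);
  return 0;
}

In this sketch no thread produces new work while waiting, so the retry path never fires; in the collector, trace_object() keeps pushing new task blocks, which is exactly why a waiting thread must be able to leave the barrier and help again before everyone has arrived.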
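The sync_stack.h rewrite guards the lock-free pop against the ABA problem by packing a 10-bit version tag into the low bits of the 32-bit top word (Stack_Top) and bumping the version on every push. The sketch below shows the same tagging idea in a portable form, under the assumption that packing an array index and a full 32-bit version into one 64-bit atomic word is acceptable instead of stealing alignment bits from a pointer; the TaggedStack type and all names are invented for illustration.

#include <atomic>
#include <cstdint>
#include <cstdio>

struct Node { int32_t next; int value; };  /* next is an index into nodes[], -1 means none */

static Node nodes[64];

struct TaggedStack {
  static uint64_t pack(int32_t idx, uint32_t ver) {
    return (uint64_t)(uint32_t)idx | ((uint64_t)ver << 32);
  }
  static int32_t  idx_of(uint64_t t) { return (int32_t)(uint32_t)t; }
  static uint32_t ver_of(uint64_t t) { return (uint32_t)(t >> 32); }

  std::atomic<uint64_t> top{pack(-1, 0)};  /* empty stack, version 0 */

  void push(int32_t n) {
    uint64_t old_top = top.load();
    for (;;) {
      nodes[n].next = idx_of(old_top);                  /* link under the current top */
      uint64_t new_top = pack(n, ver_of(old_top) + 1);  /* push bumps the version */
      if (top.compare_exchange_weak(old_top, new_top)) return;
      /* CAS failed: old_top was refreshed, retry with the new snapshot */
    }
  }

  int32_t pop() {
    uint64_t old_top = top.load();
    while (idx_of(old_top) != -1) {
      int32_t n = idx_of(old_top);
      uint64_t new_top = pack(nodes[n].next, ver_of(old_top));  /* pop keeps the version */
      if (top.compare_exchange_weak(old_top, new_top)) return n;
    }
    return -1;  /* empty */
  }
};

int main() {
  TaggedStack s;
  for (int32_t i = 0; i < 3; ++i) { nodes[i].value = (int)i * 10; s.push(i); }
  for (int32_t n; (n = s.pop()) != -1; ) std::printf("%d\n", nodes[n].value);
  return 0;
}

Bumping the version only on push is enough, because a node that was popped can reappear at the top only through a later push, and that push invalidates any compare-and-swap still holding the stale version.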