Index: build/make/components/vm/gc_gen.xml =================================================================== --- build/make/components/vm/gc_gen.xml (revision 481733) +++ build/make/components/vm/gc_gen.xml (working copy) @@ -16,7 +16,7 @@ --> Index: vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp =================================================================== --- vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (revision 481733) +++ vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (working copy) @@ -60,7 +60,7 @@ static void gc_reset_block_for_collectors(GC* gc, Mspace* mspace) { - unsigned int free_blk_idx = mspace->free_block_idx; + unsigned int free_blk_idx = mspace->first_block_idx; for(unsigned int i=0; i<gc->num_active_collectors; i++){ Collector* collector = gc->collectors[i]; unsigned int collector_target_idx = collector->cur_target_block->block_idx; @@ -324,7 +324,7 @@ have references that are going to be repointed */ unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); - mark_scan_heap_par(collector); + mark_scan_heap(collector); old_num = atomic_inc32(&num_marking_collectors); if( ++old_num == num_active_collectors ){ Index: vm/gc_gen/src/mark_compact/mspace.h =================================================================== --- vm/gc_gen/src/mark_compact/mspace.h (revision 481733) +++ vm/gc_gen/src/mark_compact/mspace.h (working copy) @@ -22,7 +22,7 @@ #define _MSC_SPACE_H_ #include "../common/gc_block.h" -#include "../thread/thread_alloc.h" +#include "../thread/gc_thread.h" /* Mark-compaction space is organized into blocks */ typedef struct Mspace{ Index: vm/gc_gen/src/utils/vector_block.h =================================================================== --- vm/gc_gen/src/utils/vector_block.h (revision 481733) +++ vm/gc_gen/src/utils/vector_block.h (working copy) @@ -25,24 +25,24 @@ void* next; /* point to next block */ unsigned int* head; /* point to the first filled entry */ unsigned int* tail; /* point to the entry after the last filled one */ - unsigned int* end; /* point to end of the block (right after the last entry) */ + unsigned int* heap_end; /* point to the end of the block (right after the last entry) */ unsigned int* entries[1]; }Vector_Block; inline void vector_block_init(Vector_Block* block, unsigned int size) { - block->end = (unsigned int*)((unsigned int)block + size); - block->head = (unsigned int*)block->entries; - block->tail = (unsigned int*)block->entries; - memset(block->head, 0, (block->end - block->head)*BYTES_PER_WORD); - return; + block->heap_end = (unsigned int*)((unsigned int)block + size); + block->head = (unsigned int*)block->entries; + block->tail = (unsigned int*)block->entries; + memset(block->entries, 0, (block->heap_end - (unsigned int*)block->entries)*BYTES_PER_WORD); + return; } inline unsigned int vector_block_entry_count(Vector_Block* block) { return (unsigned int)(block->tail - block->head); } inline Boolean vector_block_is_full(Vector_Block* block) -{ return block->tail == block->end; } +{ return block->tail == block->heap_end; } inline Boolean vector_block_is_empty(Vector_Block* block) { return block->tail == block->head; } @@ -55,11 +55,11 @@ inline void vector_block_clear(Vector_Block* block) { + block->head = (unsigned int*)block->entries; + block->tail = (unsigned int*)block->entries; #ifdef _DEBUG - memset(block->entries, 0, (block->end - (unsigned int*)block->entries)*BYTES_PER_WORD); + memset(block->entries, 0, (block->heap_end - (unsigned
int*)block->entries)*BYTES_PER_WORD); #endif - - block->tail = block->head; } /* Below is for sequential local access */ @@ -72,4 +72,53 @@ inline Boolean vector_block_iterator_end(Vector_Block* block, unsigned int* iter) { return iter == block->tail; } + +/* Below is to use Vector_Block as a stack (for trace-forwarding in DFS order) */ +inline void vector_stack_init(Vector_Block* block) +{ + block->tail = block->heap_end; + block->head = block->heap_end; +} + +inline void vector_stack_clear(Vector_Block* block) +{ + vector_stack_init(block); +#ifdef _DEBUG + memset(block->entries, 0, (block->heap_end - (unsigned int*)block->entries)*BYTES_PER_WORD); +#endif +} + +inline Boolean vector_stack_is_empty(Vector_Block* block) +{ return (block->head == block->tail); } + +inline Boolean vector_stack_is_full(Vector_Block* block) +{ return (block->head == (unsigned int*)block->entries); } + +inline void vector_stack_push(Vector_Block* block, unsigned int value) +{ + block->head--; + assert(value && !*(block->head)); + *(block->head) = value; +} + +inline unsigned int vector_stack_pop(Vector_Block* block) +{ + unsigned int value = *block->head; +#ifdef _DEBUG + *block->head = 0; +#endif + block->head++; + return value; +} + +inline void vector_block_integrity_check(Vector_Block* block) +{ + unsigned int* iter = vector_block_iterator_init(block); + while(!vector_block_iterator_end(block, iter)){ + assert(*iter); + iter = vector_block_iterator_advance(block, iter); + } + return; +} + #endif /* #ifndef _VECTOR_BLOCK_H_ */ Index: vm/gc_gen/src/utils/sync_stack.h =================================================================== --- vm/gc_gen/src/utils/sync_stack.h (revision 481733) +++ vm/gc_gen/src/utils/sync_stack.h (working copy) @@ -73,6 +73,7 @@ Node* new_entry = entry->next; Node* temp = (Node*)atomic_casptr((volatile void**)&stack->top, new_entry, entry); if(temp == entry){ /* got it */ + entry->next = NULL; return entry; } entry = stack->top; @@ -93,6 +94,7 @@ entry = stack->top; node->next = entry; } + /* never comes here */ return FALSE; } Index: vm/gc_gen/src/utils/sync_pool.h =================================================================== --- vm/gc_gen/src/utils/sync_pool.h (revision 481733) +++ vm/gc_gen/src/utils/sync_pool.h (working copy) @@ -29,10 +29,22 @@ inline void sync_pool_destruct(Pool* pool){ sync_stack_destruct(pool); } inline Boolean pool_is_empty(Pool* pool){ return stack_is_empty(pool);} -inline Vector_Block* pool_get_entry(Pool* pool){ return (Vector_Block*)sync_stack_pop(pool); } -inline void pool_put_entry(Pool* pool, void* value){ assert(value); Boolean ok = sync_stack_push(pool, (Node*)value); assert(ok);} +inline Vector_Block* pool_get_entry(Pool* pool) +{ + Vector_Block* block = (Vector_Block*)sync_stack_pop(pool); + return block; +} + +inline void pool_put_entry(Pool* pool, void* value) +{ + assert(value); + Boolean ok = sync_stack_push(pool, (Node*)value); + assert(ok); +} + inline void pool_iterator_init(Pool* pool){ sync_stack_iterate_init(pool);} inline Vector_Block* pool_iterator_next(Pool* pool){ return (Vector_Block*)sync_stack_iterate_next(pool);} #endif /* #ifndef _SYNC_POOL_H_ */ + Index: vm/gc_gen/src/jni/helper.cpp =================================================================== --- vm/gc_gen/src/jni/helper.cpp (revision 0) +++ vm/gc_gen/src/jni/helper.cpp (revision 0) @@ -0,0 +1,22 @@ +#include <jni.h> +#include +#include "../thread/gc_thread.h" + + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Class: org_apache_harmony_drlvm_gc_gen_GCHelper + * Method: 
TLSGCOffset + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_TLSGCOffset(JNIEnv *e, jclass c) +{ + return (jint)tls_gc_offset; +} + +#ifdef __cplusplus +} +#endif Index: vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp (revision 481733) +++ vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp (working copy) @@ -172,7 +172,7 @@ have references that are going to be repointed */ atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); - mark_scan_heap_par(collector); + mark_scan_heap(collector); unsigned int old_num = atomic_inc32(&num_marking_collectors); if( ++old_num == num_active_collectors ){ Index: vm/gc_gen/src/trace_forward/fspace.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace.cpp (revision 481733) +++ vm/gc_gen/src/trace_forward/fspace.cpp (working copy) @@ -22,12 +22,11 @@ #include "fspace.h" -float NURSERY_OBJECT_FORWARDING_RATIO = FORWARD_ALL; -//float NURSERY_OBJECT_FORWARDING_RATIO = FORWARD_HALF; +Boolean NOS_PARTIAL_FORWARD = TRUE; void* nos_boundary = null; /* this is only for speeding up write barrier */ -Boolean forward_first_half;; +Boolean forward_first_half; void* object_forwarding_boundary=NULL; Boolean fspace_mark_object(Fspace* fspace, Partial_Reveal_Object *p_obj) @@ -119,7 +118,10 @@ nos_boundary = fspace->heap_start; forward_first_half = TRUE; - object_forwarding_boundary = (void*)&fspace->blocks[fspace->first_block_idx + (unsigned int)(fspace->num_managed_blocks * NURSERY_OBJECT_FORWARDING_RATIO)]; + if( NOS_PARTIAL_FORWARD ) + object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks >> 1]; + else + object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks]; return; } @@ -134,35 +136,52 @@ void reset_fspace_for_allocation(Fspace* fspace) { - if( NURSERY_OBJECT_FORWARDING_RATIO == FORWARD_ALL || - fspace->gc->collect_kind == MAJOR_COLLECTION ) + unsigned int first_idx = fspace->first_block_idx; + unsigned int marked_start_idx = 0; + unsigned int marked_last_idx = 0; + + if( fspace->gc->collect_kind == MAJOR_COLLECTION || + NOS_PARTIAL_FORWARD == FALSE || !gc_requires_barriers()) { - fspace->free_block_idx = fspace->first_block_idx; - fspace->ceiling_block_idx = fspace->first_block_idx + fspace->num_managed_blocks - 1; + fspace->free_block_idx = first_idx; + fspace->ceiling_block_idx = first_idx + fspace->num_managed_blocks - 1; forward_first_half = TRUE; /* only useful for not-FORWARD_ALL*/ }else{ if(forward_first_half){ - fspace->free_block_idx = fspace->first_block_idx; + fspace->free_block_idx = first_idx; fspace->ceiling_block_idx = ((Block_Header*)object_forwarding_boundary)->block_idx - 1; + marked_start_idx = ((Block_Header*)object_forwarding_boundary)->block_idx - first_idx; + marked_last_idx = fspace->num_managed_blocks - 1; }else{ fspace->free_block_idx = ((Block_Header*)object_forwarding_boundary)->block_idx; - fspace->ceiling_block_idx = fspace->first_block_idx + fspace->num_managed_blocks - 1; + fspace->ceiling_block_idx = first_idx + fspace->num_managed_blocks - 1; + marked_start_idx = 0; + marked_last_idx = ((Block_Header*)object_forwarding_boundary)->block_idx - 1 - first_idx; } - forward_first_half = ~forward_first_half; + forward_first_half = forward_first_half^1; } - unsigned int first_idx = fspace->free_block_idx; - unsigned int last_idx = 
fspace->ceiling_block_idx; + Block* blocks = fspace->blocks; unsigned int num_freed = 0; - for(unsigned int i = 0; i <= last_idx-first_idx; i++){ + unsigned int new_start_idx = fspace->free_block_idx - first_idx; + unsigned int new_last_idx = fspace->ceiling_block_idx - first_idx; + for(unsigned int i = new_start_idx; i <= new_last_idx; i++){ Block_Header* block = (Block_Header*)&(blocks[i]); if(block->status == BLOCK_FREE) continue; - block_clear_mark_table(block); block->status = BLOCK_FREE; block->free = GC_BLOCK_BODY(block); + if( !gc_requires_barriers() || fspace->gc->collect_kind == MAJOR_COLLECTION ) + block_clear_mark_table(block); + num_freed ++; } + + for(unsigned int i = marked_start_idx; i <= marked_last_idx; i++){ + Block_Header* block = (Block_Header*)&(blocks[i]); + if(block->status == BLOCK_FREE) continue; + block_clear_markbits(block); + } fspace->num_used_blocks = fspace->num_used_blocks - num_freed; } @@ -177,7 +196,7 @@ GC* gc = fspace->gc; if(gc_requires_barriers()){ - /* generational GC. Only trace (mark) nos */ + /* generational GC. Only trace nos */ collector_execute_task(gc, (TaskType)trace_forward_fspace, (Space*)fspace); }else{ /* non-generational GC. Mark the whole heap (nos, mos, and los) */ Index: vm/gc_gen/src/trace_forward/fspace_collect_forward_seq.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_collect_forward_seq.cpp (revision 481733) +++ vm/gc_gen/src/trace_forward/fspace_collect_forward_seq.cpp (working copy) @@ -1,205 +0,0 @@ -/* - * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @author Xiao-Feng Li, 2006/10/05 - */ - -#include "fspace.h" -#include "../thread/collector.h" -#include "../common/gc_metadata.h" - -static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace) -{ - assert(obj_belongs_to_space(p_obj, (Space*)fspace)); - return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); -} - -static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) -{ - Partial_Reveal_Object *p_obj = *p_ref; - TraceStack *ts = collector->trace_stack; - - if (p_obj == NULL) return; - - /* the slot can be in tspace or fspace, we don't care. 
- we care only if the reference in the slot is pointing to fspace */ - if (obj_belongs_to_space(p_obj, collector->collect_space)) { - ts->push(p_ref); - } - - return; -} - -static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) -{ - if (!object_has_slots(p_obj)) return; - - void *slot; - - /* scan array object */ - if (object_is_array(p_obj)) { - Partial_Reveal_Object* array = p_obj; - assert(!obj_is_primitive_array(array)); - - int32 array_length = vector_get_length((Vector_Handle) array); - for (int i = 0; i < array_length; i++) { - slot = vector_get_element_address_ref((Vector_Handle) array, i); - scan_slot(collector, (Partial_Reveal_Object **)slot); - } - return; - } - - /* scan non-array object */ - int *offset_scanner = init_object_scanner(p_obj); - while (true) { - slot = offset_get_ref(offset_scanner, p_obj); - if (slot == NULL) break; - - scan_slot(collector, (Partial_Reveal_Object **)slot); - offset_scanner = offset_next_ref(offset_scanner); - } - - return; -} - -/* At this point, p_ref can be in anywhere like root, and other spaces, - * but *p_ref must be in fspace, since only slot which points to - * object in fspace could be added into TraceStack */ -#include "../verify/verify_live_heap.h" - -void trace_object_seq(Collector* collector, Partial_Reveal_Object **p_ref) -{ - Space* space = collector->collect_space; - Partial_Reveal_Object *p_obj = *p_ref; - - assert(p_obj); - assert(obj_belongs_to_space(p_obj, space)); - - /* Fastpath: object has already been forwarded, update the ref slot */ - if(obj_is_forwarded_in_vt(p_obj)) { - assert(!obj_is_marked_in_vt(p_obj)); - *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); - return; - } - - /* only mark the objects that will remain in fspace */ - if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) { - assert(!obj_is_forwarded_in_vt(p_obj)); - /* this obj remains in fspace, remember its ref slot for next GC. */ - if( !address_belongs_to_space(p_ref, space) ){ - collector_remset_add_entry(collector, p_ref); - } - - if(fspace_mark_object((Fspace*)space, p_obj)) - scan_object(collector, p_obj); - - return; - } - - /* following is the logic for forwarding */ - Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj); - - /* if it is forwarded by other already, it is ok */ - if(!p_target_obj){ - *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); - return; - } - /* otherwise, we successfully forwarded */ - *p_ref = p_target_obj; - - /* we forwarded it, we need remember it for verification. FIXME:: thread id */ - if(verify_live_heap) { - event_collector_move_obj(p_obj, p_target_obj, collector); - } - - scan_object(collector, p_target_obj); - return; -} - -/* trace the root references from root set and remember sets */ -static void trace_root(Collector* collector, Partial_Reveal_Object **ref) -{ - assert(*ref); - assert(obj_belongs_to_space(*ref, collector->collect_space)); - - TraceStack *ts = collector->trace_stack; - ts->push(ref); - - while(!ts->empty()) { - Partial_Reveal_Object **p_ref = ts->top(); - ts->pop(); - assert(p_ref); - trace_object_seq(collector, p_ref); - } -} - -static void collector_trace_rootsets(Collector* collector) -{ - GC_Metadata* metadata = collector->gc->metadata; - - Space* space = collector->collect_space; - collector->trace_stack = new TraceStack(); - - /* find root slots saved by 1. active mutators, 2. exited mutators, 3. 
last cycle collectors */ - Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool); - - while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); - while(!vector_block_iterator_end(root_set,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; - iter = vector_block_iterator_advance(root_set,iter); - - assert(p_ref); - if(*p_ref == NULL) continue; - if (obj_belongs_to_space(*p_ref, space)) - trace_root(collector, p_ref); - } - vector_block_clear(root_set); - pool_put_entry(metadata->free_set_pool, root_set); - root_set = pool_get_entry(metadata->gc_rootset_pool); - } - - delete collector->trace_stack; - - return; -} - -void update_rootset_interior_pointer(); - -static void update_relocated_refs(Collector* collector) -{ - update_rootset_interior_pointer(); -} - -void trace_forward_fspace_seq(Collector* collector) -{ - GC* gc = collector->gc; - Fspace* space = (Fspace*)collector->collect_space; - - /* FIXME:: Single-threaded trace-forwarding for fspace currently */ - - collector_trace_rootsets(collector); - - update_relocated_refs(collector); - reset_fspace_for_allocation(space); - - return; - -} - - - Index: vm/gc_gen/src/trace_forward/fspace.h =================================================================== --- vm/gc_gen/src/trace_forward/fspace.h (revision 481733) +++ vm/gc_gen/src/trace_forward/fspace.h (working copy) @@ -21,18 +21,16 @@ #ifndef _FROM_SPACE_H_ #define _FROM_SPACE_H_ -#include "../thread/thread_alloc.h" +#include "../thread/gc_thread.h" /* * In our Gen GC, not all live objects are copied to tspace space, the newer baby will - * still be preserved in fspace, that means give them time to die. + * still be preserved in fspace, that means to give them time to die. */ -#define FORWARD_ALL 1.0 -#define FORWARD_HALF 0.5 -extern float NURSERY_OBJECT_FORWARDING_RATIO; extern Boolean forward_first_half; -extern void* object_forwarding_boundary; //objects allocated before this boundary remain in fspace +/* boundary splitting fspace into forwarding part and remaining part */ +extern void* object_forwarding_boundary; typedef struct Fspace { /* <-- first couple of fields are overloaded as Space */ Index: vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (revision 481733) +++ vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (working copy) @@ -0,0 +1,240 @@ + +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include "fspace.h" +#include "../thread/collector.h" +#include "../common/gc_metadata.h" + +static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace) +{ + assert(obj_belongs_to_space(p_obj, (Space*)fspace)); + return forward_first_half? 
(p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); +} + +static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) +{ + Partial_Reveal_Object *p_obj = *p_ref; + if (p_obj == NULL) return; + + /* the slot can be in tspace or fspace, we don't care. + we care only if the reference in the slot is pointing to fspace */ + if (obj_belongs_to_space(p_obj, collector->collect_space)) + collector_tracestack_push(collector, p_ref); + + return; +} + +static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +{ + if (!object_has_ref_field(p_obj)) return; + + void *slot; + + /* scan array object */ + if (object_is_array(p_obj)) { + Partial_Reveal_Object* array = p_obj; + assert(!obj_is_primitive_array(array)); + + int32 array_length = vector_get_length((Vector_Handle) array); + for (int i = 0; i < array_length; i++) { + slot = vector_get_element_address_ref((Vector_Handle) array, i); + scan_slot(collector, (Partial_Reveal_Object **)slot); + } + return; + } + + /* scan non-array object */ + int *offset_scanner = init_object_scanner(p_obj); + while (true) { + slot = offset_get_ref(offset_scanner, p_obj); + if (slot == NULL) break; + + scan_slot(collector, (Partial_Reveal_Object **)slot); + offset_scanner = offset_next_ref(offset_scanner); + } + + return; +} + +/* NOTE:: At this point, p_ref can be anywhere, like in the root set or another space, but *p_ref must be in fspace, + since only slots which point to objects in fspace can be added to the trace stack. + The problem is that *p_ref may have been forwarded already, so that when we come here we find it pointing to tospace. + We simply return in that case. */ + +#include "../verify/verify_live_heap.h" + +static void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) +{ + Space* space = collector->collect_space; + GC* gc = collector->gc; + Partial_Reveal_Object *p_obj = *p_ref; + + if(!obj_belongs_to_space(p_obj, space)) return; + + /* Fastpath: object has already been forwarded, update the ref slot */ + if(obj_is_forwarded_in_vt(p_obj)) { + assert(!obj_is_marked_in_vt(p_obj)); + *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); + return; + } + + /* only mark the objects that will remain in fspace */ + if(!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) { + assert(!obj_is_forwarded_in_vt(p_obj)); + /* this obj remains in fspace, remember its ref slot for next GC if p_ref is not root */ + if( !address_belongs_to_space(p_ref, space) && address_belongs_to_gc_heap(p_ref, gc)) + collector_remset_add_entry(collector, p_ref); + + if(fspace_mark_object((Fspace*)space, p_obj)) + scan_object(collector, p_obj); + + return; + } + + /* following is the logic for forwarding */ + Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj); + + /* if it is forwarded by another collector already, it is ok */ + if( p_target_obj == NULL ){ + *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); + return; + } + /* otherwise, we successfully forwarded */ + *p_ref = p_target_obj; + + /* we forwarded it, we need to remember it for verification. 
*/ + if(verify_live_heap) { + event_collector_move_obj(p_obj, p_target_obj, collector); + } + + scan_object(collector, p_target_obj); + return; +} + +static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref) +{ + forward_object(collector, p_ref); + + Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack; + while( !vector_stack_is_empty(trace_stack)){ + p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack); + forward_object(collector, p_ref); + trace_stack = (Vector_Block*)collector->trace_stack; + } + + return; +} + +/* for tracing phase termination detection */ +static volatile unsigned int num_finished_collectors = 0; + +static void collector_trace_rootsets(Collector* collector) +{ + GC* gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + unsigned int num_active_collectors = gc->num_active_collectors; + atomic_cas32( &num_finished_collectors, 0, num_active_collectors); + + Space* space = collector->collect_space; + collector->trace_stack = pool_get_entry(metadata->free_task_pool); + + /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */ + Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool); + + /* first step: copy all root objects to trace tasks. */ + while(root_set){ + unsigned int* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(root_set,iter); + if(*p_ref == NULL) continue; /* a root ref can't be NULL, but a remset entry can be */ + if(obj_belongs_to_space(*p_ref, space)){ + collector_tracestack_push(collector, p_ref); + } + } + vector_block_clear(root_set); + pool_put_entry(metadata->free_set_pool, root_set); + root_set = pool_get_entry(metadata->gc_rootset_pool); + } + /* put back the last trace_stack task */ + pool_put_entry(metadata->mark_task_pool, collector->trace_stack); + + /* second step: iterate over the trace tasks and forward objects */ + collector->trace_stack = pool_get_entry(metadata->free_task_pool); + +retry: + Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool); + + while(trace_task){ + unsigned int* iter = vector_block_iterator_init(trace_task); + while(!vector_block_iterator_end(trace_task,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(trace_task,iter); + assert(*p_ref); /* a task can't be NULL, it was checked before being put into the task stack */ + /* in the sequential version, we traced the same object only once, but we were using a local hashset for that, + which couldn't catch repetitions between multiple collectors. This is subject to more study. */ + + /* FIXME:: we should not let root_set become empty while we are working; others may want to steal it. + Degenerate my stack into root_set, and grab another stack */ + + /* a task has to belong to the collected space, it was checked before being put into the stack */ + trace_object(collector, p_ref); + } + vector_stack_clear(trace_task); + pool_put_entry(metadata->free_task_pool, trace_task); + trace_task = pool_get_entry(metadata->mark_task_pool); + } + + atomic_inc32(&num_finished_collectors); + while(num_finished_collectors != num_active_collectors){ + if( pool_is_empty(metadata->mark_task_pool)) continue; + /* we can't grab the task here, because of a race condition. If we grab the task, + and the pool is empty, other threads may fall to this barrier and then pass. 
*/ + atomic_dec32(&num_finished_collectors); + goto retry; + } + + /* now we are done, but each collector has a private stack that is empty */ + trace_task = (Vector_Block*)collector->trace_stack; + vector_stack_clear(trace_task); + pool_put_entry(metadata->free_task_pool, trace_task); + collector->trace_stack = NULL; + + return; +} + +void trace_forward_fspace(Collector* collector) +{ + GC* gc = collector->gc; + Fspace* space = (Fspace*)collector->collect_space; + + collector_trace_rootsets(collector); + + /* the remaining work is not enough for parallelization, so let only one thread go */ + if( collector->thread_handle != 0 ) return; + + gc_update_repointed_refs(collector); + reset_fspace_for_allocation(space); + + return; + +} Index: vm/gc_gen/src/trace_forward/fspace_collect_forward_par.cpp =================================================================== --- vm/gc_gen/src/trace_forward/fspace_collect_forward_par.cpp (revision 481733) +++ vm/gc_gen/src/trace_forward/fspace_collect_forward_par.cpp (working copy) @@ -1,247 +0,0 @@ -/* - * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @author Xiao-Feng Li, 2006/10/05 - */ - -#include "fspace.h" -#include "../thread/collector.h" -#include "../common/gc_metadata.h" - -static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace) -{ - assert(obj_belongs_to_space(p_obj, (Space*)fspace)); - return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); -} - -static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) -{ - Partial_Reveal_Object *p_obj = *p_ref; - if (p_obj == NULL) return; - - /* the slot can be in tspace or fspace, we don't care. 
- we care only if the reference in the slot is pointing to fspace */ - if (obj_belongs_to_space(p_obj, collector->collect_space)) - collector_tracetask_add_entry(collector, p_ref); - - return; -} - -static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) -{ - if (!object_has_slots(p_obj)) return; - - void *slot; - - /* scan array object */ - if (object_is_array(p_obj)) { - Partial_Reveal_Object* array = p_obj; - assert(!obj_is_primitive_array(array)); - - int32 array_length = vector_get_length((Vector_Handle) array); - for (int i = 0; i < array_length; i++) { - slot = vector_get_element_address_ref((Vector_Handle) array, i); - scan_slot(collector, (Partial_Reveal_Object **)slot); - } - return; - } - - /* scan non-array object */ - int *offset_scanner = init_object_scanner(p_obj); - while (true) { - slot = offset_get_ref(offset_scanner, p_obj); - if (slot == NULL) break; - - scan_slot(collector, (Partial_Reveal_Object **)slot); - offset_scanner = offset_next_ref(offset_scanner); - } - - return; -} - -/* At this point, p_ref can be in anywhere like root, and other spaces, - * but *p_ref must be in fspace, since only slot which points to - * object in fspace could be added into TraceStack */ -#include "../verify/verify_live_heap.h" - -static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref) -{ - Space* space = collector->collect_space; - Partial_Reveal_Object *p_obj = *p_ref; - - assert(p_obj); - /* this assert is no longer valid for parallel forwarding, because remset may have duplicate p_refs that - are traced by difference collectors, and right after both check the p_obj is in fspace, and put into - trace_stack, one thread forwards it quickly before the other runs to this assert. - assert(obj_belongs_to_space(p_obj, space)); */ - - /* Fastpath: object has already been forwarded, update the ref slot */ - if(obj_is_forwarded_in_vt(p_obj)) { - assert(!obj_is_marked_in_vt(p_obj)); - *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); - return; - } - - /* only mark the objects that will remain in fspace */ - if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) { - assert(!obj_is_forwarded_in_vt(p_obj)); - /* this obj remains in fspace, remember its ref slot for next GC. */ - if( !address_belongs_to_space(p_ref, space) ) - collector_remset_add_entry(collector, p_ref); - - if(fspace_mark_object((Fspace*)space, p_obj)) - scan_object(collector, p_obj); - - return; - } - - /* following is the logic for forwarding */ - Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj); - - /* if it is forwarded by other already, it is ok */ - if( p_target_obj == NULL ){ - *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); - return; - } - /* otherwise, we successfully forwarded */ - *p_ref = p_target_obj; - - /* we forwarded it, we need remember it for verification. 
FIXME:: thread id */ - if(verify_live_heap) { - event_collector_move_obj(p_obj, p_target_obj, collector); - } - - scan_object(collector, p_target_obj); - return; -} - -void trace_object_seq(Collector* collector, Partial_Reveal_Object **p_ref); - -/* for tracing phase termination detection */ -static volatile unsigned int num_finished_collectors = 0; - -static void collector_trace_rootsets(Collector* collector) -{ - GC* gc = collector->gc; - GC_Metadata* metadata = gc->metadata; - - Space* space = collector->collect_space; - collector->trace_stack = (TraceStack*)pool_get_entry(metadata->free_set_pool); - //collector->trace_stack = new TraceStack(); - - unsigned int num_active_collectors = gc->num_active_collectors; - atomic_cas32( &num_finished_collectors, 0, num_active_collectors); - -retry: - /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */ - Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool); - - while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); - while(!vector_block_iterator_end(root_set,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; - iter = vector_block_iterator_advance(root_set,iter); - - assert(p_ref); - if(*p_ref == NULL) continue; - /* in sequential version, we only trace same object once, but we were using a local hashset, - which couldn't catch the repetition between multiple collectors. This is subject to more study. */ - if (obj_belongs_to_space(*p_ref, space)) - trace_object(collector, p_ref); - } - vector_block_clear(root_set); - pool_put_entry(metadata->free_set_pool, root_set); - root_set = pool_get_entry(metadata->gc_rootset_pool); - - } - - atomic_inc32(&num_finished_collectors); - while(num_finished_collectors != num_active_collectors){ - if( !pool_is_empty(metadata->gc_rootset_pool)){ - atomic_dec32(&num_finished_collectors); - goto retry; - } - } - - - /* now we are done, but each collector has a private task block to deal with */ - Vector_Block* trace_task = (Vector_Block*)collector->trace_stack; - TraceStack* trace_stack = new TraceStack(); - - unsigned int* iter = vector_block_iterator_init(trace_task); - while(!vector_block_iterator_end(trace_task,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)*iter; - iter = vector_block_iterator_advance(trace_task,iter); - trace_stack->push(p_ref); - } - - /* put back the last task to the free pool */ - vector_block_clear(trace_task); - pool_put_entry(metadata->free_set_pool, trace_task); - - collector->trace_stack = trace_stack; - while(!trace_stack->empty()){ - Partial_Reveal_Object** p_ref = trace_stack->top(); - trace_stack->pop(); - trace_object_seq(collector, p_ref); - } - - delete trace_stack; - collector->trace_stack = NULL; - - return; -} - -void update_rootset_interior_pointer(); - -static void update_relocated_refs(Collector* collector) -{ - update_rootset_interior_pointer(); -} - -static volatile unsigned int num_marking_collectors = 0; - -void trace_forward_fspace(Collector* collector) -{ - GC* gc = collector->gc; - Fspace* space = (Fspace*)collector->collect_space; - - unsigned int num_active_collectors = gc->num_active_collectors; - unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); - - collector_trace_rootsets(collector); - - old_num = atomic_inc32(&num_marking_collectors); - if( ++old_num == num_active_collectors ){ - /* last collector's world here */ - /* prepare for next phase */ /* let other collectors go */ - 
num_marking_collectors++; - } - while(num_marking_collectors != num_active_collectors + 1); - - /* the rest work is not enough for parallelization, so let only one thread go */ - if( collector->thread_handle != 0 ) return; - - update_relocated_refs(collector); - reset_fspace_for_allocation(space); - - return; - -} - - - Index: vm/gc_gen/src/thread/collector_alloc.cpp =================================================================== --- vm/gc_gen/src/thread/collector_alloc.cpp (revision 481733) +++ vm/gc_gen/src/thread/collector_alloc.cpp (working copy) @@ -18,7 +18,7 @@ * @author Xiao-Feng Li, 2006/10/05 */ -#include "thread_alloc.h" +#include "gc_thread.h" void* mos_alloc(unsigned size, Allocator *allocator); Index: vm/gc_gen/src/thread/gc_thread.cpp =================================================================== --- vm/gc_gen/src/thread/gc_thread.cpp (revision 0) +++ vm/gc_gen/src/thread/gc_thread.cpp (revision 0) @@ -0,0 +1,32 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include "gc_thread.h" + +static hythread_tls_key_t tls_gc_key; +unsigned int tls_gc_offset; + +void gc_thread_init() +{ + hythread_tls_alloc(&tls_gc_key); + tls_gc_offset = hythread_tls_get_offset(tls_gc_key); + + return; +} Index: vm/gc_gen/src/thread/mutator_alloc.cpp =================================================================== --- vm/gc_gen/src/thread/mutator_alloc.cpp (revision 481733) +++ vm/gc_gen/src/thread/mutator_alloc.cpp (working copy) @@ -18,23 +18,31 @@ * @author Xiao-Feng Li, 2006/10/05 */ -#include "thread_alloc.h" +#include "gc_thread.h" #include "../gen/gen.h" -Managed_Object_Handle gc_alloc(unsigned size, Allocation_Handle ah, void *gc_tls) +/* classloader sometimes sets the bit for finalizable objects (?) 
*/ +inline unsigned int get_instance_data_size (unsigned int encoded_size) +{ return (encoded_size & NEXT_TO_HIGH_BIT_CLEAR_MASK); } + +Managed_Object_Handle gc_alloc(unsigned size, Allocation_Handle ah, void *unused_gc_tls) { Managed_Object_Handle p_obj = NULL; /* All requests for space should be multiples of 4 (IA32) or 8(IPF) */ assert((size % GC_OBJECT_ALIGNMENT) == 0); - assert(gc_tls == vm_get_gc_thread_local()); assert(ah); + /* FIXME:: this is outdated actually */ + size = get_instance_data_size(size); + + Allocator* allocator = (Allocator*)gc_get_tls(); + if ( size > GC_OBJ_SIZE_THRESHOLD ) - p_obj = (Managed_Object_Handle)los_alloc(size, (Allocator*)gc_tls); + p_obj = (Managed_Object_Handle)los_alloc(size, allocator); else - p_obj = (Managed_Object_Handle)nos_alloc(size, (Allocator*)gc_tls); + p_obj = (Managed_Object_Handle)nos_alloc(size, allocator); assert(p_obj); obj_set_vt((Partial_Reveal_Object*)p_obj, ah); @@ -43,19 +51,20 @@ } -Managed_Object_Handle gc_alloc_fast (unsigned size, Allocation_Handle ah, void *gc_tls) +Managed_Object_Handle gc_alloc_fast (unsigned size, Allocation_Handle ah, void *unused_gc_tls) { /* All requests for space should be multiples of 4 (IA32) or 8(IPF) */ assert((size % GC_OBJECT_ALIGNMENT) == 0); - assert(gc_tls == vm_get_gc_thread_local()); assert(ah); /* large object should be handled specially */ if ( size > GC_OBJ_SIZE_THRESHOLD ) return NULL; - + + Allocator* allocator = (Allocator*)gc_get_tls(); + /* Try to allocate an object from the current Thread Local Block */ - Managed_Object_Handle p_obj = NULL; - p_obj = (Managed_Object_Handle)thread_local_alloc(size, (Allocator*)gc_tls); + Managed_Object_Handle p_obj; + p_obj = (Managed_Object_Handle)thread_local_alloc(size, allocator); if(p_obj == NULL) return NULL; obj_set_vt((Partial_Reveal_Object*)p_obj, ah); Index: vm/gc_gen/src/thread/gc_thread.h =================================================================== --- vm/gc_gen/src/thread/gc_thread.h (revision 0) +++ vm/gc_gen/src/thread/gc_thread.h (revision 0) @@ -0,0 +1,75 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#ifndef _GC_THREAD_H_ +#define _GC_THREAD_H_ + +#include "../common/gc_block.h" +#include "../common/gc_metadata.h" + +extern unsigned int tls_gc_offset; + +inline void* gc_get_tls() +{ + void* tls_base = vm_thread_local(); + return (void*)*(unsigned int*)((char*)tls_base + tls_gc_offset); +} + +inline void gc_set_tls(void* gc_tls_info) +{ + void* tls_base = vm_thread_local(); + *(unsigned int*)((char*)tls_base + tls_gc_offset) = (unsigned int)gc_tls_info; +} + +/* NOTE:: don't change the position of free/ceiling, because the offsets are constants for inlining */ +typedef struct Allocator{ + void *free; + void *ceiling; + Block *alloc_block; + Space* alloc_space; + GC *gc; + VmThreadHandle thread_handle; /* This thread; */ +}Allocator; + +inline Partial_Reveal_Object* thread_local_alloc(unsigned int size, Allocator* allocator) +{ + void* free = allocator->free; + void* ceiling = allocator->ceiling; + + void* new_free = (void*)((unsigned int)free + size); + + if (new_free <= ceiling){ + allocator->free= new_free; + return (Partial_Reveal_Object*)free; + } + + return NULL; +} + +inline void alloc_context_reset(Allocator* allocator) +{ + allocator->free = NULL; + allocator->ceiling = NULL; + allocator->alloc_block = NULL; + + return; +} + +#endif /* #ifndef _GC_THREAD_H_ */ Index: vm/gc_gen/src/thread/thread_alloc.h =================================================================== --- vm/gc_gen/src/thread/thread_alloc.h (revision 481733) +++ vm/gc_gen/src/thread/thread_alloc.h (working copy) @@ -1,58 +0,0 @@ -/* - * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** - * @author Xiao-Feng Li, 2006/10/05 - */ - -#ifndef _THREAD_ALLOC_H_ -#define _THREAD_ALLOC_H_ - -#include "../common/gc_block.h" -#include "../common/gc_metadata.h" - -typedef struct Allocator{ - void *free; - void *ceiling; - Block *alloc_block; - Space* alloc_space; - GC *gc; - VmThreadHandle thread_handle; /* This thread; */ -}Allocator; - -inline Partial_Reveal_Object* thread_local_alloc(unsigned int size, Allocator* allocator) -{ - Partial_Reveal_Object* p_return_obj=(Partial_Reveal_Object*)allocator->free; - unsigned int new_free = size+(unsigned int)p_return_obj; - - if (new_free <= (unsigned int)allocator->ceiling){ - allocator->free=(void*)new_free; - return p_return_obj; - } - - return NULL; -} - -inline void alloc_context_reset(Allocator* allocator) -{ - allocator->free = NULL; - allocator->ceiling = NULL; - allocator->alloc_block = NULL; - - return; -} - -#endif /* #ifndef _THREAD_ALLOC_H_ */ Index: vm/gc_gen/src/thread/collector.h =================================================================== --- vm/gc_gen/src/thread/collector.h (revision 481733) +++ vm/gc_gen/src/thread/collector.h (working copy) @@ -37,8 +37,7 @@ /* FIXME:: for testing */ Space* collect_space; - TraceStack *trace_stack; - MarkStack* mark_stack; + Vector_Block *trace_stack; Vector_Block* rep_set; /* repointed set */ Vector_Block* rem_set; Index: vm/gc_gen/src/thread/mutator.cpp =================================================================== --- vm/gc_gen/src/thread/mutator.cpp (revision 481733) +++ vm/gc_gen/src/thread/mutator.cpp (working copy) @@ -23,10 +23,10 @@ struct GC_Gen; Space* gc_get_nos(GC_Gen* gc); -void mutator_initialize(GC* gc, void *gc_information) +void mutator_initialize(GC* gc, void *unused_gc_information) { /* FIXME:: make sure gc_info is cleared */ - Mutator *mutator = (Mutator *) gc_information; + Mutator *mutator = (Mutator *)STD_MALLOC(sizeof(Mutator)); mutator->free = NULL; mutator->ceiling = NULL; mutator->alloc_block = NULL; @@ -46,13 +46,16 @@ unlock(gc->mutator_list_lock); // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ gc->num_mutators++; + + gc_set_tls(mutator); + return; } -void mutator_destruct(GC* gc, void *gc_information) +void mutator_destruct(GC* gc, void *unused_gc_information) { - Mutator *mutator = (Mutator *)gc_information; + Mutator *mutator = (Mutator *)gc_get_tls(); if(gc_requires_barriers()){ /* put back the remset when a mutator exits */ pool_put_entry(gc->metadata->mutator_remset_pool, mutator->rem_set); @@ -75,6 +78,9 @@ unlock(gc->mutator_list_lock); // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ gc->num_mutators--; + + gc_set_tls(NULL); + return; } @@ -87,4 +93,5 @@ mutator = mutator->next; } return; -} \ No newline at end of file +} + Index: vm/gc_gen/src/common/gc_metadata.h =================================================================== --- vm/gc_gen/src/common/gc_metadata.h (revision 481733) +++ vm/gc_gen/src/common/gc_metadata.h (working copy) @@ -48,8 +48,7 @@ void gc_reset_rootset(GC* gc); void gc_update_repointed_refs(Collector* collector); -void collector_marktask_add_entry(Collector* collector, Partial_Reveal_Object* p_obj); -void collector_tracetask_add_entry(Collector* collector, Partial_Reveal_Object** p_ref); +void collector_tracestack_push(Collector* collector, void* p_task); void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_slot); void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot); Index: vm/gc_gen/src/common/mark_scan.cpp 
=================================================================== --- vm/gc_gen/src/common/mark_scan.cpp (revision 0) +++ vm/gc_gen/src/common/mark_scan.cpp (revision 0) @@ -0,0 +1,179 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include "gc_metadata.h" +#include "../thread/collector.h" +#include "../gen/gen.h" + +static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref) +{ + Partial_Reveal_Object* p_obj = *p_ref; + if(p_obj==NULL) return; + + Space* obj_space = space_of_addr(collector->gc, p_obj); + + /* if obj to be moved, its ref slot needs remembering for later update */ + if(obj_space->move_object) + collector_repset_add_entry(collector, p_ref); + + if(obj_space->mark_object_func(obj_space, p_obj)) + collector_tracestack_push(collector, p_obj); + + return; +} + +static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +{ + if( !object_has_ref_field(p_obj) ) return; + + /* scan array object */ + if (object_is_array(p_obj)) { + Partial_Reveal_Object* array = p_obj; + assert(!obj_is_primitive_array(array)); + + int32 array_length = vector_get_length((Vector_Handle) array); + for (int i = 0; i < array_length; i++) { + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i); + scan_slot(collector, p_ref); + } + return; + } + + /* scan non-array object */ + int *offset_scanner = init_object_scanner(p_obj); + while (true) { + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj); + if (p_ref == NULL) break; /* terminating ref slot */ + + scan_slot(collector, p_ref); + offset_scanner = offset_next_ref(offset_scanner); + } + + return; +} + + +static void trace_object(Collector* collector, Partial_Reveal_Object *p_obj) +{ + scan_object(collector, p_obj); + + Vector_Block* trace_stack = collector->trace_stack; + while( !vector_stack_is_empty(trace_stack)){ + p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack); + scan_object(collector, p_obj); + trace_stack = collector->trace_stack; + } + + return; +} + +/* for marking phase termination detection */ +static volatile unsigned int num_finished_collectors = 0; + +/* NOTE:: Only marking in object header is idempotent */ +void mark_scan_heap(Collector* collector) +{ + GC* gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */ + unsigned int num_active_collectors = gc->num_active_collectors; + atomic_cas32( &num_finished_collectors, 0, num_active_collectors); + + collector->trace_stack = pool_get_entry(metadata->free_task_pool); + + Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); + + /* first step: copy all root objects to mark tasks. 
FIXME:: can be done sequentially before coming here to eliminate atomic ops */ + while(root_set){ + unsigned int* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(root_set,iter); + + Partial_Reveal_Object* p_obj = *p_ref; + /* root refs can't be NULL (remsets may have NULL ref entries, but this function is only for MAJOR_COLLECTION) */ + assert( (gc->collect_kind==MINOR_COLLECTION && !gc_requires_barriers()) || (gc->collect_kind==MAJOR_COLLECTION) && (p_obj!= NULL)); + if(p_obj==NULL) continue; + /* we have to mark the object before putting it into the mark task, because + it is possible to have two slots containing the same object. They would + be scanned twice and their ref slots would be recorded twice. The problem + occurs after the ref slot is updated the first time with the new position: + the second time, the value in the ref slot is no longer the old position as expected. + This can be worked around if we want. + */ + Space* space = space_of_addr(gc, p_obj); + if( !space->mark_object_func(space, p_obj) ) continue; + + collector_tracestack_push(collector, p_obj); + } + root_set = pool_iterator_next(metadata->gc_rootset_pool); + } + /* put back the last trace_stack task */ + pool_put_entry(metadata->mark_task_pool, collector->trace_stack); + + /* second step: iterate over the mark tasks and scan objects */ + /* get a task buf for the mark stack */ + collector->trace_stack = pool_get_entry(metadata->free_task_pool); + +retry: + Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool); + + while(mark_task){ + unsigned int* iter = vector_block_iterator_init(mark_task); + while(!vector_block_iterator_end(mark_task,iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter; + iter = vector_block_iterator_advance(mark_task,iter); + + /* FIXME:: we should not let mark_task become empty while we are working; others may want to steal it. + Degenerate my stack into mark_task, and grab another mark_task */ + trace_object(collector, p_obj); + } + /* one task used up: put it back to the pool and grab another task */ + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + mark_task = pool_get_entry(metadata->mark_task_pool); + } + + /* termination detection. This is also a barrier. + NOTE:: We can simply spin waiting for num_finished_collectors, because each + newly generated task would surely be processed by its generating collector eventually. + So the code below is only for load balance optimization. 
*/ + atomic_inc32(&num_finished_collectors); + while(num_finished_collectors != num_active_collectors){ + if( !pool_is_empty(metadata->mark_task_pool)){ + atomic_dec32(&num_finished_collectors); + goto retry; + } + } + + /* put back the last mark stack to the free pool */ + mark_task = (Vector_Block*)collector->trace_stack; + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + collector->trace_stack = NULL; + + /* put back last repointed refs set recorded during marking */ + pool_put_entry(metadata->collector_repset_pool, collector->rep_set); + collector->rep_set = NULL; + + return; +} Index: vm/gc_gen/src/common/interior_pointer.h =================================================================== --- vm/gc_gen/src/common/interior_pointer.h (revision 481733) +++ vm/gc_gen/src/common/interior_pointer.h (working copy) @@ -1,3 +1,23 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + #ifndef INTERIOR_POINTER_H #define INTERIOR_POINTER_H Index: vm/gc_gen/src/common/mark_scan_par.cpp =================================================================== --- vm/gc_gen/src/common/mark_scan_par.cpp (revision 481733) +++ vm/gc_gen/src/common/mark_scan_par.cpp (working copy) @@ -1,194 +0,0 @@ -/* - * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** - * @author Xiao-Feng Li, 2006/10/05 - */ - -#include "gc_metadata.h" -#include "../thread/collector.h" -#include "../gen/gen.h" - -static void scan_slot_par(Collector* collector, Partial_Reveal_Object** p_ref) -{ - Partial_Reveal_Object* p_obj = *p_ref; - if(p_obj==NULL) return; - - Space* obj_space = space_of_addr(collector->gc, p_obj); - - /* if obj to be moved, its ref slot needs remembering for later update */ - if(obj_space->move_object) - collector_repset_add_entry(collector, p_ref); - - if(obj_space->mark_object_func(obj_space, p_obj)) - collector_marktask_add_entry(collector, p_obj); - - return; -} - -static void scan_object_par(Collector* collector, Partial_Reveal_Object *p_obj) -{ - if( !object_has_slots(p_obj) ) return; - - /* scan array object */ - if (object_is_array(p_obj)) { - Partial_Reveal_Object* array = p_obj; - assert(!obj_is_primitive_array(array)); - - int32 array_length = vector_get_length((Vector_Handle) array); - for (int i = 0; i < array_length; i++) { - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i); - scan_slot_par(collector, p_ref); - } - return; - } - - /* scan non-array object */ - int *offset_scanner = init_object_scanner(p_obj); - while (true) { - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj); - if (p_ref == NULL) break; /* terminating ref slot */ - - scan_slot_par(collector, p_ref); - offset_scanner = offset_next_ref(offset_scanner); - } - - return; -} - -extern void scan_object_seq(Collector*, Partial_Reveal_Object *); - -/* for marking phase termination detection */ -static volatile unsigned int num_finished_collectors = 0; - -/* NOTE:: Only marking in object header is idempotent */ -void mark_scan_heap_par(Collector* collector) -{ - GC* gc = collector->gc; - /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */ - unsigned int num_active_collectors = gc->num_active_collectors; - atomic_cas32( &num_finished_collectors, 0, num_active_collectors); - - GC_Metadata* metadata = gc->metadata; - - collector->mark_stack = (MarkStack*)pool_get_entry(metadata->free_task_pool); - - Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); - - /* first step: copy all root objects to mark tasks. - FIXME:: can be done sequentially before coming here to eliminate atomic ops */ - while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); - while(!vector_block_iterator_end(root_set,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; - iter = vector_block_iterator_advance(root_set,iter); - - Partial_Reveal_Object* p_obj = *p_ref; - /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */ - assert((gc->collect_kind==MAJOR_COLLECTION) && (p_obj!= NULL)); - /* we have to mark the object before put it into marktask, because - it is possible to have two slots containing a same object. They will - be scanned twice and their ref slots will be recorded twice. Problem - occurs after the ref slot is updated first time with new position - and the second time the value is the ref slot is the old position as expected. - This can be worked around if we want. 
-      */
-      Space* space = space_of_addr(gc, p_obj);
-      if( !space->mark_object_func(space, p_obj) ) continue;
-
-      collector_marktask_add_entry(collector, p_obj);
-    }
-    root_set = pool_iterator_next(metadata->gc_rootset_pool);
-  }
-
-  pool_put_entry(metadata->mark_task_pool, collector->mark_stack);
-
-  /* second step: iterate over the mark tasks and scan objects */
-  /* get a task buf to push new tasks */
-  collector->mark_stack = (MarkStack*)pool_get_entry(metadata->free_task_pool);
-
-retry:
-  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
-  while(mark_task){
-    unsigned int* iter = vector_block_iterator_init(mark_task);
-    while(!vector_block_iterator_end(mark_task,iter)){
-      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
-      iter = vector_block_iterator_advance(mark_task,iter);
-
-      scan_object_par(collector, p_obj);
-    }
-    /* run out one task, put back to the pool and grab another task */
-    vector_block_clear(mark_task);
-    pool_put_entry(metadata->free_task_pool, mark_task);
-    mark_task = pool_get_entry(metadata->mark_task_pool);
-  }
-
-  /* termination detection. This is also a barrier.
-     NOTE:: actually we don't need this complexity. We can simply
-     spin waiting for num_finished_collectors, because each generated new
-     task would surely be processed by its generating collector eventually.
-     So code below is for load balance. */
-  atomic_inc32(&num_finished_collectors);
-  while(num_finished_collectors != num_active_collectors){
-    if( !pool_is_empty(metadata->mark_task_pool)){
-      atomic_dec32(&num_finished_collectors);
-      goto retry;
-    }
-  }
-
-  /* up to now, we donot have any tasks in task_pool, but
-     each collector has remaining tasks in its local mark_stack. */
-
-  /* Lets process remaining tasks.
-     NOTE:: this is the key difference from work-stealing, which uses
-     same stack for both working and sharing. So it has no problem
-     with remaining tasks in the shared stack. */
-
-  /* to simplify the processing, we turn back to use a single stack for
-     the remaining objects scanning. The assumption is, there are only limited
-     tasks for processing, no need to share the tasks.
-     FIXME:: a better way is to reduce the task block size into half till
-     the size becomes one, then the collectors actually share a same stack */
-
-  mark_task = (Vector_Block*)collector->mark_stack;
-  MarkStack* mark_stack = new MarkStack();
-
-  unsigned int* iter = vector_block_iterator_init(mark_task);
-  while(!vector_block_iterator_end(mark_task,iter)){
-    Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
-    iter = vector_block_iterator_advance(mark_task,iter);
-    mark_stack->push(p_obj);
-  }
-  /* put back the last task to the free pool */
-  vector_block_clear(mark_task);
-  pool_put_entry(metadata->free_task_pool, mark_task);
-
-  collector->mark_stack = mark_stack;
-  while(!mark_stack->empty()){
-    Partial_Reveal_Object* p_obj = mark_stack->top();
-    mark_stack->pop();
-    scan_object_seq(collector, p_obj);
-  }
-
-  delete mark_stack;
-  collector->mark_stack = NULL;
-
-  /* put back last repointed refs set recorded during marking */
-  pool_put_entry(metadata->collector_repset_pool, collector->rep_set);
-  collector->rep_set = NULL;
-
-  return;
-}
Index: vm/gc_gen/src/common/gc_for_vm.cpp
===================================================================
--- vm/gc_gen/src/common/gc_for_vm.cpp	(revision 481733)
+++ vm/gc_gen/src/common/gc_for_vm.cpp	(working copy)
@@ -24,17 +24,65 @@
 #include "../gen/gen.h"
 #include "interior_pointer.h"
 
-unsigned int HEAP_SIZE_DEFAULT = 256 * MB;
-
 extern Boolean NEED_BARRIER;
 extern unsigned int NUM_COLLECTORS;
 extern Boolean GC_VERIFY;
 extern unsigned int NOS_SIZE;
+extern Boolean NOS_PARTIAL_FORWARD;
 
-/* heap size limit is not interesting. only for manual tuning purpose */
+unsigned int HEAP_SIZE_DEFAULT = 256 * MB;
 unsigned int min_heap_size_bytes = 32 * MB;
-unsigned int max_heap_size_bytes = 256 * MB;
+unsigned int max_heap_size_bytes = 0;
 
+static int get_int_property(const char *property_name)
+{
+  assert(property_name);
+  char *value = get_property(property_name, VM_PROPERTIES);
+  int return_value;
+  if (NULL != value)
+  {
+    return_value = atoi(value);
+    destroy_property_value(value);
+  }else{
+    printf("property value %s is not set\n", property_name);
+    exit(0);
+  }
+
+  return return_value;
+}
+
+static Boolean get_boolean_property(const char *property_name)
+{
+  assert(property_name);
+  char *value = get_property(property_name, VM_PROPERTIES);
+  if (NULL == value){
+    printf("property value %s is not set\n", property_name);
+    exit(0);
+  }
+
+  Boolean return_value;
+  if (0 == strcmp("no", value)
+      || 0 == strcmp("off", value)
+      || 0 == strcmp("false", value)
+      || 0 == strcmp("0", value))
+  {
+    return_value = FALSE;
+  }
+  else if (0 == strcmp("yes", value)
+           || 0 == strcmp("on", value)
+           || 0 == strcmp("true", value)
+           || 0 == strcmp("1", value))
+  {
+    return_value = TRUE;
+  }else{
+    printf("property value %s is not properly set\n", property_name);
+    exit(0);
+  }
+
+  destroy_property_value(value);
+  return return_value;
+}
+
 static size_t get_size_property(const char* name)
 {
   char* size_string = get_property(name, VM_PROPERTIES);
@@ -43,7 +91,7 @@
   destroy_property_value(size_string);
 
   size_t unit = 1;
-  switch (sizeModifier) {
+  switch (sizeModifier) {
   case 'k': unit = 1024; break;
   case 'm': unit = 1024 * 1024; break;
   case 'g': unit = 1024 * 1024 * 1024;break;
@@ -90,15 +138,30 @@
     NOS_SIZE = get_size_property("gc.nos_size");
   }
 
-  NUM_COLLECTORS = get_int_property("gc.num_collectors", NUM_COLLECTORS, VM_PROPERTIES);
-  NEED_BARRIER = get_boolean_property("gc.gen_mode", TRUE, VM_PROPERTIES);
-  GC_VERIFY = get_boolean_property("gc.verify", FALSE, VM_PROPERTIES);
+  if (is_property_set("gc.num_collectors", VM_PROPERTIES) == 1) {
+    unsigned int num = get_int_property("gc.num_collectors");
+    NUM_COLLECTORS = (num==0)? NUM_COLLECTORS:num;
+  }
+
+  if (is_property_set("gc.gen_mode", VM_PROPERTIES) == 1) {
+    NEED_BARRIER = get_boolean_property("gc.gen_mode");
+  }
+
+  if (is_property_set("gc.nos_partial_forward", VM_PROPERTIES) == 1) {
+    NOS_PARTIAL_FORWARD = get_boolean_property("gc.nos_partial_forward");
+  }
+
+  if (is_property_set("gc.verify", VM_PROPERTIES) == 1) {
+    GC_VERIFY = get_boolean_property("gc.verify");
+  }
 
   return;
 }
 
 static GC* p_global_gc = NULL;
 
+void gc_thread_init();
+
 void gc_init()
 {
   parse_configuration_properties();
@@ -109,6 +172,7 @@
   memset(gc, 0, sizeof(GC));
   p_global_gc = gc;
   gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes);
+  gc_thread_init();
   return;
 }
@@ -187,3 +251,4 @@
 unsigned int gc_time_since_last_gc()
 { assert(0); return 0; }
 
+
Index: vm/gc_gen/src/common/gc_platform.h
===================================================================
--- vm/gc_gen/src/common/gc_platform.h	(revision 481733)
+++ vm/gc_gen/src/common/gc_platform.h	(working copy)
@@ -58,6 +58,11 @@
   hythread_yield();
 }
 
+inline void* vm_thread_local()
+{
+  return hythread_self();
+}
+
 inline int vm_create_thread(int (*func)(void*), void *data)
 {
   hythread_t* ret_thread = NULL;
Index: vm/gc_gen/src/common/gc_for_class.cpp
===================================================================
--- vm/gc_gen/src/common/gc_for_class.cpp	(revision 481733)
+++ vm/gc_gen/src/common/gc_for_class.cpp	(working copy)
@@ -70,7 +70,7 @@
   }
 
   if( num_ref_fields )
-    gcvt->gc_object_has_slots = true;
+    gcvt->gc_object_has_ref_field = true;
   else
     return NULL;
 
@@ -119,7 +119,7 @@
   memset((void *)gcvt, 0, sizeof(GC_VTable_Info));
   gcvt->gc_clss = ch;
   gcvt->gc_class_properties = 0;
-  gcvt->gc_object_has_slots = false;
+  gcvt->gc_object_has_ref_field = false;
 
   gc_set_prop_alignment_mask(gcvt, class_get_alignment(ch));
 
@@ -133,7 +133,7 @@
     if (class_is_non_ref_array (ch)) {
       gc_set_prop_non_ref_array(gcvt);
     }else{
-      gcvt->gc_object_has_slots = true;
+      gcvt->gc_object_has_ref_field = true;
     }
   }
 
@@ -152,3 +152,4 @@
 }  /* gc_class_prepared */
 
+
Index: vm/gc_gen/src/common/gc_common.h
===================================================================
--- vm/gc_gen/src/common/gc_common.h	(revision 481733)
+++ vm/gc_gen/src/common/gc_common.h	(working copy)
@@ -22,8 +22,6 @@
 #define _GC_COMMON_H_
 
 #include
-#include <stack>
-#include
 #include <map>
 #include "port_vmem.h"
@@ -62,8 +60,6 @@
 typedef void (*TaskType)(void*);
 
-typedef std::stack<Partial_Reveal_Object*> MarkStack;
-typedef std::stack<Partial_Reveal_Object**> TraceStack;
 typedef std::map ObjectMap;
 
 enum Collection_Kind {
@@ -218,7 +214,7 @@
   unsigned int num_collectors;
   unsigned int num_active_collectors; /* not all collectors are working */
 
-  /* metadata is the pool for rootset, markstack, etc. */
+  /* metadata is the pool for rootset, tracestack, etc. */
   GC_Metadata* metadata;
   unsigned int collect_kind; /* MAJOR or MINOR */
   /* FIXME:: this is wrong!
      root_set belongs to mutator */
@@ -230,10 +226,13 @@
 
 }GC;
 
-void mark_scan_heap_par(Collector* collector);
-void mark_scan_heap_seq(Collector* collector);
+void mark_scan_heap(Collector* collector);
 
 inline void* gc_heap_base(GC* gc){ return gc->heap_start; }
 inline void* gc_heap_ceiling(GC* gc){ return gc->heap_end; }
 
+inline Boolean address_belongs_to_gc_heap(void* addr, GC* gc)
+{
+  return (addr >= gc_heap_base(gc) && addr < gc_heap_ceiling(gc));
+}
 
 #endif //_GC_COMMON_H_
Index: vm/gc_gen/src/common/mark_scan_seq.cpp
===================================================================
--- vm/gc_gen/src/common/mark_scan_seq.cpp	(revision 481733)
+++ vm/gc_gen/src/common/mark_scan_seq.cpp	(working copy)
@@ -1,108 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#include "gc_metadata.h"
-#include "../thread/collector.h"
-#include "../gen/gen.h"
-
-static void scan_slot_seq(Collector* collector, Partial_Reveal_Object** p_ref)
-{
-  Partial_Reveal_Object* p_obj = *p_ref;
-  if(p_obj==NULL) return;
-
-  MarkStack* mark_stack = (MarkStack*)collector->mark_stack;
-  Space* obj_space = space_of_addr(collector->gc, p_obj);
-
-  /* if obj to be moved, its ref slot needs remembering for later update */
-  if(obj_space->move_object)
-    collector_repset_add_entry(collector, p_ref);
-
-  if(obj_space->mark_object_func(obj_space, p_obj))
-    mark_stack->push(p_obj);
-
-  return;
-}
-
-void scan_object_seq(Collector* collector, Partial_Reveal_Object *p_obj)
-{
-  if( !object_has_slots(p_obj) ) return;
-
-  /* scan array object */
-  if (object_is_array(p_obj)) {
-    Partial_Reveal_Object* array = p_obj;
-    assert(!obj_is_primitive_array(array));
-
-    int32 array_length = vector_get_length((Vector_Handle) array);
-    for (int i = 0; i < array_length; i++) {
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
-      scan_slot_seq(collector, p_ref);
-    }
-    return;
-  }
-
-  /* scan non-array object */
-  int *offset_scanner = init_object_scanner(p_obj);
-  while (true) {
-    Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
-    if (p_ref == NULL) break; /* terminating ref slot */
-
-    scan_slot_seq(collector, p_ref);
-    offset_scanner = offset_next_ref(offset_scanner);
-  }
-
-  return;
-}
-
-/* NOTE:: Only marking in object header is idempotent */
-void mark_scan_heap_seq(Collector* collector)
-{
-  GC* gc = collector->gc;
-  MarkStack* mark_stack = new MarkStack();
-  collector->mark_stack = mark_stack;
-
-  GC_Metadata* metadata = gc->metadata;
-
-  pool_iterator_init(metadata->gc_rootset_pool);
-  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
-
-  while(root_set){
-    unsigned int* iter = vector_block_iterator_init(root_set);
-    while(!vector_block_iterator_end(root_set,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
-      iter = vector_block_iterator_advance(root_set,iter);
-
-      Partial_Reveal_Object* p_obj = *p_ref;
-      assert(p_obj != 0); /* root ref can't be NULL */
-
-      Space* space = space_of_addr(collector->gc, p_obj);
-      if( !space->mark_object_func(space, p_obj) ) continue;
-      mark_stack->push(p_obj);
-    }
-    root_set = pool_iterator_next(metadata->gc_rootset_pool);
-  }
-
-  while(!mark_stack->empty()){
-    Partial_Reveal_Object* p_obj = mark_stack->top();
-    mark_stack->pop();
-    scan_object_seq(collector, p_obj);
-  }
-
-  return;
-}
Index: vm/gc_gen/src/common/gc_for_class.h
===================================================================
--- vm/gc_gen/src/common/gc_for_class.h	(revision 481733)
+++ vm/gc_gen/src/common/gc_for_class.h	(working copy)
@@ -30,7 +30,7 @@
 typedef POINTER_SIZE_INT Obj_Info_Type;
 
 typedef struct GC_VTable_Info {
-  unsigned int gc_object_has_slots;
+  unsigned int gc_object_has_ref_field;
   unsigned int gc_number_of_ref_fields;
 
   uint32 gc_class_properties;    // This is the same as class_properties in VM's VTable.
@@ -105,12 +105,18 @@
   return vtable_get_gcvt(vt);
 }
 
-inline Boolean object_has_slots(Partial_Reveal_Object *obj)
+inline Boolean object_has_ref_field(Partial_Reveal_Object *obj)
 {
   GC_VTable_Info *gcvt = obj_get_gcvt(obj);
-  return gcvt->gc_object_has_slots;
+  return gcvt->gc_object_has_ref_field;
 }
 
+inline unsigned int object_ref_field_num(Partial_Reveal_Object *obj)
+{
+  GC_VTable_Info *gcvt = obj_get_gcvt(obj);
+  return gcvt->gc_number_of_ref_fields;
+}
+
 inline Boolean object_is_array(Partial_Reveal_Object *obj)
 {
   GC_VTable_Info *gcvt = obj_get_gcvt(obj);
Index: vm/gc_gen/src/common/gc_metadata.cpp
===================================================================
--- vm/gc_gen/src/common/gc_metadata.cpp	(revision 481733)
+++ vm/gc_gen/src/common/gc_metadata.cpp	(working copy)
@@ -21,8 +21,9 @@
 #include "gc_metadata.h"
 #include "../thread/mutator.h"
 #include "../thread/collector.h"
+#include "interior_pointer.h"
 
-#define GC_METADATA_SIZE_BYTES 32*MB
+#define GC_METADATA_SIZE_BYTES 48*MB
 
 #define METADATA_BLOCK_SIZE_BIT_SHIFT 12
 #define METADATA_BLOCK_SIZE_BYTES (1<<METADATA_BLOCK_SIZE_BIT_SHIFT)
-  unsigned num_tasks = num_blocks >> 1;
+  /* part of the metadata space is used for trace_stack */
+  unsigned num_tasks = num_blocks >> 2;
   gc_metadata.free_task_pool = sync_pool_create();
   for(i=0; i<num_tasks; i++){
-  unsigned num_sets = num_blocks >> 1;
+  /* the other part is used for root sets (including rem sets) */
+  unsigned num_sets = (num_blocks >> 1) + num_tasks;
   gc_metadata.free_set_pool = sync_pool_create();
   /* initialize free rootset pool so that mutators can use them */
-  for(; i<num_blocks; i++){
   mutator->rem_set = pool_get_entry(gc_metadata.free_set_pool);
+  assert(mutator->rem_set);
 }
 
 void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
@@ -190,6 +191,7 @@
   pool_put_entry(gc_metadata.collector_repset_pool, root_set);
 
   collector->rep_set = pool_get_entry(gc_metadata.free_set_pool);
+  assert(collector->rep_set);
 }
 
 void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
@@ -203,32 +205,21 @@
   pool_put_entry(gc_metadata.collector_remset_pool, root_set);
 
   collector->rem_set = pool_get_entry(gc_metadata.free_set_pool);
+  assert(collector->rem_set);
 }
 
-void collector_marktask_add_entry(Collector* collector, Partial_Reveal_Object* p_obj)
+void collector_tracestack_push(Collector* collector, void* p_task)
 {
-  assert( p_obj>= gc_heap_base_address() && p_obj < gc_heap_ceiling_address());
-
-  Vector_Block* mark_task = (Vector_Block*)collector->mark_stack;
-  vector_block_add_entry(mark_task, (unsigned int)p_obj);
-
-  if( !vector_block_is_full(mark_task)) return;
-
-  pool_put_entry(gc_metadata.mark_task_pool, mark_task);
-  collector->mark_stack = (MarkStack*)pool_get_entry(gc_metadata.free_task_pool);
-}
-
-void collector_tracetask_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
-{
-  assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address());
-
+  /* we don't have assert as others because p_task is a p_obj for marking,
+     or a p_ref for trace forwarding. The latter can be a root set pointer */
   Vector_Block* trace_task = (Vector_Block*)collector->trace_stack;
-  vector_block_add_entry(trace_task, (unsigned int)p_ref);
+  vector_stack_push(trace_task, (unsigned int)p_task);
 
-  if( !vector_block_is_full(trace_task)) return;
+  if( !vector_stack_is_full(trace_task)) return;
 
-  pool_put_entry(gc_metadata.gc_rootset_pool, trace_task);
-  collector->trace_stack = (TraceStack*)pool_get_entry(gc_metadata.free_set_pool);
+  pool_put_entry(gc_metadata.mark_task_pool, trace_task);
+  collector->trace_stack = pool_get_entry(gc_metadata.free_task_pool);
+  assert(collector->trace_stack);
 }
 
 void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref)
@@ -242,6 +233,7 @@
   pool_put_entry(gc_metadata.gc_rootset_pool, root_set);
 
   gc->root_set = pool_get_entry(gc_metadata.free_set_pool);
+  assert(gc->root_set);
 }
 
@@ -279,16 +271,19 @@
   return;
 }
 
-void update_rootset_interior_pointer();
-
 void gc_update_repointed_refs(Collector* collector)
 {
-  GC* gc = collector->gc;
+  GC* gc = collector->gc;
   GC_Metadata* metadata = gc->metadata;
-  gc_update_repointed_sets(gc, metadata->gc_rootset_pool);
-  gc_update_repointed_sets(gc, metadata->collector_repset_pool);
-  update_rootset_interior_pointer();
+
+  /* generational MINOR_COLLECTION doesn't need rootset update */
+  if( !gc_requires_barriers() || gc->collect_kind == MAJOR_COLLECTION ){
+    gc_update_repointed_sets(gc, metadata->gc_rootset_pool);
+    gc_update_repointed_sets(gc, metadata->collector_repset_pool);
+  }
+  update_rootset_interior_pointer();
+
   return;
 }
 
@@ -318,3 +313,4 @@
   return;
 }
 
+
Index: vm/gc_gen/src/common/interior_pointer.cpp
===================================================================
--- vm/gc_gen/src/common/interior_pointer.cpp	(revision 481733)
+++ vm/gc_gen/src/common/interior_pointer.cpp	(working copy)
@@ -1,5 +1,27 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
 #include "interior_pointer.h"
+
+#include
+
 void gc_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned);
 
 typedef struct slot_offset_entry_struct{
Index: vm/gc_gen/src/gen/gen.h
===================================================================
--- vm/gc_gen/src/gen/gen.h	(revision 481733)
+++ vm/gc_gen/src/gen/gen.h	(working copy)
@@ -22,7 +22,7 @@
 #define _GC_GEN_H_
 
 #include "../common/gc_common.h"
-#include "../thread/thread_alloc.h"
+#include "../thread/gc_thread.h"
 #include "../trace_forward/fspace.h"
 #include "../mark_compact/mspace.h"
 #include "../mark_sweep/lspace.h"
Index: vm/gc_gen/src/gen/gc_for_barrier.cpp
===================================================================
--- vm/gc_gen/src/gen/gc_for_barrier.cpp	(revision 481733)
+++ vm/gc_gen/src/gen/gc_for_barrier.cpp	(working copy)
@@ -33,7 +33,7 @@
 
 static void gc_slot_write_barrier(Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
 {
-  Mutator *mutator = (Mutator *)vm_get_gc_thread_local();
+  Mutator *mutator = (Mutator *)gc_get_tls();
 
   GC_Gen* gc = (GC_Gen*)mutator->gc;
   if( address_belongs_to_nursery((void *)p_target, gc) &&
      !address_belongs_to_nursery((void *)p_slot, gc))
@@ -44,7 +44,7 @@
 
 static void gc_object_write_barrier(Managed_Object_Handle p_object)
 {
-  Mutator *mutator = (Mutator *)vm_get_gc_thread_local();
+  Mutator *mutator = (Mutator *)gc_get_tls();
 
   GC_Gen* gc = (GC_Gen*)mutator->gc;
   if( address_belongs_to_nursery((void *)p_object, gc)) return;
@@ -82,7 +82,7 @@
 void gc_heap_wrote_object (Managed_Object_Handle p_obj_written)
 {
   if( !NEED_BARRIER ) return;
-  if( object_has_slots((Partial_Reveal_Object*)p_obj_written)){
+  if( object_has_ref_field((Partial_Reveal_Object*)p_obj_written)){
     /* for array copy and object clone */
     gc_object_write_barrier(p_obj_written);
   }
Index: vm/gc_gen/src/mark_sweep/lspace.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/lspace.cpp	(revision 481733)
+++ vm/gc_gen/src/mark_sweep/lspace.cpp	(working copy)
@@ -72,7 +72,7 @@
   lspace->heap_end = (void *)((unsigned int)reserved_base + committed_size);
   lspace->alloc_free = reserved_base;
 
-  unsigned int num_bits = lspace_size >> BIT_SHIFT_TO_KILO;
+  unsigned int num_bits = (lspace_size >> BIT_SHIFT_TO_KILO) + 1;
   unsigned int num_words = (num_bits >> BIT_SHIFT_TO_BITS_PER_WORD)+1;
   lspace->mark_table = (unsigned int*)STD_MALLOC( num_words*BYTES_PER_WORD );
   memset(lspace->mark_table, 0, num_words*BYTES_PER_WORD);
@@ -101,7 +101,7 @@
 {
   /* FIXME:: collection */
   unsigned int used_size = (unsigned int)lspace->alloc_free - (unsigned int)lspace->heap_start;
-  memset(lspace->mark_table, 0, (used_size>>BIT_SHIFT_TO_KILO)>>BIT_SHIFT_TO_BITS_PER_BYTE );
+  memset(lspace->mark_table, 0, (((used_size>>BIT_SHIFT_TO_KILO) + 1)>>BIT_SHIFT_TO_BITS_PER_BYTE) + 1);
   return;
 }
Index: vm/gc_gen/src/mark_sweep/lspace.h
===================================================================
--- vm/gc_gen/src/mark_sweep/lspace.h	(revision 481733)
+++ vm/gc_gen/src/mark_sweep/lspace.h	(working copy)
@@ -22,7 +22,7 @@
 #define _LSPACE_H_
 
 #include "../common/gc_common.h"
-#include "../thread/thread_alloc.h"
+#include "../thread/gc_thread.h"
 
 typedef struct Lspace{
   /* <-- first couple of fields are overloadded as Space */
Index: vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java
===================================================================
--- vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java	(revision 0)
+++ vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java	(revision 0)
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @author Xiao-Feng Li
+ */
+
+package org.apache.harmony.drlvm.gc_gen;
+
+import org.apache.harmony.drlvm.VMHelper;
+import org.vmmagic.unboxed.*;
+
+public class GCHelper {
+
+    static {System.loadLibrary("gc_gen");}
+
+    public static final int TLS_GC_OFFSET = TLSGCOffset();
+
+    public static Object alloc(int objSize, int allocationHandle) {
+
+        Address TLS_BASE = VMHelper.getTlsBaseAddress();
+
+        Address allocator = TLS_BASE.plus(TLS_GC_OFFSET);
+        Address free_addr = allocator.plus(0);
+        Address free = free_addr.loadAddress();
+        Address ceiling = allocator.plus(4).loadAddress();
+
+        Address new_free = free.plus(objSize);
+
+        if (new_free.LE(ceiling)) {
+            free_addr.store(new_free);
+            free.store(allocationHandle);
+            return free;
+        }
+
+        return VMHelper.newResolvedUsingAllocHandleAndSize(objSize, allocationHandle);
+    }
+
+    private static native int TLSGCOffset();
+}
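
Note on the inlined fast path above: GCHelper.alloc hard-codes the layout of the collector's thread-local allocator record, reading the current block's free pointer at offset 0 and its ceiling at offset 4 (a 32-bit VM, matching the `(unsigned int)` casts elsewhere in this patch). The following is a minimal C++ sketch of the native view of that same bump-pointer fast path. It is illustrative only, not part of the patch: the names Allocator, alloc_slow_path, and gc_alloc_fast are hypothetical, and the slow path is stubbed out; the real one would take a fresh block from its space or trigger a collection.

    #include <cassert>
    #include <cstdio>

    /* Field order mirrors what GCHelper.alloc assumes:
       the block's free pointer at offset 0, its ceiling at offset 4. */
    typedef struct Allocator {
      void* free;      /* next free byte in the current allocation block */
      void* ceiling;   /* first byte past the usable end of the block */
    } Allocator;

    /* Hypothetical slow path; a real GC would acquire a new block here. */
    static void* alloc_slow_path(Allocator*, unsigned int, void*)
    {
      return 0;  /* stub: fast-path space exhausted */
    }

    /* Bump-pointer fast path: the native analogue of GCHelper.alloc. */
    static void* gc_alloc_fast(Allocator* allocator, unsigned int size, void* vt)
    {
      char* free_ptr = (char*)allocator->free;
      char* new_free = free_ptr + size;
      if(new_free > (char*)allocator->ceiling)
        return alloc_slow_path(allocator, size, vt);  /* block exhausted */

      allocator->free = new_free;   /* bump the free pointer */
      *((void**)free_ptr) = vt;     /* install vtable/alloc handle at object start */
      return free_ptr;
    }

    int main()
    {
      char block[256];
      Allocator a = { block, block + sizeof(block) };
      int fake_vtable;
      void* obj = gc_alloc_fast(&a, 16, &fake_vtable);
      printf("allocated at %p, free now at %p\n", obj, a.free);
      assert(obj == block && a.free == block + 16);
      return 0;
    }

As in the Java helper, allocation succeeds when the bumped pointer is less than or equal to the ceiling; only the overflow case pays for the out-of-line slow path, which is what makes inlining this sequence into JIT-compiled code worthwhile.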