Index: common/gc_metadata.cpp =================================================================== --- common/gc_metadata.cpp (revision 477572) +++ common/gc_metadata.cpp (working copy) @@ -52,6 +52,7 @@ gc_metadata.free_task_pool = sync_pool_create(num_tasks); for(i=0; imetadata = NULL; } -void gc_metadata_reset(GC* gc) +void gc_set_rootset(GC* gc) { GC_Metadata* metadata = gc->metadata; Pool* gc_rootset_pool = metadata->gc_rootset_pool; @@ -106,18 +108,14 @@ if(!gc_requires_barriers()) return; + /* put back last remset block */ Mutator *mutator = gc->mutator_list; while (mutator) { pool_put_entry(mutator_remset_pool, mutator->rem_set); mutator->rem_set = NULL; + mutator = mutator->next; } - for(unsigned int i=0; inum_collectors; i++){ - Collector* collector = gc->collectors[i]; - pool_put_entry(collector_remset_pool, collector->rem_set); - collector->rem_set = NULL; - } - if( gc->collect_kind == MAJOR_COLLECTION ){ /* all the remsets are useless now */ /* clean and put back mutator remsets */ @@ -150,6 +148,7 @@ pool_put_entry(gc_rootset_pool, root_set); root_set = pool_get_entry( collector_remset_pool ); } + } return; @@ -158,6 +157,8 @@ void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_ref) { + assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); + Vector_Block* root_set = mutator->rem_set; vector_block_add_entry(root_set, (unsigned int)p_ref); @@ -182,6 +183,8 @@ void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref) { + assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); + Vector_Block* root_set = collector->rem_set; vector_block_add_entry(root_set, (unsigned int)p_ref); @@ -204,6 +207,19 @@ collector->mark_stack = (MarkStack*)pool_get_entry(gc_metadata.free_task_pool); } +void collector_tracetask_add_entry(Collector* collector, Partial_Reveal_Object** p_ref) +{ + assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address()); + + 
Vector_Block* trace_task = (Vector_Block*)collector->trace_stack; + vector_block_add_entry(trace_task, (unsigned int)p_ref); + + if( !vector_block_is_full(trace_task)) return; + + pool_put_entry(gc_metadata.gc_rootset_pool, trace_task); + collector->trace_stack = (TraceStack*)pool_get_entry(gc_metadata.free_set_pool); +} + void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref) { assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address()); Index: common/gc_metadata.h =================================================================== --- common/gc_metadata.h (revision 477572) +++ common/gc_metadata.h (working copy) @@ -42,12 +42,13 @@ void gc_metadata_initialize(GC* gc); void gc_metadata_destruct(GC* gc); -void gc_metadata_reset(GC* gc); +void gc_set_rootset(GC* gc); void gc_reset_rootset(GC* gc); void gc_update_repointed_refs(Collector* collector); void collector_marktask_add_entry(Collector* collector, Partial_Reveal_Object* p_obj); +void collector_tracetask_add_entry(Collector* collector, Partial_Reveal_Object** p_ref); void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_slot); void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot); Index: gen/gc_for_barrier.cpp =================================================================== --- gen/gc_for_barrier.cpp (revision 477572) +++ gen/gc_for_barrier.cpp (working copy) @@ -24,7 +24,7 @@ /* All the write barrier interfaces need cleanup */ -Boolean NEED_BARRIER = FALSE; +Boolean NEED_BARRIER = TRUE; Boolean gc_requires_barriers() { return NEED_BARRIER; } @@ -46,7 +46,7 @@ { Mutator *mutator = (Mutator *)vm_get_gc_thread_local(); GC_Gen* gc = (GC_Gen*)mutator->gc; - if( !address_belongs_to_nursery((void *)p_object, gc)) return; + if( address_belongs_to_nursery((void *)p_object, gc)) return; Partial_Reveal_Object **p_slot; /* scan array object */ Index: gen/gen.cpp =================================================================== --- 
gen/gen.cpp (revision 477572) +++ gen/gen.cpp (working copy) @@ -143,16 +143,6 @@ void gc_set_los(GC_Gen* gc, Space* los){ gc->los = (Lspace*)los;} unsigned int gc_get_processor_num(GC_Gen* gc){ return gc->_num_processors;} -void reset_mutator_allocation_context(GC_Gen* gc) -{ - Mutator *mutator = gc->mutator_list; - while (mutator) { - alloc_context_reset((Allocator*)mutator); - mutator = mutator->next; - } - return; -} - static unsigned int gc_decide_collection_kind(GC_Gen* gc, unsigned int cause) { if(major_collection_needed(gc) || cause== GC_CAUSE_LOS_IS_FULL) @@ -166,13 +156,12 @@ gc->num_collections++; gc->collect_kind = gc_decide_collection_kind(gc, cause); + //gc->collect_kind = MAJOR_COLLECTION; /* Stop the threads and collect the roots. */ gc_reset_rootset((GC*)gc); vm_enumerate_root_set_all_threads(); - - /* reset metadata (all the rootsets and markstack) */ - gc_metadata_reset((GC*)gc); + gc_set_rootset((GC*)gc); if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE); @@ -203,7 +192,7 @@ if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE); - reset_mutator_allocation_context(gc); + gc_reset_mutator_context((GC*)gc); vm_resume_threads_after(); return; Index: mark_compact/mspace_collect_compact.cpp =================================================================== --- mark_compact/mspace_collect_compact.cpp (revision 477572) +++ mark_compact/mspace_collect_compact.cpp (working copy) @@ -91,9 +91,9 @@ return; } -static unsigned int gc_collection_result(GC* gc) +static Boolean gc_collection_result(GC* gc) { - unsigned int result = TRUE; + Boolean result = TRUE; for(unsigned i=0; inum_active_collectors; i++){ Collector* collector = gc->collectors[i]; result &= collector->result; @@ -212,7 +212,7 @@ return FALSE; } -static Boolean mspace_compute_object_target(Collector* collector, Mspace* mspace) +static void mspace_compute_object_target(Collector* collector, Mspace* mspace) { Block_Header* curr_block = collector->cur_compact_block; Block_Header* dest_block = 
collector->cur_target_block; @@ -232,8 +232,8 @@ dest_block->free = dest_addr; dest_block = mspace_get_next_target_block(collector, mspace); if(dest_block == NULL){ - collector->result = 0; - return FALSE; + collector->result = FALSE; + return; } dest_addr = GC_BLOCK_BODY(dest_block); @@ -255,7 +255,7 @@ curr_block = mspace_get_next_compact_block(collector, mspace); } - return TRUE; + return; } #include "../verify/verify_live_heap.h" Index: thread/collector.cpp =================================================================== --- thread/collector.cpp (revision 477572) +++ thread/collector.cpp (working copy) @@ -58,15 +58,18 @@ alloc_context_reset((Allocator*)collector); GC_Metadata* metadata = collector->gc->metadata; + assert(collector->rep_set==NULL); - collector->rep_set = pool_get_entry(metadata->free_set_pool); - collector->result = 1; - + if( !gc_requires_barriers() || collector->gc->collect_kind != MINOR_COLLECTION){ + collector->rep_set = pool_get_entry(metadata->free_set_pool); + } + if(gc_requires_barriers()){ assert(collector->rem_set==NULL); collector->rem_set = pool_get_entry(metadata->free_set_pool); } + collector->result = 1; return; } @@ -133,6 +136,13 @@ task_func(collector); + if(gc_requires_barriers()){ + GC_Metadata* metadata = collector->gc->metadata; + assert(collector->rem_set != NULL); + pool_put_entry(metadata->collector_remset_pool, collector->rem_set); + collector->rem_set = NULL; + } + collector_notify_work_done(collector); } @@ -141,7 +151,6 @@ static void collector_init_thread(Collector *collector) { - collector->trace_stack = new TraceStack(); /* only for MINOR_COLLECTION */ collector->obj_info_map = new ObjectMap(); collector->rem_set = NULL; collector->rep_set = NULL; Index: thread/mutator.cpp =================================================================== --- thread/mutator.cpp (revision 477572) +++ thread/mutator.cpp (working copy) @@ -35,6 +35,7 @@ if(gc_requires_barriers()){ mutator->rem_set = 
pool_get_entry(gc->metadata->free_set_pool); + assert(vector_block_is_empty(mutator->rem_set)); } lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv @@ -77,3 +78,13 @@ return; } +void gc_reset_mutator_context(GC* gc) +{ + Mutator *mutator = gc->mutator_list; + while (mutator) { + mutator->rem_set = pool_get_entry(gc->metadata->free_set_pool); + alloc_context_reset((Allocator*)mutator); + mutator = mutator->next; + } + return; +} \ No newline at end of file Index: thread/mutator.h =================================================================== --- thread/mutator.h (revision 477572) +++ thread/mutator.h (working copy) @@ -42,4 +42,6 @@ void mutator_destruct(GC* gc, void* tls_gc_info); void mutator_reset(GC *gc); +void gc_reset_mutator_context(GC* gc); + #endif /*ifndef _MUTATOR_H_ */ Index: trace_forward/fspace_collect_forward.cpp =================================================================== --- trace_forward/fspace_collect_forward.cpp (revision 477572) +++ trace_forward/fspace_collect_forward.cpp (working copy) @@ -1,207 +0,0 @@ -/* - * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** - * @author Xiao-Feng Li, 2006/10/05 - */ - -#include "fspace.h" -#include "../thread/collector.h" -#include "../common/gc_metadata.h" - -static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace) -{ - assert(obj_belongs_to_space(p_obj, (Space*)fspace)); - return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); -} - -static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) -{ - Partial_Reveal_Object *p_obj = *p_ref; - TraceStack *ts = collector->trace_stack; - - if (p_obj == NULL) return; - - /* the slot can be in tspace or fspace, we don't care. - we care only if the reference in the slot is pointing to fspace */ - if (obj_belongs_to_space(p_obj, collector->collect_space)) { - ts->push(p_ref); - } - - return; -} - -static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) -{ - if (!object_has_slots(p_obj)) return; - - void *slot; - - /* scan array object */ - if (object_is_array(p_obj)) { - Partial_Reveal_Object* array = p_obj; - assert(!obj_is_primitive_array(array)); - - int32 array_length = vector_get_length((Vector_Handle) array); - for (int i = 0; i < array_length; i++) { - slot = vector_get_element_address_ref((Vector_Handle) array, i); - scan_slot(collector, (Partial_Reveal_Object **)slot); - } - return; - } - - /* scan non-array object */ - int *offset_scanner = init_object_scanner(p_obj); - while (true) { - slot = offset_get_ref(offset_scanner, p_obj); - if (slot == NULL) break; - - scan_slot(collector, (Partial_Reveal_Object **)slot); - offset_scanner = offset_next_ref(offset_scanner); - } - - return; -} - -/* At this point, p_ref can be in anywhere like root, and other spaces, - * but *p_ref must be in fspace, since only slot which points to - * object in fspace could be added into TraceStack */ -#include "../verify/verify_live_heap.h" - -static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref) -{ - Space* 
space = collector->collect_space; - Partial_Reveal_Object *p_obj = *p_ref; - - assert(p_obj); - assert(obj_belongs_to_space(p_obj, space)); - - /* Fastpath: object has already been forwarded, update the ref slot */ - if(obj_is_forwarded_in_vt(p_obj)) { - assert(!obj_is_marked_in_vt(p_obj)); - *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); - return; - } - - /* only mark the objects that will remain in fspace */ - if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) { - assert(!obj_is_forwarded_in_vt(p_obj)); - /* this obj remains in fspace, remember its ref slot for next GC. */ - if( !address_belongs_to_space(p_ref, space) ){ - collector_remset_add_entry(collector, p_ref); - } - - if(fspace_mark_object((Fspace*)space, p_obj)) - scan_object(collector, p_obj); - - return; - } - - /* following is the logic for forwarding */ - Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj); - - /* if it is forwarded by other already, it is ok */ - if(!p_target_obj){ - *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); - return; - } - /* otherwise, we successfully forwarded */ - *p_ref = p_target_obj; - - /* we forwarded it, we need remember it for verification. 
FIXME:: thread id */ - if(verify_live_heap) { - event_collector_move_obj(p_obj, p_target_obj, collector); - } - - scan_object(collector, p_target_obj); - return; -} - -/* trace the root references from root set and remember sets */ -void trace_root(Collector* collector, Partial_Reveal_Object **ref) -{ - assert(*ref); - assert(obj_belongs_to_space(*ref, collector->collect_space)); - - TraceStack *ts = collector->trace_stack; - ts->push(ref); - - while(!ts->empty()) { - Partial_Reveal_Object **p_ref = ts->top(); - ts->pop(); - assert(p_ref); - trace_object(collector, p_ref); - } -} - -static void collector_trace_rootsets(Collector* collector) -{ - GC_Metadata* metadata = collector->gc->metadata; - - Space* space = collector->collect_space; - HashSet remslot_hash; - - /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */ - pool_iterator_init(metadata->gc_rootset_pool); - Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); - - while(root_set){ - unsigned int* iter = vector_block_iterator_init(root_set); - while(!vector_block_iterator_end(root_set,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; - iter = vector_block_iterator_advance(root_set,iter); - - assert(p_ref); - if(*p_ref == NULL) continue; - if (obj_belongs_to_space(*p_ref, space)) { - if (remslot_hash.find(p_ref) == remslot_hash.end()) { - remslot_hash.insert(p_ref); - trace_root(collector, p_ref); - } - } - } - pool_put_entry(metadata->free_set_pool, root_set); - root_set = pool_iterator_next(metadata->gc_rootset_pool); - } - - return; -} - -void update_rootset_interior_pointer(); - -static void update_relocated_refs(Collector* collector) -{ - update_rootset_interior_pointer(); -} - -void trace_forward_fspace(Collector* collector) -{ - GC* gc = collector->gc; - Fspace* space = (Fspace*)collector->collect_space; - - /* FIXME:: Single-threaded trace-forwarding for fspace currently */ - - collector_trace_rootsets(collector); 
- - update_relocated_refs(collector); - reset_fspace_for_allocation(space); - - return; - -} - - - Index: trace_forward/fspace_collect_forward_par.cpp =================================================================== --- trace_forward/fspace_collect_forward_par.cpp (revision 0) +++ trace_forward/fspace_collect_forward_par.cpp (revision 0) @@ -0,0 +1,248 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include "fspace.h" +#include "../thread/collector.h" +#include "../common/gc_metadata.h" + +static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace) +{ + assert(obj_belongs_to_space(p_obj, (Space*)fspace)); + return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); +} + +static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) +{ + Partial_Reveal_Object *p_obj = *p_ref; + if (p_obj == NULL) return; + + /* the slot can be in tspace or fspace, we don't care. 
+ we care only if the reference in the slot is pointing to fspace */ + if (obj_belongs_to_space(p_obj, collector->collect_space)) + collector_tracetask_add_entry(collector, p_ref); + + return; +} + +static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +{ + if (!object_has_slots(p_obj)) return; + + void *slot; + + /* scan array object */ + if (object_is_array(p_obj)) { + Partial_Reveal_Object* array = p_obj; + assert(!obj_is_primitive_array(array)); + + int32 array_length = vector_get_length((Vector_Handle) array); + for (int i = 0; i < array_length; i++) { + slot = vector_get_element_address_ref((Vector_Handle) array, i); + scan_slot(collector, (Partial_Reveal_Object **)slot); + } + return; + } + + /* scan non-array object */ + int *offset_scanner = init_object_scanner(p_obj); + while (true) { + slot = offset_get_ref(offset_scanner, p_obj); + if (slot == NULL) break; + + scan_slot(collector, (Partial_Reveal_Object **)slot); + offset_scanner = offset_next_ref(offset_scanner); + } + + return; +} + +/* At this point, p_ref can be in anywhere like root, and other spaces, + * but *p_ref must be in fspace, since only slot which points to + * object in fspace could be added into TraceStack */ +#include "../verify/verify_live_heap.h" + +static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref) +{ + Space* space = collector->collect_space; + Partial_Reveal_Object *p_obj = *p_ref; + + assert(p_obj); + assert(obj_belongs_to_space(p_obj, space)); + + /* Fastpath: object has already been forwarded, update the ref slot */ + if(obj_is_forwarded_in_vt(p_obj)) { + assert(!obj_is_marked_in_vt(p_obj)); + *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); + return; + } + + /* only mark the objects that will remain in fspace */ + if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) { + assert(!obj_is_forwarded_in_vt(p_obj)); + /* this obj remains in fspace, remember its ref slot for next GC. 
*/ + if( !address_belongs_to_space(p_ref, space) ) + collector_remset_add_entry(collector, p_ref); + + if(fspace_mark_object((Fspace*)space, p_obj)) + scan_object(collector, p_obj); + + return; + } + + /* following is the logic for forwarding */ + Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj); + + /* if it is forwarded by other already, it is ok */ + if( p_target_obj == NULL ){ + *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); + return; + } + /* otherwise, we successfully forwarded */ + *p_ref = p_target_obj; + + /* we forwarded it, we need remember it for verification. FIXME:: thread id */ + if(verify_live_heap) { + event_collector_move_obj(p_obj, p_target_obj, collector); + } + + scan_object(collector, p_target_obj); + return; +} + +void trace_object_seq(Collector* collector, Partial_Reveal_Object **p_ref); + +/* for tracing phase termination detection */ +static volatile unsigned int num_finished_collectors = 0; + +static void collector_trace_rootsets(Collector* collector) +{ + GC* gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + Space* space = collector->collect_space; + collector->trace_stack = (TraceStack*)pool_get_entry(metadata->free_set_pool); + //collector->trace_stack = new TraceStack(); + + unsigned int num_active_collectors = gc->num_active_collectors; + atomic_cas32( &num_finished_collectors, 0, num_active_collectors); + + HashSet remslot_hash; + +retry: + /* find root slots saved by 1. active mutators, 2. exited mutators, 3. 
last cycle collectors */ + Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool); + + while(root_set){ + unsigned int* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(root_set,iter); + + assert(p_ref); + if(*p_ref == NULL) continue; + if (obj_belongs_to_space(*p_ref, space)) { + if (remslot_hash.find(p_ref) == remslot_hash.end()) { + remslot_hash.insert(p_ref); + trace_object(collector, p_ref); + } + } + } + vector_block_clear(root_set); + pool_put_entry(metadata->free_set_pool, root_set); + root_set = pool_get_entry(metadata->gc_rootset_pool); + + } + + atomic_inc32(&num_finished_collectors); + while(num_finished_collectors != num_active_collectors){ + if( !pool_is_empty(metadata->gc_rootset_pool)){ + atomic_dec32(&num_finished_collectors); + goto retry; + } + } + + + /* now we are done, but each collector has a private task block to deal with */ + Vector_Block* trace_task = (Vector_Block*)collector->trace_stack; + TraceStack* trace_stack = new TraceStack(); + + unsigned int* iter = vector_block_iterator_init(trace_task); + while(!vector_block_iterator_end(trace_task,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)*iter; + iter = vector_block_iterator_advance(trace_task,iter); + trace_stack->push(p_ref); + } + + /* put back the last task to the free pool */ + vector_block_clear(trace_task); + pool_put_entry(metadata->free_set_pool, trace_task); + + collector->trace_stack = trace_stack; + while(!trace_stack->empty()){ + Partial_Reveal_Object** p_ref = trace_stack->top(); + trace_stack->pop(); + trace_object_seq(collector, p_ref); + } + + delete trace_stack; + collector->trace_stack = NULL; + + return; +} + +void update_rootset_interior_pointer(); + +static void update_relocated_refs(Collector* collector) +{ + update_rootset_interior_pointer(); +} + +static volatile unsigned int 
num_marking_collectors = 0; + +void trace_forward_fspace(Collector* collector) +{ + GC* gc = collector->gc; + Fspace* space = (Fspace*)collector->collect_space; + + unsigned int num_active_collectors = gc->num_active_collectors; + unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); + + collector_trace_rootsets(collector); + + old_num = atomic_inc32(&num_marking_collectors); + if( ++old_num == num_active_collectors ){ + /* last collector's world here */ + /* prepare for next phase */ /* let other collectors go */ + num_marking_collectors++; + } + while(num_marking_collectors != num_active_collectors + 1); + + /* the rest work is not enough for parallelization, so let only one thread go */ + if( collector->thread_handle != 0 ) return; + + update_relocated_refs(collector); + reset_fspace_for_allocation(space); + + return; + +} + + + Index: trace_forward/fspace_collect_forward_seq.cpp =================================================================== --- trace_forward/fspace_collect_forward_seq.cpp (revision 0) +++ trace_forward/fspace_collect_forward_seq.cpp (revision 0) @@ -0,0 +1,211 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @author Xiao-Feng Li, 2006/10/05 + */ + +#include "fspace.h" +#include "../thread/collector.h" +#include "../common/gc_metadata.h" + +static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace) +{ + assert(obj_belongs_to_space(p_obj, (Space*)fspace)); + return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary); +} + +static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref) +{ + Partial_Reveal_Object *p_obj = *p_ref; + TraceStack *ts = collector->trace_stack; + + if (p_obj == NULL) return; + + /* the slot can be in tspace or fspace, we don't care. + we care only if the reference in the slot is pointing to fspace */ + if (obj_belongs_to_space(p_obj, collector->collect_space)) { + ts->push(p_ref); + } + + return; +} + +static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +{ + if (!object_has_slots(p_obj)) return; + + void *slot; + + /* scan array object */ + if (object_is_array(p_obj)) { + Partial_Reveal_Object* array = p_obj; + assert(!obj_is_primitive_array(array)); + + int32 array_length = vector_get_length((Vector_Handle) array); + for (int i = 0; i < array_length; i++) { + slot = vector_get_element_address_ref((Vector_Handle) array, i); + scan_slot(collector, (Partial_Reveal_Object **)slot); + } + return; + } + + /* scan non-array object */ + int *offset_scanner = init_object_scanner(p_obj); + while (true) { + slot = offset_get_ref(offset_scanner, p_obj); + if (slot == NULL) break; + + scan_slot(collector, (Partial_Reveal_Object **)slot); + offset_scanner = offset_next_ref(offset_scanner); + } + + return; +} + +/* At this point, p_ref can be in anywhere like root, and other spaces, + * but *p_ref must be in fspace, since only slot which points to + * object in fspace could be added into TraceStack */ +#include "../verify/verify_live_heap.h" + +void trace_object_seq(Collector* collector, Partial_Reveal_Object **p_ref) +{ + Space* space 
= collector->collect_space; + Partial_Reveal_Object *p_obj = *p_ref; + + assert(p_obj); + assert(obj_belongs_to_space(p_obj, space)); + + /* Fastpath: object has already been forwarded, update the ref slot */ + if(obj_is_forwarded_in_vt(p_obj)) { + assert(!obj_is_marked_in_vt(p_obj)); + *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); + return; + } + + /* only mark the objects that will remain in fspace */ + if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) { + assert(!obj_is_forwarded_in_vt(p_obj)); + /* this obj remains in fspace, remember its ref slot for next GC. */ + if( !address_belongs_to_space(p_ref, space) ){ + collector_remset_add_entry(collector, p_ref); + } + + if(fspace_mark_object((Fspace*)space, p_obj)) + scan_object(collector, p_obj); + + return; + } + + /* following is the logic for forwarding */ + Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj); + + /* if it is forwarded by other already, it is ok */ + if(!p_target_obj){ + *p_ref = obj_get_forwarding_pointer_in_vt(p_obj); + return; + } + /* otherwise, we successfully forwarded */ + *p_ref = p_target_obj; + + /* we forwarded it, we need remember it for verification. 
FIXME:: thread id */ + if(verify_live_heap) { + event_collector_move_obj(p_obj, p_target_obj, collector); + } + + scan_object(collector, p_target_obj); + return; +} + +/* trace the root references from root set and remember sets */ +static void trace_root(Collector* collector, Partial_Reveal_Object **ref) +{ + assert(*ref); + assert(obj_belongs_to_space(*ref, collector->collect_space)); + + TraceStack *ts = collector->trace_stack; + ts->push(ref); + + while(!ts->empty()) { + Partial_Reveal_Object **p_ref = ts->top(); + ts->pop(); + assert(p_ref); + trace_object_seq(collector, p_ref); + } +} + +static void collector_trace_rootsets(Collector* collector) +{ + GC_Metadata* metadata = collector->gc->metadata; + + Space* space = collector->collect_space; + HashSet remslot_hash; + + collector->trace_stack = new TraceStack(); + + /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */ + Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool); + + while(root_set){ + unsigned int* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + iter = vector_block_iterator_advance(root_set,iter); + + assert(p_ref); + if(*p_ref == NULL) continue; + if (obj_belongs_to_space(*p_ref, space)) { + if (remslot_hash.find(p_ref) == remslot_hash.end()) { + remslot_hash.insert(p_ref); + trace_root(collector, p_ref); + } + } + } + vector_block_clear(root_set); + pool_put_entry(metadata->free_set_pool, root_set); + root_set = pool_get_entry(metadata->gc_rootset_pool); + } + + delete collector->trace_stack; + + return; +} + +void update_rootset_interior_pointer(); + +static void update_relocated_refs(Collector* collector) +{ + update_rootset_interior_pointer(); +} + +void trace_forward_fspace_seq(Collector* collector) +{ + GC* gc = collector->gc; + Fspace* space = (Fspace*)collector->collect_space; + + /* FIXME:: Single-threaded 
trace-forwarding for fspace currently */ + + collector_trace_rootsets(collector); + + update_relocated_refs(collector); + reset_fspace_for_allocation(space); + + return; + +} + + + Index: utils/vector_block.h =================================================================== --- utils/vector_block.h (revision 477572) +++ utils/vector_block.h (working copy) @@ -35,6 +35,7 @@ block->end = (unsigned int*)((unsigned int)block + size); block->head = block->start; block->tail = block->start; + memset(block->start, 0, (block->end - block->start)*BYTES_PER_WORD); return; } @@ -44,6 +45,9 @@ inline Boolean vector_block_is_full(Vector_Block* block) { return block->tail == block->end; } +inline Boolean vector_block_is_empty(Vector_Block* block) +{ return block->tail == block->head; } + inline void vector_block_add_entry(Vector_Block* block, unsigned int value) { assert(value && !*(block->tail));