Index: build/make/components/vm/gc_gen.xml
===================================================================
--- build/make/components/vm/gc_gen.xml (revision 481733)
+++ build/make/components/vm/gc_gen.xml (working copy)
@@ -16,7 +16,7 @@
-->
Index: vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
===================================================================
--- vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (revision 482464)
+++ vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (working copy)
@@ -60,7 +60,7 @@
static void gc_reset_block_for_collectors(GC* gc, Mspace* mspace)
{
- unsigned int free_blk_idx = mspace->free_block_idx;
+ unsigned int free_blk_idx = mspace->first_block_idx;
for(unsigned int i=0; i<gc->num_active_collectors; i++){
Collector* collector = gc->collectors[i];
unsigned int collector_target_idx = collector->cur_target_block->block_idx;
@@ -324,7 +324,7 @@
have references that are going to be repointed */
unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
- mark_scan_heap_par(collector);
+ mark_scan_heap(collector);
old_num = atomic_inc32(&num_marking_collectors);
if( ++old_num == num_active_collectors ){
Index: vm/gc_gen/src/mark_compact/mspace.h
===================================================================
--- vm/gc_gen/src/mark_compact/mspace.h (revision 482464)
+++ vm/gc_gen/src/mark_compact/mspace.h (working copy)
@@ -22,7 +22,7 @@
#define _MSC_SPACE_H_
#include "../common/gc_block.h"
-#include "../thread/thread_alloc.h"
+#include "../thread/gc_thread.h"
/* Mark-compaction space is organized into blocks */
typedef struct Mspace{
Index: vm/gc_gen/src/utils/vector_block.h
===================================================================
--- vm/gc_gen/src/utils/vector_block.h (revision 482464)
+++ vm/gc_gen/src/utils/vector_block.h (working copy)
@@ -25,24 +25,24 @@
void* next; /* point to next block */
unsigned int* head; /* point to the first filled entry */
unsigned int* tail; /* point to the entry after the last filled one */
- unsigned int* end; /* point to end of the block (right after the last entry) */
+ unsigned int* heap_end; /* point to the end of the block (right after the last entry) */
unsigned int* entries[1];
}Vector_Block;
inline void vector_block_init(Vector_Block* block, unsigned int size)
{
- block->end = (unsigned int*)((unsigned int)block + size);
- block->head = (unsigned int*)block->entries;
- block->tail = (unsigned int*)block->entries;
- memset(block->head, 0, (block->end - block->head)*BYTES_PER_WORD);
- return;
+ block->heap_end = (unsigned int*)((unsigned int)block + size);
+ block->head = (unsigned int*)block->entries;
+ block->tail = (unsigned int*)block->entries;
+ memset(block->entries, 0, (block->heap_end - (unsigned int*)block->entries)*BYTES_PER_WORD);
+ return;
}
inline unsigned int vector_block_entry_count(Vector_Block* block)
{ return (unsigned int)(block->tail - block->head); }
inline Boolean vector_block_is_full(Vector_Block* block)
-{ return block->tail == block->end; }
+{ return block->tail == block->heap_end; }
inline Boolean vector_block_is_empty(Vector_Block* block)
{ return block->tail == block->head; }
@@ -55,11 +55,11 @@
inline void vector_block_clear(Vector_Block* block)
{
+ block->head = (unsigned int*)block->entries;
+ block->tail = (unsigned int*)block->entries;
#ifdef _DEBUG
- memset(block->entries, 0, (block->end - (unsigned int*)block->entries)*BYTES_PER_WORD);
+ memset(block->entries, 0, (block->heap_end - (unsigned int*)block->entries)*BYTES_PER_WORD);
#endif
-
- block->tail = block->head;
}
/* Below is for sequential local access */
@@ -72,4 +72,53 @@
inline Boolean vector_block_iterator_end(Vector_Block* block, unsigned int* iter)
{ return iter == block->tail; }
+
+/* Below is for using Vector_Block as a stack (for trace-forwarding DFS order) */
+inline void vector_stack_init(Vector_Block* block)
+{
+ block->tail = block->heap_end;
+ block->head = block->heap_end;
+}
+
+inline void vector_stack_clear(Vector_Block* block)
+{
+ vector_stack_init(block);
+#ifdef _DEBUG
+ memset(block->entries, 0, (block->heap_end - (unsigned int*)block->entries)*BYTES_PER_WORD);
+#endif
+}
+
+inline Boolean vector_stack_is_empty(Vector_Block* block)
+{ return (block->head == block->tail); }
+
+inline Boolean vector_stack_is_full(Vector_Block* block)
+{ return (block->head == (unsigned int*)block->entries); }
+
+inline void vector_stack_push(Vector_Block* block, unsigned int value)
+{
+ block->head--;
+ assert(value && !*(block->head));
+ *(block->head) = value;
+}
+
+inline unsigned int vector_stack_pop(Vector_Block* block)
+{
+ unsigned int value = *block->head;
+#ifdef _DEBUG
+ *block->head = 0;
+#endif
+ block->head++;
+ return value;
+}
+
+inline void vector_block_integrity_check(Vector_Block* block)
+{
+ unsigned int* iter = vector_block_iterator_init(block);
+ while(!vector_block_iterator_end(block, iter)){
+ assert(*iter);
+ iter = vector_block_iterator_advance(block, iter);
+ }
+ return;
+}
+
#endif /* #ifndef _VECTOR_BLOCK_H_ */
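
The same Vector_Block now supports two access disciplines: FIFO-style iteration over [head, tail) for task buffers, and LIFO push/pop with head growing downward from heap_end for depth-first tracing. A minimal usage sketch, assuming the header above and a hypothetical alloc_raw_block() that hands back a block-sized chunk of memory:

    extern Vector_Block* alloc_raw_block(unsigned int size); /* hypothetical */

    void vector_block_demo(unsigned int size)
    {
      Vector_Block* block = alloc_raw_block(size);
      vector_block_init(block, size);       /* zeroes the entries, sets head/tail/heap_end */

      /* stack mode: head starts at heap_end and grows downward */
      vector_stack_init(block);
      vector_stack_push(block, 0x1000);     /* e.g. a ref slot cast to unsigned int */
      vector_stack_push(block, 0x2000);
      unsigned int top = vector_stack_pop(block);  /* 0x2000: LIFO, DFS order */

      /* iterator mode still walks the live entries [head, tail) */
      unsigned int* iter = vector_block_iterator_init(block);
      while(!vector_block_iterator_end(block, iter)){
        /* visit *iter; here it is 0x1000 */
        iter = vector_block_iterator_advance(block, iter);
      }
    }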
Index: vm/gc_gen/src/utils/sync_stack.h
===================================================================
--- vm/gc_gen/src/utils/sync_stack.h (revision 482464)
+++ vm/gc_gen/src/utils/sync_stack.h (working copy)
@@ -73,6 +73,7 @@
Node* new_entry = entry->next;
Node* temp = (Node*)atomic_casptr((volatile void**)&stack->top, new_entry, entry);
if(temp == entry){ /* got it */
+ entry->next = NULL;
return entry;
}
entry = stack->top;
@@ -93,6 +94,7 @@
entry = stack->top;
node->next = entry;
}
+ /* never comes here */
return FALSE;
}
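
For context, sync_stack is a CAS-based (Treiber) stack, and the added `entry->next = NULL` detaches the popped node so a stale link cannot leak to the caller. A condensed sketch of the pop loop this hunk patches, with types as declared in sync_stack.h:

    Node* sync_stack_pop_sketch(Sync_Stack* stack)
    {
      Node* entry = stack->top;
      while( entry != NULL ){
        Node* new_top = entry->next;
        /* succeeds only if top is still the entry we read */
        Node* temp = (Node*)atomic_casptr((volatile void**)&stack->top, new_top, entry);
        if(temp == entry){
          entry->next = NULL;  /* the added line: scrub the link before handing out */
          return entry;
        }
        entry = stack->top;    /* lost the race: reread and retry */
      }
      return NULL;             /* stack is empty */
    }

The usual ABA caveat for Treiber stacks applies; whether it can bite here depends on how blocks are recycled, which this patch does not show.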
Index: vm/gc_gen/src/utils/sync_pool.h
===================================================================
--- vm/gc_gen/src/utils/sync_pool.h (revision 482464)
+++ vm/gc_gen/src/utils/sync_pool.h (working copy)
@@ -29,10 +29,22 @@
inline void sync_pool_destruct(Pool* pool){ sync_stack_destruct(pool); }
inline Boolean pool_is_empty(Pool* pool){ return stack_is_empty(pool);}
-inline Vector_Block* pool_get_entry(Pool* pool){ return (Vector_Block*)sync_stack_pop(pool); }
-inline void pool_put_entry(Pool* pool, void* value){ assert(value); Boolean ok = sync_stack_push(pool, (Node*)value); assert(ok);}
+inline Vector_Block* pool_get_entry(Pool* pool)
+{
+ Vector_Block* block = (Vector_Block*)sync_stack_pop(pool);
+ return block;
+}
+
+inline void pool_put_entry(Pool* pool, void* value)
+{
+ assert(value);
+ Boolean ok = sync_stack_push(pool, (Node*)value);
+ assert(ok);
+}
+
inline void pool_iterator_init(Pool* pool){ sync_stack_iterate_init(pool);}
inline Vector_Block* pool_iterator_next(Pool* pool){ return (Vector_Block*)sync_stack_iterate_next(pool);}
#endif /* #ifndef _SYNC_POOL_H_ */
+
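
A pool is thus just a synchronized stack of Vector_Blocks, and the patch shuttles task buffers between a free pool and a work pool throughout. A sketch of that round trip, assuming the GC_Metadata pools referenced elsewhere in this patch:

    void pool_roundtrip_sketch(GC_Metadata* metadata)
    {
      /* grab an empty buffer; pool_get_entry returns NULL when the pool is empty */
      Vector_Block* block = pool_get_entry(metadata->free_task_pool);
      if(block == NULL) return;

      /* ... fill block via vector_stack_push ... */

      pool_put_entry(metadata->mark_task_pool, block);  /* pushing NULL would trip the assert */
    }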
Index: vm/gc_gen/src/jni/helper.cpp
===================================================================
--- vm/gc_gen/src/jni/helper.cpp (revision 0)
+++ vm/gc_gen/src/jni/helper.cpp (revision 0)
@@ -0,0 +1,22 @@
+#include <jni.h>
+#include
+#include "../thread/gc_thread.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Class: org_apache_harmony_drlvm_gc_gen_GCHelper
+ * Method: TLSFreeOffset
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_TLSGCOffset(JNIEnv *e, jclass c)
+{
+ return (jint)tls_gc_offset;
+}
+
+#ifdef __cplusplus
+}
+#endif
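
The mangled symbol decodes to org.apache.harmony.drlvm.gc_gen.GCHelper (JNI escapes the underscore in gc_gen as _1). The returned offset lets managed fast-path code locate the current thread's Allocator without a native call; in C terms, a consumer of the offset would compute what gc_get_tls() in gc_thread.h computes:

    #include "../thread/gc_thread.h"

    /* sketch: what a consumer of TLSGCOffset() effectively does with the value.
       vm_thread_local() returns the thread's TLS base; the Allocator pointer
       lives tls_gc_offset bytes into it. */
    Allocator* allocator_from_tls_offset(unsigned int offset)
    {
      char* tls_base = (char*)vm_thread_local();
      return *(Allocator**)(tls_base + offset);
    }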
Index: vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp (revision 481733)
+++ vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp (working copy)
@@ -172,7 +172,7 @@
have references that are going to be repointed */
atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
- mark_scan_heap_par(collector);
+ mark_scan_heap(collector);
unsigned int old_num = atomic_inc32(&num_marking_collectors);
if( ++old_num == num_active_collectors ){
Index: vm/gc_gen/src/trace_forward/fspace.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace.cpp (revision 481733)
+++ vm/gc_gen/src/trace_forward/fspace.cpp (working copy)
@@ -22,12 +22,11 @@
#include "fspace.h"
-float NURSERY_OBJECT_FORWARDING_RATIO = FORWARD_ALL;
-//float NURSERY_OBJECT_FORWARDING_RATIO = FORWARD_HALF;
+Boolean NOS_PARTIAL_FORWARD = TRUE;
void* nos_boundary = null; /* this is only for speeding up write barrier */
-Boolean forward_first_half;;
+Boolean forward_first_half;
void* object_forwarding_boundary=NULL;
Boolean fspace_mark_object(Fspace* fspace, Partial_Reveal_Object *p_obj)
@@ -119,7 +118,10 @@
nos_boundary = fspace->heap_start;
forward_first_half = TRUE;
- object_forwarding_boundary = (void*)&fspace->blocks[fspace->first_block_idx + (unsigned int)(fspace->num_managed_blocks * NURSERY_OBJECT_FORWARDING_RATIO)];
+ if( NOS_PARTIAL_FORWARD )
+ object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks >> 1];
+ else
+ object_forwarding_boundary = (void*)&fspace->blocks[fspace->num_managed_blocks];
return;
}
@@ -134,35 +136,52 @@
void reset_fspace_for_allocation(Fspace* fspace)
{
- if( NURSERY_OBJECT_FORWARDING_RATIO == FORWARD_ALL ||
- fspace->gc->collect_kind == MAJOR_COLLECTION )
+ unsigned int first_idx = fspace->first_block_idx;
+ unsigned int marked_start_idx = 0;
+ unsigned int marked_last_idx = 0;
+
+ if( fspace->gc->collect_kind == MAJOR_COLLECTION ||
+ NOS_PARTIAL_FORWARD == FALSE || !gc_requires_barriers())
{
- fspace->free_block_idx = fspace->first_block_idx;
- fspace->ceiling_block_idx = fspace->first_block_idx + fspace->num_managed_blocks - 1;
+ fspace->free_block_idx = first_idx;
+ fspace->ceiling_block_idx = first_idx + fspace->num_managed_blocks - 1;
forward_first_half = TRUE; /* only meaningful when partial forwarding is enabled */
}else{
if(forward_first_half){
- fspace->free_block_idx = fspace->first_block_idx;
+ fspace->free_block_idx = first_idx;
fspace->ceiling_block_idx = ((Block_Header*)object_forwarding_boundary)->block_idx - 1;
+ marked_start_idx = ((Block_Header*)object_forwarding_boundary)->block_idx - first_idx;
+ marked_last_idx = fspace->num_managed_blocks - 1;
}else{
fspace->free_block_idx = ((Block_Header*)object_forwarding_boundary)->block_idx;
- fspace->ceiling_block_idx = fspace->first_block_idx + fspace->num_managed_blocks - 1;
+ fspace->ceiling_block_idx = first_idx + fspace->num_managed_blocks - 1;
+ marked_start_idx = 0;
+ marked_last_idx = ((Block_Header*)object_forwarding_boundary)->block_idx - 1 - first_idx;
}
- forward_first_half = ~forward_first_half;
+ forward_first_half = forward_first_half^1;
}
- unsigned int first_idx = fspace->free_block_idx;
- unsigned int last_idx = fspace->ceiling_block_idx;
+
Block* blocks = fspace->blocks;
unsigned int num_freed = 0;
- for(unsigned int i = 0; i <= last_idx-first_idx; i++){
+ unsigned int new_start_idx = fspace->free_block_idx - first_idx;
+ unsigned int new_last_idx = fspace->ceiling_block_idx - first_idx;
+ for(unsigned int i = new_start_idx; i <= new_last_idx; i++){
Block_Header* block = (Block_Header*)&(blocks[i]);
if(block->status == BLOCK_FREE) continue;
- block_clear_mark_table(block);
block->status = BLOCK_FREE;
block->free = GC_BLOCK_BODY(block);
+ if( !gc_requires_barriers() || fspace->gc->collect_kind == MAJOR_COLLECTION )
+ block_clear_mark_table(block);
+
num_freed ++;
}
+
+ for(unsigned int i = marked_start_idx; i <= marked_last_idx; i++){
+ Block_Header* block = (Block_Header*)&(blocks[i]);
+ if(block->status == BLOCK_FREE) continue;
+ block_clear_markbits(block);
+ }
fspace->num_used_blocks = fspace->num_used_blocks - num_freed;
}
@@ -177,7 +196,7 @@
GC* gc = fspace->gc;
if(gc_requires_barriers()){
- /* generational GC. Only trace (mark) nos */
+ /* generational GC. Only trace nos */
collector_execute_task(gc, (TaskType)trace_forward_fspace, (Space*)fspace);
}else{
/* non-generational GC. Mark the whole heap (nos, mos, and los) */
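
With NOS_PARTIAL_FORWARD the nursery is split at its midpoint, each minor collection evacuates one half, and forward_first_half flips so allocation resumes in the half just emptied. A toy sketch of the flip arithmetic, with variables mirroring the fspace fields used above:

    /* boundary_idx is the block index of object_forwarding_boundary
       (the midpoint when partial forwarding is on) */
    void flip_sketch(unsigned int first_idx, unsigned int num_blocks,
                     unsigned int boundary_idx, Boolean* forward_first_half,
                     unsigned int* free_idx, unsigned int* ceiling_idx)
    {
      if(*forward_first_half){
        /* first half was evacuated: allocate there in the next cycle */
        *free_idx    = first_idx;
        *ceiling_idx = boundary_idx - 1;
      }else{
        /* second half was evacuated */
        *free_idx    = boundary_idx;
        *ceiling_idx = first_idx + num_blocks - 1;
      }
      *forward_first_half ^= 1;  /* alternate halves every minor collection */
    }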
Index: vm/gc_gen/src/trace_forward/fspace_collect_forward_seq.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace_collect_forward_seq.cpp (revision 482464)
+++ vm/gc_gen/src/trace_forward/fspace_collect_forward_seq.cpp (working copy)
@@ -1,205 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#include "fspace.h"
-#include "../thread/collector.h"
-#include "../common/gc_metadata.h"
-
-static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
-{
- assert(obj_belongs_to_space(p_obj, (Space*)fspace));
- return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
-}
-
-static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref)
-{
- Partial_Reveal_Object *p_obj = *p_ref;
- TraceStack *ts = collector->trace_stack;
-
- if (p_obj == NULL) return;
-
- /* the slot can be in tspace or fspace, we don't care.
- we care only if the reference in the slot is pointing to fspace */
- if (obj_belongs_to_space(p_obj, collector->collect_space)) {
- ts->push(p_ref);
- }
-
- return;
-}
-
-static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
-{
- if (!object_has_slots(p_obj)) return;
-
- void *slot;
-
- /* scan array object */
- if (object_is_array(p_obj)) {
- Partial_Reveal_Object* array = p_obj;
- assert(!obj_is_primitive_array(array));
-
- int32 array_length = vector_get_length((Vector_Handle) array);
- for (int i = 0; i < array_length; i++) {
- slot = vector_get_element_address_ref((Vector_Handle) array, i);
- scan_slot(collector, (Partial_Reveal_Object **)slot);
- }
- return;
- }
-
- /* scan non-array object */
- int *offset_scanner = init_object_scanner(p_obj);
- while (true) {
- slot = offset_get_ref(offset_scanner, p_obj);
- if (slot == NULL) break;
-
- scan_slot(collector, (Partial_Reveal_Object **)slot);
- offset_scanner = offset_next_ref(offset_scanner);
- }
-
- return;
-}
-
-/* At this point, p_ref can be in anywhere like root, and other spaces,
- * but *p_ref must be in fspace, since only slot which points to
- * object in fspace could be added into TraceStack */
-#include "../verify/verify_live_heap.h"
-
-void trace_object_seq(Collector* collector, Partial_Reveal_Object **p_ref)
-{
- Space* space = collector->collect_space;
- Partial_Reveal_Object *p_obj = *p_ref;
-
- assert(p_obj);
- assert(obj_belongs_to_space(p_obj, space));
-
- /* Fastpath: object has already been forwarded, update the ref slot */
- if(obj_is_forwarded_in_vt(p_obj)) {
- assert(!obj_is_marked_in_vt(p_obj));
- *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
- return;
- }
-
- /* only mark the objects that will remain in fspace */
- if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) {
- assert(!obj_is_forwarded_in_vt(p_obj));
- /* this obj remains in fspace, remember its ref slot for next GC. */
- if( !address_belongs_to_space(p_ref, space) ){
- collector_remset_add_entry(collector, p_ref);
- }
-
- if(fspace_mark_object((Fspace*)space, p_obj))
- scan_object(collector, p_obj);
-
- return;
- }
-
- /* following is the logic for forwarding */
- Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj);
-
- /* if it is forwarded by other already, it is ok */
- if(!p_target_obj){
- *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
- return;
- }
- /* otherwise, we successfully forwarded */
- *p_ref = p_target_obj;
-
- /* we forwarded it, we need remember it for verification. FIXME:: thread id */
- if(verify_live_heap) {
- event_collector_move_obj(p_obj, p_target_obj, collector);
- }
-
- scan_object(collector, p_target_obj);
- return;
-}
-
-/* trace the root references from root set and remember sets */
-static void trace_root(Collector* collector, Partial_Reveal_Object **ref)
-{
- assert(*ref);
- assert(obj_belongs_to_space(*ref, collector->collect_space));
-
- TraceStack *ts = collector->trace_stack;
- ts->push(ref);
-
- while(!ts->empty()) {
- Partial_Reveal_Object **p_ref = ts->top();
- ts->pop();
- assert(p_ref);
- trace_object_seq(collector, p_ref);
- }
-}
-
-static void collector_trace_rootsets(Collector* collector)
-{
- GC_Metadata* metadata = collector->gc->metadata;
-
- Space* space = collector->collect_space;
- collector->trace_stack = new TraceStack();
-
- /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */
- Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool);
-
- while(root_set){
- unsigned int* iter = vector_block_iterator_init(root_set);
- while(!vector_block_iterator_end(root_set,iter)){
- Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
- iter = vector_block_iterator_advance(root_set,iter);
-
- assert(p_ref);
- if(*p_ref == NULL) continue;
- if (obj_belongs_to_space(*p_ref, space))
- trace_root(collector, p_ref);
- }
- vector_block_clear(root_set);
- pool_put_entry(metadata->free_set_pool, root_set);
- root_set = pool_get_entry(metadata->gc_rootset_pool);
- }
-
- delete collector->trace_stack;
-
- return;
-}
-
-void update_rootset_interior_pointer();
-
-static void update_relocated_refs(Collector* collector)
-{
- update_rootset_interior_pointer();
-}
-
-void trace_forward_fspace_seq(Collector* collector)
-{
- GC* gc = collector->gc;
- Fspace* space = (Fspace*)collector->collect_space;
-
- /* FIXME:: Single-threaded trace-forwarding for fspace currently */
-
- collector_trace_rootsets(collector);
-
- update_relocated_refs(collector);
- reset_fspace_for_allocation(space);
-
- return;
-
-}
-
-
-
Index: vm/gc_gen/src/trace_forward/fspace.h
===================================================================
--- vm/gc_gen/src/trace_forward/fspace.h (revision 481733)
+++ vm/gc_gen/src/trace_forward/fspace.h (working copy)
@@ -21,18 +21,16 @@
#ifndef _FROM_SPACE_H_
#define _FROM_SPACE_H_
-#include "../thread/thread_alloc.h"
+#include "../thread/gc_thread.h"
/*
 * In our Gen GC, not all live objects are copied to tspace; the newest objects will
- * still be preserved in fspace, that means give them time to die.
+ * still be preserved in fspace, to give them time to die.
*/
-#define FORWARD_ALL 1.0
-#define FORWARD_HALF 0.5
-extern float NURSERY_OBJECT_FORWARDING_RATIO;
extern Boolean forward_first_half;
-extern void* object_forwarding_boundary; //objects allocated before this boundary remain in fspace
+/* boundary splitting fspace into the forwarding part and the remaining part */
+extern void* object_forwarding_boundary;
typedef struct Fspace {
/* <-- first couple of fields are overloaded as Space */
Index: vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (revision 481733)
+++ vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (working copy)
@@ -0,0 +1,252 @@
+
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "fspace.h"
+#include "../thread/collector.h"
+#include "../common/gc_metadata.h"
+
+static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
+{
+ assert(obj_belongs_to_space(p_obj, (Space*)fspace));
+ return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
+}
+
+static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref)
+{
+ Partial_Reveal_Object *p_obj = *p_ref;
+ if (p_obj == NULL) return;
+
+ /* the slot can be in tspace or fspace, we don't care.
+ we care only if the reference in the slot is pointing to fspace */
+ if (obj_belongs_to_space(p_obj, collector->collect_space))
+ collector_tracestack_push(collector, p_ref);
+
+ return;
+}
+
+static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
+{
+ if (!object_has_ref_field(p_obj)) return;
+
+ void *slot;
+
+ /* scan array object */
+ if (object_is_array(p_obj)) {
+ Partial_Reveal_Object* array = p_obj;
+ assert(!obj_is_primitive_array(array));
+
+ int32 array_length = vector_get_length((Vector_Handle) array);
+ for (int i = 0; i < array_length; i++) {
+ slot = vector_get_element_address_ref((Vector_Handle) array, i);
+ scan_slot(collector, (Partial_Reveal_Object **)slot);
+ }
+ return;
+ }
+
+ /* scan non-array object */
+ int *offset_scanner = init_object_scanner(p_obj);
+ while (true) {
+ slot = offset_get_ref(offset_scanner, p_obj);
+ if (slot == NULL) break;
+
+ scan_slot(collector, (Partial_Reveal_Object **)slot);
+ offset_scanner = offset_next_ref(offset_scanner);
+ }
+
+ return;
+}
+
+/* NOTE:: At this point p_ref can live anywhere (a root or another space), but *p_ref must be in fspace,
+ since only slots pointing to fspace objects are pushed onto the trace stack.
+ The catch is that *p_ref may already have been forwarded, so when we get here it points to tospace;
+ we simply return in that case. It may have been forwarded because of:
+ 1. two different slots containing the same reference;
+ 2. duplicate slots in the remset (we use an SSB for the remset, with no duplication filtering).
+ The same object can be traced by this thread itself, or by another thread.
+*/
+
+#include "../verify/verify_live_heap.h"
+
+static void forward_object(Collector* collector, Partial_Reveal_Object **p_ref)
+{
+ Space* space = collector->collect_space;
+ GC* gc = collector->gc;
+ Partial_Reveal_Object *p_obj = *p_ref;
+
+ if(!obj_belongs_to_space(p_obj, space)) return;
+
+ /* Fastpath: object has already been forwarded, update the ref slot */
+ if(obj_is_forwarded_in_vt(p_obj)) {
+ *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
+ return;
+ }
+
+ /* only mark the objects that will remain in fspace */
+ if(!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) {
+ assert(!obj_is_forwarded_in_vt(p_obj));
+ /* this obj remains in fspace, remember its ref slot for next GC if p_ref is not root */
+ if( !address_belongs_to_space(p_ref, space) && address_belongs_to_gc_heap(p_ref, gc))
+ collector_remset_add_entry(collector, p_ref);
+
+ if(fspace_mark_object((Fspace*)space, p_obj))
+ scan_object(collector, p_obj);
+
+ return;
+ }
+
+ /* following is the logic for forwarding */
+ Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj);
+
+ /* if p_target_obj is NULL, the object was forwarded by another thread.
+ Note the race here: the other thread may have claimed the object but not yet
+ set the forwarding pointer, so we must spin until the pointer appears.
+ Alternatively, collector_forward_object() could set the forwarding pointer
+ within the atomic instruction itself, which would require rolling back the
+ mos-allocated space on failure; that is easy to cancel with thread-local block allocation. */
+ if( p_target_obj == NULL ){
+ Partial_Reveal_Object *forward_ptr = obj_get_forwarding_pointer_in_vt(p_obj);
+ while(forward_ptr == NULL)
+ forward_ptr = obj_get_forwarding_pointer_in_vt(p_obj);
+
+ *p_ref = forward_ptr;
+ return;
+ }
+ /* otherwise, we successfully forwarded */
+ *p_ref = p_target_obj;
+
+ /* we forwarded it; we need to remember it for verification. */
+ if(verify_live_heap) {
+ event_collector_move_obj(p_obj, p_target_obj, collector);
+ }
+
+ scan_object(collector, p_target_obj);
+ return;
+}
+
+static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref)
+{
+ forward_object(collector, p_ref);
+
+ Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack;
+ while( !vector_stack_is_empty(trace_stack)){
+ p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack);
+ forward_object(collector, p_ref);
+ trace_stack = (Vector_Block*)collector->trace_stack;
+ }
+
+ return;
+}
+
+/* for tracing phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+static void collector_trace_rootsets(Collector* collector)
+{
+ GC* gc = collector->gc;
+ GC_Metadata* metadata = gc->metadata;
+
+ unsigned int num_active_collectors = gc->num_active_collectors;
+ atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
+
+ Space* space = collector->collect_space;
+ collector->trace_stack = pool_get_entry(metadata->free_task_pool);
+
+ /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */
+ Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool);
+
+ /* first step: copy all root objects to trace tasks. */
+ while(root_set){
+ unsigned int* iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+ if(*p_ref == NULL) continue; /* root refs can't be NULL, but remset entries can be */
+ if(obj_belongs_to_space(*p_ref, space)){
+ collector_tracestack_push(collector, p_ref);
+ }
+ }
+ vector_block_clear(root_set);
+ pool_put_entry(metadata->free_set_pool, root_set);
+ root_set = pool_get_entry(metadata->gc_rootset_pool);
+ }
+ /* put back the last trace_stack task */
+ pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+
+ /* second step: iterate over the trace tasks and forward objects */
+ collector->trace_stack = pool_get_entry(metadata->free_task_pool);
+
+retry:
+ Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
+
+ while(trace_task){
+ unsigned int* iter = vector_block_iterator_init(trace_task);
+ while(!vector_block_iterator_end(trace_task,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+ iter = vector_block_iterator_advance(trace_task,iter);
+ assert(*p_ref); /* a task can't be NULL, it was checked before put into the task stack */
+ /* the sequential version traced each object only once, using a local hashset,
+ but that couldn't catch repetition across multiple collectors. This is subject to more study. */
+
+ /* FIXME:: we should not let root_set go empty while working; others may want to steal from it.
+ Degenerate this stack into root_set, and grab another stack. */
+
+ /* a task has to belong to collected space, it was checked before put into the stack */
+ trace_object(collector, p_ref);
+ }
+ vector_stack_clear(trace_task);
+ pool_put_entry(metadata->free_task_pool, trace_task);
+ trace_task = pool_get_entry(metadata->mark_task_pool);
+ }
+
+ atomic_inc32(&num_finished_collectors);
+ while(num_finished_collectors != num_active_collectors){
+ if( pool_is_empty(metadata->mark_task_pool)) continue;
+ /* we can't grab the task here, because of a race condition. If we grab the task,
+ and the pool is empty, other threads may fall to this barrier and then pass. */
+ atomic_dec32(&num_finished_collectors);
+ goto retry;
+ }
+
+ /* now we are done, but each collector has a private stack that is empty */
+ trace_task = (Vector_Block*)collector->trace_stack;
+ vector_stack_clear(trace_task);
+ pool_put_entry(metadata->free_task_pool, trace_task);
+ collector->trace_stack = NULL;
+
+ return;
+}
+
+void trace_forward_fspace(Collector* collector)
+{
+ GC* gc = collector->gc;
+ Fspace* space = (Fspace*)collector->collect_space;
+
+ collector_trace_rootsets(collector);
+
+ /* the remaining work is not worth parallelizing, so let only one thread do it */
+ if( collector->thread_handle != 0 ) return;
+
+ gc_update_repointed_refs(collector);
+ reset_fspace_for_allocation(space);
+
+ return;
+
+}
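
collector_tracestack_push (declared in gc_metadata.h below) is what keeps this two-phase scheme flowing: it pushes onto the collector's private Vector_Block and, when that block fills, publishes it to the shared task pool and grabs a fresh one. Its body is not in this patch, so the following is a hedged sketch of the behavior the scheme relies on:

    void collector_tracestack_push_sketch(Collector* collector, void* p_task)
    {
      Vector_Block* stack = (Vector_Block*)collector->trace_stack;
      if(vector_stack_is_full(stack)){
        /* publish the full block as a shared task; idle collectors can grab it */
        pool_put_entry(collector->gc->metadata->mark_task_pool, stack);
        stack = pool_get_entry(collector->gc->metadata->free_task_pool);
        collector->trace_stack = stack;
      }
      vector_stack_push(stack, (unsigned int)p_task);
    }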
Index: vm/gc_gen/src/trace_forward/fspace_alloc.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace_alloc.cpp (revision 481733)
+++ vm/gc_gen/src/trace_forward/fspace_alloc.cpp (working copy)
@@ -60,9 +60,6 @@
}
/* FIXME:: the collection should be separated from the allocation */
-struct GC_Gen;
-void gc_gen_reclaim_heap(GC_Gen* gc, unsigned int cause);
-
void* fspace_alloc(unsigned size, Allocator *allocator)
{
void* p_return = NULL;
@@ -77,7 +74,7 @@
vm_gc_lock_enum();
/* after holding lock, try if other thread collected already */
if ( !fspace_has_free_block(fspace) ) {
- gc_gen_reclaim_heap((GC_Gen*)allocator->gc, GC_CAUSE_NOS_IS_FULL);
+ gc_reclaim_heap(allocator->gc, GC_CAUSE_NOS_IS_FULL);
}
vm_gc_unlock_enum();
}
Index: vm/gc_gen/src/trace_forward/fspace_collect_forward_par.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace_collect_forward_par.cpp (revision 482464)
+++ vm/gc_gen/src/trace_forward/fspace_collect_forward_par.cpp (working copy)
@@ -1,247 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#include "fspace.h"
-#include "../thread/collector.h"
-#include "../common/gc_metadata.h"
-
-static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
-{
- assert(obj_belongs_to_space(p_obj, (Space*)fspace));
- return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
-}
-
-static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref)
-{
- Partial_Reveal_Object *p_obj = *p_ref;
- if (p_obj == NULL) return;
-
- /* the slot can be in tspace or fspace, we don't care.
- we care only if the reference in the slot is pointing to fspace */
- if (obj_belongs_to_space(p_obj, collector->collect_space))
- collector_tracetask_add_entry(collector, p_ref);
-
- return;
-}
-
-static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
-{
- if (!object_has_slots(p_obj)) return;
-
- void *slot;
-
- /* scan array object */
- if (object_is_array(p_obj)) {
- Partial_Reveal_Object* array = p_obj;
- assert(!obj_is_primitive_array(array));
-
- int32 array_length = vector_get_length((Vector_Handle) array);
- for (int i = 0; i < array_length; i++) {
- slot = vector_get_element_address_ref((Vector_Handle) array, i);
- scan_slot(collector, (Partial_Reveal_Object **)slot);
- }
- return;
- }
-
- /* scan non-array object */
- int *offset_scanner = init_object_scanner(p_obj);
- while (true) {
- slot = offset_get_ref(offset_scanner, p_obj);
- if (slot == NULL) break;
-
- scan_slot(collector, (Partial_Reveal_Object **)slot);
- offset_scanner = offset_next_ref(offset_scanner);
- }
-
- return;
-}
-
-/* At this point, p_ref can be in anywhere like root, and other spaces,
- * but *p_ref must be in fspace, since only slot which points to
- * object in fspace could be added into TraceStack */
-#include "../verify/verify_live_heap.h"
-
-static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref)
-{
- Space* space = collector->collect_space;
- Partial_Reveal_Object *p_obj = *p_ref;
-
- assert(p_obj);
- /* this assert is no longer valid for parallel forwarding, because remset may have duplicate p_refs that
- are traced by difference collectors, and right after both check the p_obj is in fspace, and put into
- trace_stack, one thread forwards it quickly before the other runs to this assert.
- assert(obj_belongs_to_space(p_obj, space)); */
-
- /* Fastpath: object has already been forwarded, update the ref slot */
- if(obj_is_forwarded_in_vt(p_obj)) {
- assert(!obj_is_marked_in_vt(p_obj));
- *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
- return;
- }
-
- /* only mark the objects that will remain in fspace */
- if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) {
- assert(!obj_is_forwarded_in_vt(p_obj));
- /* this obj remains in fspace, remember its ref slot for next GC. */
- if( !address_belongs_to_space(p_ref, space) )
- collector_remset_add_entry(collector, p_ref);
-
- if(fspace_mark_object((Fspace*)space, p_obj))
- scan_object(collector, p_obj);
-
- return;
- }
-
- /* following is the logic for forwarding */
- Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj);
-
- /* if it is forwarded by other already, it is ok */
- if( p_target_obj == NULL ){
- *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
- return;
- }
- /* otherwise, we successfully forwarded */
- *p_ref = p_target_obj;
-
- /* we forwarded it, we need remember it for verification. FIXME:: thread id */
- if(verify_live_heap) {
- event_collector_move_obj(p_obj, p_target_obj, collector);
- }
-
- scan_object(collector, p_target_obj);
- return;
-}
-
-void trace_object_seq(Collector* collector, Partial_Reveal_Object **p_ref);
-
-/* for tracing phase termination detection */
-static volatile unsigned int num_finished_collectors = 0;
-
-static void collector_trace_rootsets(Collector* collector)
-{
- GC* gc = collector->gc;
- GC_Metadata* metadata = gc->metadata;
-
- Space* space = collector->collect_space;
- collector->trace_stack = (TraceStack*)pool_get_entry(metadata->free_set_pool);
- //collector->trace_stack = new TraceStack();
-
- unsigned int num_active_collectors = gc->num_active_collectors;
- atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
-
-retry:
- /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */
- Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool);
-
- while(root_set){
- unsigned int* iter = vector_block_iterator_init(root_set);
- while(!vector_block_iterator_end(root_set,iter)){
- Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
- iter = vector_block_iterator_advance(root_set,iter);
-
- assert(p_ref);
- if(*p_ref == NULL) continue;
- /* in sequential version, we only trace same object once, but we were using a local hashset,
- which couldn't catch the repetition between multiple collectors. This is subject to more study. */
- if (obj_belongs_to_space(*p_ref, space))
- trace_object(collector, p_ref);
- }
- vector_block_clear(root_set);
- pool_put_entry(metadata->free_set_pool, root_set);
- root_set = pool_get_entry(metadata->gc_rootset_pool);
-
- }
-
- atomic_inc32(&num_finished_collectors);
- while(num_finished_collectors != num_active_collectors){
- if( !pool_is_empty(metadata->gc_rootset_pool)){
- atomic_dec32(&num_finished_collectors);
- goto retry;
- }
- }
-
-
- /* now we are done, but each collector has a private task block to deal with */
- Vector_Block* trace_task = (Vector_Block*)collector->trace_stack;
- TraceStack* trace_stack = new TraceStack();
-
- unsigned int* iter = vector_block_iterator_init(trace_task);
- while(!vector_block_iterator_end(trace_task,iter)){
- Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)*iter;
- iter = vector_block_iterator_advance(trace_task,iter);
- trace_stack->push(p_ref);
- }
-
- /* put back the last task to the free pool */
- vector_block_clear(trace_task);
- pool_put_entry(metadata->free_set_pool, trace_task);
-
- collector->trace_stack = trace_stack;
- while(!trace_stack->empty()){
- Partial_Reveal_Object** p_ref = trace_stack->top();
- trace_stack->pop();
- trace_object_seq(collector, p_ref);
- }
-
- delete trace_stack;
- collector->trace_stack = NULL;
-
- return;
-}
-
-void update_rootset_interior_pointer();
-
-static void update_relocated_refs(Collector* collector)
-{
- update_rootset_interior_pointer();
-}
-
-static volatile unsigned int num_marking_collectors = 0;
-
-void trace_forward_fspace(Collector* collector)
-{
- GC* gc = collector->gc;
- Fspace* space = (Fspace*)collector->collect_space;
-
- unsigned int num_active_collectors = gc->num_active_collectors;
- unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
-
- collector_trace_rootsets(collector);
-
- old_num = atomic_inc32(&num_marking_collectors);
- if( ++old_num == num_active_collectors ){
- /* last collector's world here */
- /* prepare for next phase */ /* let other collectors go */
- num_marking_collectors++;
- }
- while(num_marking_collectors != num_active_collectors + 1);
-
- /* the rest work is not enough for parallelization, so let only one thread go */
- if( collector->thread_handle != 0 ) return;
-
- update_relocated_refs(collector);
- reset_fspace_for_allocation(space);
-
- return;
-
-}
-
-
-
Index: vm/gc_gen/src/thread/collector_alloc.cpp
===================================================================
--- vm/gc_gen/src/thread/collector_alloc.cpp (revision 481733)
+++ vm/gc_gen/src/thread/collector_alloc.cpp (working copy)
@@ -18,7 +18,7 @@
* @author Xiao-Feng Li, 2006/10/05
*/
-#include "thread_alloc.h"
+#include "gc_thread.h"
void* mos_alloc(unsigned size, Allocator *allocator);
@@ -32,17 +32,18 @@
assert(!obj_is_marked_in_vt(p_obj));
return NULL;
}
+ /* otherwise, get the obj size first: the CAS below will destroy its vtable. */
+ unsigned int size = vm_object_size(p_obj);
/* else, take the obj by setting the forwarding flag atomically
we don't put a simple bit in vt because we need to compute the obj size later. */
- if ((unsigned int)vt != atomic_cas32((unsigned int*)obj_get_vtraw_addr(p_obj), ((unsigned int)vt|FORWARDING_BIT_MASK), (unsigned int)vt)) {
+ if ((unsigned int)vt != atomic_cas32((unsigned int*)obj_get_vtraw_addr(p_obj), ((unsigned int)FORWARDING_BIT_MASK), (unsigned int)vt)) {
/* forwarded by other */
assert( obj_is_forwarded_in_vt(p_obj) && !obj_is_marked_in_vt(p_obj));
return NULL;
}
/* we hold the object, now forward it */
- unsigned int size = vm_object_size(p_obj);
Partial_Reveal_Object* p_targ_obj = (Partial_Reveal_Object*)mos_alloc(size, (Allocator*)collector);
/* mos should always has enough space to hold nos during collection */
assert(p_targ_obj);
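
The reorder matters because the CAS now installs FORWARDING_BIT_MASK alone, clobbering the vtable word, so vm_object_size() must run before it. A distilled sketch of the resulting protocol; obj_get_vtraw() and obj_set_forwarding_pointer_in_vt() stand in for helpers this patch does not show, and the vt-restore cast is glossed over:

    #include <string.h>

    Partial_Reveal_Object* try_forward_sketch(Collector* collector, Partial_Reveal_Object* p_obj)
    {
      unsigned int vt = (unsigned int)obj_get_vtraw(p_obj);   /* hypothetical raw read */
      if(vt & FORWARDING_BIT_MASK) return NULL;               /* already claimed by another thread */

      unsigned int size = vm_object_size(p_obj);              /* read size BEFORE the CAS */

      if(vt != atomic_cas32((unsigned int*)obj_get_vtraw_addr(p_obj),
                            FORWARDING_BIT_MASK, vt))
        return NULL;                                          /* lost the race */

      Partial_Reveal_Object* copy = (Partial_Reveal_Object*)mos_alloc(size, (Allocator*)collector);
      memcpy(copy, p_obj, size);                    /* the copy carries the clobbered vt word... */
      obj_set_vt(copy, vt);                         /* ...so restore it from the saved value */
      obj_set_forwarding_pointer_in_vt(p_obj, copy);/* publish: spinners in forward_object proceed */
      return copy;
    }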
Index: vm/gc_gen/src/thread/gc_thread.cpp
===================================================================
--- vm/gc_gen/src/thread/gc_thread.cpp (revision 0)
+++ vm/gc_gen/src/thread/gc_thread.cpp (revision 0)
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "gc_thread.h"
+
+static hythread_tls_key_t tls_gc_key;
+unsigned int tls_gc_offset;
+
+void gc_tls_init()
+{
+ hythread_tls_alloc(&tls_gc_key);
+ tls_gc_offset = hythread_tls_get_offset(tls_gc_key);
+
+ return;
+}
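
gc_tls_init() reserves one hythread TLS slot at startup and caches its byte offset in tls_gc_offset; every thread then stashes its GC-side context there. The lifecycle as wired up by this patch, sketched:

    void gc_tls_lifecycle_sketch()
    {
      gc_tls_init();                        /* once, at GC startup */

      /* per thread, as mutator_initialize() now does: */
      Mutator* mutator = (Mutator*)STD_MALLOC(sizeof(Mutator));
      gc_set_tls(mutator);                  /* bind to the current thread */

      /* on any fast path thereafter: */
      Allocator* allocator = (Allocator*)gc_get_tls();
      /* ... allocate through allocator ... */
    }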
Index: vm/gc_gen/src/thread/mutator_alloc.cpp
===================================================================
--- vm/gc_gen/src/thread/mutator_alloc.cpp (revision 481733)
+++ vm/gc_gen/src/thread/mutator_alloc.cpp (working copy)
@@ -18,23 +18,31 @@
* @author Xiao-Feng Li, 2006/10/05
*/
-#include "thread_alloc.h"
+#include "gc_thread.h"
#include "../gen/gen.h"
-Managed_Object_Handle gc_alloc(unsigned size, Allocation_Handle ah, void *gc_tls)
+/* classloader sometimes sets the bit for finalizable objects (?) */
+inline unsigned int get_instance_data_size (unsigned int encoded_size)
+{ return (encoded_size & NEXT_TO_HIGH_BIT_CLEAR_MASK); }
+
+Managed_Object_Handle gc_alloc(unsigned size, Allocation_Handle ah, void *unused_gc_tls)
{
Managed_Object_Handle p_obj = NULL;
/* All requests for space should be multiples of 4 (IA32) or 8(IPF) */
assert((size % GC_OBJECT_ALIGNMENT) == 0);
- assert(gc_tls == vm_get_gc_thread_local());
assert(ah);
+ /* FIXME:: this is outdated actually */
+ size = get_instance_data_size(size);
+
+ Allocator* allocator = (Allocator*)gc_get_tls();
+
if ( size > GC_OBJ_SIZE_THRESHOLD )
- p_obj = (Managed_Object_Handle)los_alloc(size, (Allocator*)gc_tls);
+ p_obj = (Managed_Object_Handle)los_alloc(size, allocator);
else
- p_obj = (Managed_Object_Handle)nos_alloc(size, (Allocator*)gc_tls);
+ p_obj = (Managed_Object_Handle)nos_alloc(size, allocator);
assert(p_obj);
obj_set_vt((Partial_Reveal_Object*)p_obj, ah);
@@ -43,19 +51,20 @@
}
-Managed_Object_Handle gc_alloc_fast (unsigned size, Allocation_Handle ah, void *gc_tls)
+Managed_Object_Handle gc_alloc_fast (unsigned size, Allocation_Handle ah, void *unused_gc_tls)
{
/* All requests for space should be multiples of 4 (IA32) or 8(IPF) */
assert((size % GC_OBJECT_ALIGNMENT) == 0);
- assert(gc_tls == vm_get_gc_thread_local());
assert(ah);
/* large object should be handled specially */
if ( size > GC_OBJ_SIZE_THRESHOLD ) return NULL;
-
+
+ Allocator* allocator = (Allocator*)gc_get_tls();
+
/* Try to allocate an object from the current Thread Local Block */
- Managed_Object_Handle p_obj = NULL;
- p_obj = (Managed_Object_Handle)thread_local_alloc(size, (Allocator*)gc_tls);
+ Managed_Object_Handle p_obj;
+ p_obj = (Managed_Object_Handle)thread_local_alloc(size, allocator);
if(p_obj == NULL) return NULL;
obj_set_vt((Partial_Reveal_Object*)p_obj, ah);
Index: vm/gc_gen/src/thread/gc_thread.h
===================================================================
--- vm/gc_gen/src/thread/gc_thread.h (revision 0)
+++ vm/gc_gen/src/thread/gc_thread.h (revision 0)
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#ifndef _GC_THREAD_H_
+#define _GC_THREAD_H_
+
+#include "../common/gc_block.h"
+#include "../common/gc_metadata.h"
+
+extern unsigned int tls_gc_offset;
+
+inline void* gc_get_tls()
+{
+ void* tls_base = vm_thread_local();
+ return (void*)*(unsigned int*)((char*)tls_base + tls_gc_offset);
+}
+
+inline void gc_set_tls(void* gc_tls_info)
+{
+ void* tls_base = vm_thread_local();
+ *(unsigned int*)((char*)tls_base + tls_gc_offset) = (unsigned int)gc_tls_info;
+}
+
+/* NOTE:: don't change the position of free/ceiling, because the offsets are constants for inlining */
+typedef struct Allocator{
+ void *free;
+ void *ceiling;
+ Block *alloc_block;
+ Space* alloc_space;
+ GC *gc;
+ VmThreadHandle thread_handle; /* This thread; */
+}Allocator;
+
+inline Partial_Reveal_Object* thread_local_alloc(unsigned int size, Allocator* allocator)
+{
+ void* free = allocator->free;
+ void* ceiling = allocator->ceiling;
+
+ void* new_free = (void*)((unsigned int)free + size);
+
+ if (new_free <= ceiling){
+    allocator->free = new_free;
+ return (Partial_Reveal_Object*)free;
+ }
+
+ return NULL;
+}
+
+inline void alloc_context_reset(Allocator* allocator)
+{
+ allocator->free = NULL;
+ allocator->ceiling = NULL;
+ allocator->alloc_block = NULL;
+
+ return;
+}
+
+#endif /* #ifndef _GC_THREAD_H_ */
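
thread_local_alloc is the inlinable bump-pointer fast path: free advances by size until it would pass ceiling, at which point the caller must refill the allocator from its space. A hedged usage sketch; alloc_new_block() is hypothetical, the real refills live in the space allocators:

    extern Boolean alloc_new_block(Allocator* allocator);  /* hypothetical refill */

    void* alloc_sketch(unsigned int size, Allocator* allocator)
    {
      /* fast path: bump 'free' within the current thread-local block */
      Partial_Reveal_Object* p_obj = thread_local_alloc(size, allocator);
      if(p_obj != NULL) return p_obj;

      /* slow path: block exhausted; grab a new one and retry (or trigger GC above) */
      if(!alloc_new_block(allocator)) return NULL;
      return thread_local_alloc(size, allocator);
    }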
Index: vm/gc_gen/src/thread/thread_alloc.h
===================================================================
--- vm/gc_gen/src/thread/thread_alloc.h (revision 482464)
+++ vm/gc_gen/src/thread/thread_alloc.h (working copy)
@@ -1,58 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#ifndef _THREAD_ALLOC_H_
-#define _THREAD_ALLOC_H_
-
-#include "../common/gc_block.h"
-#include "../common/gc_metadata.h"
-
-typedef struct Allocator{
- void *free;
- void *ceiling;
- Block *alloc_block;
- Space* alloc_space;
- GC *gc;
- VmThreadHandle thread_handle; /* This thread; */
-}Allocator;
-
-inline Partial_Reveal_Object* thread_local_alloc(unsigned int size, Allocator* allocator)
-{
- Partial_Reveal_Object* p_return_obj=(Partial_Reveal_Object*)allocator->free;
- unsigned int new_free = size+(unsigned int)p_return_obj;
-
- if (new_free <= (unsigned int)allocator->ceiling){
- allocator->free=(void*)new_free;
- return p_return_obj;
- }
-
- return NULL;
-}
-
-inline void alloc_context_reset(Allocator* allocator)
-{
- allocator->free = NULL;
- allocator->ceiling = NULL;
- allocator->alloc_block = NULL;
-
- return;
-}
-
-#endif /* #ifndef _THREAD_ALLOC_H_ */
Index: vm/gc_gen/src/thread/collector.h
===================================================================
--- vm/gc_gen/src/thread/collector.h (revision 481733)
+++ vm/gc_gen/src/thread/collector.h (working copy)
@@ -37,8 +37,7 @@
/* FIXME:: for testing */
Space* collect_space;
- TraceStack *trace_stack;
- MarkStack* mark_stack;
+ Vector_Block *trace_stack;
Vector_Block* rep_set; /* repointed set */
Vector_Block* rem_set;
Index: vm/gc_gen/src/thread/mutator.cpp
===================================================================
--- vm/gc_gen/src/thread/mutator.cpp (revision 481733)
+++ vm/gc_gen/src/thread/mutator.cpp (working copy)
@@ -23,10 +23,10 @@
struct GC_Gen;
Space* gc_get_nos(GC_Gen* gc);
-void mutator_initialize(GC* gc, void *gc_information)
+void mutator_initialize(GC* gc, void *unused_gc_information)
{
/* FIXME:: make sure gc_info is cleared */
- Mutator *mutator = (Mutator *) gc_information;
+ Mutator *mutator = (Mutator *)STD_MALLOC(sizeof(Mutator));
mutator->free = NULL;
mutator->ceiling = NULL;
mutator->alloc_block = NULL;
@@ -46,13 +46,16 @@
unlock(gc->mutator_list_lock); // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
gc->num_mutators++;
+
+ gc_set_tls(mutator);
+
return;
}
-void mutator_destruct(GC* gc, void *gc_information)
+void mutator_destruct(GC* gc, void *unused_gc_information)
{
- Mutator *mutator = (Mutator *)gc_information;
+ Mutator *mutator = (Mutator *)gc_get_tls();
if(gc_requires_barriers()){ /* put back the remset when a mutator exits */
pool_put_entry(gc->metadata->mutator_remset_pool, mutator->rem_set);
@@ -75,6 +78,9 @@
unlock(gc->mutator_list_lock); // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
gc->num_mutators--;
+
+ //gc_set_tls(NULL);
+
return;
}
@@ -87,4 +93,5 @@
mutator = mutator->next;
}
return;
-}
\ No newline at end of file
+}
+
Index: vm/gc_gen/src/common/gc_metadata.h
===================================================================
--- vm/gc_gen/src/common/gc_metadata.h (revision 481733)
+++ vm/gc_gen/src/common/gc_metadata.h (working copy)
@@ -48,8 +48,7 @@
void gc_reset_rootset(GC* gc);
void gc_update_repointed_refs(Collector* collector);
-void collector_marktask_add_entry(Collector* collector, Partial_Reveal_Object* p_obj);
-void collector_tracetask_add_entry(Collector* collector, Partial_Reveal_Object** p_ref);
+void collector_tracestack_push(Collector* collector, void* p_task);
void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_slot);
void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot);
Index: vm/gc_gen/src/common/mark_scan.cpp
===================================================================
--- vm/gc_gen/src/common/mark_scan.cpp (revision 0)
+++ vm/gc_gen/src/common/mark_scan.cpp (revision 0)
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "gc_metadata.h"
+#include "../thread/collector.h"
+#include "../gen/gen.h"
+
+static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+ Partial_Reveal_Object* p_obj = *p_ref;
+ if(p_obj==NULL) return;
+
+ Space* obj_space = space_of_addr(collector->gc, p_obj);
+
+ /* if obj to be moved, its ref slot needs remembering for later update */
+ if(obj_space->move_object)
+ collector_repset_add_entry(collector, p_ref);
+
+ if(obj_space->mark_object_func(obj_space, p_obj))
+ collector_tracestack_push(collector, p_obj);
+
+ return;
+}
+
+static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
+{
+ if( !object_has_ref_field(p_obj) ) return;
+
+ /* scan array object */
+ if (object_is_array(p_obj)) {
+ Partial_Reveal_Object* array = p_obj;
+ assert(!obj_is_primitive_array(array));
+
+ int32 array_length = vector_get_length((Vector_Handle) array);
+ for (int i = 0; i < array_length; i++) {
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
+ scan_slot(collector, p_ref);
+ }
+ return;
+ }
+
+ /* scan non-array object */
+ int *offset_scanner = init_object_scanner(p_obj);
+ while (true) {
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
+ if (p_ref == NULL) break; /* terminating ref slot */
+
+ scan_slot(collector, p_ref);
+ offset_scanner = offset_next_ref(offset_scanner);
+ }
+
+ return;
+}
+
+
+static void trace_object(Collector* collector, Partial_Reveal_Object *p_obj)
+{
+ scan_object(collector, p_obj);
+
+ Vector_Block* trace_stack = collector->trace_stack;
+ while( !vector_stack_is_empty(trace_stack)){
+ p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack);
+ scan_object(collector, p_obj);
+ trace_stack = collector->trace_stack;
+ }
+
+ return;
+}
+
+/* for marking phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+/* NOTE:: Only marking in object header is idempotent */
+void mark_scan_heap(Collector* collector)
+{
+ GC* gc = collector->gc;
+ GC_Metadata* metadata = gc->metadata;
+
+ /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
+ unsigned int num_active_collectors = gc->num_active_collectors;
+ atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
+
+ collector->trace_stack = pool_get_entry(metadata->free_task_pool);
+
+ Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+ /* first step: copy all root objects to mark tasks.
+ FIXME:: can be done sequentially before coming here to eliminate atomic ops */
+ while(root_set){
+ unsigned int* iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+
+ Partial_Reveal_Object* p_obj = *p_ref;
+      /* a root ref can't be NULL (a remset may have NULL entries, but this function only
+         runs for MAJOR_COLLECTION or for non-generational MINOR_COLLECTION, which has no remset) */
+      assert( ((gc->collect_kind==MINOR_COLLECTION && !gc_requires_barriers()) || gc->collect_kind==MAJOR_COLLECTION) && p_obj!=NULL);
+ if(p_obj==NULL) continue;
+      /* we have to mark the object before putting it into the mark task, because
+         two slots may contain the same object. It would then be scanned twice
+         and its ref slots recorded twice. The problem occurs once the ref slot has
+         been updated the first time with the new position: the second update no
+         longer finds the old position it expects in the slot.
+         This can be worked around if we want.
+      */
+ Space* space = space_of_addr(gc, p_obj);
+ if( !space->mark_object_func(space, p_obj) ) continue;
+
+ collector_tracestack_push(collector, p_obj);
+ }
+ root_set = pool_iterator_next(metadata->gc_rootset_pool);
+ }
+ /* put back the last trace_stack task */
+ pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+
+ /* second step: iterate over the mark tasks and scan objects */
+ /* get a task buf for the mark stack */
+ collector->trace_stack = pool_get_entry(metadata->free_task_pool);
+
+retry:
+ Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
+
+ while(mark_task){
+ unsigned int* iter = vector_block_iterator_init(mark_task);
+ while(!vector_block_iterator_end(mark_task,iter)){
+ Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
+ iter = vector_block_iterator_advance(mark_task,iter);
+
+      /* FIXME:: we should not let mark_task go empty while working; others may want to steal from it.
+         Degenerate this stack into mark_task, and grab another mark_task. */
+ trace_object(collector, p_obj);
+ }
+ /* run out one task, put back to the pool and grab another task */
+ vector_stack_clear(mark_task);
+ pool_put_entry(metadata->free_task_pool, mark_task);
+ mark_task = pool_get_entry(metadata->mark_task_pool);
+ }
+
+ /* termination detection. This is also a barrier.
+     NOTE:: We can simply spin waiting on num_finished_collectors, because each
+     newly generated task is sure to be processed eventually by the collector that generated it.
+     So the code below is only a load-balancing optimization. */
+ atomic_inc32(&num_finished_collectors);
+ while(num_finished_collectors != num_active_collectors){
+ if( !pool_is_empty(metadata->mark_task_pool)){
+ atomic_dec32(&num_finished_collectors);
+ goto retry;
+ }
+ }
+
+ /* put back the last mark stack to the free pool */
+ mark_task = (Vector_Block*)collector->trace_stack;
+ vector_stack_clear(mark_task);
+ pool_put_entry(metadata->free_task_pool, mark_task);
+ collector->trace_stack = NULL;
+
+ /* put back last repointed refs set recorded during marking */
+ pool_put_entry(metadata->collector_repset_pool, collector->rep_set);
+ collector->rep_set = NULL;
+
+ return;
+}
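
Isolated from the marking specifics, the termination barrier at the end of mark_scan_heap is this pattern: each collector drains the shared pool, votes "finished", and retracts its vote if new work shows up before everyone has voted:

    /* sketch: cooperative termination detection for a shared task pool */
    static volatile unsigned int num_finished = 0;

    void drain_until_quiet(GC_Metadata* metadata, unsigned int num_active_collectors)
    {
    retry:
      Vector_Block* task = pool_get_entry(metadata->mark_task_pool);
      while(task){
        /* ... process entries, possibly generating new tasks ... */
        vector_stack_clear(task);
        pool_put_entry(metadata->free_task_pool, task);
        task = pool_get_entry(metadata->mark_task_pool);
      }

      atomic_inc32(&num_finished);
      while(num_finished != num_active_collectors){
        if(!pool_is_empty(metadata->mark_task_pool)){
          atomic_dec32(&num_finished);  /* new work appeared: rejoin before claiming done */
          goto retry;
        }
      }
      /* every collector idle and the pool empty: marking has terminated */
    }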
Index: vm/gc_gen/src/common/interior_pointer.h
===================================================================
--- vm/gc_gen/src/common/interior_pointer.h (revision 481733)
+++ vm/gc_gen/src/common/interior_pointer.h (working copy)
@@ -1,3 +1,23 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
#ifndef INTERIOR_POINTER_H
#define INTERIOR_POINTER_H
Index: vm/gc_gen/src/common/mark_scan_par.cpp
===================================================================
--- vm/gc_gen/src/common/mark_scan_par.cpp (revision 482464)
+++ vm/gc_gen/src/common/mark_scan_par.cpp (working copy)
@@ -1,194 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#include "gc_metadata.h"
-#include "../thread/collector.h"
-#include "../gen/gen.h"
-
-static void scan_slot_par(Collector* collector, Partial_Reveal_Object** p_ref)
-{
- Partial_Reveal_Object* p_obj = *p_ref;
- if(p_obj==NULL) return;
-
- Space* obj_space = space_of_addr(collector->gc, p_obj);
-
- /* if obj to be moved, its ref slot needs remembering for later update */
- if(obj_space->move_object)
- collector_repset_add_entry(collector, p_ref);
-
- if(obj_space->mark_object_func(obj_space, p_obj))
- collector_marktask_add_entry(collector, p_obj);
-
- return;
-}
-
-static void scan_object_par(Collector* collector, Partial_Reveal_Object *p_obj)
-{
- if( !object_has_slots(p_obj) ) return;
-
- /* scan array object */
- if (object_is_array(p_obj)) {
- Partial_Reveal_Object* array = p_obj;
- assert(!obj_is_primitive_array(array));
-
- int32 array_length = vector_get_length((Vector_Handle) array);
- for (int i = 0; i < array_length; i++) {
- Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
- scan_slot_par(collector, p_ref);
- }
- return;
- }
-
- /* scan non-array object */
- int *offset_scanner = init_object_scanner(p_obj);
- while (true) {
- Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
- if (p_ref == NULL) break; /* terminating ref slot */
-
- scan_slot_par(collector, p_ref);
- offset_scanner = offset_next_ref(offset_scanner);
- }
-
- return;
-}
-
-extern void scan_object_seq(Collector*, Partial_Reveal_Object *);
-
-/* for marking phase termination detection */
-static volatile unsigned int num_finished_collectors = 0;
-
-/* NOTE:: Only marking in object header is idempotent */
-void mark_scan_heap_par(Collector* collector)
-{
- GC* gc = collector->gc;
- /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
- unsigned int num_active_collectors = gc->num_active_collectors;
- atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
-
- GC_Metadata* metadata = gc->metadata;
-
- collector->mark_stack = (MarkStack*)pool_get_entry(metadata->free_task_pool);
-
- Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
-
- /* first step: copy all root objects to mark tasks.
- FIXME:: can be done sequentially before coming here to eliminate atomic ops */
- while(root_set){
- unsigned int* iter = vector_block_iterator_init(root_set);
- while(!vector_block_iterator_end(root_set,iter)){
- Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
- iter = vector_block_iterator_advance(root_set,iter);
-
- Partial_Reveal_Object* p_obj = *p_ref;
- /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */
- assert((gc->collect_kind==MAJOR_COLLECTION) && (p_obj!= NULL));
- /* we have to mark the object before put it into marktask, because
- it is possible to have two slots containing a same object. They will
- be scanned twice and their ref slots will be recorded twice. Problem
- occurs after the ref slot is updated first time with new position
- and the second time the value is the ref slot is the old position as expected.
- This can be worked around if we want.
- */
- Space* space = space_of_addr(gc, p_obj);
- if( !space->mark_object_func(space, p_obj) ) continue;
-
- collector_marktask_add_entry(collector, p_obj);
- }
- root_set = pool_iterator_next(metadata->gc_rootset_pool);
- }
-
- pool_put_entry(metadata->mark_task_pool, collector->mark_stack);
-
- /* second step: iterate over the mark tasks and scan objects */
- /* get a task buf to push new tasks */
- collector->mark_stack = (MarkStack*)pool_get_entry(metadata->free_task_pool);
-
-retry:
- Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
- while(mark_task){
- unsigned int* iter = vector_block_iterator_init(mark_task);
- while(!vector_block_iterator_end(mark_task,iter)){
- Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
- iter = vector_block_iterator_advance(mark_task,iter);
-
- scan_object_par(collector, p_obj);
- }
- /* run out one task, put back to the pool and grab another task */
- vector_block_clear(mark_task);
- pool_put_entry(metadata->free_task_pool, mark_task);
- mark_task = pool_get_entry(metadata->mark_task_pool);
- }
-
- /* termination detection. This is also a barrier.
- NOTE:: actually we don't need this complexity. We can simply
- spin waiting for num_finished_collectors, because each generated new
- task would surely be processed by its generating collector eventually.
- So code below is for load balance. */
- atomic_inc32(&num_finished_collectors);
- while(num_finished_collectors != num_active_collectors){
- if( !pool_is_empty(metadata->mark_task_pool)){
- atomic_dec32(&num_finished_collectors);
- goto retry;
- }
- }
-
- /* up to now, we donot have any tasks in task_pool, but
- each collector has remaining tasks in its local mark_stack. */
-
- /* Lets process remaining tasks.
- NOTE:: this is the key difference from work-stealing, which uses
- same stack for both working and sharing. So it has no problem
- with remaining tasks in the shared stack. */
-
- /* to simplify the processing, we turn back to use a single stack for
- the remaining objects scanning. The assumption is, there are only limited
- tasks for processing, no need to share the tasks.
- FIXME:: a better way is to reduce the task block size into half till
- the size becomes one, then the collectors actually share a same stack */
-
- mark_task = (Vector_Block*)collector->mark_stack;
- MarkStack* mark_stack = new MarkStack();
-
- unsigned int* iter = vector_block_iterator_init(mark_task);
- while(!vector_block_iterator_end(mark_task,iter)){
- Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
- iter = vector_block_iterator_advance(mark_task,iter);
- mark_stack->push(p_obj);
- }
- /* put back the last task to the free pool */
- vector_block_clear(mark_task);
- pool_put_entry(metadata->free_task_pool, mark_task);
-
- collector->mark_stack = mark_stack;
- while(!mark_stack->empty()){
- Partial_Reveal_Object* p_obj = mark_stack->top();
- mark_stack->pop();
- scan_object_seq(collector, p_obj);
- }
-
- delete mark_stack;
- collector->mark_stack = NULL;
-
- /* put back last repointed refs set recorded during marking */
- pool_put_entry(metadata->collector_repset_pool, collector->rep_set);
- collector->rep_set = NULL;
-
- return;
-}
Index: vm/gc_gen/src/common/gc_common.cpp
===================================================================
--- vm/gc_gen/src/common/gc_common.cpp (revision 481733)
+++ vm/gc_gen/src/common/gc_common.cpp (working copy)
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/12/3
+ */
+
+#include "gc_common.h"
+#include "gc_metadata.h"
+#include "../thread/mutator.h"
+#include "../verify/verify_live_heap.h"
+
+extern Boolean NEED_BARRIER;
+extern unsigned int NUM_COLLECTORS;
+extern Boolean GC_VERIFY;
+extern unsigned int NOS_SIZE;
+extern Boolean NOS_PARTIAL_FORWARD;
+
+unsigned int HEAP_SIZE_DEFAULT = 256 * MB;
+unsigned int min_heap_size_bytes = 32 * MB;
+unsigned int max_heap_size_bytes = 0;
+
+static int get_int_property(const char *property_name)
+{
+ assert(property_name);
+ char *value = get_property(property_name, VM_PROPERTIES);
+ int return_value;
+ if (NULL != value)
+ {
+ return_value = atoi(value);
+ destroy_property_value(value);
+ }else{
+ printf("property value %s is not set\n", property_name);
+ exit(0);
+ }
+
+ return return_value;
+}
+
+static Boolean get_boolean_property(const char *property_name)
+{
+ assert(property_name);
+ char *value = get_property(property_name, VM_PROPERTIES);
+ if (NULL == value){
+ printf("property value %s is not set\n", property_name);
+ exit(0);
+ }
+
+ Boolean return_value;
+ if (0 == strcmp("no", value)
+ || 0 == strcmp("off", value)
+ || 0 == strcmp("false", value)
+ || 0 == strcmp("0", value))
+ {
+ return_value = FALSE;
+ }
+ else if (0 == strcmp("yes", value)
+ || 0 == strcmp("on", value)
+ || 0 == strcmp("true", value)
+ || 0 == strcmp("1", value))
+ {
+ return_value = TRUE;
+ }else{
+ printf("property value %s is not properly set\n", property_name);
+ exit(0);
+ }
+
+ destroy_property_value(value);
+ return return_value;
+}
+
+static size_t get_size_property(const char* name)
+{
+ char* size_string = get_property(name, VM_PROPERTIES);
+ size_t size = atol(size_string);
+ int sizeModifier = tolower(size_string[strlen(size_string) - 1]);
+ destroy_property_value(size_string);
+
+ size_t unit = 1;
+ switch (sizeModifier) {
+ case 'k': unit = 1024; break;
+ case 'm': unit = 1024 * 1024; break;
+ case 'g': unit = 1024 * 1024 * 1024;break;
+ }
+
+ size_t res = size * unit;
+ if (res / unit != size) {
+ /* overflow happened */
+ return 0;
+ }
+ return res;
+}
+
+void gc_parse_options()
+{
+ unsigned int max_heap_size = HEAP_SIZE_DEFAULT;
+ unsigned int min_heap_size = min_heap_size_bytes;
+
+ if (is_property_set("gc.mx", VM_PROPERTIES) == 1) {
+ max_heap_size = get_size_property("gc.mx");
+
+ if (max_heap_size < min_heap_size)
+ max_heap_size = min_heap_size;
+ if (0 == max_heap_size)
+ max_heap_size = HEAP_SIZE_DEFAULT;
+
+ min_heap_size = max_heap_size / 10;
+ if (min_heap_size < min_heap_size_bytes) min_heap_size = min_heap_size_bytes;
+ }
+
+ if (is_property_set("gc.ms", VM_PROPERTIES) == 1) {
+ min_heap_size = get_size_property("gc.ms");
+ if (min_heap_size < min_heap_size_bytes)
+ min_heap_size = min_heap_size_bytes;
+ }
+
+ if (min_heap_size > max_heap_size)
+ max_heap_size = min_heap_size;
+
+ min_heap_size_bytes = min_heap_size;
+ max_heap_size_bytes = max_heap_size;
+
+ if (is_property_set("gc.nos_size", VM_PROPERTIES) == 1) {
+ NOS_SIZE = get_size_property("gc.nos_size");
+ }
+
+ if (is_property_set("gc.num_collectors", VM_PROPERTIES) == 1) {
+ unsigned int num = get_int_property("gc.num_collectors");
+ NUM_COLLECTORS = (num==0)? NUM_COLLECTORS:num;
+ }
+
+ if (is_property_set("gc.gen_mode", VM_PROPERTIES) == 1) {
+ NEED_BARRIER = get_boolean_property("gc.gen_mode");
+ }
+
+ if (is_property_set("gc.nos_partial_forward", VM_PROPERTIES) == 1) {
+ NOS_PARTIAL_FORWARD = get_boolean_property("gc.nos_partial_forward");
+ }
+
+ if (is_property_set("gc.verify", VM_PROPERTIES) == 1) {
+ GC_VERIFY = get_boolean_property("gc.verify");
+ }
+
+ return;
+}
+
+struct GC_Gen;
+void gc_gen_reclaim_heap(GC_Gen* gc);
+unsigned int gc_decide_collection_kind(GC_Gen* gc, unsigned int gc_cause);
+
+void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
+{
+ gc->num_collections++;
+
+ gc->collect_kind = gc_decide_collection_kind((GC_Gen*)gc, gc_cause);
+ //gc->collect_kind = MAJOR_COLLECTION;
+
+ gc_metadata_verify(gc, TRUE);
+
+ /* Stop the threads and collect the roots. */
+ gc_reset_rootset(gc);
+ vm_enumerate_root_set_all_threads();
+ gc_set_rootset(gc);
+
+ if(verify_live_heap) gc_verify_heap(gc, TRUE);
+
+ gc_gen_reclaim_heap((GC_Gen*)gc);
+
+ if(verify_live_heap) gc_verify_heap(gc, FALSE);
+
+ gc_metadata_verify(gc, FALSE);
+
+ gc_reset_mutator_context(gc);
+ vm_resume_threads_after();
+
+ return;
+}
+
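
Two details of the property handling above are easy to miss. First, get_size_property detects a wrapped multiplication by dividing the product back and comparing, and returns 0 so gc_parse_options falls back to a default. Second, when only gc.mx is given, the minimum heap is derived as one tenth of the maximum but floored at 32 MB, so -Dgc.mx=256m yields min = 32 MB rather than 25.6 MB. A stripped-down sketch of the size parsing, with parse_size as a hypothetical stand-in for get_size_property minus the VM property plumbing:

#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>

static size_t parse_size(const char* s)    /* assumes a non-empty string */
{
    size_t size = (size_t)atol(s);
    size_t unit = 1;
    switch (tolower((unsigned char)s[strlen(s) - 1])) {
    case 'k': unit = 1024; break;
    case 'm': unit = 1024 * 1024; break;
    case 'g': unit = 1024 * 1024 * 1024; break;
    }
    size_t res = size * unit;
    if (res / unit != size) return 0;      /* multiplication wrapped: reject */
    return res;
}

int main()
{
    /* "256m" -> 268435456; on a 32-bit size_t, "8g" wraps and parses as 0,
       which the caller then replaces with a sane default */
    std::printf("%zu %zu\n", parse_size("256m"), parse_size("64k"));
    return 0;
}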
Index: vm/gc_gen/src/common/gc_for_vm.cpp
===================================================================
--- vm/gc_gen/src/common/gc_for_vm.cpp (revision 481733)
+++ vm/gc_gen/src/common/gc_for_vm.cpp (working copy)
@@ -23,96 +23,49 @@
#include "../gen/gen.h"
#include "interior_pointer.h"
+#include "../thread/collector.h"
+#include "../verify/verify_live_heap.h"
-unsigned int HEAP_SIZE_DEFAULT = 256 * MB;
-
-extern Boolean NEED_BARRIER;
-extern unsigned int NUM_COLLECTORS;
-extern Boolean GC_VERIFY;
-extern unsigned int NOS_SIZE;
-
-/* heap size limit is not interesting. only for manual tuning purpose */
-unsigned int min_heap_size_bytes = 32 * MB;
-unsigned int max_heap_size_bytes = 256 * MB;
-
-static size_t get_size_property(const char* name)
-{
- char* size_string = get_property(name, VM_PROPERTIES);
- size_t size = atol(size_string);
- int sizeModifier = tolower(size_string[strlen(size_string) - 1]);
- destroy_property_value(size_string);
-
- size_t unit = 1;
- switch (sizeModifier) {
- case 'k': unit = 1024; break;
- case 'm': unit = 1024 * 1024; break;
- case 'g': unit = 1024 * 1024 * 1024;break;
- }
-
- size_t res = size * unit;
- if (res / unit != size) {
- /* overflow happened */
- return 0;
- }
- return res;
-}
-
-static void parse_configuration_properties()
-{
- unsigned int max_heap_size = HEAP_SIZE_DEFAULT;
- unsigned int min_heap_size = min_heap_size_bytes;
-
- if (is_property_set("gc.mx", VM_PROPERTIES) == 1) {
- max_heap_size = get_size_property("gc.mx");
-
- if (max_heap_size < min_heap_size)
- max_heap_size = min_heap_size;
- if (0 == max_heap_size)
- max_heap_size = HEAP_SIZE_DEFAULT;
-
- min_heap_size = max_heap_size / 10;
- if (min_heap_size < min_heap_size_bytes) min_heap_size = min_heap_size_bytes;
- }
-
- if (is_property_set("gc.ms", VM_PROPERTIES) == 1) {
- min_heap_size = get_size_property("gc.ms");
- if (min_heap_size < min_heap_size_bytes)
- min_heap_size = min_heap_size_bytes;
- }
-
- if (min_heap_size > max_heap_size)
- max_heap_size = min_heap_size;
-
- min_heap_size_bytes = min_heap_size;
- max_heap_size_bytes = max_heap_size;
-
- if (is_property_set("gc.nos_size", VM_PROPERTIES) == 1) {
- NOS_SIZE = get_size_property("gc.nos_size");
- }
-
- NUM_COLLECTORS = get_int_property("gc.num_collectors", NUM_COLLECTORS, VM_PROPERTIES);
- NEED_BARRIER = get_boolean_property("gc.gen_mode", TRUE, VM_PROPERTIES);
- GC_VERIFY = get_boolean_property("gc.verify", FALSE, VM_PROPERTIES);
-
- return;
-}
-
static GC* p_global_gc = NULL;
+void gc_tls_init();
+
void gc_init()
{
- parse_configuration_properties();
-
+ gc_parse_options();
+
assert(p_global_gc == NULL);
GC* gc = (GC*)STD_MALLOC(sizeof(GC_Gen));
assert(gc);
memset(gc, 0, sizeof(GC));
p_global_gc = gc;
- gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes);
+ gc_tls_init();
+ gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes);
+
+ gc_metadata_initialize(gc); /* root set and mark stack */
+ collector_initialize(gc);
+ gc_init_heap_verification(gc);
+
return;
}
+void gc_wrapup()
+{
+ GC* gc = p_global_gc;
+ gc_gen_destruct((GC_Gen*)gc);
+ gc_metadata_destruct(gc); /* root set and mark stack */
+ collector_destruct(gc);
+
+ if( verify_live_heap ){
+ gc_terminate_heap_verification(gc);
+ }
+
+ STD_FREE(p_global_gc);
+
+ p_global_gc = NULL;
+}
+
/* this interface needs reconsidering. is_pinned is unused. */
void gc_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned)
{
@@ -133,16 +86,10 @@
void gc_force_gc()
{
vm_gc_lock_enum();
- gc_gen_reclaim_heap((GC_Gen*)p_global_gc, GC_CAUSE_RUNTIME_FORCE_GC);
+ gc_reclaim_heap(p_global_gc, GC_CAUSE_RUNTIME_FORCE_GC);
vm_gc_unlock_enum();
}
-void gc_wrapup()
-{
- gc_gen_destruct((GC_Gen*)p_global_gc);
- p_global_gc = NULL;
-}
-
void* gc_heap_base_address()
{ return gc_heap_base(p_global_gc); }
@@ -187,3 +134,4 @@
unsigned int gc_time_since_last_gc()
{ assert(0); return 0; }
+
Index: vm/gc_gen/src/common/gc_platform.h
===================================================================
--- vm/gc_gen/src/common/gc_platform.h (revision 481733)
+++ vm/gc_gen/src/common/gc_platform.h (working copy)
@@ -58,6 +58,11 @@
hythread_yield();
}
+inline void* vm_thread_local()
+{
+ return hythread_self();
+}
+
inline int vm_create_thread(int (*func)(void*), void *data)
{
hythread_t* ret_thread = NULL;
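
vm_thread_local() simply exposes the hythread handle so per-thread GC state (see the new gc_tls_init() call in gc_for_vm.cpp above) can be keyed off it. As a rough analogue only, not the hythread mechanism itself, standard C++ thread_local gives each thread its own lazily created slot:

#include <cstdio>
#include <thread>

struct AllocatorContext { int alloc_count = 0; };

/* one private context per thread, created on first touch */
static thread_local AllocatorContext tls_context;

static AllocatorContext* current_context() { return &tls_context; }

int main()
{
    std::thread t([]{ current_context()->alloc_count++; });
    current_context()->alloc_count += 2;
    t.join();
    /* prints 2: the worker incremented its own context, not ours */
    std::printf("main thread count: %d\n", current_context()->alloc_count);
    return 0;
}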
Index: vm/gc_gen/src/common/gc_for_class.cpp
===================================================================
--- vm/gc_gen/src/common/gc_for_class.cpp (revision 481733)
+++ vm/gc_gen/src/common/gc_for_class.cpp (working copy)
@@ -70,7 +70,7 @@
}
if( num_ref_fields )
- gcvt->gc_object_has_slots = true;
+ gcvt->gc_object_has_ref_field = true;
else
return NULL;
@@ -119,7 +119,7 @@
memset((void *)gcvt, 0, sizeof(GC_VTable_Info));
gcvt->gc_clss = ch;
gcvt->gc_class_properties = 0;
- gcvt->gc_object_has_slots = false;
+ gcvt->gc_object_has_ref_field = false;
gc_set_prop_alignment_mask(gcvt, class_get_alignment(ch));
@@ -133,7 +133,7 @@
if (class_is_non_ref_array (ch)) {
gc_set_prop_non_ref_array(gcvt);
}else{
- gcvt->gc_object_has_slots = true;
+ gcvt->gc_object_has_ref_field = true;
}
}
@@ -152,3 +152,4 @@
} /* gc_class_prepared */
+
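
The gc_object_has_slots to gc_object_has_ref_field rename narrows the flag to what the tracer actually cares about: a primitive array has element slots, but none of them can hold a reference. A hypothetical predicate capturing the same decision the two hunks above encode:

#include <cstdio>

struct ClassInfo {
    bool is_array;
    bool array_of_primitives;
    int  num_ref_fields;
};

static bool has_ref_field(const ClassInfo& c)
{
    if (c.is_array) return !c.array_of_primitives;  /* only ref arrays scan */
    return c.num_ref_fields > 0;                    /* scalar objects */
}

int main()
{
    ClassInfo int_array   = { true,  true,  0 };    /* e.g. int[]    */
    ClassInfo obj_array   = { true,  false, 0 };    /* e.g. Object[] */
    ClassInfo plain_class = { false, false, 0 };    /* no ref fields */
    std::printf("%d %d %d\n",
                has_ref_field(int_array),           /* 0 */
                has_ref_field(obj_array),           /* 1 */
                has_ref_field(plain_class));        /* 0 */
    return 0;
}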
Index: vm/gc_gen/src/common/gc_common.h
===================================================================
--- vm/gc_gen/src/common/gc_common.h (revision 481733)
+++ vm/gc_gen/src/common/gc_common.h (working copy)
@@ -22,8 +22,6 @@
#define _GC_COMMON_H_
#include
-#include
-#include
#include