Index: build/make/components/vm/gc_gen.xml
===================================================================
--- build/make/components/vm/gc_gen.xml (revision 479958)
+++ build/make/components/vm/gc_gen.xml (working copy)
@@ -52,7 +52,6 @@
-
-
-
Index: vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
===================================================================
--- vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (revision 479958)
+++ vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (working copy)
@@ -91,9 +91,9 @@
return;
}
-static unsigned int gc_collection_result(GC* gc)
+static Boolean gc_collection_result(GC* gc)
{
- unsigned int result = TRUE;
+ Boolean result = TRUE;
for(unsigned i=0; i<gc->num_active_collectors; i++){
Collector* collector = gc->collectors[i];
result &= collector->result;
@@ -144,24 +144,35 @@
static Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace)
{
Block_Header* cur_target_block = (Block_Header*)next_block_for_target;
+
/* firstly, we bump the next_block_for_target global var to the first non BLOCK_TARGET block
- This need not atomic op, because only one thread can own the next_block_for_target */
-
+ This needs no atomic op, because the global var is only a hint. */
while(cur_target_block->status == BLOCK_TARGET){
cur_target_block = cur_target_block->next;
}
next_block_for_target = cur_target_block;
+ /* cur_target_block now has status BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET. Reason:
+ any block after it is either BLOCK_TARGET, or:
+ 1. since cur_target_block <= cur_compact_block, we can at least get cur_compact_block as target;
+ 2. any block >= cur_target_block that a collector has touched satisfies
+ status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET).
+ */
+
/* nos is higher than mos, we can't use nos block for compaction target */
Block_Header* mspace_heap_end = (Block_Header*)space_heap_end((Space*)mspace);
- while(cur_target_block < mspace_heap_end){
+ while( cur_target_block < mspace_heap_end ){
+ assert( cur_target_block <= collector->cur_compact_block);
Block_Header* next_target_block = cur_target_block->next;
volatile unsigned int* p_block_status = &cur_target_block->status;
unsigned int block_status = cur_target_block->status;
- /* block status has to be BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET
- but we care only the BLOCK_COMPACTED ones or own BLOCK_IN_COMPACT */
- assert( block_status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET));
- /* if it is not BLOCK_COMPACTED, let's move on to next */
+ //assert( block_status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET));
+
+ /* if it is not BLOCK_COMPACTED, move on to the next, unless it is our own cur_compact_block */
if(block_status != BLOCK_COMPACTED){
if(cur_target_block == collector->cur_compact_block){
assert( block_status == BLOCK_IN_COMPACT);
@@ -169,10 +180,11 @@
collector->cur_target_block = cur_target_block;
return cur_target_block;
}
+ /* it is not our own cur_compact_block; it can be BLOCK_TARGET or another collector's cur_compact_block */
cur_target_block = next_target_block;
continue;
}
- /* ok, find the first BLOCK_COMPACTED before own compact block */
+ /* else, find a BLOCK_COMPACTED before own cur_compact_block */
unsigned int temp = atomic_cas32(p_block_status, BLOCK_TARGET, BLOCK_COMPACTED);
if(temp == BLOCK_COMPACTED){
collector->cur_target_block = cur_target_block;
@@ -212,7 +224,7 @@
return FALSE;
}
-static Boolean mspace_compute_object_target(Collector* collector, Mspace* mspace)
+static void mspace_compute_object_target(Collector* collector, Mspace* mspace)
{
Block_Header* curr_block = collector->cur_compact_block;
Block_Header* dest_block = collector->cur_target_block;
@@ -232,8 +244,8 @@
dest_block->free = dest_addr;
dest_block = mspace_get_next_target_block(collector, mspace);
if(dest_block == NULL){
- collector->result = 0;
- return FALSE;
+ collector->result = FALSE;
+ return;
}
dest_addr = GC_BLOCK_BODY(dest_block);
@@ -255,7 +267,7 @@
curr_block = mspace_get_next_compact_block(collector, mspace);
}
- return TRUE;
+ return;
}
#include "../verify/verify_live_heap.h"
Index: vm/gc_gen/src/utils/vector_block.h
===================================================================
--- vm/gc_gen/src/utils/vector_block.h (revision 479958)
+++ vm/gc_gen/src/utils/vector_block.h (working copy)
@@ -22,91 +22,19 @@
#define _VECTOR_BLOCK_H_
typedef struct Vector_Block{
- unsigned int* start; /* point to first entry, not needed actually */
- unsigned int* end; /* point to end of the block (right after the last entry) */
+ void* next; /* point to next block */
unsigned int* head; /* point to the first filled entry */
unsigned int* tail; /* point to the entry after the last filled one */
- unsigned int* entries[1];
-}Vector_Block;
-
-inline void vector_block_init(Vector_Block* block, unsigned int size)
-{
- block->start = (unsigned int*)block->entries;
- block->end = (unsigned int*)((unsigned int)block + size);
- block->head = block->start;
- block->tail = block->start;
- return;
-}
-
-inline unsigned int vector_block_entry_count(Vector_Block* block)
-{ return (unsigned int)(block->tail - block->head); }
-
-inline Boolean vector_block_is_full(Vector_Block* block)
-{ return block->tail == block->end; }
-
-inline void vector_block_add_entry(Vector_Block* block, unsigned int value)
-{
- assert(value && !*(block->tail));
- *(block->tail++) = value;
-}
-
-inline void vector_block_clear(Vector_Block* block)
-{
-#ifdef _DEBUG
- memset(block->start, 0, (block->end - block->start)*BYTES_PER_WORD);
-#endif
-
- block->tail = block->head;
-}
-
-/* Below is for sequential local access */
-inline unsigned int* vector_block_iterator_init(Vector_Block* block)
-{ return block->head; }
-
-inline unsigned int* vector_block_iterator_advance(Vector_Block* block, unsigned int* iter)
-{ return ++iter; }
-
-inline Boolean vector_block_iterator_end(Vector_Block* block, unsigned int* iter)
-{ return iter == block->tail; }
-
-#endif /* #ifndef _VECTOR_BLOCK_H_ */
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/25
- */
-
-#ifndef _VECTOR_BLOCK_H_
-#define _VECTOR_BLOCK_H_
-
-typedef struct Vector_Block{
- unsigned int* start; /* point to first entry, not needed actually */
unsigned int* end; /* point to end of the block (right after the last entry) */
- unsigned int* head; /* point to the first filled entry */
- unsigned int* tail; /* point to the entry after the last filled one */
unsigned int* entries[1];
}Vector_Block;
inline void vector_block_init(Vector_Block* block, unsigned int size)
{
- block->start = (unsigned int*)block->entries;
block->end = (unsigned int*)((unsigned int)block + size);
- block->head = block->start;
- block->tail = block->start;
+ block->head = (unsigned int*)block->entries;
+ block->tail = (unsigned int*)block->entries;
+ memset(block->head, 0, (block->end - block->head)*BYTES_PER_WORD);
return;
}
@@ -116,6 +44,9 @@
inline Boolean vector_block_is_full(Vector_Block* block)
{ return block->tail == block->end; }
+inline Boolean vector_block_is_empty(Vector_Block* block)
+{ return block->tail == block->head; }
+
inline void vector_block_add_entry(Vector_Block* block, unsigned int value)
{
assert(value && !*(block->tail));
@@ -125,7 +56,7 @@
inline void vector_block_clear(Vector_Block* block)
{
#ifdef _DEBUG
- memset(block->start, 0, (block->end - block->start)*BYTES_PER_WORD);
+ memset(block->entries, 0, (block->end - (unsigned int*)block->entries)*BYTES_PER_WORD);
#endif
block->tail = block->head;
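Annotation: with the new layout, head and tail point directly into the inline entries[] array, and the leading next field is what lets a Vector_Block sit on the intrusive Sync_Stack used by the pools (see sync_stack.h below). A hedged sketch of the fill-then-iterate cycle, assuming a raw allocation stands in for the real metadata space and VECTOR_BLOCK_SIZE_BYTES is a stand-in name:

    Vector_Block* block = (Vector_Block*)STD_MALLOC(VECTOR_BLOCK_SIZE_BYTES);
    vector_block_init(block, VECTOR_BLOCK_SIZE_BYTES);

    /* entries must be non-zero; init zeroed the storage so add_entry's assert holds */
    while(!vector_block_is_full(block))
        vector_block_add_entry(block, (unsigned int)some_ref); /* some_ref: any non-NULL value */

    /* sequential local iteration, the same shape the root-set scans use */
    unsigned int* iter = vector_block_iterator_init(block);
    while(!vector_block_iterator_end(block, iter)){
        unsigned int entry = *iter;
        iter = vector_block_iterator_advance(block, iter);
        /* ... process entry ... */
    }
    vector_block_clear(block); /* tail falls back to head; storage re-zeroed in _DEBUG */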
Index: vm/gc_gen/src/utils/sync_queue.h
===================================================================
--- vm/gc_gen/src/utils/sync_queue.h (revision 479958)
+++ vm/gc_gen/src/utils/sync_queue.h (working copy)
@@ -128,133 +128,3 @@
}
#endif /* _SYNC_QUEUE_H_ */
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#ifndef _SYNC_QUEUE_H_
-#define _SYNC_QUEUE_H_
-
-/* an implementation of MSQ. FIXME:: only work in 32-bit machine */
-
-struct Queue_Node;
-
-typedef struct Queue_Link{
- struct Queue_Node* ptr;
- unsigned int count;
-}Queue_Link;
-
-typedef struct Queue_Node{
- __declspec(align(8))
- Queue_Link next; /* must be aligned to 8Byte*/
- unsigned int* value;
-}Queue_Node;
-
-typedef struct Sync_Queue{
- __declspec(align(8))
- Queue_Link head; /* must be aligned to 8Byte*/
- Queue_Link tail;
-}Sync_Queue;
-
-inline Queue_Node * new_queue_node()
-{
- Queue_Node* p_node = malloc(sizeof(Queue_Node));
- assert( (unsigned int)node%8 == 0 );
- return p_node;
-}
-
-inline void free_queue_node(Queue_Node* node)
-{ free( node ); }
-
-inline void sync_queue_init(Sync_Queue *queue)
-{
- Queue_Node *node = new_queue_node();
- node->next.ptr = NULL;
- node->next.count = 0;
- queue->head.ptr = queue->tail.ptr = node;
- queue->head.count = queue->tail.count = 0;
- return;
-}
-
-#define QLINK_PTR(x) ((unsigned long long*)&(x))
-#define QLINK_VAL(x) (*(QLINK_PTR(x)))
-
-inline void sync_queue_push(Sync_Queue* queue, unsigned int* value)
-{
- Queue_Link tail, next, tmp1, tmp2;
- Queue_Node* node = new_queue_node();
- node->value = value;
- node->next.ptr = NULL;
- while(TRUE){
- QLINK_VAL(tail) = QLINK_VAL(queue->tail);
- QLINK_VAL(next) = QLINK_VAL(tail.ptr->next);
- if( QLINK_VAL(tail) == QLINK_VAL(queue->tail)){
- if( next.ptr==NULL ){
- tmp1.ptr = node;
- tmp1.count = next.count + 1;
- node->next.count = tmp1.count;
- QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(tail.ptr->next), QLINK_VAL(next), QLINK_VAL(tmp1))
- if( QLINK_VAL(tmp1) == QLINK_VAL(tmp2))
- break;
-
- }else{
- tmp1.ptr = next.ptr;
- tmp1.count = tail.count + 1;
- atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
- }
- }
- }
- tmp1.ptr = node;
- tmp1.count = tail.count + 1;
- atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
- return;
-}
-
-Boolean sync_queue_pull(Sync_Queue* queue, unsigned int * pvalue)
-{
- Queue_Link head, tail, next, tmp1, tmp2;
- while(TRUE){
- QLINK_VAL(head) = QLINK_VAL(queue->head);
- QLINK_VAL(tail) = QLINK_VAL(queue->tail);
- QLINK_VAL(next) = QLINK_VAL(head.ptr->next);
-
- if( QLINK_VAL(head) == QLINK_VAL(queue->head)){
- if( head.ptr== tail.ptr )
- if( next.ptr == NULL )
- return FALSE;
- else{
- tmp1.ptr = next.ptr;
- tmp1.count = tail.count+1;
- atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
- }
- else{
- *pvalue = next.ptr->value;
- tmp1.ptr = next.ptr;
- tmp1.count = head.count+1;
- QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(queue->head), QLINK_VAL(head), QLINK_VAL(tmp1));
- if( QLINK_VAL(tmp2) == QLINK_VAL(tmp1))
- break;
- }
- }
- }
- free( head.ptr );
- return TRUE;
-}
-
-#endif /* _SYNC_QUEUE_H_ */
Index: vm/gc_gen/src/utils/sync_stack.h
===================================================================
--- vm/gc_gen/src/utils/sync_stack.h (revision 479958)
+++ vm/gc_gen/src/utils/sync_stack.h (working copy)
@@ -21,20 +21,22 @@
#ifndef _SYNC_STACK_H_
#define _SYNC_STACK_H_
+typedef struct Node{
+ Node* next;
+}Node;
+
typedef struct Sync_Stack{
- unsigned int* top; /* pointing to the first filled entry */
- unsigned int* cur; /* pointing to the current accessed entry */
- unsigned int* bottom; /* pointing to the pos right after the last entry */
- unsigned int entries[1];
+ Node* top; /* pointing to the first filled entry */
+ Node* cur; /* pointing to the current accessed entry, only for iterator */
}Sync_Stack;
-inline Sync_Stack* sync_stack_init(unsigned int num_entries)
+inline Sync_Stack* sync_stack_init()
{
- unsigned int size = ((num_entries-1) << 2) + sizeof(Sync_Stack);
+ unsigned int size = sizeof(Sync_Stack);
Sync_Stack* stack = (Sync_Stack*)STD_MALLOC(size);
memset(stack, 0, size);
- stack->bottom = &(stack->entries[num_entries]);
- stack->top = stack->bottom;
+ stack->cur = NULL;
+ stack->top = NULL;
return stack;
}
@@ -50,177 +52,55 @@
return;
}
-inline unsigned int sync_stack_iterate_next(Sync_Stack* stack)
+inline Node* sync_stack_iterate_next(Sync_Stack* stack)
{
- unsigned int* entry = stack->cur;
- unsigned int* new_entry = entry + 1;
- unsigned int* last_entry = stack->bottom - 1;
- while ( entry <= last_entry ){
- unsigned int* temp = (unsigned int*)atomic_casptr((volatile void**)&stack->cur, new_entry, entry);
+ Node* entry = stack->cur;
+ while ( entry != NULL ){
+ Node* new_entry = entry->next;
+ Node* temp = (Node*)atomic_casptr((volatile void**)&stack->cur, new_entry, entry);
if(temp == entry){ /* got it */
- return *entry;
+ return entry;
}
entry = stack->cur;
- new_entry = entry + 1;
}
- return 0;
+ return NULL;
}
-inline unsigned int sync_stack_pop(Sync_Stack* stack)
+inline Node* sync_stack_pop(Sync_Stack* stack)
{
- volatile unsigned int* entry = stack->top;
- unsigned int* new_entry = stack->top + 1;
- unsigned int* last_entry = stack->bottom - 1;
- while ( entry <= last_entry ){
- unsigned int* temp = (unsigned int*)atomic_casptr((volatile void**)&stack->top, new_entry, (const void*)entry);
+ Node* entry = stack->top;
+ while( entry != NULL ){
+ Node* new_entry = entry->next;
+ Node* temp = (Node*)atomic_casptr((volatile void**)&stack->top, new_entry, entry);
if(temp == entry){ /* got it */
- while(!*entry); /* has to have something */
- unsigned int result = *entry;
- *entry = NULL; /* put NULL into it */
- return result;
+ return entry;
}
- entry = (volatile unsigned int*)stack->top;
- new_entry = (unsigned int*)(entry + 1);
- }
- return 0;
-}
-
-inline Boolean sync_stack_push(Sync_Stack* stack, unsigned int value)
-{
- unsigned int* entry = stack->top;
- volatile unsigned int* new_entry = stack->top - 1;
- unsigned int* first_entry = stack->entries;
- while ( entry >= first_entry ){
- unsigned int* temp = (unsigned int*)atomic_casptr((volatile void**)&stack->top, (void*)new_entry, entry);
- if(temp == entry){ /* got it */
- while(*new_entry); /* has to be NULL before filled */
- *new_entry = value;
- return TRUE;
- }
entry = stack->top;
- new_entry = entry - 1;
- }
- return FALSE;
-}
-
-/* it does not matter whether this is atomic or not, because
- it is only invoked when there is no contention or only for rough idea */
-inline unsigned int stack_entry_count(Sync_Stack* stack)
-{
- return (stack->bottom - stack->top);
-}
-
-#endif /* _SYNC_STACK_H_ */
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/25
- */
-
-#ifndef _SYNC_STACK_H_
-#define _SYNC_STACK_H_
-
-typedef struct Sync_Stack{
- unsigned int* top; /* pointing to the first filled entry */
- unsigned int* cur; /* pointing to the current accessed entry */
- unsigned int* bottom; /* pointing to the pos right after the last entry */
- unsigned int entries[1];
-}Sync_Stack;
-
-inline Sync_Stack* sync_stack_init(unsigned int num_entries)
-{
- unsigned int size = ((num_entries-1) << 2) + sizeof(Sync_Stack);
- Sync_Stack* stack = (Sync_Stack*)STD_MALLOC(size);
- memset(stack, 0, size);
- stack->bottom = &(stack->entries[num_entries]);
- stack->top = stack->bottom;
- return stack;
-}
-
-inline void sync_stack_destruct(Sync_Stack* stack)
-{
- STD_FREE(stack);
- return;
-}
-
-inline void sync_stack_iterate_init(Sync_Stack* stack)
-{
- stack->cur = stack->top;
- return;
-}
-
-inline unsigned int sync_stack_iterate_next(Sync_Stack* stack)
-{
- unsigned int* entry = stack->cur;
- unsigned int* new_entry = entry + 1;
- unsigned int* last_entry = stack->bottom - 1;
- while ( entry <= last_entry ){
- unsigned int* temp = (unsigned int*)atomic_casptr((volatile void**)&stack->cur, new_entry, entry);
- if(temp == entry){ /* got it */
- return *entry;
- }
- entry = stack->cur;
- new_entry = entry + 1;
}
return 0;
}
-inline unsigned int sync_stack_pop(Sync_Stack* stack)
+inline Boolean sync_stack_push(Sync_Stack* stack, Node* node)
{
- volatile unsigned int* entry = stack->top;
- unsigned int* new_entry = stack->top + 1;
- unsigned int* last_entry = stack->bottom - 1;
- while ( entry <= last_entry ){
- unsigned int* temp = (unsigned int*)atomic_casptr((volatile void**)&stack->top, new_entry, (const void*)entry);
- if(temp == entry){ /* got it */
- while(!*entry); /* has to have something */
- unsigned int result = *entry;
- *entry = NULL; /* put NULL into it */
- return result;
- }
- entry = (volatile unsigned int*)stack->top;
- new_entry = (unsigned int*)(entry + 1);
- }
- return 0;
-}
-
-inline Boolean sync_stack_push(Sync_Stack* stack, unsigned int value)
-{
- unsigned int* entry = stack->top;
- volatile unsigned int* new_entry = stack->top - 1;
- unsigned int* first_entry = stack->entries;
- while ( entry >= first_entry ){
- unsigned int* temp = (unsigned int*)atomic_casptr((volatile void**)&stack->top, (void*)new_entry, entry);
+ Node* entry = stack->top;
+ node->next = entry;
+
+ while( TRUE ){
+ Node* temp = (Node*)atomic_casptr((volatile void**)&stack->top, node, entry);
if(temp == entry){ /* got it */
- while(*new_entry); /* has to be NULL before filled */
- *new_entry = value;
return TRUE;
}
entry = stack->top;
- new_entry = entry - 1;
+ node->next = entry;
}
return FALSE;
}
/* it does not matter whether this is atomic or not, because
it is only invoked when there is no contention or only for rough idea */
-inline unsigned int stack_entry_count(Sync_Stack* stack)
+inline Boolean stack_is_empty(Sync_Stack* stack)
{
- return (stack->bottom - stack->top);
+ return (stack->top == NULL);
}
#endif /* _SYNC_STACK_H_ */
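Annotation: this rewrite turns Sync_Stack into a classic Treiber stack over intrusive nodes: push CASes the new node in front of top, pop swings top to entry->next. Unlike the versioned links in the deleted sync_queue.h MSQ, there is no ABA counter, so correctness relies on the GC's usage pattern (a popped block is not re-pushed while a racing pop still holds a stale next). Any structure whose first field is the next pointer can live on the stack, which is how Vector_Block is stored; a small sketch with a hypothetical payload type:

    typedef struct Payload{
        Node node;          /* first field, so Payload* casts to Node* */
        unsigned int data;
    }Payload;

    void example(Sync_Stack* stack, Payload* p)
    {
        sync_stack_push(stack, (Node*)p);               /* intrusive: no allocation */
        Payload* q = (Payload*)sync_stack_pop(stack);   /* NULL once the stack drains */
        (void)q;
    }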
Index: vm/gc_gen/src/utils/sync_pool.h
===================================================================
--- vm/gc_gen/src/utils/sync_pool.h (revision 479958)
+++ vm/gc_gen/src/utils/sync_pool.h (working copy)
@@ -25,70 +25,14 @@
typedef Sync_Stack Pool;
-inline Pool* sync_pool_create(unsigned int size){ return sync_stack_init(size); }
+inline Pool* sync_pool_create(){ return sync_stack_init(); }
inline void sync_pool_destruct(Pool* pool){ sync_stack_destruct(pool); }
-inline Boolean pool_is_empty(Pool* pool){ return stack_entry_count(pool)==0;}
-inline Vector_Block* pool_get_entry(Pool* pool)
-{
- Vector_Block* block = (Vector_Block*)sync_stack_pop(pool);
- assert( !block || (block->start == (unsigned int*)block->entries) );
- assert( !block || (block->head <= block->tail && block->tail <= block->end));
-
- return block;
-}
-
-inline void pool_put_entry(Pool* pool, void* value){ assert(value); Boolean ok = sync_stack_push(pool, (unsigned int)value); assert(ok);}
-
+inline Boolean pool_is_empty(Pool* pool){ return stack_is_empty(pool);}
+inline Vector_Block* pool_get_entry(Pool* pool){ return (Vector_Block*)sync_stack_pop(pool); }
+inline void pool_put_entry(Pool* pool, void* value){ assert(value); Boolean ok = sync_stack_push(pool, (Node*)value); assert(ok);}
inline void pool_iterator_init(Pool* pool){ sync_stack_iterate_init(pool);}
inline Vector_Block* pool_iterator_next(Pool* pool){ return (Vector_Block*)sync_stack_iterate_next(pool);}
#endif /* #ifndef _SYNC_POOL_H_ */
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/25
- */
-
-#ifndef _SYNC_POOL_H_
-#define _SYNC_POOL_H_
-
-#include "sync_stack.h"
-
-typedef Sync_Stack Pool;
-
-inline Pool* sync_pool_create(unsigned int size){ return sync_stack_init(size); }
-inline void sync_pool_destruct(Pool* pool){ sync_stack_destruct(pool); }
-
-inline Boolean pool_is_empty(Pool* pool){ return stack_entry_count(pool)==0;}
-inline Vector_Block* pool_get_entry(Pool* pool)
-{
- Vector_Block* block = (Vector_Block*)sync_stack_pop(pool);
- assert( !block || (block->start == (unsigned int*)block->entries) );
- assert( !block || (block->head <= block->tail && block->tail <= block->end));
-
- return block;
-}
-
-inline void pool_put_entry(Pool* pool, void* value){ assert(value); Boolean ok = sync_stack_push(pool, (unsigned int)value); assert(ok);}
-
-inline void pool_iterator_init(Pool* pool){ sync_stack_iterate_init(pool);}
-inline Vector_Block* pool_iterator_next(Pool* pool){ return (Vector_Block*)sync_stack_iterate_next(pool);}
-
-#endif /* #ifndef _SYNC_POOL_H_ */
-
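Annotation: a Pool is now just this stack of Vector_Blocks, so the old start/head/tail sanity asserts in pool_get_entry have nothing left to check. A round-trip sketch, assuming `block` was initialized as in the vector_block.h example above:

    Pool* pool = sync_pool_create();

    pool_put_entry(pool, (void*)block);          /* producer publishes a block */

    Vector_Block* taken = pool_get_entry(pool);  /* consumer; NULL when drained */
    if(taken != NULL){
        /* ... consume the block, then recycle it into a free pool ... */
    }
    sync_pool_destruct(pool);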
Index: vm/gc_gen/src/trace_forward/fspace.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace.cpp (revision 479958)
+++ vm/gc_gen/src/trace_forward/fspace.cpp (working copy)
@@ -176,13 +176,12 @@
GC* gc = fspace->gc;
- pool_iterator_init(gc->metadata->gc_rootset_pool);
-
if(gc_requires_barriers()){
/* generational GC. Only trace (mark) nos */
collector_execute_task(gc, (TaskType)trace_forward_fspace, (Space*)fspace);
}else{
/* non-generational GC. Mark the whole heap (nos, mos, and los) */
+ pool_iterator_init(gc->metadata->gc_rootset_pool);
collector_execute_task(gc, (TaskType)mark_copy_fspace, (Space*)fspace);
}
Index: vm/gc_gen/src/trace_forward/fspace_collect_forward_seq.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace_collect_forward_seq.cpp (revision 0)
+++ vm/gc_gen/src/trace_forward/fspace_collect_forward_seq.cpp (revision 0)
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "fspace.h"
+#include "../thread/collector.h"
+#include "../common/gc_metadata.h"
+
+static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
+{
+ assert(obj_belongs_to_space(p_obj, (Space*)fspace));
+ return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
+}
+
+static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref)
+{
+ Partial_Reveal_Object *p_obj = *p_ref;
+ TraceStack *ts = collector->trace_stack;
+
+ if (p_obj == NULL) return;
+
+ /* the slot can be in tspace or fspace, we don't care.
+ we care only if the reference in the slot is pointing to fspace */
+ if (obj_belongs_to_space(p_obj, collector->collect_space)) {
+ ts->push(p_ref);
+ }
+
+ return;
+}
+
+static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
+{
+ if (!object_has_slots(p_obj)) return;
+
+ void *slot;
+
+ /* scan array object */
+ if (object_is_array(p_obj)) {
+ Partial_Reveal_Object* array = p_obj;
+ assert(!obj_is_primitive_array(array));
+
+ int32 array_length = vector_get_length((Vector_Handle) array);
+ for (int i = 0; i < array_length; i++) {
+ slot = vector_get_element_address_ref((Vector_Handle) array, i);
+ scan_slot(collector, (Partial_Reveal_Object **)slot);
+ }
+ return;
+ }
+
+ /* scan non-array object */
+ int *offset_scanner = init_object_scanner(p_obj);
+ while (true) {
+ slot = offset_get_ref(offset_scanner, p_obj);
+ if (slot == NULL) break;
+
+ scan_slot(collector, (Partial_Reveal_Object **)slot);
+ offset_scanner = offset_next_ref(offset_scanner);
+ }
+
+ return;
+}
+
+/* At this point, p_ref can be anywhere: in the root set, in other spaces, etc.,
+ * but *p_ref must be in fspace, since only slots that point to
+ * objects in fspace are ever pushed onto the TraceStack */
+#include "../verify/verify_live_heap.h"
+
+void trace_object_seq(Collector* collector, Partial_Reveal_Object **p_ref)
+{
+ Space* space = collector->collect_space;
+ Partial_Reveal_Object *p_obj = *p_ref;
+
+ assert(p_obj);
+ assert(obj_belongs_to_space(p_obj, space));
+
+ /* Fastpath: object has already been forwarded, update the ref slot */
+ if(obj_is_forwarded_in_vt(p_obj)) {
+ assert(!obj_is_marked_in_vt(p_obj));
+ *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
+ return;
+ }
+
+ /* only mark the objects that will remain in fspace */
+ if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) {
+ assert(!obj_is_forwarded_in_vt(p_obj));
+ /* this obj remains in fspace, remember its ref slot for next GC. */
+ if( !address_belongs_to_space(p_ref, space) ){
+ collector_remset_add_entry(collector, p_ref);
+ }
+
+ if(fspace_mark_object((Fspace*)space, p_obj))
+ scan_object(collector, p_obj);
+
+ return;
+ }
+
+ /* following is the logic for forwarding */
+ Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj);
+
+ /* if it has already been forwarded, that is ok */
+ if(!p_target_obj){
+ *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
+ return;
+ }
+ /* otherwise, we successfully forwarded */
+ *p_ref = p_target_obj;
+
+ /* we forwarded it; we need to remember it for verification. FIXME:: thread id */
+ if(verify_live_heap) {
+ event_collector_move_obj(p_obj, p_target_obj, collector);
+ }
+
+ scan_object(collector, p_target_obj);
+ return;
+}
+
+/* trace the root references from root set and remember sets */
+static void trace_root(Collector* collector, Partial_Reveal_Object **ref)
+{
+ assert(*ref);
+ assert(obj_belongs_to_space(*ref, collector->collect_space));
+
+ TraceStack *ts = collector->trace_stack;
+ ts->push(ref);
+
+ while(!ts->empty()) {
+ Partial_Reveal_Object **p_ref = ts->top();
+ ts->pop();
+ assert(p_ref);
+ trace_object_seq(collector, p_ref);
+ }
+}
+
+static void collector_trace_rootsets(Collector* collector)
+{
+ GC_Metadata* metadata = collector->gc->metadata;
+
+ Space* space = collector->collect_space;
+ collector->trace_stack = new TraceStack();
+
+ /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */
+ Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool);
+
+ while(root_set){
+ unsigned int* iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+
+ assert(p_ref);
+ if(*p_ref == NULL) continue;
+ if (obj_belongs_to_space(*p_ref, space))
+ trace_root(collector, p_ref);
+ }
+ vector_block_clear(root_set);
+ pool_put_entry(metadata->free_set_pool, root_set);
+ root_set = pool_get_entry(metadata->gc_rootset_pool);
+ }
+
+ delete collector->trace_stack;
+
+ return;
+}
+
+void update_rootset_interior_pointer();
+
+static void update_relocated_refs(Collector* collector)
+{
+ update_rootset_interior_pointer();
+}
+
+void trace_forward_fspace_seq(Collector* collector)
+{
+ GC* gc = collector->gc;
+ Fspace* space = (Fspace*)collector->collect_space;
+
+ /* FIXME:: Single-threaded trace-forwarding for fspace currently */
+
+ collector_trace_rootsets(collector);
+
+ update_relocated_refs(collector);
+ reset_fspace_for_allocation(space);
+
+ return;
+
+}
+
+
+
Index: vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (revision 479958)
+++ vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (working copy)
@@ -1,207 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#include "fspace.h"
-#include "../thread/collector.h"
-#include "../common/gc_metadata.h"
-
-static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
-{
- assert(obj_belongs_to_space(p_obj, (Space*)fspace));
- return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
-}
-
-static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref)
-{
- Partial_Reveal_Object *p_obj = *p_ref;
- TraceStack *ts = collector->trace_stack;
-
- if (p_obj == NULL) return;
-
- /* the slot can be in tspace or fspace, we don't care.
- we care only if the reference in the slot is pointing to fspace */
- if (obj_belongs_to_space(p_obj, collector->collect_space)) {
- ts->push(p_ref);
- }
-
- return;
-}
-
-static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
-{
- if (!object_has_slots(p_obj)) return;
-
- void *slot;
-
- /* scan array object */
- if (object_is_array(p_obj)) {
- Partial_Reveal_Object* array = p_obj;
- assert(!obj_is_primitive_array(array));
-
- int32 array_length = vector_get_length((Vector_Handle) array);
- for (int i = 0; i < array_length; i++) {
- slot = vector_get_element_address_ref((Vector_Handle) array, i);
- scan_slot(collector, (Partial_Reveal_Object **)slot);
- }
- return;
- }
-
- /* scan non-array object */
- int *offset_scanner = init_object_scanner(p_obj);
- while (true) {
- slot = offset_get_ref(offset_scanner, p_obj);
- if (slot == NULL) break;
-
- scan_slot(collector, (Partial_Reveal_Object **)slot);
- offset_scanner = offset_next_ref(offset_scanner);
- }
-
- return;
-}
-
-/* At this point, p_ref can be in anywhere like root, and other spaces,
- * but *p_ref must be in fspace, since only slot which points to
- * object in fspace could be added into TraceStack */
-#include "../verify/verify_live_heap.h"
-
-static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref)
-{
- Space* space = collector->collect_space;
- Partial_Reveal_Object *p_obj = *p_ref;
-
- assert(p_obj);
- assert(obj_belongs_to_space(p_obj, space));
-
- /* Fastpath: object has already been forwarded, update the ref slot */
- if(obj_is_forwarded_in_vt(p_obj)) {
- assert(!obj_is_marked_in_vt(p_obj));
- *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
- return;
- }
-
- /* only mark the objects that will remain in fspace */
- if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) {
- assert(!obj_is_forwarded_in_vt(p_obj));
- /* this obj remains in fspace, remember its ref slot for next GC. */
- if( !address_belongs_to_space(p_ref, space) ){
- collector_remset_add_entry(collector, p_ref);
- }
-
- if(fspace_mark_object((Fspace*)space, p_obj))
- scan_object(collector, p_obj);
-
- return;
- }
-
- /* following is the logic for forwarding */
- Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj);
-
- /* if it is forwarded by other already, it is ok */
- if(!p_target_obj){
- *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
- return;
- }
- /* otherwise, we successfully forwarded */
- *p_ref = p_target_obj;
-
- /* we forwarded it, we need remember it for verification. FIXME:: thread id */
- if(verify_live_heap) {
- event_collector_move_obj(p_obj, p_target_obj, collector);
- }
-
- scan_object(collector, p_target_obj);
- return;
-}
-
-/* trace the root references from root set and remember sets */
-void trace_root(Collector* collector, Partial_Reveal_Object **ref)
-{
- assert(*ref);
- assert(obj_belongs_to_space(*ref, collector->collect_space));
-
- TraceStack *ts = collector->trace_stack;
- ts->push(ref);
-
- while(!ts->empty()) {
- Partial_Reveal_Object **p_ref = ts->top();
- ts->pop();
- assert(p_ref);
- trace_object(collector, p_ref);
- }
-}
-
-static void collector_trace_rootsets(Collector* collector)
-{
- GC_Metadata* metadata = collector->gc->metadata;
-
- Space* space = collector->collect_space;
- HashSet remslot_hash;
-
- /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */
- pool_iterator_init(metadata->gc_rootset_pool);
- Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
-
- while(root_set){
- unsigned int* iter = vector_block_iterator_init(root_set);
- while(!vector_block_iterator_end(root_set,iter)){
- Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
- iter = vector_block_iterator_advance(root_set,iter);
-
- assert(p_ref);
- if(*p_ref == NULL) continue;
- if (obj_belongs_to_space(*p_ref, space)) {
- if (remslot_hash.find(p_ref) == remslot_hash.end()) {
- remslot_hash.insert(p_ref);
- trace_root(collector, p_ref);
- }
- }
- }
- pool_put_entry(metadata->free_set_pool, root_set);
- root_set = pool_iterator_next(metadata->gc_rootset_pool);
- }
-
- return;
-}
-
-void update_rootset_interior_pointer();
-
-static void update_relocated_refs(Collector* collector)
-{
- update_rootset_interior_pointer();
-}
-
-void trace_forward_fspace(Collector* collector)
-{
- GC* gc = collector->gc;
- Fspace* space = (Fspace*)collector->collect_space;
-
- /* FIXME:: Single-threaded trace-forwarding for fspace currently */
-
- collector_trace_rootsets(collector);
-
- update_relocated_refs(collector);
- reset_fspace_for_allocation(space);
-
- return;
-
-}
-
-
-
Index: vm/gc_gen/src/trace_forward/fspace_collect_forward_par.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace_collect_forward_par.cpp (revision 0)
+++ vm/gc_gen/src/trace_forward/fspace_collect_forward_par.cpp (revision 0)
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "fspace.h"
+#include "../thread/collector.h"
+#include "../common/gc_metadata.h"
+
+static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
+{
+ assert(obj_belongs_to_space(p_obj, (Space*)fspace));
+ return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
+}
+
+static void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref)
+{
+ Partial_Reveal_Object *p_obj = *p_ref;
+ if (p_obj == NULL) return;
+
+ /* the slot can be in tspace or fspace, we don't care.
+ we care only if the reference in the slot is pointing to fspace */
+ if (obj_belongs_to_space(p_obj, collector->collect_space))
+ collector_tracetask_add_entry(collector, p_ref);
+
+ return;
+}
+
+static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
+{
+ if (!object_has_slots(p_obj)) return;
+
+ void *slot;
+
+ /* scan array object */
+ if (object_is_array(p_obj)) {
+ Partial_Reveal_Object* array = p_obj;
+ assert(!obj_is_primitive_array(array));
+
+ int32 array_length = vector_get_length((Vector_Handle) array);
+ for (int i = 0; i < array_length; i++) {
+ slot = vector_get_element_address_ref((Vector_Handle) array, i);
+ scan_slot(collector, (Partial_Reveal_Object **)slot);
+ }
+ return;
+ }
+
+ /* scan non-array object */
+ int *offset_scanner = init_object_scanner(p_obj);
+ while (true) {
+ slot = offset_get_ref(offset_scanner, p_obj);
+ if (slot == NULL) break;
+
+ scan_slot(collector, (Partial_Reveal_Object **)slot);
+ offset_scanner = offset_next_ref(offset_scanner);
+ }
+
+ return;
+}
+
+/* At this point, p_ref can be anywhere: in the root set, in other spaces, etc.,
+ * but *p_ref must be in fspace, since only slots that point to
+ * objects in fspace are ever pushed onto the TraceStack */
+#include "../verify/verify_live_heap.h"
+
+static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref)
+{
+ Space* space = collector->collect_space;
+ Partial_Reveal_Object *p_obj = *p_ref;
+
+ assert(p_obj);
+ /* this assert is no longer valid for parallel forwarding: the remset may hold duplicate p_refs
+ traced by different collectors. Both can check that p_obj is in fspace and push it onto their
+ trace stacks; then one thread forwards it before the other reaches this assert.
+ assert(obj_belongs_to_space(p_obj, space)); */
+
+ /* Fastpath: object has already been forwarded, update the ref slot */
+ if(obj_is_forwarded_in_vt(p_obj)) {
+ assert(!obj_is_marked_in_vt(p_obj));
+ *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
+ return;
+ }
+
+ /* only mark the objects that will remain in fspace */
+ if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) {
+ assert(!obj_is_forwarded_in_vt(p_obj));
+ /* this obj remains in fspace, remember its ref slot for next GC. */
+ if( !address_belongs_to_space(p_ref, space) )
+ collector_remset_add_entry(collector, p_ref);
+
+ if(fspace_mark_object((Fspace*)space, p_obj))
+ scan_object(collector, p_obj);
+
+ return;
+ }
+
+ /* following is the logic for forwarding */
+ Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj);
+
+ /* if it has already been forwarded by another collector, that is ok */
+ if( p_target_obj == NULL ){
+ *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
+ return;
+ }
+ /* otherwise, we successfully forwarded */
+ *p_ref = p_target_obj;
+
+ /* we forwarded it; we need to remember it for verification. FIXME:: thread id */
+ if(verify_live_heap) {
+ event_collector_move_obj(p_obj, p_target_obj, collector);
+ }
+
+ scan_object(collector, p_target_obj);
+ return;
+}
+
+void trace_object_seq(Collector* collector, Partial_Reveal_Object **p_ref);
+
+/* for tracing phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+static void collector_trace_rootsets(Collector* collector)
+{
+ GC* gc = collector->gc;
+ GC_Metadata* metadata = gc->metadata;
+
+ Space* space = collector->collect_space;
+ collector->trace_stack = (TraceStack*)pool_get_entry(metadata->free_set_pool);
+ //collector->trace_stack = new TraceStack();
+
+ unsigned int num_active_collectors = gc->num_active_collectors;
+ atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
+
+retry:
+ /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */
+ Vector_Block* root_set = pool_get_entry(metadata->gc_rootset_pool);
+
+ while(root_set){
+ unsigned int* iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+
+ assert(p_ref);
+ if(*p_ref == NULL) continue;
+ /* in the sequential version we trace each object only once, via a local hashset; a local
+ hashset cannot catch repetition across multiple collectors. This is subject to more study. */
+ if (obj_belongs_to_space(*p_ref, space))
+ trace_object(collector, p_ref);
+ }
+ vector_block_clear(root_set);
+ pool_put_entry(metadata->free_set_pool, root_set);
+ root_set = pool_get_entry(metadata->gc_rootset_pool);
+
+ }
+
+ atomic_inc32(&num_finished_collectors);
+ while(num_finished_collectors != num_active_collectors){
+ if( !pool_is_empty(metadata->gc_rootset_pool)){
+ atomic_dec32(&num_finished_collectors);
+ goto retry;
+ }
+ }
+
+
+ /* now we are done, but each collector has a private task block to deal with */
+ Vector_Block* trace_task = (Vector_Block*)collector->trace_stack;
+ TraceStack* trace_stack = new TraceStack();
+
+ unsigned int* iter = vector_block_iterator_init(trace_task);
+ while(!vector_block_iterator_end(trace_task,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)*iter;
+ iter = vector_block_iterator_advance(trace_task,iter);
+ trace_stack->push(p_ref);
+ }
+
+ /* put back the last task to the free pool */
+ vector_block_clear(trace_task);
+ pool_put_entry(metadata->free_set_pool, trace_task);
+
+ collector->trace_stack = trace_stack;
+ while(!trace_stack->empty()){
+ Partial_Reveal_Object** p_ref = trace_stack->top();
+ trace_stack->pop();
+ trace_object_seq(collector, p_ref);
+ }
+
+ delete trace_stack;
+ collector->trace_stack = NULL;
+
+ return;
+}
+
+void update_rootset_interior_pointer();
+
+static void update_relocated_refs(Collector* collector)
+{
+ update_rootset_interior_pointer();
+}
+
+static volatile unsigned int num_marking_collectors = 0;
+
+void trace_forward_fspace(Collector* collector)
+{
+ GC* gc = collector->gc;
+ Fspace* space = (Fspace*)collector->collect_space;
+
+ unsigned int num_active_collectors = gc->num_active_collectors;
+ unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
+
+ collector_trace_rootsets(collector);
+
+ old_num = atomic_inc32(&num_marking_collectors);
+ if( ++old_num == num_active_collectors ){
+ /* last collector's world here */
+ /* prepare for next phase */ /* let other collectors go */
+ num_marking_collectors++;
+ }
+ while(num_marking_collectors != num_active_collectors + 1);
+
+ /* the remaining work is not enough for parallelization, so let only one thread go */
+ if( collector->thread_handle != 0 ) return;
+
+ update_relocated_refs(collector);
+ reset_fspace_for_allocation(space);
+
+ return;
+
+}
+
+
+
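Annotation: the retry loop in collector_trace_rootsets is a simple termination barrier: a collector that finds the shared pool empty announces itself finished, then watches both the counter and the pool; if work reappears before everyone finishes, it retracts the announcement and goes back to tracing. The protocol in isolation, sketched with std::atomic standing in for atomic_inc32/atomic_dec32 (pool_has_work and drain_pool are hypothetical hooks):

    #include <atomic>

    static std::atomic<unsigned int> num_finished(0);

    bool pool_has_work();   /* hypothetical: shared task pool is non-empty */
    void drain_pool();      /* hypothetical: trace until the pool looks empty */

    void tracing_phase(unsigned int num_active_collectors)
    {
        for(;;){
            drain_pool();
            num_finished.fetch_add(1);
            bool rejoin = false;
            while(num_finished.load() != num_active_collectors){
                if(pool_has_work()){              /* someone published new tasks */
                    num_finished.fetch_sub(1);
                    rejoin = true;
                    break;
                }
            }
            if(!rejoin) return;  /* all collectors agreed: no work remains */
        }
    }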
Index: vm/gc_gen/src/verify/verify_live_heap.cpp
===================================================================
--- vm/gc_gen/src/verify/verify_live_heap.cpp (revision 479958)
+++ vm/gc_gen/src/verify/verify_live_heap.cpp (working copy)
@@ -20,6 +20,7 @@
#include "verify_live_heap.h"
+Boolean GC_VERIFY = FALSE;
Boolean verify_live_heap;
void gc_verify_heap(GC* gc, Boolean is_before_gc)
Index: vm/gc_gen/src/thread/mutator.h
===================================================================
--- vm/gc_gen/src/thread/mutator.h (revision 479958)
+++ vm/gc_gen/src/thread/mutator.h (working copy)
@@ -42,4 +42,6 @@
void mutator_destruct(GC* gc, void* tls_gc_info);
void mutator_reset(GC *gc);
+void gc_reset_mutator_context(GC* gc);
+
#endif /*ifndef _MUTATOR_H_ */
Index: vm/gc_gen/src/thread/collector.cpp
===================================================================
--- vm/gc_gen/src/thread/collector.cpp (revision 479958)
+++ vm/gc_gen/src/thread/collector.cpp (working copy)
@@ -52,21 +52,26 @@
{
collector->task_func = NULL;
+ /*
vm_reset_event(collector->task_assigned_event);
vm_reset_event(collector->task_finished_event);
+ */
alloc_context_reset((Allocator*)collector);
GC_Metadata* metadata = collector->gc->metadata;
+
assert(collector->rep_set==NULL);
- collector->rep_set = pool_get_entry(metadata->free_set_pool);
- collector->result = 1;
-
+ if( !gc_requires_barriers() || collector->gc->collect_kind != MINOR_COLLECTION){
+ collector->rep_set = pool_get_entry(metadata->free_set_pool);
+ }
+
if(gc_requires_barriers()){
assert(collector->rem_set==NULL);
collector->rem_set = pool_get_entry(metadata->free_set_pool);
}
+ collector->result = TRUE;
return;
}
@@ -114,7 +119,6 @@
Collector* collector = gc->collectors[i];
wait_collector_to_finish(collector);
}
- gc->num_active_collectors = 0;
return;
}
@@ -141,21 +145,17 @@
static void collector_init_thread(Collector *collector)
{
- collector->trace_stack = new TraceStack(); /* only for MINOR_COLLECTION */
collector->obj_info_map = new ObjectMap();
collector->rem_set = NULL;
collector->rep_set = NULL;
- int status = vm_create_event(&collector->task_assigned_event,0,1);
+ int status = vm_create_event(&collector->task_assigned_event);
assert(status == THREAD_OK);
- status = vm_create_event(&collector->task_finished_event,0,1);
+ status = vm_create_event(&collector->task_finished_event);
assert(status == THREAD_OK);
- status = (unsigned int)vm_create_thread(NULL,
- 0, 0, 0,
- collector_thread_func,
- (void*)collector);
+ status = (unsigned int)vm_create_thread(collector_thread_func, (void*)collector);
assert(status == THREAD_OK);
@@ -222,6 +222,6 @@
{
assign_collector_with_task(gc, task_func, space);
wait_collection_finish(gc);
-
+
return;
}
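Annotation: dropping the vm_reset_event calls is sound because the events are now created as (0,1) semaphores: a post wakes exactly one waiter and the count falls back to zero, so the handshake is self-resetting. Schematically, the per-collector dispatch looks like the sketch below; the wait/post wrapper names are assumptions in the style of vm_create_event, not code from this patch:

    /* dispatcher side (assign_collector_with_task, condensed; hypothetical wrappers) */
    collector->task_func = task_func;
    vm_post_event(collector->task_assigned_event);   /* count 0 -> 1, wakes the collector */

    /* collector thread side (collector_thread_func, condensed) */
    vm_wait_event(collector->task_assigned_event);   /* consumes the post: 1 -> 0 */
    collector->task_func(collector->collect_space);
    vm_post_event(collector->task_finished_event);   /* wait_collector_to_finish consumes it */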
Index: vm/gc_gen/src/thread/mutator.cpp
===================================================================
--- vm/gc_gen/src/thread/mutator.cpp (revision 479958)
+++ vm/gc_gen/src/thread/mutator.cpp (working copy)
@@ -35,6 +35,7 @@
if(gc_requires_barriers()){
mutator->rem_set = pool_get_entry(gc->metadata->free_set_pool);
+ assert(vector_block_is_empty(mutator->rem_set));
}
lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
@@ -54,7 +55,7 @@
Mutator *mutator = (Mutator *)gc_information;
if(gc_requires_barriers()){ /* put back the remset when a mutator exits */
- pool_put_entry(gc->metadata->gc_rootset_pool, mutator->rem_set);
+ pool_put_entry(gc->metadata->mutator_remset_pool, mutator->rem_set);
mutator->rem_set = NULL;
}
@@ -77,3 +78,13 @@
return;
}
+void gc_reset_mutator_context(GC* gc)
+{
+ Mutator *mutator = gc->mutator_list;
+ while (mutator) {
+ mutator->rem_set = pool_get_entry(gc->metadata->free_set_pool);
+ alloc_context_reset((Allocator*)mutator);
+ mutator = mutator->next;
+ }
+ return;
+}
\ No newline at end of file
Index: vm/gc_gen/src/common/gc_metadata.h
===================================================================
--- vm/gc_gen/src/common/gc_metadata.h (revision 479958)
+++ vm/gc_gen/src/common/gc_metadata.h (working copy)
@@ -42,12 +42,14 @@
void gc_metadata_initialize(GC* gc);
void gc_metadata_destruct(GC* gc);
-void gc_metadata_reset(GC* gc);
+void gc_metadata_verify(GC* gc, Boolean is_before_gc);
+void gc_set_rootset(GC* gc);
void gc_reset_rootset(GC* gc);
void gc_update_repointed_refs(Collector* collector);
void collector_marktask_add_entry(Collector* collector, Partial_Reveal_Object* p_obj);
+void collector_tracetask_add_entry(Collector* collector, Partial_Reveal_Object** p_ref);
void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_slot);
void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot);
Index: vm/gc_gen/src/common/mark_scan_par.cpp
===================================================================
--- vm/gc_gen/src/common/mark_scan_par.cpp (revision 479958)
+++ vm/gc_gen/src/common/mark_scan_par.cpp (working copy)
@@ -97,7 +97,8 @@
iter = vector_block_iterator_advance(root_set,iter);
Partial_Reveal_Object* p_obj = *p_ref;
- assert(!p_obj == NULL); /* root ref can't be NULL */
+ /* root ref can't be NULL (a remset may have NULL ref entries, but this function is only for MAJOR_COLLECTION) */
+ assert((gc->collect_kind==MAJOR_COLLECTION) && (p_obj!= NULL));
/* we have to mark the object before put it into marktask, because
it is possible to have two slots containing a same object. They will
be scanned twice and their ref slots will be recorded twice. Problem
Index: vm/gc_gen/src/common/gc_for_vm.cpp
===================================================================
--- vm/gc_gen/src/common/gc_for_vm.cpp (revision 479958)
+++ vm/gc_gen/src/common/gc_for_vm.cpp (working copy)
@@ -26,6 +26,11 @@
unsigned int HEAP_SIZE_DEFAULT = 256 * MB;
+extern Boolean NEED_BARRIER;
+extern unsigned int NUM_COLLECTORS;
+extern Boolean GC_VERIFY;
+extern unsigned int NOS_SIZE;
+
/* heap size limit is not interesting. only for manual tuning purpose */
unsigned int min_heap_size_bytes = 32 * MB;
unsigned int max_heap_size_bytes = 256 * MB;
@@ -100,6 +105,10 @@
min_heap_size_bytes = min_heap_size;
max_heap_size_bytes = max_heap_size;
+ if (is_property_set("gc.nos_size")) {
+ NOS_SIZE = parse_size_string(vm_get_property_value("gc.nos_size"));
+ }
+
if (is_property_set("gc.num_collectors")) {
unsigned int num = get_property_value_int("gc.num_collectors");
NUM_COLLECTORS = (num==0)? NUM_COLLECTORS:num;
@@ -108,6 +117,10 @@
if (is_property_set("gc.gen_mode")) {
NEED_BARRIER = get_property_value_boolean("gc.gen_mode");
}
+
+ if (is_property_set("gc.verify")) {
+ GC_VERIFY = get_property_value_boolean("gc.verify");
+ }
return;
}
Index: vm/gc_gen/src/common/gc_platform.h
===================================================================
--- vm/gc_gen/src/common/gc_platform.h (revision 479958)
+++ vm/gc_gen/src/common/gc_platform.h (working copy)
@@ -48,9 +48,9 @@
assert(stat == TM_ERROR_NONE); return stat;
}
-inline int vm_create_event(VmEventHandle* event, unsigned int initial_count, unsigned int max_count)
+inline int vm_create_event(VmEventHandle* event)
{
- return hysem_create(event, initial_count, max_count);
+ return hysem_create(event, 0, 1);
}
inline void vm_thread_yield()
@@ -58,9 +58,14 @@
hythread_yield();
}
-inline int vm_create_thread(void* ret_thread, unsigned int stacksize, unsigned int priority, unsigned int suspend, int (*func)(void*), void *data)
+inline int vm_create_thread(int (*func)(void*), void *data)
{
- return hythread_create((hythread_t*)ret_thread, (UDATA)stacksize, (UDATA)priority, (UDATA)suspend,
+ hythread_t* ret_thread = NULL;
+ UDATA stacksize = 0;
+ UDATA priority = 0;
+ UDATA suspend = 0;
+
+ return hythread_create(ret_thread, stacksize, priority, suspend,
(hythread_entrypoint_t)func, data);
}
Index: vm/gc_gen/src/common/gc_for_class.cpp
===================================================================
--- vm/gc_gen/src/common/gc_for_class.cpp (revision 479958)
+++ vm/gc_gen/src/common/gc_for_class.cpp (working copy)
@@ -91,7 +91,7 @@
}
/* ref array is NULL-terminated */
- *new_ref_array = NULL;
+ *new_ref_array = 0;
gcvt->gc_number_of_ref_fields = num_ref_fields;
Index: vm/gc_gen/src/common/gc_common.h
===================================================================
--- vm/gc_gen/src/common/gc_common.h (revision 479958)
+++ vm/gc_gen/src/common/gc_common.h (working copy)
@@ -39,8 +39,6 @@
#include "gc_for_class.h"
#include "gc_platform.h"
-#define TRUE 1
-#define FALSE 0
#define null 0
#define MB 1048576
@@ -64,14 +62,9 @@
typedef void (*TaskType)(void*);
-extern Boolean NEED_BARRIER;
-extern unsigned int NUM_COLLECTORS;
-
typedef std::stack<Partial_Reveal_Object*> MarkStack;
typedef std::stack<Partial_Reveal_Object**> TraceStack;
typedef std::map<Partial_Reveal_Object*, Obj_Info_Type> ObjectMap;
-#include <hash_set>
-typedef stdext::hash_set<Partial_Reveal_Object**> HashSet;
enum Collection_Kind {
MINOR_COLLECTION,
Index: vm/gc_gen/src/common/mark_scan_seq.cpp
===================================================================
--- vm/gc_gen/src/common/mark_scan_seq.cpp (revision 479958)
+++ vm/gc_gen/src/common/mark_scan_seq.cpp (working copy)
@@ -89,7 +89,7 @@
iter = vector_block_iterator_advance(root_set,iter);
Partial_Reveal_Object* p_obj = *p_ref;
- assert(!p_obj == NULL); /* root ref can't be NULL */
+ assert(p_obj != 0); /* root ref can't be NULL */
Space* space = space_of_addr(collector->gc, p_obj);
if( !space->mark_object_func(space, p_obj) ) continue;
Index: vm/gc_gen/src/common/gc_block.h
===================================================================
--- vm/gc_gen/src/common/gc_block.h (revision 479958)
+++ vm/gc_gen/src/common/gc_block.h (working copy)
@@ -41,7 +41,7 @@
void* free;
void* ceiling;
unsigned int block_idx;
- unsigned int status;
+ volatile unsigned int status;
Block_Header* next;
unsigned int mark_table[1]; /* entry num == MARKBIT_TABLE_SIZE_WORDS */
}Block_Header;
@@ -58,8 +58,8 @@
#define MARKBIT_TABLE_COMPUTE_DIVISOR 33
/* +1 to round up*/
#define MARKBIT_TABLE_COMPUTED_SIZE_BYTE ((GC_BLOCK_SIZE_BYTES-GC_BLOCK_HEADER_VARS_SIZE_BYTES)/MARKBIT_TABLE_COMPUTE_DIVISOR + 1)
-#define MARKBIT_TABLE_SIZE_WORDS ((MARKBIT_TABLE_COMPUTED_SIZE_BYTE + MASK_OF_BYTES_PER_WORD)&~MASK_OF_BYTES_PER_WORD)
-#define MARKBIT_TABLE_SIZE_BYTES (MARKBIT_TABLE_SIZE_WORDS * BYTES_PER_WORD)
+#define MARKBIT_TABLE_SIZE_BYTES ((MARKBIT_TABLE_COMPUTED_SIZE_BYTE + MASK_OF_BYTES_PER_WORD)&~MASK_OF_BYTES_PER_WORD)
+#define MARKBIT_TABLE_SIZE_WORDS (MARKBIT_TABLE_SIZE_BYTES >> BIT_SHIFT_TO_BYTES_PER_WORD)
#define GC_BLOCK_HEADER_SIZE_BYTES (MARKBIT_TABLE_SIZE_BYTES + GC_BLOCK_HEADER_VARS_SIZE_BYTES)
#define GC_BLOCK_BODY_SIZE_BYTES (GC_BLOCK_SIZE_BYTES - GC_BLOCK_HEADER_SIZE_BYTES)
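Annotation: the swapped macros fix a unit mix-up: the old code applied the byte-rounding formula and labeled the result words, so MARKBIT_TABLE_SIZE_BYTES came out roughly four times too large on a 32-bit build. A worked check, assuming 4-byte words (MASK_OF_BYTES_PER_WORD == 3, BIT_SHIFT_TO_BYTES_PER_WORD == 2) and an illustrative computed size of 994 bytes:

    unsigned int computed = 994;                   /* MARKBIT_TABLE_COMPUTED_SIZE_BYTE, illustrative */

    /* old macros: rounded bytes mislabeled as a word count */
    unsigned int old_words = (computed + 3) & ~3;  /* 996 -- really bytes, not words */
    unsigned int old_bytes = old_words * 4;        /* 3984: ~4x the needed table size */

    /* new macros: round to a whole-word byte size, then shift down to words */
    unsigned int new_bytes = (computed + 3) & ~3;  /* 996 */
    unsigned int new_words = new_bytes >> 2;       /* 249 */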
Index: vm/gc_gen/src/common/gc_for_class.h
===================================================================
--- vm/gc_gen/src/common/gc_for_class.h (revision 479958)
+++ vm/gc_gen/src/common/gc_for_class.h (working copy)
@@ -136,4 +136,5 @@
return gcvt->gc_allocated_size;
}
-#endif //#ifndef _GC_TYPES_H_
\ No newline at end of file
+#endif //#ifndef _GC_TYPES_H_
+
Index: vm/gc_gen/src/common/gc_metadata.cpp
===================================================================
--- vm/gc_gen/src/common/gc_metadata.cpp (revision 479958)
+++ vm/gc_gen/src/common/gc_metadata.cpp (working copy)
@@ -49,26 +49,28 @@
/* half of the metadata space is used for mark_stack */
unsigned num_tasks = num_blocks >> 1;
- gc_metadata.free_task_pool = sync_pool_create(num_tasks);
+ gc_metadata.free_task_pool = sync_pool_create();
for(i=0; i<num_tasks; i++){
...
}
unsigned num_sets = num_blocks >> 1;
- gc_metadata.free_set_pool = sync_pool_create(num_sets);
+ gc_metadata.free_set_pool = sync_pool_create();
/* initialize free rootset pool so that mutators can use them */
for(; i<num_tasks+num_sets; i++){
...
}
gc->metadata = &gc_metadata;
return;
@@ -90,7 +92,7 @@
gc->metadata = NULL;
}
-void gc_metadata_reset(GC* gc)
+void gc_set_rootset(GC* gc)
{
GC_Metadata* metadata = gc->metadata;
Pool* gc_rootset_pool = metadata->gc_rootset_pool;
@@ -106,15 +108,22 @@
if(!gc_requires_barriers()) return;
+ /* put back last remset block of each mutator */
Mutator *mutator = gc->mutator_list;
while (mutator) {
pool_put_entry(mutator_remset_pool, mutator->rem_set);
mutator->rem_set = NULL;
+ mutator = mutator->next;
}
-
- for(unsigned int i=0; i<gc->num_collectors; i++){
+
+ /* put back last remset block of each collector (saved in last collection) */
+ unsigned int num_active_collectors = gc->num_active_collectors;
+ for(unsigned int i=0; i<num_active_collectors; i++){
Collector* collector = gc->collectors[i];
- pool_put_entry(collector_remset_pool, collector->rem_set);
+ /* 1. in the first GC, rem_set is NULL; 2. it should be NULL when NOS is forwarding_all */
+ if(collector->rem_set == NULL) continue;
+ pool_put_entry(metadata->collector_remset_pool, collector->rem_set);
collector->rem_set = NULL;
}
@@ -150,6 +159,7 @@
pool_put_entry(gc_rootset_pool, root_set);
root_set = pool_get_entry( collector_remset_pool );
}
+
}
return;
@@ -158,6 +168,8 @@
void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_ref)
{
+ assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address());
+
Vector_Block* root_set = mutator->rem_set;
vector_block_add_entry(root_set, (unsigned int)p_ref);
@@ -182,6 +194,8 @@
void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
{
+ assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address());
+
Vector_Block* root_set = collector->rem_set;
vector_block_add_entry(root_set, (unsigned int)p_ref);
@@ -204,6 +218,19 @@
collector->mark_stack = (MarkStack*)pool_get_entry(gc_metadata.free_task_pool);
}
+void collector_tracetask_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+ assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address());
+
+ Vector_Block* trace_task = (Vector_Block*)collector->trace_stack;
+ vector_block_add_entry(trace_task, (unsigned int)p_ref);
+
+ if( !vector_block_is_full(trace_task)) return;
+
+ pool_put_entry(gc_metadata.gc_rootset_pool, trace_task);
+ collector->trace_stack = (TraceStack*)pool_get_entry(gc_metadata.free_set_pool);
+}
+
void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref)
{
assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address());
@@ -267,8 +294,27 @@
void gc_reset_rootset(GC* gc)
{
- gc->root_set = pool_get_entry(gc_metadata.free_set_pool);
+ assert(pool_is_empty(gc_metadata.gc_rootset_pool));
+ gc->root_set = pool_get_entry(gc_metadata.free_set_pool);
+
+ assert(vector_block_is_empty(gc->root_set));
return;
}
-
+void gc_metadata_verify(GC* gc, Boolean is_before_gc)
+{
+ GC_Metadata* metadata = gc->metadata;
+ assert(pool_is_empty(metadata->gc_rootset_pool));
+ assert(pool_is_empty(metadata->collector_repset_pool));
+ assert(pool_is_empty(metadata->mark_task_pool));
+
+ if(!is_before_gc || !gc_requires_barriers())
+ assert(pool_is_empty(metadata->mutator_remset_pool));
+
+ if(!gc_requires_barriers()){
+ /* FIXME:: even for gen gc, it should be empty if NOS is forwarding_all */
+ assert(pool_is_empty(metadata->collector_remset_pool));
+ }
+
+ return;
+}
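Annotation: the new range asserts encode one invariant consistently: remset and trace-task entries are always slots inside the heap, while gc_rootset_add_entry (unchanged above) asserts the opposite, slots outside the heap such as VM stack locations and globals. A hedged helper capturing what the asserts check (the function name is illustrative):

    static Boolean slot_is_in_heap(Partial_Reveal_Object** p_ref)
    {
        return (p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address());
    }
    /* mutator_remset_add_entry, collector_remset_add_entry and collector_tracetask_add_entry
       require slot_is_in_heap(p_ref); gc_rootset_add_entry requires !slot_is_in_heap(p_ref). */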
Index: vm/gc_gen/src/gen/gc_for_barrier.cpp
===================================================================
--- vm/gc_gen/src/gen/gc_for_barrier.cpp (revision 479958)
+++ vm/gc_gen/src/gen/gc_for_barrier.cpp (working copy)
@@ -24,7 +24,7 @@
/* All the write barrier interfaces need cleanup */
-Boolean NEED_BARRIER = FALSE;
+Boolean NEED_BARRIER = TRUE;
Boolean gc_requires_barriers()
{ return NEED_BARRIER; }
@@ -46,7 +46,7 @@
{
Mutator *mutator = (Mutator *)vm_get_gc_thread_local();
GC_Gen* gc = (GC_Gen*)mutator->gc;
- if( !address_belongs_to_nursery((void *)p_object, gc)) return;
+ if( address_belongs_to_nursery((void *)p_object, gc)) return;
Partial_Reveal_Object **p_slot;
/* scan array object */
Index: vm/gc_gen/src/gen/gen.cpp
===================================================================
--- vm/gc_gen/src/gen/gen.cpp (revision 479958)
+++ vm/gc_gen/src/gen/gen.cpp (working copy)
@@ -29,6 +29,7 @@
/* fspace size limit is not interesting. only for manual tuning purpose */
unsigned int min_nos_size_bytes = 2 * MB;
unsigned int max_nos_size_bytes = 64 * MB;
+unsigned int NOS_SIZE = 0;
static void gc_gen_get_system_info(GC_Gen *gc_gen)
{
@@ -79,8 +80,16 @@
reserved_base = (void*)((unsigned int)reserved_base + los_size);
gc_mos_initialize(gc_gen, reserved_base, mos_size);
- unsigned int nos_size = max_heap_size >> 2;
- assert(nos_size > min_nos_size_bytes);
+ unsigned int nos_size;
+ if(NOS_SIZE){
+ assert( NOS_SIZE>=min_nos_size_bytes && NOS_SIZE<=max_nos_size_bytes);
+ nos_size = NOS_SIZE;
+ }else
+ nos_size = max_heap_size >> 4;
+
+ if(nos_size < min_nos_size_bytes ) nos_size = min_nos_size_bytes;
+ if(nos_size > max_nos_size_bytes ) nos_size = max_nos_size_bytes;
+
reserved_base = (void*)((unsigned int)reserved_base + mos_size);
gc_nos_initialize(gc_gen, reserved_base, nos_size);
@@ -99,9 +108,7 @@
gc_metadata_initialize((GC*)gc_gen); /* root set and mark stack */
collector_initialize((GC*)gc_gen);
- if( verify_live_heap ){ /* for live heap verify*/
- gc_init_heap_verification((GC*)gc_gen);
- }
+ gc_init_heap_verification((GC*)gc_gen);
return;
}
@@ -143,16 +150,6 @@
void gc_set_los(GC_Gen* gc, Space* los){ gc->los = (Lspace*)los;}
unsigned int gc_get_processor_num(GC_Gen* gc){ return gc->_num_processors;}
-void reset_mutator_allocation_context(GC_Gen* gc)
-{
- Mutator *mutator = gc->mutator_list;
- while (mutator) {
- alloc_context_reset((Allocator*)mutator);
- mutator = mutator->next;
- }
- return;
-}
-
static unsigned int gc_decide_collection_kind(GC_Gen* gc, unsigned int cause)
{
if(major_collection_needed(gc) || cause== GC_CAUSE_LOS_IS_FULL)
@@ -166,13 +163,14 @@
gc->num_collections++;
gc->collect_kind = gc_decide_collection_kind(gc, cause);
+ //gc->collect_kind = MAJOR_COLLECTION;
+ gc_metadata_verify((GC*)gc, TRUE);
+
/* Stop the threads and collect the roots. */
gc_reset_rootset((GC*)gc);
vm_enumerate_root_set_all_threads();
-
- /* reset metadata (all the rootsets and markstack) */
- gc_metadata_reset((GC*)gc);
+ gc_set_rootset((GC*)gc);
if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
@@ -202,8 +200,10 @@
}
if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
-
- reset_mutator_allocation_context(gc);
+
+ gc_metadata_verify((GC*)gc, FALSE);
+
+ gc_reset_mutator_context((GC*)gc);
vm_resume_threads_after();
return;
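Annotation: net effect of the sizing changes above: gc.nos_size wins when set, otherwise the nursery defaults to 1/16 of the heap rather than 1/4, clamped into [min_nos_size_bytes, max_nos_size_bytes]; with the 256 MB default heap that is a 16 MB nursery. A condensed sketch of the decision (the helper name is illustrative, the variables are the file's own):

    unsigned int pick_nos_size(unsigned int max_heap_size)
    {
        unsigned int nos_size = NOS_SIZE ? NOS_SIZE : (max_heap_size >> 4);
        if(nos_size < min_nos_size_bytes) nos_size = min_nos_size_bytes;
        if(nos_size > max_nos_size_bytes) nos_size = max_nos_size_bytes;
        return nos_size;   /* e.g. 256MB >> 4 = 16MB, within [2MB, 64MB] */
    }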