Index: build/make/components/vm/gc_gen.xml
===================================================================
--- build/make/components/vm/gc_gen.xml (revision 472376)
+++ build/make/components/vm/gc_gen.xml (working copy)
@@ -59,18 +59,20 @@
+
-
+
Index: vm/gc_gen/src/common/gc_block.h
===================================================================
--- vm/gc_gen/src/common/gc_block.h (revision 472408)
+++ vm/gc_gen/src/common/gc_block.h (working copy)
@@ -27,10 +27,13 @@
#define GC_BLOCK_SIZE_BYTES (1 << GC_BLOCK_SHIFT_COUNT)
enum Block_Status {
- BLOCK_NIL,
- BLOCK_FREE,
- BLOCK_IN_USE,
- BLOCK_USED
+ BLOCK_NIL = 0,
+ BLOCK_FREE = 0x1,
+ BLOCK_IN_USE = 0x2,
+ BLOCK_USED = 0x4,
+ BLOCK_IN_COMPACT = 0x8,
+ BLOCK_COMPACTED = 0x10,
+ BLOCK_TARGET = 0x20
};
typedef struct Block_Header {
@@ -39,7 +42,6 @@
void* ceiling;
unsigned int block_idx;
unsigned int status;
- SlotVector* reloc_table;
Block_Header* next;
unsigned int mark_table[1]; /* entry num == MARKBIT_TABLE_SIZE_WORDS */
}Block_Header;
@@ -91,7 +93,7 @@
   if( !(markbits& (1<<j)) ){ j++; continue;}
 
+typedef struct Blocked_Space {
+  /* <-- first couple of fields are overloaded as Space */
+  void* heap_start;
+  void* heap_end;
+  unsigned int reserved_heap_size;
+  unsigned int committed_heap_size;
+  unsigned int num_collections;
+  GC* gc;
+  Boolean move_object;
+  Boolean (*mark_object_func)(Space* space, Partial_Reveal_Object* p_obj);
+  /* END of Space --> */
+  Block* blocks; /* short-cut for mspace block header access, not mandatory */
+
+ /* FIXME:: the block indices should be replaced with block header addresses */
+ unsigned int first_block_idx;
+ unsigned int ceiling_block_idx;
+ volatile unsigned int free_block_idx;
+
+ unsigned int num_used_blocks;
+ unsigned int num_managed_blocks;
+ unsigned int num_total_blocks;
+ /* END of Blocked_Space --> */
+}Blocked_Space;
+
#endif //#ifndef _BLOCK_H_
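
Note on the Block_Status change above: the states are now one-hot bit flags instead of consecutive integers, so membership in a set of states can be tested with a single mask, and a state transition can be claimed with one compare-and-swap. The standalone sketch below only illustrates that usage pattern; cas32 is a plain, non-atomic stand-in for the patch's atomic_cas32 (which returns the old value), and try_claim_as_target is an illustrative helper, not part of the patch.

    #include <assert.h>

    enum Block_Status {
      BLOCK_NIL        = 0,
      BLOCK_FREE       = 0x1,
      BLOCK_IN_USE     = 0x2,
      BLOCK_USED       = 0x4,
      BLOCK_IN_COMPACT = 0x8,
      BLOCK_COMPACTED  = 0x10,
      BLOCK_TARGET     = 0x20
    };

    /* non-atomic stand-in for atomic_cas32(): returns the old value */
    static unsigned int cas32(volatile unsigned int* p, unsigned int swap, unsigned int cmp)
    { unsigned int old = *p; if(old == cmp) *p = swap; return old; }

    /* a collector may only turn a block that finished compaction (BLOCK_COMPACTED)
       into a copy target (BLOCK_TARGET); the state-set test is a single AND */
    static int try_claim_as_target(volatile unsigned int* p_status)
    {
      assert(*p_status & (BLOCK_IN_COMPACT | BLOCK_COMPACTED | BLOCK_TARGET));
      return cas32(p_status, BLOCK_TARGET, BLOCK_COMPACTED) == BLOCK_COMPACTED;
    }

    int main()
    {
      volatile unsigned int status = BLOCK_COMPACTED;
      assert(try_claim_as_target(&status));   /* first claimer wins */
      assert(!try_claim_as_target(&status));  /* second one sees BLOCK_TARGET and fails */
      return 0;
    }
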
Index: vm/gc_gen/src/common/gc_common.cpp
===================================================================
--- vm/gc_gen/src/common/gc_common.cpp (revision 472408)
+++ vm/gc_gen/src/common/gc_common.cpp (working copy)
@@ -1,105 +0,0 @@
-/*
- * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @author Xiao-Feng Li, 2006/10/05
- */
-
-#include "../thread/collector.h"
-#include "../gen/gen.h"
-
-static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref)
-{
- Partial_Reveal_Object* p_obj = *p_ref;
- if(p_obj==NULL) return;
-
- MarkStack* mark_stack = collector->mark_stack;
- Space* obj_space = space_of_addr(collector->gc, p_obj);
- Space* ref_space = space_of_addr(collector->gc, p_ref);
-
- /* if obj to be moved, its ref slot needs remembering for later update */
- if(obj_space->move_object)
- ref_space->save_reloc_func(ref_space, p_ref);
-
- if(obj_space->mark_object_func(obj_space, p_obj))
- mark_stack->push(p_obj);
-
- return;
-}
-
-static void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
-{
- if( !object_has_slots(p_obj) ) return;
-
- /* scan array object */
- if (object_is_array(p_obj)) {
- Partial_Reveal_Object* array = p_obj;
- assert(!obj_is_primitive_array(array));
-
- int32 array_length = vector_get_length((Vector_Handle) array);
- for (int i = 0; i < array_length; i++) {
- Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
- scan_slot(collector, p_ref);
- }
- return;
- }
-
- /* scan non-array object */
- int *offset_scanner = init_object_scanner(p_obj);
- while (true) {
- Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
- if (p_ref == NULL) break; /* terminating ref slot */
-
- scan_slot(collector, p_ref);
- offset_scanner = offset_next_ref(offset_scanner);
- }
-
- return;
-}
-
-static void scan_root(Collector* collector, Partial_Reveal_Object *p_obj)
-{
- assert(p_obj);
- Space* space = space_of_addr(collector->gc, p_obj);
- if( !space->mark_object_func(space, p_obj) ) return;
-
- MarkStack* mark_stack = collector->mark_stack;
- mark_stack->push(p_obj);
-
- while(!mark_stack->empty()){
- p_obj = mark_stack->top();
- mark_stack->pop();
- scan_object(collector, p_obj);
- }
-
- return;
-}
-
-/* NOTE:: Only marking in object header is idempotent */
-void mark_scan_heap(Collector* collector)
-{
- GC* gc = collector->gc;
-
- int size = gc->root_set->size();
-
- for(int i=0; i<size; i++){
-   Partial_Reveal_Object** p_ref = (*gc->root_set)[i];
- assert(*p_ref); /* root ref should never by NULL */
- scan_root(collector, *p_ref);
- }
-
- return;
-}
Index: vm/gc_gen/src/common/gc_common.h
===================================================================
--- vm/gc_gen/src/common/gc_common.h (revision 472408)
+++ vm/gc_gen/src/common/gc_common.h (working copy)
@@ -64,14 +64,11 @@
typedef void (*TaskType)(void*);
-#define GC_NUM_ROOTS_HINT 10000
+extern Boolean NEED_BARRIER;
+extern unsigned int NUM_COLLECTORS;
typedef std::stack<Partial_Reveal_Object*> MarkStack;
typedef std::stack<Partial_Reveal_Object*> TraceStack;
-typedef std::vector<Partial_Reveal_Object*> RemobjSet;
-typedef std::vector<Partial_Reveal_Object**> RootSet;
-typedef std::vector<Partial_Reveal_Object**> RemslotSet;
-typedef std::vector<Partial_Reveal_Object**> SlotVector;
typedef std::map<Partial_Reveal_Object*, Obj_Info_Type> ObjectMap;
#include <hash_set>
typedef stdext::hash_set<Partial_Reveal_Object*> HashSet;
@@ -190,8 +187,6 @@
GC* gc;
Boolean move_object;
Boolean (*mark_object_func)(Space* space, Partial_Reveal_Object* p_obj);
- void (*save_reloc_func)(Space* space, Partial_Reveal_Object** p_ref);
- void (*update_reloc_func)(Space* space);
}Space;
inline unsigned int space_committed_size(Space* space){ return space->committed_heap_size;}
@@ -211,7 +206,8 @@
/* all GCs inherit this GC structure */
struct Mutator;
struct Collector;
-
+struct GC_Metadata;
+struct Vector_Block;
typedef struct GC{
void* heap_start;
void* heap_end;
@@ -229,9 +225,11 @@
unsigned int num_collectors;
unsigned int num_active_collectors; /* not all collectors are working */
- /* rootsets for collection (FIXME:: should be distributed to collectors) */
- RootSet* root_set;
+ /* metadata is the pool for rootset, markstack, etc. */
+ GC_Metadata* metadata;
unsigned int collect_kind; /* MAJOR or MINOR */
+ /* FIXME:: this is wrong! root_set belongs to mutator */
+ Vector_Block* root_set;
/* mem info */
apr_pool_t *aux_pool;
@@ -239,22 +237,10 @@
}GC;
-inline void gc_init_rootset(GC* gc)
-{
- gc->root_set = new RootSet();
- gc->root_set->reserve(GC_NUM_ROOTS_HINT);
- gc->root_set->clear();
-}
+void mark_scan_heap_par(Collector* collector);
+void mark_scan_heap_seq(Collector* collector);
-inline void gc_reset_rootset(GC* gc)
-{
- gc->root_set->clear();
-}
-
-void mark_scan_heap(Collector* collector);
-
inline void* gc_heap_base(GC* gc){ return gc->heap_start; }
inline void* gc_heap_ceiling(GC* gc){ return gc->heap_end; }
-
#endif //_GC_COMMON_H_
Index: vm/gc_gen/src/common/gc_for_vm.cpp
===================================================================
--- vm/gc_gen/src/common/gc_for_vm.cpp (revision 472408)
+++ vm/gc_gen/src/common/gc_for_vm.cpp (working copy)
@@ -18,36 +18,125 @@
* @author Xiao-Feng Li, 2006/10/05
*/
+#include <ctype.h>
#include "vm_threads.h"
#include "../gen/gen.h"
#include "interior_pointer.h"
+unsigned int HEAP_SIZE_DEFAULT = 256 * MB;
+
+/* heap size limit is not interesting. only for manual tuning purpose */
+unsigned int min_heap_size_bytes = 32 * MB;
+unsigned int max_heap_size_bytes = 256 * MB;
+
+static size_t parse_size_string(const char* size_string)
+{
+ size_t len = strlen(size_string);
+ size_t unit = 1;
+ if (tolower(size_string[len - 1]) == 'k') {
+ unit = 1024;
+ } else if (tolower(size_string[len - 1]) == 'm') {
+ unit = 1024 * 1024;
+ } else if (tolower(size_string[len - 1]) == 'g') {
+ unit = 1024 * 1024 * 1024;
+ }
+ size_t size = atol(size_string);
+ size_t res = size * unit;
+ if (res / unit != size) {
+ /* overflow happened */
+ return 0;
+ }
+ return res;
+}
+
+static bool get_property_value_boolean(char* name)
+{
+ const char* value = vm_get_property_value(name);
+
+ return (strcmp("0", value) != 0
+ && strcmp("off", value) != 0
+ && strcmp("false", value) != 0);
+}
+
+static int get_property_value_int(char* name)
+{
+ const char* value = vm_get_property_value(name);
+ return (NULL == value) ? 0 : atoi(value);
+}
+
+static bool is_property_set(char* name)
+{
+ const char* value = vm_get_property_value(name);
+ return (NULL != value && 0 != value[0]);
+}
+
+static void parse_configuration_properties()
+{
+ unsigned int max_heap_size = HEAP_SIZE_DEFAULT;
+ unsigned int min_heap_size = min_heap_size_bytes;
+
+ if (is_property_set("gc.mx")) {
+ max_heap_size = parse_size_string(vm_get_property_value("gc.mx"));
+
+ if (max_heap_size < min_heap_size)
+ max_heap_size = min_heap_size;
+ if (0 == max_heap_size)
+ max_heap_size = HEAP_SIZE_DEFAULT;
+
+ min_heap_size = max_heap_size / 10;
+ if (min_heap_size < min_heap_size_bytes) min_heap_size = min_heap_size_bytes;
+ }
+
+ if (is_property_set("gc.ms")) {
+ min_heap_size = parse_size_string(vm_get_property_value("gc.ms"));
+ if (min_heap_size < min_heap_size_bytes)
+ min_heap_size = min_heap_size_bytes;
+ }
+
+ if (min_heap_size > max_heap_size)
+ max_heap_size = min_heap_size;
+
+ min_heap_size_bytes = min_heap_size;
+ max_heap_size_bytes = max_heap_size;
+
+ if (is_property_set("gc.num_collectors")) {
+ unsigned int num = get_property_value_int("gc.num_collectors");
+ NUM_COLLECTORS = (num==0)? NUM_COLLECTORS:num;
+ }
+
+ if (is_property_set("gc.gen_mode")) {
+ NEED_BARRIER = get_property_value_boolean("gc.gen_mode");
+ }
+
+ return;
+}
+
static GC* p_global_gc = NULL;
void gc_init()
{
+ parse_configuration_properties();
+
assert(p_global_gc == NULL);
GC* gc = (GC*)STD_MALLOC(sizeof(GC_Gen));
assert(gc);
memset(gc, 0, sizeof(GC));
p_global_gc = gc;
gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes);
- /* initialize the main thread*/
- // gc_thread_init(vm_get_gc_thread_local());
return;
}
/* this interface need reconsidering. is_pinned is unused. */
void gc_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned)
-{
+{
Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)ref;
- if (*p_ref == NULL) return;
- assert( !obj_is_marked_in_vt(*p_ref));
- assert( !obj_is_forwarded_in_vt(*p_ref) && !obj_is_forwarded_in_obj_info(*p_ref));
- assert( obj_is_in_gc_heap(*p_ref));
- p_global_gc->root_set->push_back(p_ref);
+ if (*p_ref == NULL) return;
+ assert( !obj_is_marked_in_vt(*p_ref));
+ assert( !obj_is_forwarded_in_vt(*p_ref) && !obj_is_forwarded_in_obj_info(*p_ref));
+ assert( obj_is_in_gc_heap(*p_ref));
+ gc_rootset_add_entry(p_global_gc, p_ref);
}
void gc_add_root_set_entry_interior_pointer (void **slot, int offset, Boolean is_pinned)
@@ -86,13 +175,13 @@
int64 gc_free_memory()
{
- return (int64)gc_gen_free_memory_size((GC_Gen*)p_global_gc);
+ return (int64)gc_gen_free_memory_size((GC_Gen*)p_global_gc);
}
/* java heap size.*/
int64 gc_total_memory()
{
- return (int64)((POINTER_SIZE_INT)gc_heap_ceiling(p_global_gc) - (POINTER_SIZE_INT)gc_heap_base(p_global_gc));
+ return (int64)((POINTER_SIZE_INT)gc_heap_ceiling(p_global_gc) - (POINTER_SIZE_INT)gc_heap_base(p_global_gc));
}
void gc_vm_initialized()
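
For reference, the new parse_configuration_properties() above reads gc.mx/gc.ms, scales trailing k/m/g suffixes, and keeps min_heap_size <= max_heap_size. The sketch below isolates just the suffix parsing; parse_size is an illustrative copy of the patch's parse_size_string (it assumes a non-empty string, like the original, and reports multiplication overflow as 0).

    #include <assert.h>
    #include <ctype.h>
    #include <stdlib.h>
    #include <string.h>

    /* same idea as parse_size_string(): a trailing k/m/g scales the number */
    static size_t parse_size(const char* s)
    {
      size_t len = strlen(s);
      size_t unit = 1;
      char last = (char)tolower(s[len - 1]);
      if (last == 'k') unit = 1024;
      else if (last == 'm') unit = 1024 * 1024;
      else if (last == 'g') unit = 1024 * 1024 * 1024;
      size_t size = (size_t)atol(s);
      size_t res = size * unit;
      return (res / unit != size) ? 0 : res;   /* 0 signals overflow */
    }

    int main()
    {
      assert(parse_size("256m") == 256u * 1024 * 1024);
      assert(parse_size("64K")  == 64u * 1024);
      assert(parse_size("1024") == 1024u);
      return 0;
    }
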
Index: vm/gc_gen/src/common/gc_metadata.cpp
===================================================================
--- vm/gc_gen/src/common/gc_metadata.cpp (revision 0)
+++ vm/gc_gen/src/common/gc_metadata.cpp (revision 0)
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/25
+ */
+
+#include "gc_metadata.h"
+#include "../thread/mutator.h"
+#include "../thread/collector.h"
+
+#define GC_METADATA_SIZE_BYTES 32*MB
+
+#define METADATA_BLOCK_SIZE_BIT_SHIFT 12
+#define METADATA_BLOCK_SIZE_BYTES (1<<METADATA_BLOCK_SIZE_BIT_SHIFT)
+
+static GC_Metadata gc_metadata;
+
+void gc_metadata_initialize(GC* gc)
+{
+  void* metadata = STD_MALLOC(GC_METADATA_SIZE_BYTES);
+  memset(metadata, 0, GC_METADATA_SIZE_BYTES);
+  gc_metadata.heap_start = metadata;
+  gc_metadata.heap_end = (void*)((unsigned int)metadata + GC_METADATA_SIZE_BYTES);
+
+  unsigned int i;
+  unsigned int num_blocks = GC_METADATA_SIZE_BYTES >> METADATA_BLOCK_SIZE_BIT_SHIFT;
+  for(i=0; i<num_blocks; i++)
+    vector_block_init((Vector_Block*)((unsigned int)metadata + i*METADATA_BLOCK_SIZE_BYTES), METADATA_BLOCK_SIZE_BYTES);
+
+  /* half of the blocks back the mark task pools, the other half the root/rem set pools */
+  unsigned int num_tasks = num_blocks >> 1;
+  gc_metadata.free_task_pool = sync_pool_create(num_tasks);
+  for(i=0; i<num_tasks; i++)
+    pool_put_entry(gc_metadata.free_task_pool, (void*)((unsigned int)metadata + i*METADATA_BLOCK_SIZE_BYTES));
+  gc_metadata.mark_task_pool = sync_pool_create(num_tasks);
+
+  unsigned int num_sets = num_blocks >> 1;
+  gc_metadata.free_set_pool = sync_pool_create(num_sets);
+  /* initialize free rootset pool so that mutators can use them */
+  for(; i<num_blocks; i++)
+    pool_put_entry(gc_metadata.free_set_pool, (void*)((unsigned int)metadata + i*METADATA_BLOCK_SIZE_BYTES));
+
+  gc_metadata.gc_rootset_pool = sync_pool_create(num_sets);
+  gc_metadata.mutator_remset_pool = sync_pool_create(num_sets);
+  gc_metadata.collector_remset_pool = sync_pool_create(num_sets);
+  gc_metadata.collector_repset_pool = sync_pool_create(num_sets);
+
+  gc->metadata = &gc_metadata;
+ return;
+}
+
+void gc_metadata_destruct(GC* gc)
+{
+ GC_Metadata* metadata = gc->metadata;
+ sync_pool_destruct(metadata->free_task_pool);
+ sync_pool_destruct(metadata->mark_task_pool);
+
+ sync_pool_destruct(metadata->free_set_pool);
+ sync_pool_destruct(metadata->gc_rootset_pool);
+ sync_pool_destruct(metadata->mutator_remset_pool);
+ sync_pool_destruct(metadata->collector_remset_pool);
+ sync_pool_destruct(metadata->collector_repset_pool);
+
+ STD_FREE(metadata->heap_start);
+ gc->metadata = NULL;
+}
+
+void gc_metadata_reset(GC* gc)
+{
+ GC_Metadata* metadata = gc->metadata;
+ Pool* gc_rootset_pool = metadata->gc_rootset_pool;
+ Pool* mutator_remset_pool = metadata->mutator_remset_pool;
+ Pool* collector_remset_pool = metadata->collector_remset_pool;
+ Pool* free_set_pool = metadata->free_set_pool;
+
+ Vector_Block* root_set = NULL;
+
+ /* put back last rootset block */
+ pool_put_entry(gc_rootset_pool, gc->root_set);
+ gc->root_set = NULL;
+
+ if(!gc_requires_barriers()) return;
+
+ Mutator *mutator = gc->mutator_list;
+  while (mutator) {
+    pool_put_entry(mutator_remset_pool, mutator->rem_set);
+    mutator->rem_set = NULL;
+    mutator = mutator->next;
+  }
+
+  for(unsigned int i=0; i<gc->num_collectors; i++){
+ Collector* collector = gc->collectors[i];
+ pool_put_entry(collector_remset_pool, collector->rem_set);
+ collector->rem_set = NULL;
+ }
+
+ if( gc->collect_kind == MAJOR_COLLECTION ){
+ /* all the remsets are useless now */
+ /* clean and put back mutator remsets */
+ root_set = pool_get_entry( mutator_remset_pool );
+ while(root_set){
+ vector_block_clear(root_set);
+ pool_put_entry(free_set_pool, root_set);
+ root_set = pool_get_entry( mutator_remset_pool );
+ }
+
+ /* clean and put back collector remsets */
+ root_set = pool_get_entry( collector_remset_pool );
+ while(root_set){
+ vector_block_clear(root_set);
+ pool_put_entry(free_set_pool, root_set);
+ root_set = pool_get_entry( collector_remset_pool );
+ }
+
+ }else{ /* MINOR_COLLECTION */
+ /* all the remsets are put into the shared pool */
+ root_set = pool_get_entry( mutator_remset_pool );
+ while(root_set){
+ pool_put_entry(gc_rootset_pool, root_set);
+ root_set = pool_get_entry( mutator_remset_pool );
+ }
+
+ /* put back collector remsets */
+ root_set = pool_get_entry( collector_remset_pool );
+ while(root_set){
+ pool_put_entry(gc_rootset_pool, root_set);
+ root_set = pool_get_entry( collector_remset_pool );
+ }
+ }
+
+ return;
+
+}
+
+void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_ref)
+{
+ Vector_Block* root_set = mutator->rem_set;
+ vector_block_add_entry(root_set, (unsigned int)p_ref);
+
+ if( !vector_block_is_full(root_set)) return;
+
+ pool_put_entry(gc_metadata.mutator_remset_pool, root_set);
+ mutator->rem_set = pool_get_entry(gc_metadata.free_set_pool);
+}
+
+void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+ assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address());
+
+ Vector_Block* root_set = collector->rep_set;
+ vector_block_add_entry(root_set, (unsigned int)p_ref);
+
+ if( !vector_block_is_full(root_set)) return;
+
+ pool_put_entry(gc_metadata.collector_repset_pool, root_set);
+ collector->rep_set = pool_get_entry(gc_metadata.free_set_pool);
+}
+
+void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+ Vector_Block* root_set = collector->rem_set;
+ vector_block_add_entry(root_set, (unsigned int)p_ref);
+
+ if( !vector_block_is_full(root_set)) return;
+
+ pool_put_entry(gc_metadata.collector_remset_pool, root_set);
+ collector->rem_set = pool_get_entry(gc_metadata.free_set_pool);
+}
+
+void collector_marktask_add_entry(Collector* collector, Partial_Reveal_Object* p_obj)
+{
+ assert( p_obj>= gc_heap_base_address() && p_obj < gc_heap_ceiling_address());
+
+ Vector_Block* mark_task = (Vector_Block*)collector->mark_stack;
+ vector_block_add_entry(mark_task, (unsigned int)p_obj);
+
+ if( !vector_block_is_full(mark_task)) return;
+
+ pool_put_entry(gc_metadata.mark_task_pool, mark_task);
+ collector->mark_stack = (MarkStack*)pool_get_entry(gc_metadata.free_task_pool);
+}
+
+void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref)
+{
+ assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address());
+
+ Vector_Block* root_set = gc->root_set;
+ vector_block_add_entry(root_set, (unsigned int)p_ref);
+
+ if( !vector_block_is_full(root_set)) return;
+
+ pool_put_entry(gc_metadata.gc_rootset_pool, root_set);
+ gc->root_set = pool_get_entry(gc_metadata.free_set_pool);
+}
+
+
+static void gc_update_repointed_sets(GC* gc, Pool* pool)
+{
+ GC_Metadata* metadata = gc->metadata;
+
+ /* NOTE:: this is destructive to the root sets. */
+ Vector_Block* root_set = pool_get_entry(pool);
+
+ while(root_set){
+ unsigned int* iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+
+ Partial_Reveal_Object* p_obj = *p_ref;
+ /* For repset, this check is unnecessary, since all slots are repointed; otherwise
+ they will not be recorded. For root set, it is possible to point to LOS or other
+ non-moved space. */
+#ifdef _DEBUG
+ if( pool != metadata->gc_rootset_pool)
+ assert(obj_is_forwarded_in_obj_info(p_obj));
+ else
+#endif
+ if(!obj_is_forwarded_in_obj_info(p_obj)) continue;
+ Partial_Reveal_Object* p_target_obj = get_forwarding_pointer_in_obj_info(p_obj);
+ *p_ref = p_target_obj;
+ }
+ vector_block_clear(root_set);
+ pool_put_entry(metadata->free_set_pool, root_set);
+ root_set = pool_get_entry(pool);
+ }
+
+ return;
+}
+
+void update_rootset_interior_pointer();
+
+void gc_update_repointed_refs(Collector* collector)
+{
+ GC* gc = collector->gc;
+ GC_Metadata* metadata = gc->metadata;
+ gc_update_repointed_sets(gc, metadata->gc_rootset_pool);
+ gc_update_repointed_sets(gc, metadata->collector_repset_pool);
+ update_rootset_interior_pointer();
+
+ return;
+}
+
+void gc_reset_rootset(GC* gc)
+{
+ gc->root_set = pool_get_entry(gc_metadata.free_set_pool);
+ return;
+}
+
+
Index: vm/gc_gen/src/common/gc_metadata.h
===================================================================
--- vm/gc_gen/src/common/gc_metadata.h (revision 0)
+++ vm/gc_gen/src/common/gc_metadata.h (revision 0)
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/25
+ */
+#ifndef _GC_METADATA_H_
+#define _GC_METADATA_H_
+
+#include "gc_common.h"
+#include "../utils/vector_block.h"
+#include "../utils/sync_pool.h"
+
+typedef struct GC_Metadata{
+ void* heap_start;
+ void* heap_end;
+
+ Pool* free_task_pool; /* list of free buffers for mark tasks */
+ Pool* mark_task_pool; /* list of mark tasks */
+
+ /* FIXME:: the mutator remset pool can be merged with the rootset pool*/
+ Pool* free_set_pool; /* list of free buffers for rootsets remsets */
+ Pool* gc_rootset_pool; /* list of root sets for enumeration */
+ Pool* mutator_remset_pool; /* list of remsets generated by app during execution */
+ Pool* collector_remset_pool; /* list of remsets generated by gc during collection */
+ Pool* collector_repset_pool; /* list of repointed ref slot sets */
+
+}GC_Metadata;
+
+void gc_metadata_initialize(GC* gc);
+void gc_metadata_destruct(GC* gc);
+void gc_metadata_reset(GC* gc);
+
+void gc_reset_rootset(GC* gc);
+void gc_update_repointed_refs(Collector* collector);
+
+void collector_marktask_add_entry(Collector* collector, Partial_Reveal_Object* p_obj);
+
+void mutator_remset_add_entry(Mutator* mutator, Partial_Reveal_Object** p_slot);
+void collector_remset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot);
+void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_slot);
+
+void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_slot);
+
+#endif /* #ifndef _GC_METADATA_H_ */
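
The metadata design declared above replaces per-thread STL vectors with fixed-size Vector_Blocks handed out from shared pools: a producer fills its private block and, once the block is full, publishes it to the shared pool and grabs a fresh buffer from the free pool. The single-threaded sketch below shows just that protocol; the Block/Pool stand-ins are plain STL containers, not the patch's Vector_Block and sync pool, and BLOCK_CAPACITY/add_entry are illustrative names.

    #include <cassert>
    #include <cstddef>
    #include <deque>
    #include <vector>

    typedef std::vector<void*> Block;   /* stand-in for Vector_Block */
    typedef std::deque<Block*> Pool;    /* stand-in for a sync pool  */

    const size_t BLOCK_CAPACITY = 4;    /* tiny, just for the demo */

    /* mirrors mutator_remset_add_entry(): append, and when the block is full,
       publish it to the shared pool and take a fresh one from the free pool */
    static void add_entry(Block*& cur, Pool& free_pool, Pool& shared_pool, void* entry)
    {
      cur->push_back(entry);
      if (cur->size() < BLOCK_CAPACITY) return;
      shared_pool.push_back(cur);
      cur = free_pool.front(); free_pool.pop_front();
    }

    int main()
    {
      Pool free_pool, shared_pool;
      for (int i = 0; i < 3; i++) free_pool.push_back(new Block());
      Block* cur = free_pool.front(); free_pool.pop_front();

      int dummy[10];
      for (int i = 0; i < 10; i++) add_entry(cur, free_pool, shared_pool, &dummy[i]);

      assert(shared_pool.size() == 2);  /* two full blocks were published */
      assert(cur->size() == 2);         /* the partial block stays private */
      return 0;                         /* blocks are leaked here for brevity */
    }
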
Index: vm/gc_gen/src/common/gc_platform.h
===================================================================
--- vm/gc_gen/src/common/gc_platform.h (revision 472408)
+++ vm/gc_gen/src/common/gc_platform.h (working copy)
@@ -29,7 +29,7 @@
#define USEC_PER_SEC INT64_C(1000000)
-#define VmThreadHandle void*
+#define VmThreadHandle void*
#define VmEventHandle hysem_t
#define THREAD_OK TM_ERROR_NONE
@@ -71,27 +71,32 @@
inline uint32 atomic_cas32(volatile apr_uint32_t *mem,
apr_uint32_t swap,
apr_uint32_t cmp) {
- return (uint32)apr_atomic_cas32(mem, swap, cmp);
+ return (uint32)apr_atomic_cas32(mem, swap, cmp);
}
-inline Boolean pool_create(apr_pool_t **newpool, apr_pool_t *parent) {
- return (Boolean)apr_pool_create(newpool, parent);
+inline uint32 atomic_inc32(volatile apr_uint32_t *mem){
+ return (uint32)apr_atomic_inc32(mem);
}
-inline void pool_destroy(apr_pool_t *p) {
- apr_pool_destroy(p);
+inline uint32 atomic_dec32(volatile apr_uint32_t *mem){
+ return (uint32)apr_atomic_dec32(mem);
}
-inline uint32 atomic_inc32(volatile apr_uint32_t *mem) {
- return (uint32)apr_atomic_inc32(mem);
+inline uint32 atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val) {
+ return (uint32)apr_atomic_add32(mem, val);
}
-inline uint32 atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val) {
- return (uint32)apr_atomic_add32(mem, val);
+inline Boolean pool_create(apr_pool_t **newpool, apr_pool_t *parent) {
+ return (Boolean)apr_pool_create(newpool, parent);
}
+inline void pool_destroy(apr_pool_t *p) {
+ apr_pool_destroy(p);
+}
+
+
inline int64 time_now() {
- return apr_time_now();
+ return apr_time_now();
}
typedef volatile unsigned int SpinLock;
@@ -102,7 +107,7 @@
};
#define try_lock(x) (!atomic_cas32(&(x), LOCKED, FREE_LOCK))
-#define lock(x) while( !try_lock(x)){ while( x==LOCKED );}
+#define lock(x) while( !try_lock(x)){ while( x==LOCKED );}
#define unlock(x) do{ x = FREE_LOCK;}while(0)
#endif //_GC_PLATFORM_H_
Index: vm/gc_gen/src/common/mark_scan_par.cpp
===================================================================
--- vm/gc_gen/src/common/mark_scan_par.cpp (revision 0)
+++ vm/gc_gen/src/common/mark_scan_par.cpp (revision 0)
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "gc_metadata.h"
+#include "../thread/collector.h"
+#include "../gen/gen.h"
+
+static void scan_slot_par(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+ Partial_Reveal_Object* p_obj = *p_ref;
+ if(p_obj==NULL) return;
+
+ Space* obj_space = space_of_addr(collector->gc, p_obj);
+
+ /* if obj to be moved, its ref slot needs remembering for later update */
+ if(obj_space->move_object)
+ collector_repset_add_entry(collector, p_ref);
+
+ if(obj_space->mark_object_func(obj_space, p_obj))
+ collector_marktask_add_entry(collector, p_obj);
+
+ return;
+}
+
+static void scan_object_par(Collector* collector, Partial_Reveal_Object *p_obj)
+{
+ if( !object_has_slots(p_obj) ) return;
+
+ /* scan array object */
+ if (object_is_array(p_obj)) {
+ Partial_Reveal_Object* array = p_obj;
+ assert(!obj_is_primitive_array(array));
+
+ int32 array_length = vector_get_length((Vector_Handle) array);
+ for (int i = 0; i < array_length; i++) {
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
+ scan_slot_par(collector, p_ref);
+ }
+ return;
+ }
+
+ /* scan non-array object */
+ int *offset_scanner = init_object_scanner(p_obj);
+ while (true) {
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
+ if (p_ref == NULL) break; /* terminating ref slot */
+
+ scan_slot_par(collector, p_ref);
+ offset_scanner = offset_next_ref(offset_scanner);
+ }
+
+ return;
+}
+
+extern void scan_object_seq(Collector*, Partial_Reveal_Object *);
+
+/* for marking phase termination detection */
+static volatile unsigned int num_finished_collectors = 0;
+
+/* NOTE:: Only marking in object header is idempotent */
+void mark_scan_heap_par(Collector* collector)
+{
+ GC* gc = collector->gc;
+ /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
+ unsigned int num_active_collectors = gc->num_active_collectors;
+ atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
+
+ GC_Metadata* metadata = gc->metadata;
+
+ collector->mark_stack = (MarkStack*)pool_get_entry(metadata->free_task_pool);
+
+ Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+ /* first step: copy all root objects to mark tasks.
+ FIXME:: can be done sequentially before coming here to eliminate atomic ops */
+ while(root_set){
+ unsigned int* iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+
+ Partial_Reveal_Object* p_obj = *p_ref;
+ assert(p_obj != NULL); /* root ref can't be NULL */
+ /* we have to mark the object before putting it into the mark task, because
+    two slots may contain the same object, so it would be scanned twice and
+    its ref slots recorded twice. The problem occurs when such a ref slot is
+    updated the first time with the new position: at the second update the
+    value in the slot is no longer the old position as expected.
+    This can be worked around if we want.
+ */
+ Space* space = space_of_addr(gc, p_obj);
+ if( !space->mark_object_func(space, p_obj) ) continue;
+
+ collector_marktask_add_entry(collector, p_obj);
+ }
+ root_set = pool_iterator_next(metadata->gc_rootset_pool);
+ }
+
+ pool_put_entry(metadata->mark_task_pool, collector->mark_stack);
+
+ /* second step: iterate over the mark tasks and scan objects */
+ /* get a task buf to push new tasks */
+ collector->mark_stack = (MarkStack*)pool_get_entry(metadata->free_task_pool);
+
+retry:
+ Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
+ while(mark_task){
+ unsigned int* iter = vector_block_iterator_init(mark_task);
+ while(!vector_block_iterator_end(mark_task,iter)){
+ Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
+ iter = vector_block_iterator_advance(mark_task,iter);
+
+ scan_object_par(collector, p_obj);
+ }
+ /* finished this task; put it back to the free pool and grab another */
+ vector_block_clear(mark_task);
+ pool_put_entry(metadata->free_task_pool, mark_task);
+ mark_task = pool_get_entry(metadata->mark_task_pool);
+ }
+
+ /* termination detection. This is also a barrier.
+    NOTE:: actually we don't need this complexity. We could simply
+    spin waiting on num_finished_collectors, because each newly generated
+    task is eventually processed by the collector that generated it.
+    So the code below is only for load balancing. */
+ atomic_inc32(&num_finished_collectors);
+ while(num_finished_collectors != num_active_collectors){
+ if( !pool_is_empty(metadata->mark_task_pool)){
+ atomic_dec32(&num_finished_collectors);
+ goto retry;
+ }
+ }
+
+ /* up to now, we do not have any tasks in task_pool, but
+    each collector has remaining tasks in its local mark_stack. */
+
+ /* Let's process the remaining tasks.
+    NOTE:: this is the key difference from work-stealing, which uses the
+    same stack for both working and sharing, so it has no problem with
+    tasks remaining in the shared stack. */
+
+ /* to simplify the processing, we fall back to a single stack for scanning
+    the remaining objects. The assumption is that only a limited number of
+    tasks remain, so there is no need to share them.
+    FIXME:: a better way is to halve the task block size repeatedly until it
+    becomes one, at which point the collectors effectively share a single stack */
+
+ mark_task = (Vector_Block*)collector->mark_stack;
+ MarkStack* mark_stack = new MarkStack();
+
+ unsigned int* iter = vector_block_iterator_init(mark_task);
+ while(!vector_block_iterator_end(mark_task,iter)){
+ Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
+ iter = vector_block_iterator_advance(mark_task,iter);
+ mark_stack->push(p_obj);
+ }
+ /* put back the last task to the free pool */
+ vector_block_clear(mark_task);
+ pool_put_entry(metadata->free_task_pool, mark_task);
+
+ collector->mark_stack = mark_stack;
+ while(!mark_stack->empty()){
+ Partial_Reveal_Object* p_obj = mark_stack->top();
+ mark_stack->pop();
+ scan_object_seq(collector, p_obj);
+ }
+
+ delete mark_stack;
+ collector->mark_stack = NULL;
+
+ /* put back last repointed refs set recorded during marking */
+ pool_put_entry(metadata->collector_repset_pool, collector->rep_set);
+ collector->rep_set = NULL;
+
+ return;
+}
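
The termination protocol at the end of mark_scan_heap_par() works like this: a collector that runs out of shared tasks increments a counter and spins; if new tasks show up in the shared pool before all collectors have arrived, it decrements the counter and goes back to work. The compressed sketch below shows just that hand-shake; C++11 atomics stand in for the patch's atomic_inc32/atomic_dec32, and task_pool_is_empty is a placeholder for !pool_is_empty(metadata->mark_task_pool). The real code also resets the counter before the phase starts.

    #include <atomic>

    static std::atomic<unsigned int> num_finished;  /* one counter per collection */

    /* returns once every collector agrees the shared task pool is drained */
    bool wait_for_marking_termination(unsigned int num_active_collectors,
                                      bool (*task_pool_is_empty)())
    {
    retry:
      /* ...drain the shared task pool here... */
      num_finished.fetch_add(1);
      while (num_finished.load() != num_active_collectors) {
        if (!task_pool_is_empty()) {
          /* somebody published new work: step out of the barrier and help */
          num_finished.fetch_sub(1);
          goto retry;
        }
      }
      return true;
    }

    static bool always_empty() { return true; }

    int main()
    {
      /* trivial single-collector run: the pool is empty, so the barrier exits at once */
      return wait_for_marking_termination(1, always_empty) ? 0 : 1;
    }
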
Index: vm/gc_gen/src/common/mark_scan_seq.cpp
===================================================================
--- vm/gc_gen/src/common/mark_scan_seq.cpp (revision 0)
+++ vm/gc_gen/src/common/mark_scan_seq.cpp (revision 0)
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#include "gc_metadata.h"
+#include "../thread/collector.h"
+#include "../gen/gen.h"
+
+static void scan_slot_seq(Collector* collector, Partial_Reveal_Object** p_ref)
+{
+ Partial_Reveal_Object* p_obj = *p_ref;
+ if(p_obj==NULL) return;
+
+ MarkStack* mark_stack = (MarkStack*)collector->mark_stack;
+ Space* obj_space = space_of_addr(collector->gc, p_obj);
+
+ /* if obj to be moved, its ref slot needs remembering for later update */
+ if(obj_space->move_object)
+ collector_repset_add_entry(collector, p_ref);
+
+ if(obj_space->mark_object_func(obj_space, p_obj))
+ mark_stack->push(p_obj);
+
+ return;
+}
+
+void scan_object_seq(Collector* collector, Partial_Reveal_Object *p_obj)
+{
+ if( !object_has_slots(p_obj) ) return;
+
+ /* scan array object */
+ if (object_is_array(p_obj)) {
+ Partial_Reveal_Object* array = p_obj;
+ assert(!obj_is_primitive_array(array));
+
+ int32 array_length = vector_get_length((Vector_Handle) array);
+ for (int i = 0; i < array_length; i++) {
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i);
+ scan_slot_seq(collector, p_ref);
+ }
+ return;
+ }
+
+ /* scan non-array object */
+ int *offset_scanner = init_object_scanner(p_obj);
+ while (true) {
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
+ if (p_ref == NULL) break; /* terminating ref slot */
+
+ scan_slot_seq(collector, p_ref);
+ offset_scanner = offset_next_ref(offset_scanner);
+ }
+
+ return;
+}
+
+/* NOTE:: Only marking in object header is idempotent */
+void mark_scan_heap_seq(Collector* collector)
+{
+ GC* gc = collector->gc;
+ MarkStack* mark_stack = new MarkStack();
+ collector->mark_stack = mark_stack;
+
+ GC_Metadata* metadata = gc->metadata;
+
+ pool_iterator_init(metadata->gc_rootset_pool);
+ Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+ while(root_set){
+ unsigned int* iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+
+ Partial_Reveal_Object* p_obj = *p_ref;
+ assert(p_obj != NULL); /* root ref can't be NULL */
+
+ Space* space = space_of_addr(collector->gc, p_obj);
+ if( !space->mark_object_func(space, p_obj) ) continue;
+ mark_stack->push(p_obj);
+ }
+ root_set = pool_iterator_next(metadata->gc_rootset_pool);
+ }
+
+ while(!mark_stack->empty()){
+ Partial_Reveal_Object* p_obj = mark_stack->top();
+ mark_stack->pop();
+ scan_object_seq(collector, p_obj);
+ }
+
+ return;
+}
Index: vm/gc_gen/src/gen/gc_for_barrier.cpp
===================================================================
--- vm/gc_gen/src/gen/gc_for_barrier.cpp (revision 472408)
+++ vm/gc_gen/src/gen/gc_for_barrier.cpp (working copy)
@@ -24,7 +24,7 @@
/* All the write barrier interfaces need cleanup */
-static Boolean NEED_BARRIER = TRUE;
+Boolean NEED_BARRIER = FALSE;
Boolean gc_requires_barriers()
{ return NEED_BARRIER; }
@@ -38,7 +38,7 @@
if( address_belongs_to_nursery((void *)p_target, gc) &&
!address_belongs_to_nursery((void *)p_slot, gc))
{
- mutator->remslot->push_back((Partial_Reveal_Object **)p_slot);
+ mutator_remset_add_entry(mutator, (Partial_Reveal_Object**)p_slot);
}
}
@@ -58,7 +58,7 @@
for (int i = 0; i < array_length; i++) {
p_slot = (Partial_Reveal_Object **)vector_get_element_address_ref((Vector_Handle) array, i);
if( *p_slot != NULL && address_belongs_to_nursery((void *)*p_slot, gc)){
- mutator->remslot->push_back(p_slot);
+ mutator_remset_add_entry(mutator, p_slot);
}
}
return;
@@ -71,7 +71,7 @@
p_slot = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj);
if (p_slot == NULL) break;
if( address_belongs_to_nursery((void *)*p_slot, gc)){
- mutator->remslot->push_back(p_slot);
+ mutator_remset_add_entry(mutator, p_slot);
}
offset_scanner = offset_next_ref(offset_scanner);
}
@@ -96,12 +96,15 @@
void gc_heap_slot_write_ref (Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
{
*p_slot = p_target;
+
if( !NEED_BARRIER ) return;
gc_slot_write_barrier(p_slot, p_target);
}
-/* this is used for global object update, e.g., strings. Since globals are roots, no barrier here */
+/* this is used for global object update, e.g., strings. */
void gc_heap_write_global_slot(Managed_Object_Handle *p_slot,Managed_Object_Handle p_target)
{
*p_slot = p_target;
+
+ /* Since globals are roots, no barrier here */
}
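
The slot write barrier above only remembers a slot when the store creates an old-to-young pointer: the target lives in the nursery while the slot itself does not. A minimal sketch of that predicate follows; the nursery bounds, in_nursery, and needs_remembering are hypothetical names for the demo, standing in for address_belongs_to_nursery() and the remset recording.

    #include <assert.h>

    static char heap[1024];                       /* fake heap for the demo */
    static void* nursery_start = &heap[0];
    static void* nursery_end   = &heap[512];

    static int in_nursery(void* addr)
    { return addr >= nursery_start && addr < nursery_end; }

    /* mirrors gc_slot_write_barrier(): remember p_slot only for old->young refs */
    static int needs_remembering(void** p_slot, void* p_target)
    { return in_nursery(p_target) && !in_nursery((void*)p_slot); }

    int main()
    {
      void** old_slot   = (void**)&heap[600];   /* slot in mature space */
      void** young_slot = (void**)&heap[16];    /* slot in nursery */
      void*  young_obj  = &heap[32];
      void*  old_obj    = &heap[700];

      assert( needs_remembering(old_slot,   young_obj));  /* old -> young: remember */
      assert(!needs_remembering(young_slot, young_obj));  /* young -> young: skip */
      assert(!needs_remembering(old_slot,   old_obj));    /* old -> old: skip */
      return 0;
    }
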
Index: vm/gc_gen/src/gen/gen.cpp
===================================================================
--- vm/gc_gen/src/gen/gen.cpp (revision 472408)
+++ vm/gc_gen/src/gen/gen.cpp (working copy)
@@ -25,9 +25,6 @@
#include "../thread/collector.h"
#include "../verify/verify_live_heap.h"
-/* heap size limit is not interesting. only for manual tuning purpose */
-unsigned int min_heap_size_bytes = 32 * MB;
-unsigned int max_heap_size_bytes = 128 * MB;
/* fspace size limit is not interesting. only for manual tuning purpose */
unsigned int min_nos_size_bytes = 2 * MB;
@@ -41,18 +38,13 @@
void gc_gen_initialize(GC_Gen *gc_gen, unsigned int min_heap_size, unsigned int max_heap_size)
{
- assert(gc_gen);
+ assert(gc_gen);
assert(max_heap_size <= max_heap_size_bytes);
- /* FIXME:: we need let virtual space to include unmapped region.
- Heuristically for Nursery+MatureFrom+MatureTo(unmapped)+LOS(mapped+unmapped),
- we need almost half more than the user specified virtual space size.
- That's why we have the below. */
- max_heap_size += max_heap_size>>1;
- min_heap_size = round_up_to_size(min_heap_size, GC_BLOCK_SIZE_BYTES);
- max_heap_size = round_up_to_size(max_heap_size, GC_BLOCK_SIZE_BYTES);
+ min_heap_size = round_up_to_size(min_heap_size, GC_BLOCK_SIZE_BYTES);
+ max_heap_size = round_up_to_size(max_heap_size, GC_BLOCK_SIZE_BYTES);
- gc_gen_get_system_info(gc_gen);
+ gc_gen_get_system_info(gc_gen);
void *reserved_base = NULL;
@@ -80,29 +72,31 @@
gc_gen->num_collections = 0;
/* heuristic nos + mos + LOS */
+ unsigned int los_size = max_heap_size >> 2;
+ gc_los_initialize(gc_gen, reserved_base, los_size);
+
+ unsigned int mos_size = max_heap_size >> 1;
+ reserved_base = (void*)((unsigned int)reserved_base + los_size);
+ gc_mos_initialize(gc_gen, reserved_base, mos_size);
+
unsigned int nos_size = max_heap_size >> 2;
assert(nos_size > min_nos_size_bytes);
- gc_nos_initialize(gc_gen, reserved_base, nos_size);
+ reserved_base = (void*)((unsigned int)reserved_base + mos_size);
+ gc_nos_initialize(gc_gen, reserved_base, nos_size);
- unsigned int mos_size = max_heap_size >> 1;
- reserved_base = (void*)((unsigned int)reserved_base + nos_size);
- gc_mos_initialize(gc_gen, reserved_base, mos_size);
+ /* connect mos and nos, so that they can be compacted as one space */
+ Blocked_Space* mos = (Blocked_Space*)gc_get_mos(gc_gen);
+ Blocked_Space* nos = (Blocked_Space*)gc_get_nos(gc_gen);
+ Block_Header* mos_last_block = (Block_Header*)&mos->blocks[mos->num_managed_blocks-1];
+ Block_Header* nos_first_block = (Block_Header*)&nos->blocks[0];
+ mos_last_block->next = nos_first_block;
+ assert(space_heap_end((Space*)mos) == space_heap_start((Space*)nos));
- unsigned int los_size = max_heap_size >> 2;
- reserved_base = (void*)((unsigned int)gc_gen->heap_end - los_size);
- gc_los_initialize(gc_gen, reserved_base, los_size);
-
gc_gen->committed_heap_size = space_committed_size((Space*)gc_gen->nos) +
space_committed_size((Space*)gc_gen->mos) +
space_committed_size((Space*)gc_gen->los);
- gc_init_rootset((GC*)gc_gen);
-
- gc_gen->mutator_list = NULL;
- gc_gen->mutator_list_lock = FREE_LOCK;
-
- gc_gen->num_mutators = 0;
-
+ gc_metadata_initialize((GC*)gc_gen); /* root set and mark stack */
collector_initialize((GC*)gc_gen);
if( verify_live_heap ){ /* for live heap verify*/
@@ -114,25 +108,25 @@
void gc_gen_destruct(GC_Gen *gc_gen)
{
- gc_nos_destruct(gc_gen);
- gc_gen->nos = NULL;
-
- gc_mos_destruct(gc_gen);
- gc_gen->mos = NULL;
+ gc_nos_destruct(gc_gen);
+ gc_gen->nos = NULL;
+
+ gc_mos_destruct(gc_gen);
+ gc_gen->mos = NULL;
- gc_los_destruct(gc_gen);
+ gc_los_destruct(gc_gen);
gc_gen->los = NULL;
+ gc_metadata_destruct((GC*)gc_gen); /* root set and mark stack */
collector_destruct((GC*)gc_gen);
if( verify_live_heap ){
gc_terminate_heap_verification((GC*)gc_gen);
}
- STD_FREE(gc_gen);
+ STD_FREE(gc_gen);
}
-
Boolean major_collection_needed(GC_Gen* gc)
{
return mspace_free_memory_size(gc->mos) < fspace_used_memory_size(gc->nos);
@@ -149,111 +143,13 @@
void gc_set_los(GC_Gen* gc, Space* los){ gc->los = (Lspace*)los;}
unsigned int gc_get_processor_num(GC_Gen* gc){ return gc->_num_processors;}
-static void gc_gen_update_rootset(GC* gc)
+void reset_mutator_allocation_context(GC_Gen* gc)
{
- RootSet* root_set = gc->root_set;
- /* update refs in root set after moving collection */
- for(unsigned int i=0; i < root_set->size(); i++){
- Partial_Reveal_Object** p_ref = (*root_set)[i];
- Partial_Reveal_Object* p_obj = *p_ref;
- assert(p_obj); /* root ref should never by NULL*/
- /* FIXME:: this should be reconsidered: forwarded in vt or obj_info */
- if(!obj_is_forwarded_in_obj_info(p_obj)){
- /* if an obj is not moved, it must be in LOS or otherwise in MOS for MINOR_COLLECTION */
-#ifdef _DEBUG
- if( gc->collect_kind == MINOR_COLLECTION )
- assert( !obj_belongs_to_space(p_obj, gc_get_nos((GC_Gen*)gc)) );
- else
- assert( obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc)) );
-#endif
- continue;
- }
- Partial_Reveal_Object* p_target_obj = get_forwarding_pointer_in_obj_info(p_obj);
- *p_ref = p_target_obj;
- }
-
- return;
-}
-
-void update_rootset_interior_pointer();
-
-void gc_gen_update_repointed_refs(Collector* collector)
-{
- GC_Gen* gc = (GC_Gen*)collector->gc;
- Space* space;
- space = gc_get_nos(gc); space->update_reloc_func(space);
- space = gc_get_mos(gc); space->update_reloc_func(space);
- space = gc_get_los(gc); space->update_reloc_func(space);
-
- gc_gen_update_rootset((GC*)gc);
- update_rootset_interior_pointer();
-
- return;
-}
-
-void gc_preprocess_collector(Collector *collector)
-{
- /* for MAJOR_COLLECTION, all the remsets are useless */
- GC_Gen* gc = (GC_Gen*)collector->gc;
- if( gc->collect_kind == MAJOR_COLLECTION ){
- collector->last_cycle_remset->clear();
- return;
- }
-
- Fspace* fspace = (Fspace*)gc_get_nos(gc);
- fspace->remslot_sets->push_back(collector->last_cycle_remset);
-
- /* this_cycle_remset is ready to be used */
- assert(collector->this_cycle_remset->empty());
-
- return;
-}
-
-void gc_postprocess_collector(Collector *collector)
-{
- /* for MAJOR_COLLECTION we do nothing */
- GC_Gen* gc = (GC_Gen*)collector->gc;
- if( gc->collect_kind == MAJOR_COLLECTION )
- return;
-
- /* for MINOR_COLLECTION */
- /* switch its remsets, this_cycle_remset data kept in space->remslot_sets */
- /* last_cycle_remset was in space->remslot_sets and cleared during collection */
- assert(collector->last_cycle_remset->empty());
-
- RemslotSet* temp_set = collector->this_cycle_remset;
- collector->this_cycle_remset = collector->last_cycle_remset;
- collector->last_cycle_remset = temp_set;
-
- return;
-}
-
-void gc_preprocess_mutator(GC_Gen* gc)
-{
Mutator *mutator = gc->mutator_list;
- Fspace* fspace = (Fspace*)mutator->alloc_space;
- /* for MAJOR_COLLECTION, all the remsets are useless */
while (mutator) {
- if(gc->collect_kind == MAJOR_COLLECTION){
- mutator->remslot->clear();
- }else{
- fspace->remslot_sets->push_back(mutator->remslot);
- }
- mutator = mutator->next;
- }
-
- return;
-} /////////FIXME::: need clear space remsets
-
-void gc_postprocess_mutator(GC_Gen* gc)
-{
- Mutator *mutator = gc->mutator_list;
- while (mutator) {
- assert(mutator->remslot->empty());
alloc_context_reset((Allocator*)mutator);
mutator = mutator->next;
- }
-
+ }
return;
}
@@ -274,9 +170,10 @@
/* Stop the threads and collect the roots. */
gc_reset_rootset((GC*)gc);
vm_enumerate_root_set_all_threads();
-
- gc_preprocess_mutator(gc);
-
+
+ /* reset metadata (all the rootsets and markstack) */
+ gc_metadata_reset((GC*)gc);
+
if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
if(gc->collect_kind == MINOR_COLLECTION){
@@ -306,8 +203,7 @@
if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
- gc_postprocess_mutator(gc);
-
+ reset_mutator_allocation_context(gc);
vm_resume_threads_after();
return;
Index: vm/gc_gen/src/gen/gen.h
===================================================================
--- vm/gc_gen/src/gen/gen.h (revision 472408)
+++ vm/gc_gen/src/gen/gen.h (working copy)
@@ -61,10 +61,12 @@
Collector** collectors;
unsigned int num_collectors;
unsigned int num_active_collectors; /* not all collectors are working */
-
- /* rootsets for collection (FIXME:: should be distributed to collectors) */
- RootSet* root_set;
+
+ /* metadata is the pool for rootset, markstack, etc. */
+ GC_Metadata* metadata;
unsigned int collect_kind; /* MAJOR or MINOR */
+ /* FIXME:: this is wrong! root_set belongs to mutator */
+ Vector_Block* root_set;
/* mem info */
apr_pool_t *aux_pool;
@@ -93,7 +95,6 @@
lspace_free_memory_size(gc->los); }
void gc_gen_reclaim_heap(GC_Gen* gc, unsigned int cause);
-void gc_gen_update_repointed_refs(Collector* collector);
/////////////////////////////////////////////////////////////////////////////////////////
@@ -101,7 +102,7 @@
{ fspace_initialize((GC*)gc, start, nos_size); }
inline void gc_nos_destruct(GC_Gen* gc)
-{ fspace_destruct(gc->nos); }
+{ fspace_destruct(gc->nos); }
inline void gc_mos_initialize(GC_Gen* gc, void* start, unsigned int mos_size)
{ mspace_initialize((GC*)gc, start, mos_size); }
@@ -113,7 +114,7 @@
{ lspace_initialize((GC*)gc, start, los_size); }
inline void gc_los_destruct(GC_Gen* gc)
-{ lspace_destruct(gc->los); }
+{ lspace_destruct(gc->los); }
inline Boolean address_belongs_to_nursery(void* addr, GC_Gen* gc)
{ return address_belongs_to_space(addr, (Space*)gc->nos); }
@@ -123,8 +124,8 @@
inline Space* space_of_addr(GC* gc, void* addr)
{
- if( addr < nos_boundary) return (Space*)((GC_Gen*)gc)->nos;
- if( addr < los_boundary) return (Space*)((GC_Gen*)gc)->mos;
+ if( addr > nos_boundary) return (Space*)((GC_Gen*)gc)->nos;
+ if( addr > los_boundary) return (Space*)((GC_Gen*)gc)->mos;
return (Space*)((GC_Gen*)gc)->los;
}
@@ -139,10 +140,5 @@
void gc_set_los(GC_Gen* gc, Space* los);
unsigned int gc_get_processor_num(GC_Gen* gc);
-void gc_preprocess_mutator(GC_Gen* gc);
-void gc_postprocess_mutator(GC_Gen* gc);
-void gc_preprocess_collector(Collector* collector);
-void gc_postprocess_collector(Collector* collector);
-
#endif /* ifndef _GC_GEN_H_ */
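
With the reordered layout in gen.cpp the three spaces now sit at increasing addresses, LOS at the bottom, then MOS, then NOS, which is why space_of_addr() above flipped its boundary comparisons. The tiny sketch below shows the same top-down dispatch with made-up numeric boundaries; the values, SpaceId names, and the use of >= are illustrative only, while the real code compares raw addresses against los_boundary/nos_boundary.

    #include <assert.h>

    /* illustrative boundaries: [0,100) = LOS, [100,200) = MOS, [200,300) = NOS */
    static const unsigned int los_boundary = 100;  /* start of MOS */
    static const unsigned int nos_boundary = 200;  /* start of NOS */

    enum SpaceId { SPACE_LOS, SPACE_MOS, SPACE_NOS };

    /* same shape as the patched space_of_addr(): highest space checked first */
    static SpaceId space_of(unsigned int addr)
    {
      if (addr >= nos_boundary) return SPACE_NOS;
      if (addr >= los_boundary) return SPACE_MOS;
      return SPACE_LOS;
    }

    int main()
    {
      assert(space_of(250) == SPACE_NOS);
      assert(space_of(150) == SPACE_MOS);
      assert(space_of( 50) == SPACE_LOS);
      return 0;
    }
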
Index: vm/gc_gen/src/mark_compact/mspace.cpp
===================================================================
--- vm/gc_gen/src/mark_compact/mspace.cpp (revision 472408)
+++ vm/gc_gen/src/mark_compact/mspace.cpp (working copy)
@@ -21,14 +21,7 @@
#include "mspace.h"
static void mspace_destruct_blocks(Mspace* mspace)
-{
- Block* blocks = (Block*)mspace->blocks;
- for(unsigned int i=0; i < mspace->num_managed_blocks; i++){
- Block_Header* block = (Block_Header*)&(blocks[i]);
- delete block->reloc_table;
- block->reloc_table = NULL;
- }
-
+{
return;
}
@@ -44,7 +37,6 @@
block->base = block->free;
block->block_idx = i + start_idx;
block->status = BLOCK_FREE;
- block->reloc_table = new SlotVector();
last_block->next = block;
last_block = block;
}
@@ -56,6 +48,7 @@
struct GC_Gen;
extern void gc_set_mos(GC_Gen* gc, Space* space);
+extern Space* gc_get_nos(GC_Gen* gc);
void mspace_initialize(GC* gc, void* start, unsigned int mspace_size)
{
Mspace* mspace = (Mspace*)STD_MALLOC( sizeof(Mspace));
@@ -83,10 +76,7 @@
mspace_init_blocks(mspace);
- mspace->obj_info_map = new ObjectMap();
mspace->mark_object_func = mspace_mark_object;
- mspace->save_reloc_func = mspace_save_reloc;
- mspace->update_reloc_func = mspace_update_reloc;
mspace->move_object = TRUE;
mspace->gc = gc;
Index: vm/gc_gen/src/mark_compact/mspace.h
===================================================================
--- vm/gc_gen/src/mark_compact/mspace.h (revision 472408)
+++ vm/gc_gen/src/mark_compact/mspace.h (working copy)
@@ -35,8 +35,6 @@
GC* gc;
Boolean move_object;
Boolean (*mark_object_func)(Mspace* space, Partial_Reveal_Object* p_obj);
- void (*save_reloc_func)(Mspace* space, Partial_Reveal_Object** p_ref);
- void (*update_reloc_func)(Mspace* space);
/* END of Space --> */
Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */
@@ -49,9 +47,7 @@
unsigned int num_used_blocks;
unsigned int num_managed_blocks;
unsigned int num_total_blocks;
-
- /* during compaction, save non-zero obj_info who's overwritten by forwarding pointer */
- ObjectMap* obj_info_map;
+ /* END of Blocked_Space --> */
}Mspace;
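
The Mspace (and Fspace) definitions keep their leading fields laid out exactly like the new Blocked_Space, which is what lets gen.cpp cast either space to Blocked_Space* when chaining their block lists for compaction. The sketch below shows that struct-prefix overlay idiom in isolation; the *_Demo types and their field subset are illustrative, not the patch's actual layouts, and the cast relies on the two structs sharing a common initial sequence.

    #include <assert.h>
    #include <stddef.h>

    /* generic view: the common block-management prefix */
    typedef struct Blocked_Space_Demo {
      void* heap_start;
      unsigned int num_managed_blocks;
    } Blocked_Space_Demo;

    /* concrete space: repeats the prefix fields in the same order, then adds its own */
    typedef struct Mspace_Demo {
      void* heap_start;
      unsigned int num_managed_blocks;
      unsigned int mspace_private_field;
    } Mspace_Demo;

    int main()
    {
      Mspace_Demo mspace = { (void*)0x10000, 128, 42 };
      /* safe only because the leading members match member-for-member */
      Blocked_Space_Demo* generic = (Blocked_Space_Demo*)&mspace;
      assert(generic->num_managed_blocks == 128);
      assert(offsetof(Mspace_Demo, num_managed_blocks) ==
             offsetof(Blocked_Space_Demo, num_managed_blocks));
      return 0;
    }
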
Index: vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
===================================================================
--- vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (revision 472408)
+++ vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (working copy)
@@ -21,96 +21,201 @@
#include "mspace.h"
#include "../thread/collector.h"
#include "../trace_forward/fspace.h"
+
struct GC_Gen;
Space* gc_get_nos(GC_Gen* gc);
Space* gc_get_mos(GC_Gen* gc);
Space* gc_get_los(GC_Gen* gc);
-static Block_Header* mspace_get_first_compact_block(Mspace* mspace)
-{ return (Block_Header*)mspace->blocks; }
+static void reset_mspace_after_compaction(Mspace* mspace)
+{
+ unsigned int old_num_used = mspace->num_used_blocks;
+ unsigned int new_num_used = mspace->free_block_idx - mspace->first_block_idx;
+ unsigned int num_used = old_num_used>new_num_used? old_num_used:new_num_used;
+
+ Block* blocks = mspace->blocks;
+ unsigned int i;
+ for(i=0; i < num_used; i++){
+ Block_Header* block = (Block_Header*)&(blocks[i]);
+ block_clear_mark_table(block);
+ block->status = BLOCK_USED;
-static Block_Header* mspace_get_next_compact_block(Mspace* mspace, Block_Header* block)
-{ return block->next; }
+ if(i >= new_num_used){
+ block->status = BLOCK_FREE;
+ block->free = GC_BLOCK_BODY(block);
+ }
+ }
+ mspace->num_used_blocks = new_num_used;
+
+ /* we should clear the remaining blocks which are set to be BLOCK_COMPACTED or BLOCK_TARGET */
+ for(; i < mspace->num_managed_blocks; i++){
+ Block_Header* block = (Block_Header*)&(blocks[i]);
+ assert(block->status& (BLOCK_COMPACTED|BLOCK_TARGET));
+ block->status = BLOCK_FREE;
+ }
+}
-static Block_Header* mspace_get_first_target_block(Mspace* mspace)
-{ return (Block_Header*)mspace->blocks; }
+static volatile Block_Header* next_block_for_compact;
+static volatile Block_Header* next_block_for_target;
-static Block_Header* mspace_get_next_target_block(Mspace* mspace, Block_Header* block)
-{ return block->next; }
+static void gc_reset_block_for_collectors(GC* gc, Mspace* mspace)
+{
+ unsigned int free_blk_idx = mspace->free_block_idx;
+  for(unsigned int i=0; i<gc->num_active_collectors; i++){
+ Collector* collector = gc->collectors[i];
+ unsigned int collector_target_idx = collector->cur_target_block->block_idx;
+ if(collector_target_idx > free_blk_idx)
+ free_blk_idx = collector_target_idx;
+ collector->cur_target_block = NULL;
+ collector->cur_compact_block = NULL;
+ }
+ mspace->free_block_idx = free_blk_idx+1;
+ return;
+}
-void mspace_save_reloc(Mspace* mspace, Partial_Reveal_Object** p_ref)
+static void gc_init_block_for_collectors(GC* gc, Mspace* mspace)
{
- Block_Header* block = GC_BLOCK_HEADER(p_ref);
- block->reloc_table->push_back(p_ref);
+ unsigned int i;
+ Block_Header* block;
+  for(i=0; i<gc->num_active_collectors; i++){
+ Collector* collector = gc->collectors[i];
+ block = (Block_Header*)&mspace->blocks[i];
+ collector->cur_target_block = block;
+ collector->cur_compact_block = block;
+ block->status = BLOCK_TARGET;
+ }
+
+ block = (Block_Header*)&mspace->blocks[i];
+ next_block_for_target = block;
+ next_block_for_compact = block;
return;
}
-void mspace_update_reloc(Mspace* mspace)
+static unsigned int gc_collection_result(GC* gc)
{
- SlotVector* reloc_table;
- /* update refs in mspace */
- Block* blocks = mspace->blocks;
- for(unsigned int i=0; i < mspace->num_used_blocks; i++){
- Block_Header* block = (Block_Header*)&(blocks[i]);
- reloc_table = block->reloc_table;
- for(unsigned int j=0; j < reloc_table->size(); j++){
- Partial_Reveal_Object** p_ref = (*reloc_table)[j];
- Partial_Reveal_Object* p_target_obj = get_forwarding_pointer_in_obj_info(*p_ref);
- *p_ref = p_target_obj;
+ unsigned int result = TRUE;
+  for(unsigned i=0; i<gc->num_active_collectors; i++){
+ Collector* collector = gc->collectors[i];
+ result &= collector->result;
+ }
+ return result;
+}
+
+static Block_Header* mspace_get_first_compact_block(Mspace* mspace)
+{ return (Block_Header*)mspace->blocks; }
+
+static Block_Header* mspace_get_first_target_block(Mspace* mspace)
+{ return (Block_Header*)mspace->blocks; }
+
+
+static Block_Header* mspace_get_next_compact_block1(Mspace* mspace, Block_Header* block)
+{ return block->next; }
+
+static Block_Header* mspace_get_next_compact_block(Collector* collector, Mspace* mspace)
+{
+ /* firstly put back the compacted block. If it's not BLOCK_TARGET, it will be set to BLOCK_COMPACTED */
+ unsigned int block_status = collector->cur_compact_block->status;
+ assert( block_status & (BLOCK_IN_COMPACT|BLOCK_TARGET));
+ if( block_status == BLOCK_IN_COMPACT)
+ collector->cur_compact_block->status = BLOCK_COMPACTED;
+
+ Block_Header* cur_compact_block = (Block_Header*)next_block_for_compact;
+
+ while(cur_compact_block != NULL){
+ Block_Header* next_compact_block = cur_compact_block->next;
+
+ Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)&next_block_for_compact, next_compact_block, cur_compact_block);
+ if(temp != cur_compact_block){
+ cur_compact_block = (Block_Header*)next_block_for_compact;
+ continue;
}
- reloc_table->clear();
+    /* got it; set its state to BLOCK_IN_COMPACT. This must be the first time a compactor touches it */
+ block_status = cur_compact_block->status;
+ assert( !(block_status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET)));
+ cur_compact_block->status = BLOCK_IN_COMPACT;
+ collector->cur_compact_block = cur_compact_block;
+ return cur_compact_block;
+
}
-
- return;
-}
+  /* ran out of space blocks for compacting */
+ return NULL;
+}
+static Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace)
+{
+ Block_Header* cur_target_block = (Block_Header*)next_block_for_target;
+  /* first, bump the next_block_for_target global var to the first non-BLOCK_TARGET block.
+     This does not need an atomic op, because only one thread can own next_block_for_target at a time */
+
+ while(cur_target_block->status == BLOCK_TARGET){
+ cur_target_block = cur_target_block->next;
+ }
+ next_block_for_target = cur_target_block;
+
+  /* nos sits above mos, so we can't use a nos block as a compaction target */
+ Block_Header* mspace_heap_end = (Block_Header*)space_heap_end((Space*)mspace);
+ while(cur_target_block < mspace_heap_end){
+ Block_Header* next_target_block = cur_target_block->next;
+ volatile unsigned int* p_block_status = &cur_target_block->status;
+ unsigned int block_status = cur_target_block->status;
+    /* block status has to be BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET,
+       but we only care about BLOCK_COMPACTED blocks, or our own BLOCK_IN_COMPACT block */
+ assert( block_status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET));
+ /* if it is not BLOCK_COMPACTED, let's move on to next */
+ if(block_status != BLOCK_COMPACTED){
+ if(cur_target_block == collector->cur_compact_block){
+ assert( block_status == BLOCK_IN_COMPACT);
+ *p_block_status = BLOCK_TARGET;
+ collector->cur_target_block = cur_target_block;
+ return cur_target_block;
+ }
+ cur_target_block = next_target_block;
+ continue;
+ }
+    /* ok, we found the first BLOCK_COMPACTED block before our own compact block */
+ unsigned int temp = atomic_cas32(p_block_status, BLOCK_TARGET, BLOCK_COMPACTED);
+ if(temp == BLOCK_COMPACTED){
+ collector->cur_target_block = cur_target_block;
+ return cur_target_block;
+ }
+    /* missed it; another collector must have set it to BLOCK_TARGET */
+ assert(temp == BLOCK_TARGET);
+ cur_target_block = next_target_block;
+ }
+  /* mos has run out of space during this major collection */
+ return NULL;
+}
+
Boolean mspace_mark_object(Mspace* mspace, Partial_Reveal_Object *p_obj)
{
+#ifdef _DEBUG
+ if( obj_is_marked_in_vt(p_obj)) return FALSE;
+#endif
+
obj_mark_in_vt(p_obj);
unsigned int obj_word_index = OBJECT_WORD_INDEX_TO_MARKBIT_TABLE(p_obj);
- unsigned int obj_offset_in_word = OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj);
-
+ unsigned int obj_offset_in_word = OBJECT_WORD_OFFSET_IN_MARKBIT_TABLE(p_obj);
+
unsigned int *p_word = &(GC_BLOCK_HEADER(p_obj)->mark_table[obj_word_index]);
 unsigned int word_mask = (1<<obj_offset_in_word);
+ Block_Header* curr_block = collector->cur_compact_block;
+ Block_Header* dest_block = collector->cur_target_block;
void* dest_addr = GC_BLOCK_BODY(dest_block);
@@ -125,17 +230,21 @@
if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){
dest_block->free = dest_addr;
- dest_block = mspace_get_next_target_block(mspace, dest_block);
+ dest_block = mspace_get_next_target_block(collector, mspace);
+ if(dest_block == NULL){
+ collector->result = 0;
+ return FALSE;
+ }
+
dest_addr = GC_BLOCK_BODY(dest_block);
}
assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block));
Obj_Info_Type obj_info = get_obj_info(p_obj);
if( obj_info != 0 ) {
- mspace->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info));
+ collector->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info));
}
- assert( (unsigned int) p_obj >= (unsigned int)dest_addr );
set_forwarding_pointer_in_obj_info(p_obj, dest_addr);
/* FIXME: should use alloc to handle alignment requirement */
@@ -143,52 +252,12 @@
p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);
}
- curr_block = mspace_get_next_compact_block(mspace, curr_block);
+ curr_block = mspace_get_next_compact_block(collector, mspace);
}
-
-
- mspace->free_block_idx = dest_block->block_idx+1;
-
- /* fail to evacuate any room, FIXME:: do nothing at the moment */
- if( mspace->free_block_idx == mspace->first_block_idx + mspace->num_used_blocks)
- return FALSE;
return TRUE;
}
-static void mspace_restore_obj_info(Mspace* mspace)
-{
- ObjectMap* objmap = mspace->obj_info_map;
- ObjectMap::iterator obj_iter;
- for( obj_iter=objmap->begin(); obj_iter!=objmap->end(); obj_iter++){
- Partial_Reveal_Object* p_target_obj = obj_iter->first;
- Obj_Info_Type obj_info = obj_iter->second;
- set_obj_info(p_target_obj, obj_info);
- }
- objmap->clear();
- return;
-}
-
-static void reset_mspace_after_compaction(Mspace* mspace)
-{
- unsigned int old_num_used = mspace->num_used_blocks;
- unsigned int new_num_used = mspace->free_block_idx - mspace->first_block_idx;
- unsigned int num_used = old_num_used>new_num_used? old_num_used:new_num_used;
-
- Block* blocks = mspace->blocks;
- for(unsigned int i=0; i < num_used; i++){
- Block_Header* block = (Block_Header*)&(blocks[i]);
- block_clear_mark_table(block);
- block->status = BLOCK_USED;
-
- if(i >= new_num_used){
- block->status = BLOCK_FREE;
- block->free = GC_BLOCK_BODY(block);
- }
- }
- mspace->num_used_blocks = new_num_used;
-}
-
#include "../verify/verify_live_heap.h"
static void mspace_sliding_compact(Collector* collector, Mspace* mspace)
@@ -218,43 +287,77 @@
p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);
}
- curr_block = mspace_get_next_compact_block(mspace, curr_block);
+ curr_block = mspace_get_next_compact_block1(mspace, curr_block);
}
- mspace_restore_obj_info(mspace);
- reset_mspace_after_compaction(mspace);
-
return;
}
-void gc_gen_update_repointed_refs(Collector* collector);
+void gc_update_repointed_refs(Collector* collector);
+static volatile unsigned int num_marking_collectors = 0;
+static volatile unsigned int num_installing_collectors = 0;
+
static void mark_compact_mspace(Collector* collector)
{
- GC_Gen* gc = (GC_Gen*)collector->gc;
- Mspace* mspace = (Mspace*)gc_get_mos(gc);
- Fspace* fspace = (Fspace*)gc_get_nos(gc);
+ GC* gc = collector->gc;
+ Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
+ Fspace* fspace = (Fspace*)gc_get_nos((GC_Gen*)gc);
- /* FIXME:: Single-threaded mark-compaction for mspace currently */
-
/* Pass 1: mark all live objects in heap, and save all the slots that
have references that are going to be repointed */
- mark_scan_heap(collector);
+ unsigned int num_active_collectors = gc->num_active_collectors;
+ /* Pass 1: mark all live objects in heap, and save all the slots that
+ have references that are going to be repointed */
+ unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
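+ /* phase barrier: the CAS resets the counter left over from the previous collection; each
+    collector increments it after marking, and the last one to arrive does the single-threaded
+    preparation, then bumps it once more to release the collectors spinning below */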
+
+ mark_scan_heap_par(collector);
+
+ old_num = atomic_inc32(&num_marking_collectors);
+ if( ++old_num == num_active_collectors ){
+   /* the last collector to finish marking runs this single-threaded world */
+ /* prepare for next phase */
+ gc_init_block_for_collectors(gc, mspace);
+ /* let other collectors go */
+ num_marking_collectors++;
+ }
+
+ while(num_marking_collectors != num_active_collectors + 1);
+
/* Pass 2: assign target addresses for all to-be-moved objects */
- Boolean ok;
- ok = mspace_compute_object_target(mspace);
- assert(ok); /* free at least one block */
- ok = fspace_compute_object_target(collector, fspace);
- assert(ok); /* FIXME:: throw out-of-memory exception if not ok */
+ atomic_cas32( &num_installing_collectors, 0, num_active_collectors+1);
+
+ mspace_compute_object_target(collector, mspace);
+ old_num = atomic_inc32(&num_installing_collectors);
+ if( ++old_num == num_active_collectors ){
+ /* single thread world */
+ if(!gc_collection_result(gc)){
+ printf("Out of Memory!\n");
+ assert(0); /* mos is out. FIXME:: throw exception */
+ }
+ gc_reset_block_for_collectors(gc, mspace);
+ num_installing_collectors++;
+ }
+
+ while(num_installing_collectors != num_active_collectors + 1);
+
+ /* FIXME:: temporary. let only one thread go forward */
+ if( collector->thread_handle != 0 ) return;
+
/* Pass 3: update all references whose objects are to be moved */
- gc_gen_update_repointed_refs(collector);
+ gc_update_repointed_refs(collector);
/* Pass 4: do the compaction and reset blocks */
+ next_block_for_compact = mspace_get_first_compact_block(mspace);
mspace_sliding_compact(collector, mspace);
- fspace_copy_collect(collector, fspace);
-
+ /* FIXME:: should be collector_restore_obj_info(collector) */
+ gc_restore_obj_info(gc);
+
+ reset_mspace_after_compaction(mspace);
+ reset_fspace_for_allocation(fspace);
+
return;
}
@@ -264,6 +367,8 @@
GC* gc = mspace->gc;
+ pool_iterator_init(gc->metadata->gc_rootset_pool);
+
collector_execute_task(gc, (TaskType)mark_compact_mspace, (Space*)mspace);
return;
Index: vm/gc_gen/src/mark_sweep/lspace.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/lspace.cpp (revision 472408)
+++ vm/gc_gen/src/mark_sweep/lspace.cpp (working copy)
@@ -61,7 +61,7 @@
memset(lspace, 0, sizeof(Lspace));
void* reserved_base = start;
- unsigned int committed_size = lspace_size >> 1;
+ unsigned int committed_size = lspace_size;
int status = port_vmem_commit(&reserved_base, committed_size, gc->allocated_memory);
assert(status == APR_SUCCESS && reserved_base == start);
@@ -77,16 +77,13 @@
lspace->mark_table = (unsigned int*)STD_MALLOC( num_words*BYTES_PER_WORD );
memset(lspace->mark_table, 0, num_words*BYTES_PER_WORD);
- lspace->reloc_table = new SlotVector();
lspace->mark_object_func = lspace_mark_object;
- lspace->save_reloc_func = lspace_save_reloc;
- lspace->update_reloc_func = lspace_update_reloc;
lspace->move_object = FALSE;
lspace->gc = gc;
gc_set_los((GC_Gen*)gc, (Space*)lspace);
- los_boundary = start;
+ los_boundary = lspace->heap_end;
return;
}
@@ -123,33 +120,17 @@
unsigned int word_index = OBJECT_WORD_INDEX_TO_LSPACE_MARKBIT_TABLE(lspace, p_obj);
unsigned int bit_offset_in_word = OBJECT_WORD_OFFSET_IN_LSPACE_MARKBIT_TABLE(lspace, p_obj);
- unsigned int* p_markbits = &(lspace->mark_table[word_index]);
+ unsigned int* p_word = &(lspace->mark_table[word_index]);
  unsigned int word_mask = (1<<bit_offset_in_word);
- lspace->reloc_table->push_back(p_ref);
-}
-
-void lspace_update_reloc(Lspace* lspace)
-{
- SlotVector* reloc_table;
+ unsigned int old_value = *p_word;
+ unsigned int new_value = old_value|word_mask;
- reloc_table = lspace->reloc_table;
- for(unsigned int j=0; j < reloc_table->size(); j++){
- Partial_Reveal_Object** p_ref = (*reloc_table)[j];
- Partial_Reveal_Object* p_target_obj = get_forwarding_pointer_in_obj_info(*p_ref);
- *p_ref = p_target_obj;
+ while(old_value != new_value){
+ unsigned int temp = atomic_cas32(p_word, new_value, old_value);
+ if(temp == old_value) return TRUE;
+ old_value = *p_word;
+ new_value = old_value|word_mask;
}
- reloc_table->clear();
- return;
+ return FALSE;
}
Index: vm/gc_gen/src/mark_sweep/lspace.h
===================================================================
--- vm/gc_gen/src/mark_sweep/lspace.h (revision 472408)
+++ vm/gc_gen/src/mark_sweep/lspace.h (working copy)
@@ -34,17 +34,12 @@
GC* gc;
Boolean move_object;
Boolean (*mark_object_func)(Lspace* space, Partial_Reveal_Object* p_obj);
- void (*save_reloc_func)(Lspace* space, Partial_Reveal_Object** p_ref);
- void (*update_reloc_func)(Lspace* space);
/* END of Space --> */
void* alloc_free;
unsigned int* mark_table;
- /* support other space moving collection */
- SlotVector* reloc_table;
-
}Lspace;
void lspace_initialize(GC* gc, void* reserved_base, unsigned int lspace_size);
Index: vm/gc_gen/src/thread/collector.cpp
===================================================================
--- vm/gc_gen/src/thread/collector.cpp (revision 472408)
+++ vm/gc_gen/src/thread/collector.cpp (working copy)
@@ -23,55 +23,87 @@
#include "collector.h"
#include "../mark_compact/mspace.h"
+
+static void collector_restore_obj_info(Collector* collector)
+{
+ ObjectMap* objmap = collector->obj_info_map;
+ ObjectMap::iterator obj_iter;
+ for( obj_iter=objmap->begin(); obj_iter!=objmap->end(); obj_iter++){
+ Partial_Reveal_Object* p_target_obj = obj_iter->first;
+ Obj_Info_Type obj_info = obj_iter->second;
+ set_obj_info(p_target_obj, obj_info);
+ }
+ objmap->clear();
+ return;
+}
+
+void gc_restore_obj_info(GC* gc)
+{
+ for(unsigned int i=0; i<gc->num_active_collectors; i++)
+ {
+ Collector* collector = gc->collectors[i];
+ collector_restore_obj_info(collector);
+ }
+ return;
+
+}
+
static void collector_reset_thread(Collector *collector)
{
collector->task_func = NULL;
- vm_reset_event(collector->task_assigned_event);
- vm_reset_event(collector->task_finished_event);
-
- alloc_context_reset((Allocator*)collector);
-
- return;
+ vm_reset_event(collector->task_assigned_event);
+ vm_reset_event(collector->task_finished_event);
+
+ alloc_context_reset((Allocator*)collector);
+
+ GC_Metadata* metadata = collector->gc->metadata;
+ assert(collector->rep_set==NULL);
+ collector->rep_set = pool_get_entry(metadata->free_set_pool);
+ collector->result = 1;
+
+ if(gc_requires_barriers()){
+ assert(collector->rem_set==NULL);
+ collector->rem_set = pool_get_entry(metadata->free_set_pool);
+ }
+
+ return;
}
static void wait_collector_to_finish(Collector *collector)
{
- vm_wait_event(collector->task_finished_event);
+ vm_wait_event(collector->task_finished_event);
}
static void notify_collector_to_work(Collector* collector)
{
- vm_set_event(collector->task_assigned_event);
+ vm_set_event(collector->task_assigned_event);
}
static void collector_wait_for_task(Collector *collector)
{
- vm_wait_event(collector->task_assigned_event);
+ vm_wait_event(collector->task_assigned_event);
}
static void collector_notify_work_done(Collector *collector)
{
- vm_set_event(collector->task_finished_event);
+ vm_set_event(collector->task_finished_event);
}
-void gc_preprocess_collector(Collector*);
-void gc_postprocess_collector(Collector*);
static void assign_collector_with_task(GC* gc, TaskType task_func, Space* space)
{
- unsigned int num_collectors_to_activate = gc->num_collectors;
- for(unsigned int i=0; i<num_collectors_to_activate; i++)
+ gc->num_active_collectors = gc->num_collectors;
+ for(unsigned int i=0; i<gc->num_active_collectors; i++)
{
Collector* collector = gc->collectors[i];
- gc_preprocess_collector(collector);
collector_reset_thread(collector);
collector->task_func = task_func;
collector->collect_space = space;
notify_collector_to_work(collector);
}
- gc->num_active_collectors = num_collectors_to_activate;
-
+ return;
}
static void wait_collection_finish(GC* gc)
@@ -81,73 +113,63 @@
{
Collector* collector = gc->collectors[i];
wait_collector_to_finish(collector);
- gc_postprocess_collector(collector);
}
gc->num_active_collectors = 0;
-
+ return;
}
static int collector_thread_func(void *arg)
{
- Collector *collector = (Collector *)arg;
- assert(collector);
-
- while(true){
- /* Waiting for newly assigned task */
- collector_wait_for_task(collector);
-
- /* waken up and check for new task */
+ Collector *collector = (Collector *)arg;
+ assert(collector);
+
+ while(true){
+ /* Waiting for newly assigned task */
+ collector_wait_for_task(collector);
+
+ /* waken up and check for new task */
TaskType task_func = collector->task_func;
if(task_func == NULL) return 1;
task_func(collector);
- collector_notify_work_done(collector);
- }
+ collector_notify_work_done(collector);
+ }
- return 0;
+ return 0;
}
static void collector_init_thread(Collector *collector)
{
- collector->trace_stack = new TraceStack(); /* only for MINOR_COLLECTION */
- collector->mark_stack = new MarkStack(); /* only for MAJOR_COLLECTION */
+ collector->trace_stack = new TraceStack(); /* only for MINOR_COLLECTION */
+ collector->obj_info_map = new ObjectMap();
+ collector->rem_set = NULL;
+ collector->rep_set = NULL;
- collector->last_cycle_remset = new RemslotSet();
- collector->last_cycle_remset->reserve(GC_NUM_ROOTS_HINT);
- collector->last_cycle_remset->clear();
+ int status = vm_create_event(&collector->task_assigned_event,0,1);
+ assert(status == THREAD_OK);
- collector->this_cycle_remset = new RemslotSet();
- collector->this_cycle_remset->reserve(GC_NUM_ROOTS_HINT);
- collector->this_cycle_remset->clear();
+ status = vm_create_event(&collector->task_finished_event,0,1);
+ assert(status == THREAD_OK);
- int status = vm_create_event(&collector->task_assigned_event,0,1);
- assert(status == THREAD_OK);
+ status = (unsigned int)vm_create_thread(NULL,
+ 0, 0, 0,
+ collector_thread_func,
+ (void*)collector);
- status = vm_create_event(&collector->task_finished_event,0,1);
- assert(status == THREAD_OK);
-
- status = (unsigned int)vm_create_thread(NULL,
- 0, 0, 0,
- collector_thread_func,
- (void*)collector);
-
- assert(status == THREAD_OK);
-
- return;
+ assert(status == THREAD_OK);
+
+ return;
}
static void collector_terminate_thread(Collector* collector)
{
collector->task_func = NULL; /* NULL to notify thread exit */
- notify_collector_to_work(collector);
+ notify_collector_to_work(collector);
vm_thread_yield(); /* give collector time to die */
- delete collector->trace_stack;
- delete collector->last_cycle_remset;
- delete collector->this_cycle_remset;
-
- return;
+ delete collector->trace_stack;
+ return;
}
void collector_destruct(GC* gc)
@@ -155,8 +177,8 @@
 for(unsigned int i=0; i<gc->num_collectors; i++)
{
Collector* collector = gc->collectors[i];
- collector_terminate_thread(collector);
- STD_FREE(collector);
+ collector_terminate_thread(collector);
+ STD_FREE(collector);
}
@@ -164,26 +186,35 @@
return;
}
+unsigned int NUM_COLLECTORS = 0;
+
struct GC_Gen;
unsigned int gc_get_processor_num(GC_Gen*);
void collector_initialize(GC* gc)
{
- unsigned int nthreads = gc_get_processor_num((GC_Gen*)gc);
-
- gc->num_collectors = 1; //FIXME:: nthreads;
- gc->collectors = (Collector **) STD_MALLOC(sizeof(Collector *) * nthreads);
- assert(gc->collectors);
+ //FIXME::
+ unsigned int nthreads = gc_get_processor_num((GC_Gen*)gc);
+
+ nthreads = (NUM_COLLECTORS==0)?nthreads:NUM_COLLECTORS;
- for (unsigned int i = 0; i < nthreads; i++) {
- Collector* collector = (Collector *)STD_MALLOC(sizeof(Collector));
- assert(collector);
-
- collector->gc = gc;
- collector_init_thread(collector);
-
- gc->collectors[i] = collector;
- }
+ gc->num_collectors = nthreads;
+ unsigned int size = sizeof(Collector *) * nthreads;
+ gc->collectors = (Collector **) STD_MALLOC(size);
+ memset(gc->collectors, 0, size);
+ size = sizeof(Collector);
+ for (unsigned int i = 0; i < nthreads; i++) {
+ Collector* collector = (Collector *)STD_MALLOC(size);
+ memset(collector, 0, size);
+
+ /* FIXME:: thread_handle is for temporary control */
+ collector->thread_handle = (VmThreadHandle)i;
+ collector->gc = gc;
+ collector_init_thread(collector);
+
+ gc->collectors[i] = collector;
+ }
+
return;
}
Index: vm/gc_gen/src/thread/collector.h
===================================================================
--- vm/gc_gen/src/thread/collector.h (revision 472408)
+++ vm/gc_gen/src/thread/collector.h (working copy)
@@ -22,6 +22,7 @@
#define _COLLECTOR_H_
#include "../common/gc_common.h"
+struct Block_Header;
typedef struct Collector{
/* <-- first couple of fields are overloaded as Allocator */
@@ -33,18 +34,27 @@
VmThreadHandle thread_handle; /* This thread; */
/* End of Allocator --> */
+ /* FIXME:: for testing */
Space* collect_space;
- /* collector has remsets to remember those stored during copying */
- RemslotSet* last_cycle_remset; /* remembered in last cycle, used in this cycle as roots */
- RemslotSet* this_cycle_remset; /* remembered in this cycle, will switch with last_remslot */
TraceStack *trace_stack;
- MarkStack *mark_stack;
+ MarkStack* mark_stack;
+ Vector_Block* rep_set; /* repointed set */
+ Vector_Block* rem_set;
+
VmEventHandle task_assigned_event;
VmEventHandle task_finished_event;
+ Block_Header* cur_compact_block;
+ Block_Header* cur_target_block;
+
+ /* during compaction, save the non-zero obj_info that gets overwritten by the forwarding pointer */
+ ObjectMap* obj_info_map;
+
void(*task_func)(void*) ; /* current task */
+
+ unsigned int result;
}Collector;
@@ -56,4 +66,7 @@
Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj);
+void gc_restore_obj_info(GC* gc);
+
+
#endif //#ifndef _COLLECTOR_H_
Index: vm/gc_gen/src/thread/mutator.cpp
===================================================================
--- vm/gc_gen/src/thread/mutator.cpp (revision 472408)
+++ vm/gc_gen/src/thread/mutator.cpp (working copy)
@@ -25,18 +25,18 @@
Space* gc_get_nos(GC_Gen* gc);
void mutator_initialize(GC* gc, void *gc_information)
{
- /* FIXME:: NOTE: gc_info is uncleared */
+ /* FIXME:: make sure gc_info is cleared */
Mutator *mutator = (Mutator *) gc_information;
mutator->free = NULL;
mutator->ceiling = NULL;
mutator->alloc_block = NULL;
mutator->alloc_space = gc_get_nos((GC_Gen*)gc);
mutator->gc = gc;
-
- assert(mutator->remslot == NULL);
- mutator->remslot = new RemslotSet();
- mutator->remslot->clear();
+ if(gc_requires_barriers()){
+ mutator->rem_set = pool_get_entry(gc->metadata->free_set_pool);
+ }
+
lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
mutator->next = (Mutator *)gc->mutator_list;
@@ -53,12 +53,13 @@
Mutator *mutator = (Mutator *)gc_information;
+ if(gc_requires_barriers()){ /* put back the remset when a mutator exits */
+ pool_put_entry(gc->metadata->gc_rootset_pool, mutator->rem_set);
+ mutator->rem_set = NULL;
+ }
+
lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
- Fspace* fspace = (Fspace*)mutator->alloc_space;
- fspace->remslot_sets->push_back(mutator->remslot);
- mutator->remslot = NULL;
-
volatile Mutator *temp = gc->mutator_list;
if (temp == mutator) { /* it is at the head of the list */
gc->mutator_list = temp->next;
@@ -76,4 +77,3 @@
return;
}
-
Index: vm/gc_gen/src/thread/mutator.h
===================================================================
--- vm/gc_gen/src/thread/mutator.h (revision 472408)
+++ vm/gc_gen/src/thread/mutator.h (working copy)
@@ -34,8 +34,8 @@
VmThreadHandle thread_handle; /* This thread; */
/* END of Allocator --> */
- RemslotSet *remslot;
- Mutator *next; /* The gc info area associated with the next active thread. */
+ Vector_Block* rem_set;
+ Mutator* next; /* The gc info area associated with the next active thread. */
} Mutator;
void mutator_initialize(GC* gc, void* tls_gc_info);
Index: vm/gc_gen/src/thread/thread_alloc.h
===================================================================
--- vm/gc_gen/src/thread/thread_alloc.h (revision 472408)
+++ vm/gc_gen/src/thread/thread_alloc.h (working copy)
@@ -22,6 +22,7 @@
#define _THREAD_ALLOC_H_
#include "../common/gc_block.h"
+#include "../common/gc_metadata.h"
typedef struct Allocator{
void *free;
Index: vm/gc_gen/src/trace_forward/fspace.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace.cpp (revision 472408)
+++ vm/gc_gen/src/trace_forward/fspace.cpp (working copy)
@@ -30,32 +30,6 @@
Boolean forward_first_half;;
void* object_forwarding_boundary=NULL;
-void fspace_save_reloc(Fspace* fspace, Partial_Reveal_Object** p_ref)
-{
- Block_Header* block = GC_BLOCK_HEADER(p_ref);
- block->reloc_table->push_back(p_ref);
- return;
-}
-
-void fspace_update_reloc(Fspace* fspace)
-{
- SlotVector* reloc_table;
- /* update refs in fspace */
- Block* blocks = fspace->blocks;
- for(unsigned int i=0; i < fspace->num_managed_blocks; i++){
- Block_Header* block = (Block_Header*)&(blocks[i]);
- reloc_table = block->reloc_table;
- for(unsigned int j=0; j < reloc_table->size(); j++){
- Partial_Reveal_Object** p_ref = (*reloc_table)[j];
- Partial_Reveal_Object* p_target_obj = get_forwarding_pointer_in_obj_info(*p_ref);
- *p_ref = p_target_obj;
- }
- reloc_table->clear();
- }
-
- return;
-}
-
Boolean fspace_mark_object(Fspace* fspace, Partial_Reveal_Object *p_obj)
{
obj_mark_in_vt(p_obj);
@@ -66,48 +40,20 @@
unsigned int *p_word = &(GC_BLOCK_HEADER(p_obj)->mark_table[obj_word_index]);
  unsigned int word_mask = (1<<obj_offset_in_word);
- Block* blocks = fspace->blocks;
- for(unsigned int i=0; i < fspace->num_managed_blocks; i++){
- Block_Header* block = (Block_Header*)&(blocks[i]);
- delete block->reloc_table;
- block->reloc_table = NULL;
- }
-
+{
return;
}
@@ -123,7 +69,6 @@
block->base = block->free;
block->block_idx = i + start_idx;
block->status = BLOCK_FREE;
- block->reloc_table = new SlotVector();
last_block->next = block;
last_block = block;
}
@@ -163,10 +108,7 @@
fspace_init_blocks(fspace);
- fspace->obj_info_map = new ObjectMap();
fspace->mark_object_func = fspace_mark_object;
- fspace->save_reloc_func = fspace_save_reloc;
- fspace->update_reloc_func = fspace_update_reloc;
fspace->move_object = TRUE;
fspace->num_collections = 0;
@@ -174,11 +116,8 @@
gc_set_nos((GC_Gen*)gc, (Space*)fspace);
/* above is same as Mspace init --> */
- fspace->remslot_sets = new std::vector<RemslotSet *>();
- fspace->rem_sets_lock = FREE_LOCK;
+ nos_boundary = fspace->heap_start;
- nos_boundary = fspace->heap_end;
-
forward_first_half = TRUE;
object_forwarding_boundary = (void*)&fspace->blocks[fspace->first_block_idx + (unsigned int)(fspace->num_managed_blocks * NURSERY_OBJECT_FORWARDING_RATIO)];
@@ -216,7 +155,7 @@
unsigned int last_idx = fspace->ceiling_block_idx;
Block* blocks = fspace->blocks;
unsigned int num_freed = 0;
- for(unsigned int i = first_idx; i <= last_idx; i++){
+ for(unsigned int i = 0; i <= last_idx-first_idx; i++){
Block_Header* block = (Block_Header*)&(blocks[i]);
if(block->status == BLOCK_FREE) continue;
block_clear_mark_table(block);
@@ -237,6 +176,8 @@
GC* gc = fspace->gc;
+ pool_iterator_init(gc->metadata->gc_rootset_pool);
+
if(gc_requires_barriers()){
/* generational GC. Only trace (mark) nos */
collector_execute_task(gc, (TaskType)trace_forward_fspace, (Space*)fspace);
Index: vm/gc_gen/src/trace_forward/fspace.h
===================================================================
--- vm/gc_gen/src/trace_forward/fspace.h (revision 472408)
+++ vm/gc_gen/src/trace_forward/fspace.h (working copy)
@@ -44,8 +44,6 @@
GC* gc;
Boolean move_object;
Boolean (*mark_object_func)(Fspace* space, Partial_Reveal_Object* p_obj);
- void (*save_reloc_func)(Fspace* space, Partial_Reveal_Object** p_ref);
- void (*update_reloc_func)(Fspace* space);
/* END of Space --> */
Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */
@@ -58,16 +56,8 @@
unsigned int num_used_blocks;
unsigned int num_managed_blocks;
unsigned int num_total_blocks;
-
- /* during compaction, save non-zero obj_info who's overwritten by forwarding pointer */
- ObjectMap* obj_info_map;
/* END of Blocked_Space --> */
-
- /* saved remsets of collectors */
- /* saved remsets of exited mutators */
- std::vector<RemslotSet *> *remslot_sets;
- SpinLock rem_sets_lock;
-
+
} Fspace;
void fspace_initialize(GC* gc, void* start, unsigned int fspace_size);
@@ -81,17 +71,10 @@
void* fspace_alloc(unsigned size, Allocator *allocator);
Boolean fspace_mark_object(Fspace* fspace, Partial_Reveal_Object *p_obj);
-void fspace_save_reloc(Fspace* fspace, Partial_Reveal_Object** p_ref);
-void fspace_update_reloc(Fspace* fspace);
+
void reset_fspace_for_allocation(Fspace* fspace);
-inline Block_Header* fspace_get_first_copy_block(Fspace* fspace)
-{ return (Block_Header*)fspace->blocks; }
-inline Block_Header* fspace_get_next_copy_block(Fspace* fspace, Block_Header* block)
-{ return block->next; }
-
-
Boolean fspace_compute_object_target(Collector* collector, Fspace* fspace);
void fspace_copy_collect(Collector* collector, Fspace* fspace);
Index: vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp (revision 472408)
+++ vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp (working copy)
@@ -23,26 +23,54 @@
#include "../mark_sweep/lspace.h"
#include "../thread/collector.h"
+static volatile Block_Header* current_copy_block;
+static volatile Block_Header* current_target_block;
+
+static Block_Header* fspace_get_first_copy_block(Fspace* fspace)
+{ return (Block_Header*)fspace->blocks; }
+
+static Block_Header* fspace_get_next_copy_block(Fspace* fspace)
+{
+ /* FIXME::FIXME:: this only works for full space copying */
+ Block_Header* cur_copy_block = (Block_Header*)current_copy_block;
+
+ while(cur_copy_block != NULL){
+ Block_Header* next_copy_block = current_copy_block->next;
+
+ Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)¤t_copy_block, next_copy_block, cur_copy_block);
+ if(temp == cur_copy_block)
+ return cur_copy_block;
+
+ cur_copy_block = (Block_Header*)current_copy_block;
+ }
+ /* ran out of fspace blocks for copying */
+ return NULL;
+}
+
+
/* copying of fspace is only for MAJOR_COLLECTION or non-generational partial copy collection */
static Block_Header* mspace_get_first_target_block_for_nos(Mspace* mspace)
{
return (Block_Header*)&mspace->blocks[mspace->free_block_idx-mspace->first_block_idx];
}
-static Block_Header* mspace_get_next_target_block_for_nos(Mspace* mspace, Block_Header* block)
-{ return block->next; }
-
-static void fspace_restore_obj_info(Fspace* fspace)
-{
- ObjectMap* objmap = fspace->obj_info_map;
- ObjectMap::iterator obj_iter;
- for( obj_iter=objmap->begin(); obj_iter!=objmap->end(); obj_iter++){
- Partial_Reveal_Object* p_target_obj = obj_iter->first;
- Obj_Info_Type obj_info = obj_iter->second;
- set_obj_info(p_target_obj, obj_info);
+static Block_Header* mspace_get_next_target_block_for_nos(Mspace* mspace)
+{
+ Block_Header* mspace_heap_end = (Block_Header*)space_heap_end((Space*)mspace);
+ Block_Header* cur_target_block = (Block_Header*)current_target_block;
+ Block_Header* next_target_block = current_target_block->next;
+
+ while(cur_target_block < mspace_heap_end){
+ Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)¤t_target_block, next_target_block, cur_target_block);
+ if(temp == cur_target_block)
+ return cur_target_block;
+
+ cur_target_block = (Block_Header*)current_target_block;
+ next_target_block = current_target_block->next;
}
- objmap->clear();
- return;
+ /* mos is always able to hold nos in minor collection */
+ assert(0);
+ return NULL;
}
struct GC_Gen;
@@ -51,11 +79,13 @@
Boolean fspace_compute_object_target(Collector* collector, Fspace* fspace)
{
Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)collector->gc);
- Block_Header* dest_block = mspace_get_first_target_block_for_nos(mspace);
- Block_Header* curr_block = fspace_get_first_copy_block(fspace);
+ Block_Header* dest_block = mspace_get_next_target_block_for_nos(mspace);
+ Block_Header* curr_block = fspace_get_next_copy_block(fspace);
+ assert(dest_block->status == BLOCK_FREE);
+ dest_block->status = BLOCK_USED;
void* dest_addr = GC_BLOCK_BODY(dest_block);
-
+
while( curr_block ){
unsigned int mark_bit_idx;
Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &mark_bit_idx);
@@ -67,15 +97,17 @@
if( ((unsigned int)dest_addr + obj_size) > (unsigned int)GC_BLOCK_END(dest_block)){
dest_block->free = dest_addr;
- dest_block = mspace_get_next_target_block_for_nos(mspace, dest_block);
+ dest_block = mspace_get_next_target_block_for_nos(mspace);
if(dest_block == NULL) return FALSE;
+ assert(dest_block->status == BLOCK_FREE);
+ dest_block->status = BLOCK_USED;
dest_addr = GC_BLOCK_BODY(dest_block);
}
assert(((unsigned int)dest_addr + obj_size) <= (unsigned int)GC_BLOCK_END(dest_block));
Obj_Info_Type obj_info = get_obj_info(p_obj);
if( obj_info != 0 ) {
- fspace->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info));
+ collector->obj_info_map->insert(ObjectMap::value_type((Partial_Reveal_Object*)dest_addr, obj_info));
}
set_forwarding_pointer_in_obj_info(p_obj, dest_addr);
@@ -84,11 +116,9 @@
p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);
}
- curr_block = fspace_get_next_copy_block(fspace, curr_block);
+ curr_block = fspace_get_next_copy_block(fspace);
}
-
- mspace->free_block_idx = dest_block->block_idx+1;
-
+
return TRUE;
}
@@ -96,7 +126,7 @@
void fspace_copy_collect(Collector* collector, Fspace* fspace)
{
- Block_Header* curr_block = fspace_get_first_copy_block(fspace);
+ Block_Header* curr_block = fspace_get_next_copy_block(fspace);
while( curr_block ){
unsigned int mark_bit_idx;
@@ -119,36 +149,70 @@
p_obj = block_get_next_marked_object(curr_block, &mark_bit_idx);
}
- curr_block = fspace_get_next_copy_block(fspace, curr_block);
+ curr_block = fspace_get_next_copy_block(fspace);
}
-
- fspace_restore_obj_info(fspace);
- reset_fspace_for_allocation(fspace);
-
+
return;
}
-void gc_gen_update_repointed_refs(Collector* collector);
+void gc_update_repointed_refs(Collector* collector);
+static volatile unsigned int num_marking_collectors = 0;
+static volatile unsigned int num_installing_collectors = 0;
+
void mark_copy_fspace(Collector* collector)
{
GC* gc = collector->gc;
Fspace* fspace = (Fspace*)collector->collect_space;
+ Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
+
+ unsigned int num_active_collectors = gc->num_active_collectors;
- /* FIXME:: Single-threaded mark-copying for fspace currently */
-
/* Pass 1: mark all live objects in heap, and save all the slots that
have references that are going to be repointed */
- mark_scan_heap(collector);
+ atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
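+ /* same phase-barrier idiom as in mark_compact_mspace: reset the counter, each collector
+    increments it after marking, and the last one releases the others spinning below */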
+
+ mark_scan_heap_par(collector);
+ unsigned int old_num = atomic_inc32(&num_marking_collectors);
+ if( ++old_num == num_active_collectors ){
+   /* single-threaded world, e.g., for verification of the last phase and preparation of the next phase */
+ current_copy_block = fspace_get_first_copy_block(fspace);
+ current_target_block = mspace_get_first_target_block_for_nos(mspace);
+ /* let other collectors go */
+ num_marking_collectors++;
+ }
+
+ while(num_marking_collectors != num_active_collectors + 1);
+
/* Pass 2: assign each live fspace object a new location */
+ atomic_cas32( &num_installing_collectors, 0, num_active_collectors+1);
+
fspace_compute_object_target(collector, fspace);
- gc_gen_update_repointed_refs(collector);
+ old_num = atomic_inc32(&num_installing_collectors);
+ if( ++old_num == num_active_collectors){
+ /* nothing to do in this single thread region */
+ mspace->free_block_idx = current_target_block->block_idx;
+ num_installing_collectors++;
+ }
+
+ while(num_installing_collectors != num_active_collectors + 1);
+ /* FIXME:: temporary. let only one thread go forward */
+ if( collector->thread_handle != 0 ) return;
+
+ gc_update_repointed_refs(collector);
+
/* FIXME:: Pass 2 and 3 can be merged into one pass */
/* Pass 3: copy live fspace object to new location */
- fspace_copy_collect(collector, fspace);
+ current_copy_block = fspace_get_first_copy_block(fspace);
+ fspace_copy_collect(collector, fspace);
+
+ /* FIXME:: should be collector_restore_obj_info(collector) */
+ gc_restore_obj_info(gc);
+
+ reset_fspace_for_allocation(fspace);
return;
}
Index: vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (revision 472408)
+++ vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp (working copy)
@@ -20,6 +20,7 @@
#include "fspace.h"
#include "../thread/collector.h"
+#include "../common/gc_metadata.h"
static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
{
@@ -99,9 +100,10 @@
if (!fspace_object_to_be_forwarded(p_obj, (Fspace*)space)) {
assert(!obj_is_forwarded_in_vt(p_obj));
/* this obj remains in fspace, remember its ref slot for next GC. */
- if( !address_belongs_to_space(p_ref, space) )
- collector->this_cycle_remset->push_back(p_ref);
-
+ if( !address_belongs_to_space(p_ref, space) ){
+ collector_remset_add_entry(collector, p_ref);
+ }
+
if(fspace_mark_object((Fspace*)space, p_obj))
scan_object(collector, p_obj);
@@ -145,29 +147,35 @@
}
}
-static void collector_trace_remsets(Collector* collector)
+static void collector_trace_rootsets(Collector* collector)
{
- Fspace* fspace = (Fspace*)collector->collect_space;
+ GC_Metadata* metadata = collector->gc->metadata;
+ Space* space = collector->collect_space;
HashSet remslot_hash;
/* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */
- for(unsigned int i=0; i< fspace->remslot_sets->size(); i++) {
- RemslotSet* remslot = (*fspace->remslot_sets)[i];
- for (unsigned int j = 0; j < remslot->size(); j++) {
- Partial_Reveal_Object **ref = (*remslot)[j];
- assert(ref);
- if(*ref == NULL) continue;
- if (obj_belongs_to_space(*ref, (Space*)fspace)) {
- if (remslot_hash.find(ref) == remslot_hash.end()) {
- remslot_hash.insert(ref);
- trace_root(collector, ref);
+ pool_iterator_init(metadata->gc_rootset_pool);
+ Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);
+
+ while(root_set){
+ unsigned int* iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+
+ assert(p_ref);
+ if(*p_ref == NULL) continue;
+ if (obj_belongs_to_space(*p_ref, space)) {
+ if (remslot_hash.find(p_ref) == remslot_hash.end()) {
+ remslot_hash.insert(p_ref);
+ trace_root(collector, p_ref);
}
}
}
- remslot->clear();
+ pool_put_entry(metadata->free_set_pool, root_set);
+ root_set = pool_iterator_next(metadata->gc_rootset_pool);
}
- fspace->remslot_sets->clear();
return;
}
@@ -186,8 +194,7 @@
/* FIXME:: Single-threaded trace-forwarding for fspace currently */
- space->remslot_sets->push_back(gc->root_set);
- collector_trace_remsets(collector);
+ collector_trace_rootsets(collector);
update_relocated_refs(collector);
reset_fspace_for_allocation(space);
Index: vm/gc_gen/src/utils/vector_block.h
===================================================================
--- vm/gc_gen/src/utils/vector_block.h (revision 0)
+++ vm/gc_gen/src/utils/vector_block.h (revision 0)
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/25
+ */
+
+#ifndef _VECTOR_BLOCK_H_
+#define _VECTOR_BLOCK_H_
+
+typedef struct Vector_Block{
+ unsigned int* start; /* point to first entry, not needed actually */
+ unsigned int* end; /* point to end of the block (right after the last entry) */
+ unsigned int* head; /* point to the first filled entry */
+ unsigned int* tail; /* point to the entry after the last filled one */
+ unsigned int* entries[1];
+}Vector_Block;
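+/* entries are appended at tail and consumed through head; the block is a fixed-size buffer
+   whose capacity is determined by the size passed to vector_block_init */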
+
+inline void vector_block_init(Vector_Block* block, unsigned int size)
+{
+ block->start = (unsigned int*)block->entries;
+ block->end = (unsigned int*)((unsigned int)block + size);
+ block->head = block->start;
+ block->tail = block->start;
+ return;
+}
+
+inline unsigned int vector_block_entry_count(Vector_Block* block)
+{ return (unsigned int)(block->tail - block->head); }
+
+inline Boolean vector_block_is_full(Vector_Block* block)
+{ return block->tail == block->end; }
+
+inline void vector_block_add_entry(Vector_Block* block, unsigned int value)
+{
+ assert(value && !*(block->tail));
+ *(block->tail++) = value;
+}
+
+inline void vector_block_clear(Vector_Block* block)
+{
+#ifdef _DEBUG
+ memset(block->start, 0, (block->end - block->start)*BYTES_PER_WORD);
+#endif
+
+ block->tail = block->head;
+}
+
+/* Below is for sequential local access */
+inline unsigned int* vector_block_iterator_init(Vector_Block* block)
+{ return block->head; }
+
+inline unsigned int* vector_block_iterator_advance(Vector_Block* block, unsigned int* iter)
+{ return ++iter; }
+
+inline Boolean vector_block_iterator_end(Vector_Block* block, unsigned int* iter)
+{ return iter == block->tail; }
+
+#endif /* #ifndef _VECTOR_BLOCK_H_ */
Index: vm/gc_gen/src/utils/sync_queue.h
===================================================================
--- vm/gc_gen/src/utils/sync_queue.h (revision 0)
+++ vm/gc_gen/src/utils/sync_queue.h (revision 0)
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/05
+ */
+
+#ifndef _SYNC_QUEUE_H_
+#define _SYNC_QUEUE_H_
+
+/* an implementation of MSQ (Michael-Scott queue). FIXME:: only works on 32-bit machines */
+
+struct Queue_Node;
+
+typedef struct Queue_Link{
+ struct Queue_Node* ptr;
+ unsigned int count;
+}Queue_Link;
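+/* a {pointer, count} pair updated with one 64-bit CAS; the count tags every update so that
+   a reused node address cannot be mistaken for its old incarnation (ABA avoidance) */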
+
+typedef struct Queue_Node{
+ __declspec(align(8))
+ Queue_Link next; /* must be aligned to 8Byte*/
+ unsigned int* value;
+}Queue_Node;
+
+typedef struct Sync_Queue{
+ __declspec(align(8))
+ Queue_Link head; /* must be aligned to 8Byte*/
+ Queue_Link tail;
+}Sync_Queue;
+
+inline Queue_Node * new_queue_node()
+{
+ Queue_Node* p_node = (Queue_Node*)malloc(sizeof(Queue_Node));
+ assert( ((unsigned int)p_node)%8 == 0 );
+ return p_node;
+}
+
+inline void free_queue_node(Queue_Node* node)
+{ free( node ); }
+
+inline void sync_queue_init(Sync_Queue *queue)
+{
+ Queue_Node *node = new_queue_node();
+ node->next.ptr = NULL;
+ node->next.count = 0;
+ queue->head.ptr = queue->tail.ptr = node;
+ queue->head.count = queue->tail.count = 0;
+ return;
+}
+
+#define QLINK_PTR(x) ((unsigned long long*)&(x))
+#define QLINK_VAL(x) (*(QLINK_PTR(x)))
+
+inline void sync_queue_push(Sync_Queue* queue, unsigned int* value)
+{
+ Queue_Link tail, next, tmp1, tmp2;
+ Queue_Node* node = new_queue_node();
+ node->value = value;
+ node->next.ptr = NULL;
+ while(TRUE){
+ QLINK_VAL(tail) = QLINK_VAL(queue->tail);
+ QLINK_VAL(next) = QLINK_VAL(tail.ptr->next);
+ if( QLINK_VAL(tail) == QLINK_VAL(queue->tail)){
+ if( next.ptr==NULL ){
+ tmp1.ptr = node;
+ tmp1.count = next.count + 1;
+ node->next.count = tmp1.count;
+ QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(tail.ptr->next), QLINK_VAL(next), QLINK_VAL(tmp1));
+ if( QLINK_VAL(tmp1) == QLINK_VAL(tmp2))
+ break;
+
+ }else{
+ tmp1.ptr = next.ptr;
+ tmp1.count = tail.count + 1;
+ atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
+ }
+ }
+ }
+ tmp1.ptr = node;
+ tmp1.count = tail.count + 1;
+ atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
+ return;
+}
+
+Boolean sync_queue_pull(Sync_Queue* queue, unsigned int * pvalue)
+{
+ Queue_Link head, tail, next, tmp1, tmp2;
+ while(TRUE){
+ QLINK_VAL(head) = QLINK_VAL(queue->head);
+ QLINK_VAL(tail) = QLINK_VAL(queue->tail);
+ QLINK_VAL(next) = QLINK_VAL(head.ptr->next);
+
+ if( QLINK_VAL(head) == QLINK_VAL(queue->head)){
+ if( head.ptr== tail.ptr )
+ if( next.ptr == NULL )
+ return FALSE;
+ else{
+ tmp1.ptr = next.ptr;
+ tmp1.count = tail.count+1;
+ atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
+ }
+ else{
+ *pvalue = (unsigned int)next.ptr->value;
+ tmp1.ptr = next.ptr;
+ tmp1.count = head.count+1;
+ QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(queue->head), QLINK_VAL(head), QLINK_VAL(tmp1));
+ if( QLINK_VAL(tmp2) == QLINK_VAL(tmp1))
+ break;
+ }
+ }
+ }
+ free( head.ptr );
+ return TRUE;
+}
+
+#endif /* _SYNC_QUEUE_H_ */
Index: vm/gc_gen/src/utils/sync_stack.h
===================================================================
--- vm/gc_gen/src/utils/sync_stack.h (revision 0)
+++ vm/gc_gen/src/utils/sync_stack.h (revision 0)
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/25
+ */
+
+#ifndef _SYNC_STACK_H_
+#define _SYNC_STACK_H_
+
+typedef struct Sync_Stack{
+ unsigned int* top; /* pointing to the first filled entry */
+ unsigned int* cur; /* pointing to the current accessed entry */
+ unsigned int* bottom; /* pointing to the pos right after the last entry */
+ unsigned int entries[1];
+}Sync_Stack;
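+/* the stack is empty when top == bottom; push claims the slot below top with a CAS and then
+   fills it, while pop claims the slot at top and spins until the pusher has stored the value */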
+
+inline Sync_Stack* sync_stack_init(unsigned int num_entries)
+{
+ unsigned int size = ((num_entries-1) << 2) + sizeof(Sync_Stack);
+ Sync_Stack* stack = (Sync_Stack*)STD_MALLOC(size);
+ memset(stack, 0, size);
+ stack->bottom = &(stack->entries[num_entries]);
+ stack->top = stack->bottom;
+ return stack;
+}
+
+inline void sync_stack_destruct(Sync_Stack* stack)
+{
+ STD_FREE(stack);
+ return;
+}
+
+inline void sync_stack_iterate_init(Sync_Stack* stack)
+{
+ stack->cur = stack->top;
+ return;
+}
+
+inline unsigned int sync_stack_iterate_next(Sync_Stack* stack)
+{
+ unsigned int* entry = stack->cur;
+ unsigned int* new_entry = entry + 1;
+ unsigned int* last_entry = stack->bottom - 1;
+ while ( entry <= last_entry ){
+ unsigned int* temp = (unsigned int*)atomic_casptr((volatile void**)&stack->cur, new_entry, entry);
+ if(temp == entry){ /* got it */
+ return *entry;
+ }
+ entry = stack->cur;
+ new_entry = entry + 1;
+ }
+ return 0;
+}
+
+inline unsigned int sync_stack_pop(Sync_Stack* stack)
+{
+ volatile unsigned int* entry = stack->top;
+ unsigned int* new_entry = stack->top + 1;
+ unsigned int* last_entry = stack->bottom - 1;
+ while ( entry <= last_entry ){
+ unsigned int* temp = (unsigned int*)atomic_casptr((volatile void**)&stack->top, new_entry, (const void*)entry);
+ if(temp == entry){ /* got it */
+ while(!*entry); /* has to have something */
+ unsigned int result = *entry;
+ *entry = NULL; /* put NULL into it */
+ return result;
+ }
+ entry = (volatile unsigned int*)stack->top;
+ new_entry = (unsigned int*)(entry + 1);
+ }
+ return 0;
+}
+
+inline Boolean sync_stack_push(Sync_Stack* stack, unsigned int value)
+{
+ unsigned int* entry = stack->top;
+ volatile unsigned int* new_entry = stack->top - 1;
+ unsigned int* first_entry = stack->entries;
+ while ( entry >= first_entry ){
+ unsigned int* temp = (unsigned int*)atomic_casptr((volatile void**)&stack->top, (void*)new_entry, entry);
+ if(temp == entry){ /* got it */
+ while(*new_entry); /* has to be NULL before filled */
+ *new_entry = value;
+ return TRUE;
+ }
+ entry = stack->top;
+ new_entry = entry - 1;
+ }
+ return FALSE;
+}
+
+/* it does not matter whether this is atomic or not, because it is only invoked
+   when there is no contention, or when only a rough count is needed */
+inline unsigned int stack_entry_count(Sync_Stack* stack)
+{
+ return (stack->bottom - stack->top);
+}
+
+#endif /* _SYNC_STACK_H_ */
Index: vm/gc_gen/src/utils/sync_pool.h
===================================================================
--- vm/gc_gen/src/utils/sync_pool.h (revision 0)
+++ vm/gc_gen/src/utils/sync_pool.h (revision 0)
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Xiao-Feng Li, 2006/10/25
+ */
+
+#ifndef _SYNC_POOL_H_
+#define _SYNC_POOL_H_
+
+#include "sync_stack.h"
+
+typedef Sync_Stack Pool;
+
+inline Pool* sync_pool_create(unsigned int size){ return sync_stack_init(size); }
+inline void sync_pool_destruct(Pool* pool){ sync_stack_destruct(pool); }
+
+inline Boolean pool_is_empty(Pool* pool){ return stack_entry_count(pool)==0;}
+inline Vector_Block* pool_get_entry(Pool* pool)
+{
+ Vector_Block* block = (Vector_Block*)sync_stack_pop(pool);
+ assert( !block || (block->start == (unsigned int*)block->entries) );
+ assert( !block || (block->head <= block->tail && block->tail <= block->end));
+
+ return block;
+}
+
+inline void pool_put_entry(Pool* pool, void* value){ assert(value); Boolean ok = sync_stack_push(pool, (unsigned int)value); assert(ok);}
+
+inline void pool_iterator_init(Pool* pool){ sync_stack_iterate_init(pool);}
+inline Vector_Block* pool_iterator_next(Pool* pool){ return (Vector_Block*)sync_stack_iterate_next(pool);}
+
+#endif /* #ifndef _SYNC_POOL_H_ */
+