diff -ruN oldtrunk/build/make/components/vm/gc_gen.xml newtrunk/build/make/components/vm/gc_gen.xml
--- oldtrunk/build/make/components/vm/gc_gen.xml 2006-12-09 19:17:31.000000000 +0800
+++ newtrunk/build/make/components/vm/gc_gen.xml 2006-12-07 09:57:58.000000000 +0800
@@ -61,7 +61,8 @@
-
+
+
diff -ruN oldtrunk/vm/gc_gen/src/common/gc_common.cpp newtrunk/vm/gc_gen/src/common/gc_common.cpp
--- oldtrunk/vm/gc_gen/src/common/gc_common.cpp 2006-12-04 05:01:04.000000000 +0800
+++ newtrunk/vm/gc_gen/src/common/gc_common.cpp 2006-12-09 16:02:16.000000000 +0800
@@ -22,6 +22,7 @@
#include "gc_metadata.h"
#include "../thread/mutator.h"
#include "../verify/verify_live_heap.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
extern Boolean NEED_BARRIER;
extern unsigned int NUM_COLLECTORS;
@@ -169,12 +170,15 @@
//gc->collect_kind = MAJOR_COLLECTION;
gc_metadata_verify(gc, TRUE);
+ gc_finalizer_weakref_metadata_verify((GC*)gc, TRUE);
/* Stop the threads and collect the roots. */
gc_reset_rootset(gc);
vm_enumerate_root_set_all_threads();
gc_set_rootset(gc);
-
+
+ gc_set_objects_with_finalizer(gc);
+
if(verify_live_heap) gc_verify_heap(gc, TRUE);
gc_gen_reclaim_heap((GC_Gen*)gc);
@@ -182,8 +186,12 @@
if(verify_live_heap) gc_verify_heap(gc, FALSE);
gc_metadata_verify(gc, FALSE);
-
+ gc_finalizer_weakref_metadata_verify(gc, FALSE);
+
+ gc_reset_finalizer_weakref_metadata(gc);
gc_reset_mutator_context(gc);
+
+ gc_activate_finalizer_weakref_threads((GC*)gc);
vm_resume_threads_after();
return;
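
Taken together, the gc_common.cpp hunks above hook the new finalizer/weakref machinery into the stop-the-world pause at several points. A condensed sketch of the resulting sequence (function names as in this patch; the enclosing reclaim routine is not shown in full):

    gc_metadata_verify(gc, TRUE);
    gc_finalizer_weakref_metadata_verify((GC*)gc, TRUE);  /* new: pre-GC invariants */
    gc_reset_rootset(gc);
    vm_enumerate_root_set_all_threads();
    gc_set_rootset(gc);
    gc_set_objects_with_finalizer(gc);                    /* new: flush per-mutator blocks */
    gc_gen_reclaim_heap((GC_Gen*)gc);
    gc_metadata_verify(gc, FALSE);
    gc_finalizer_weakref_metadata_verify(gc, FALSE);      /* new: post-GC invariants */
    gc_reset_finalizer_weakref_metadata(gc);              /* new: recycle and swap pools */
    gc_reset_mutator_context(gc);
    gc_activate_finalizer_weakref_threads((GC*)gc);       /* new: wake finalizer/enqueue threads */
    vm_resume_threads_after();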
diff -ruN oldtrunk/vm/gc_gen/src/common/gc_common.h newtrunk/vm/gc_gen/src/common/gc_common.h
--- oldtrunk/vm/gc_gen/src/common/gc_common.h 2006-12-04 05:01:04.000000000 +0800
+++ newtrunk/vm/gc_gen/src/common/gc_common.h 2006-12-09 16:02:20.000000000 +0800
@@ -196,6 +196,7 @@
struct Mutator;
struct Collector;
struct GC_Metadata;
+struct Finalizer_Weakref_Metadata;
struct Vector_Block;
typedef struct GC{
void* heap_start;
@@ -216,6 +217,7 @@
/* metadata is the pool for rootset, tracestack, etc. */
GC_Metadata* metadata;
+ Finalizer_Weakref_Metadata *finalizer_weakref_metadata;
unsigned int collect_kind; /* MAJOR or MINOR */
/* FIXME:: this is wrong! root_set belongs to mutator */
Vector_Block* root_set;
diff -ruN oldtrunk/vm/gc_gen/src/common/gc_for_class.cpp newtrunk/vm/gc_gen/src/common/gc_for_class.cpp
--- oldtrunk/vm/gc_gen/src/common/gc_for_class.cpp 2006-12-03 23:49:30.000000000 +0800
+++ newtrunk/vm/gc_gen/src/common/gc_for_class.cpp 2006-12-09 16:02:22.000000000 +0800
@@ -19,6 +19,7 @@
*/
#include "gc_common.h"
+#include "../finalizer_weakref/finalizer_weakref_metadata.h"
/* Setter functions for the gc class property field. */
void gc_set_prop_alignment_mask (GC_VTable_Info *gcvt, unsigned int the_mask)
@@ -41,6 +42,10 @@
{
gcvt->gc_class_properties |= CL_PROP_FINALIZABLE_MASK;
}
+void gc_set_prop_reference(Partial_Reveal_VTable *vt, WeakReferenceType type)
+{
+ vtable_get_gcvt(vt)->gc_class_properties |= (unsigned int)type << CL_PROP_REFERENCE_TYPE_SHIFT;
+}
/* A comparison function for qsort() called below to order offset slots. */
@@ -56,7 +61,7 @@
return 0;
}
-static int *build_ref_offset_array(Class_Handle ch, GC_VTable_Info *gcvt)
+static int *build_ref_offset_array(Class_Handle ch, GC_VTable_Info *gcvt, WeakReferenceType type)
{
unsigned num_ref_fields = 0;
unsigned num_fields = class_num_instance_fields_recursive(ch);
@@ -69,11 +74,25 @@
}
}
+ int skip = -1; // by default, skip no reference field
+ if (type != NOT_REFERENCE) {
+ int offset = class_get_referent_offset(ch);
+ unsigned int gc_referent_offset = get_gc_referent_offset();
+ if (gc_referent_offset == 0) {
+ set_gc_referent_offset(offset);
+ } else {
+ assert(gc_referent_offset == offset);
+ }
+
+ skip = offset; // skip global referent offset
+ num_ref_fields--;
+ }
+
if( num_ref_fields )
gcvt->gc_object_has_ref_field = true;
else
return NULL;
-
+
/* add a null-termination slot */
unsigned int size = (num_ref_fields+1) * sizeof (unsigned int);
@@ -85,6 +104,8 @@
for(idx = 0; idx < num_fields; idx++) {
Field_Handle fh = class_get_instance_field_recursive(ch, idx);
if(field_is_reference(fh)) {
+ int offset = field_get_offset(fh);
+ if (offset == skip) continue;
*new_ref_array = field_get_offset(fh);
new_ref_array++;
}
@@ -141,11 +162,14 @@
gc_set_prop_finalizable(gcvt);
}
+ WeakReferenceType type = class_is_reference(ch);
+ gc_set_prop_reference(vt, type);
+
unsigned int size = class_get_boxed_data_size(ch);
gcvt->gc_allocated_size = size;
/* Build the offset array */
- build_ref_offset_array(ch, gcvt);
+ build_ref_offset_array(ch, gcvt, type);
gcvt->gc_class_name = class_get_name(ch);
assert (gcvt->gc_class_name);
diff -ruN oldtrunk/vm/gc_gen/src/common/gc_for_class.h newtrunk/vm/gc_gen/src/common/gc_for_class.h
--- oldtrunk/vm/gc_gen/src/common/gc_for_class.h 2006-12-03 23:49:30.000000000 +0800
+++ newtrunk/vm/gc_gen/src/common/gc_for_class.h 2006-12-09 16:02:24.000000000 +0800
@@ -142,5 +142,20 @@
return gcvt->gc_allocated_size;
}
+#define CL_PROP_REFERENCE_TYPE_SHIFT 16
+#define CL_PROP_REFERENCE_TYPE_MASK 0x00030000
+
+inline WeakReferenceType special_reference_type(Partial_Reveal_Object *p_reference)
+{
+ GC_VTable_Info *gcvt = obj_get_gcvt(p_reference);
+ return (WeakReferenceType)((gcvt->gc_class_properties & CL_PROP_REFERENCE_TYPE_MASK) >> CL_PROP_REFERENCE_TYPE_SHIFT);
+}
+
+inline Boolean type_has_finalizer(Partial_Reveal_VTable *vt)
+{
+ GC_VTable_Info *gcvt = vtable_get_gcvt(vt);
+ return gcvt->gc_class_properties & CL_PROP_FINALIZABLE_MASK;
+}
+
#endif //#ifndef _GC_TYPES_H_
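
The reference type set by gc_set_prop_reference() above lives in bits 16-17 of gc_class_properties and is read back by special_reference_type() during scanning. A standalone round-trip check of that encoding (the SOFT/PHANTOM ordering after WEAK_REFERENCE is assumed; the vm_gc.h hunk near the end of this patch shows the enum only partially):

    #include <cassert>

    enum WeakReferenceType {
      NOT_REFERENCE = 0,
      WEAK_REFERENCE,
      SOFT_REFERENCE,     /* assumed order, see the vm_gc.h hunk */
      PHANTOM_REFERENCE
    };

    #define CL_PROP_REFERENCE_TYPE_SHIFT 16
    #define CL_PROP_REFERENCE_TYPE_MASK  0x00030000

    int main()
    {
      unsigned int gc_class_properties = 0;
      /* encode, as gc_set_prop_reference() does */
      gc_class_properties |= (unsigned int)SOFT_REFERENCE << CL_PROP_REFERENCE_TYPE_SHIFT;
      /* decode, as special_reference_type() does */
      WeakReferenceType t = (WeakReferenceType)
          ((gc_class_properties & CL_PROP_REFERENCE_TYPE_MASK) >> CL_PROP_REFERENCE_TYPE_SHIFT);
      assert(t == SOFT_REFERENCE);   /* two bits cover all four values */
      return 0;
    }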
diff -ruN oldtrunk/vm/gc_gen/src/common/gc_for_vm.cpp newtrunk/vm/gc_gen/src/common/gc_for_vm.cpp
--- oldtrunk/vm/gc_gen/src/common/gc_for_vm.cpp 2006-12-04 05:01:04.000000000 +0800
+++ newtrunk/vm/gc_gen/src/common/gc_for_vm.cpp 2006-12-09 16:02:30.000000000 +0800
@@ -25,6 +25,7 @@
#include "interior_pointer.h"
#include "../thread/collector.h"
#include "../verify/verify_live_heap.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
static GC* p_global_gc = NULL;
@@ -44,6 +45,7 @@
gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes);
gc_metadata_initialize(gc); /* root set and mark stack */
+ gc_finalizer_weakref_metadata_initialize(gc);
collector_initialize(gc);
gc_init_heap_verification(gc);
@@ -55,6 +57,7 @@
GC* gc = p_global_gc;
gc_gen_destruct((GC_Gen*)gc);
gc_metadata_destruct(gc); /* root set and mark stack */
+ gc_finalizer_weakref_metadata_destruct(gc);
collector_destruct(gc);
if( verify_live_heap ){
@@ -135,3 +138,17 @@
{ assert(0); return 0; }
+void gc_finalize_on_exit()
+{
+ process_objects_with_finalizer_on_exit(p_global_gc);
+}
+
+/* for future use
+ * void gc_phantom_ref_enqueue_hook(void *p_reference)
+ * {
+ * if(special_reference_type((Partial_Reveal_Object *)p_reference) == PHANTOM_REFERENCE){
+ * Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_reference);
+ * *p_referent_field = (Partial_Reveal_Object *)((unsigned int)*p_referent_field | PHANTOM_REF_ENQUEUED_MASK | ~PHANTOM_REF_PENDING_MASK);
+ * }
+ * }
+ */
diff -ruN oldtrunk/vm/gc_gen/src/common/gc_metadata.cpp newtrunk/vm/gc_gen/src/common/gc_metadata.cpp
--- oldtrunk/vm/gc_gen/src/common/gc_metadata.cpp 2006-12-03 23:49:30.000000000 +0800
+++ newtrunk/vm/gc_gen/src/common/gc_metadata.cpp 2006-12-09 16:02:28.000000000 +0800
@@ -22,6 +22,7 @@
#include "../thread/mutator.h"
#include "../thread/collector.h"
#include "interior_pointer.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
#define GC_METADATA_SIZE_BYTES 48*MB
@@ -182,7 +183,7 @@
void collector_repset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref)
{
- assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address());
+// assert( p_ref >= gc_heap_base_address() && p_ref < gc_heap_ceiling_address());
Vector_Block* root_set = collector->rep_set;
vector_block_add_entry(root_set, (unsigned int)p_ref);
@@ -260,8 +261,7 @@
else
#endif
if(!obj_is_forwarded_in_obj_info(p_obj)) continue;
- Partial_Reveal_Object* p_target_obj = get_forwarding_pointer_in_obj_info(p_obj);
- *p_ref = p_target_obj;
+ *p_ref = get_forwarding_pointer_in_obj_info(p_obj);
}
vector_block_clear(root_set);
pool_put_entry(metadata->free_set_pool, root_set);
@@ -282,6 +282,7 @@
gc_update_repointed_sets(gc, metadata->collector_repset_pool);
}
+ gc_update_finalizer_weakref_repointed_refs(gc);
update_rootset_interior_pointer();
return;
diff -ruN oldtrunk/vm/gc_gen/src/common/mark_scan.cpp newtrunk/vm/gc_gen/src/common/mark_scan.cpp
--- oldtrunk/vm/gc_gen/src/common/mark_scan.cpp 2006-12-03 23:49:30.000000000 +0800
+++ newtrunk/vm/gc_gen/src/common/mark_scan.cpp 2006-12-09 16:02:46.000000000 +0800
@@ -22,6 +22,8 @@
#include "../thread/collector.h"
#include "../gen/gen.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
+
static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref)
{
Partial_Reveal_Object* p_obj = *p_ref;
@@ -66,6 +68,8 @@
offset_scanner = offset_next_ref(offset_scanner);
}
+ scan_weak_reference(collector, p_obj, scan_slot);
+
return;
}
@@ -140,7 +144,7 @@
while(mark_task){
unsigned int* iter = vector_block_iterator_init(mark_task);
while(!vector_block_iterator_end(mark_task,iter)){
- Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
+ Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
iter = vector_block_iterator_advance(mark_task,iter);
/* FIXME:: we should not let mark_task become empty while working; others may want to steal it.
@@ -177,3 +181,39 @@
return;
}
+
+void resurrect_obj_tree_after_mark(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+ GC *gc = collector->gc;
+ GC_Metadata* metadata = gc->metadata;
+
+ Space* space = space_of_addr(gc, p_obj);
+// if(!space->mark_object_func(space, p_obj)) { assert(0); }
+ space->mark_object_func(space, p_obj);
+ collector->trace_stack = pool_get_entry(metadata->free_task_pool);
+ collector_tracestack_push(collector, p_obj);
+ pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+
+//collector->rep_set = pool_get_entry(metadata->free_set_pool); /* collector->rep_set was already acquired by the caller */
+ collector->trace_stack = pool_get_entry(metadata->free_task_pool);
+ Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
+ while(mark_task){
+ unsigned int* iter = vector_block_iterator_init(mark_task);
+ while(!vector_block_iterator_end(mark_task,iter)){
+ Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
+ trace_object(collector, p_obj);
+ iter = vector_block_iterator_advance(mark_task, iter);
+ }
+ /* run out one task, put back to the pool and grab another task */
+ vector_stack_clear(mark_task);
+ pool_put_entry(metadata->free_task_pool, mark_task);
+ mark_task = pool_get_entry(metadata->mark_task_pool);
+ }
+
+ mark_task = (Vector_Block*)collector->trace_stack;
+ vector_stack_clear(mark_task);
+ pool_put_entry(metadata->free_task_pool, mark_task);
+ collector->trace_stack = NULL;
+//pool_put_entry(metadata->collector_repset_pool, collector->rep_set); /* collector->rep_set is put back by the caller */
+//collector->rep_set = NULL; /* collector->rep_set is put back by the caller */
+}
diff -ruN oldtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp newtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
--- oldtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp 1970-01-01 08:00:00.000000000 +0800
+++ newtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp 2006-12-12 19:06:58.000000000 +0800
@@ -0,0 +1,532 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Li-Gang Wang, 2006/11/29
+ */
+
+#include "open/types.h"
+#include "open/vm_gc.h"
+#include "finalizer_weakref.h"
+#include "../thread/mutator.h"
+#include "../common/gc_metadata.h"
+#include "../trace_forward/fspace.h"
+#include "../mark_sweep/lspace.h"
+#include "../gen/gen.h"
+
+/* reset objects_with_finalizer vector block of each mutator */
+void mutator_reset_objects_with_finalizer(Mutator *mutator)
+{
+ mutator->objects_with_finalizer = finalizer_weakref_get_free_block();
+}
+
+void gc_set_objects_with_finalizer(GC *gc)
+{
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+ Pool *objects_with_finalizer_pool = metadata->objects_with_finalizer_pool;
+
+ /* put back last objects_with_finalizer block of each mutator */
+ Mutator *mutator = gc->mutator_list;
+ while(mutator){
+ pool_put_entry(objects_with_finalizer_pool, mutator->objects_with_finalizer);
+ mutator->objects_with_finalizer = NULL;
+ mutator = mutator->next;
+ }
+ return;
+}
+
+/* reset the weak reference vector blocks of each collector */
+void collector_reset_weakref_sets(Collector *collector)
+{
+ collector->softref_set = finalizer_weakref_get_free_block();
+ collector->weakref_set = finalizer_weakref_get_free_block();
+ collector->phanref_set = finalizer_weakref_get_free_block();
+}
+
+static void gc_set_weakref_sets(GC *gc)
+{
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+
+ /* put back last weak references block of each collector */
+ unsigned int num_active_collectors = gc->num_active_collectors;
+ for(unsigned int i = 0; i < num_active_collectors; i++)
+ {
+ Collector* collector = gc->collectors[i];
+ pool_put_entry(metadata->softref_set_pool, collector->softref_set);
+ pool_put_entry(metadata->weakref_set_pool, collector->weakref_set);
+ pool_put_entry(metadata->phanref_set_pool, collector->phanref_set);
+ collector->softref_set = NULL;
+ collector->weakref_set = NULL;
+ collector->phanref_set = NULL;
+ }
+ return;
+}
+
+
+extern Boolean obj_is_dead_in_minor_forward_collection(Collector *collector, Partial_Reveal_Object *p_obj);
+static inline Boolean obj_is_dead_in_minor_copy_collection(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+ GC *gc = collector->gc;
+ Lspace *los = ((GC_Gen *)gc)->los;
+
+ if(space_of_addr(gc, p_obj) != (Space *)los)
+ return !obj_is_marked_in_vt(p_obj);
+ else
+ return !lspace_object_is_marked(los, p_obj);
+}
+static inline Boolean obj_is_dead_in_major_collection(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+ GC *gc = collector->gc;
+ Lspace *los = ((GC_Gen *)gc)->los;
+
+ if(space_of_addr(gc, p_obj) != (Space *)los)
+ return !obj_is_marked_in_vt(p_obj);
+ else
+ return !lspace_object_is_marked(los, p_obj);
+}
+// note: the two least significant bits of p_obj (reserved for phantom enqueue status) must be cleared before this is called
+static inline Boolean obj_is_dead(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+ GC *gc = collector->gc;
+
+ assert(p_obj);
+ if(gc->collect_kind == MINOR_COLLECTION){
+ if( gc_requires_barriers())
+ return obj_is_dead_in_minor_forward_collection(collector, p_obj);
+ else
+ return obj_is_dead_in_minor_copy_collection(collector, p_obj);
+ } else {
+ return obj_is_dead_in_major_collection(collector, p_obj);
+ }
+}
+
+
+static inline Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Space *space)
+{
+ if(!obj_belongs_to_space(p_obj, (Space*)space)) return FALSE;
+ return forward_first_half ? (p_obj < object_forwarding_boundary) : (p_obj >= object_forwarding_boundary);
+}
+static inline Boolean obj_need_move(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+ assert(!obj_is_dead(collector, p_obj));
+ GC *gc = collector->gc;
+
+ if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION)
+ return fspace_object_to_be_forwarded(p_obj, collector->collect_space);
+
+ Space *space = space_of_addr(gc, p_obj);
+ return space->move_object;
+}
+
+
+extern void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref);
+extern void resurrect_obj_tree_after_mark(Collector *collector, Partial_Reveal_Object *p_obj);
+static inline void resurrect_obj_tree_in_minor_copy_collection(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+ resurrect_obj_tree_after_mark(collector, p_obj);
+}
+static inline void resurrect_obj_tree_in_major_collection(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+ resurrect_obj_tree_after_mark(collector, p_obj);
+}
+// note: the two least significant bits of p_obj (reserved for phantom enqueue status) must be cleared first
+// add p_ref to repset
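+// for copying/compacting collections the slot itself is recorded in the repset so it can be
+// repointed after objects move; in a barriered minor (forwarding) collection the slot is
+// updated in place, either here or inside resurrect_obj_tree_after_trace()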
+static inline void resurrect_obj_tree(Collector *collector, Partial_Reveal_Object **p_ref)
+{
+ GC *gc = collector->gc;
+
+ if(!gc_requires_barriers() || !(gc->collect_kind == MINOR_COLLECTION))
+ collector_repset_add_entry(collector, p_ref);
+ if(!obj_is_dead(collector, *p_ref)){
+ if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION && obj_need_move(collector, *p_ref))
+ *p_ref = obj_get_forwarding_pointer_in_vt(*p_ref);
+ return;
+ }
+ Partial_Reveal_Object* p_obj = *p_ref;
+ assert(p_obj);
+
+ if(gc->collect_kind == MINOR_COLLECTION){
+ if( gc_requires_barriers())
+ resurrect_obj_tree_after_trace(collector, p_ref);
+ else
+ resurrect_obj_tree_in_minor_copy_collection(collector, p_obj);
+ } else {
+ resurrect_obj_tree_in_major_collection(collector, p_obj);
+ }
+}
+
+
+/* called before loop of resurrect_obj_tree() */
+static inline void collector_reset_repset(Collector *collector)
+{
+ GC *gc = collector->gc;
+
+ assert(!collector->rep_set);
+ if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION)
+ return;
+ collector->rep_set = pool_get_entry(gc->metadata->free_set_pool);
+}
+/* called after loop of resurrect_obj_tree() */
+static inline void collector_put_repset(Collector *collector)
+{
+ GC *gc = collector->gc;
+
+ if(gc_requires_barriers() && gc->collect_kind == MINOR_COLLECTION)
+ return;
+ pool_put_entry(gc->metadata->collector_repset_pool, collector->rep_set);
+ collector->rep_set = NULL;
+}
+
+
+void finalizer_weakref_repset_add_entry_from_pool(Collector *collector, Pool *pool)
+{
+ GC *gc = collector->gc;
+
+ finalizer_weakref_reset_repset(gc);
+
+ pool_iterator_init(pool);
+ while(Vector_Block *block = pool_iterator_next(pool)){
+ unsigned int *iter = vector_block_iterator_init(block);
+
+ while(!vector_block_iterator_end(block, iter)){
+ Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
+ iter = vector_block_iterator_advance(block, iter);
+
+ if(*p_ref && obj_need_move(collector, *p_ref))
+ finalizer_weakref_repset_add_entry(gc, p_ref);
+ }
+ }
+ finalizer_weakref_put_repset(gc);
+}
+
+
+static void process_objects_with_finalizer(Collector *collector)
+{
+ GC *gc = collector->gc;
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+ Pool *objects_with_finalizer_pool = metadata->objects_with_finalizer_pool;
+ Pool *finalizable_objects_pool = metadata->finalizable_objects_pool;
+
+ gc_reset_finalizable_objects(gc);
+ pool_iterator_init(objects_with_finalizer_pool);
+ while(Vector_Block *block = pool_iterator_next(objects_with_finalizer_pool)){
+ unsigned int block_has_ref = 0;
+ unsigned int *iter = vector_block_iterator_init(block);
+ for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
+ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+ if(!p_obj)
+ continue;
+ if(obj_is_dead(collector, p_obj)){
+ gc_finalizable_objects_add_entry(gc, p_obj);
+ *iter = NULL;
+ } else {
+ ++block_has_ref;
+ }
+ }
+ if(!block_has_ref)
+ vector_block_clear(block);
+ }
+ gc_put_finalizable_objects(gc);
+
+ collector_reset_repset(collector);
+ if(!finalizable_objects_pool_is_empty(gc)){
+ pool_iterator_init(finalizable_objects_pool);
+ while(Vector_Block *block = pool_iterator_next(finalizable_objects_pool)){
+ unsigned int *iter = vector_block_iterator_init(block);
+ while(!vector_block_iterator_end(block, iter)){
+ assert(*iter);
+ resurrect_obj_tree(collector, (Partial_Reveal_Object **)iter);
+ iter = vector_block_iterator_advance(block, iter);
+ }
+ }
+ metadata->pending_finalizers = TRUE;
+ }
+ collector_put_repset(collector);
+
+ finalizer_weakref_repset_add_entry_from_pool(collector, objects_with_finalizer_pool);
+ /* finalizable objects have been added to the collector repset pool */
+ //finalizer_weakref_repset_add_entry_from_pool(collector, finalizable_objects_pool);
+}
+
+static void post_process_finalizable_objects(GC *gc)
+{
+ Pool *finalizable_objects_pool = gc->finalizer_weakref_metadata->finalizable_objects_pool;
+ Pool *free_pool = gc->finalizer_weakref_metadata->free_pool;
+
+ while(Vector_Block *block = pool_get_entry(finalizable_objects_pool)){
+ unsigned int *iter = vector_block_iterator_init(block);
+ while(!vector_block_iterator_end(block, iter)){
+ assert(*iter);
+ Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter;
+ vm_finalize_object(p_obj);
+ iter = vector_block_iterator_advance(block, iter);
+ }
+ vector_block_clear(block);
+ pool_put_entry(free_pool, block);
+ }
+}
+
+static void process_soft_references(Collector *collector)
+{
+ GC *gc = collector->gc;
+ if(gc->collect_kind == MINOR_COLLECTION){
+ assert(softref_set_pool_is_empty(gc));
+ return;
+ }
+
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+ Pool *softref_set_pool = metadata->softref_set_pool;
+
+ finalizer_weakref_reset_repset(gc);
+ pool_iterator_init(softref_set_pool);
+ while(Vector_Block *block = pool_iterator_next(softref_set_pool)){
+ unsigned int *iter = vector_block_iterator_init(block);
+ for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
+ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+ assert(p_obj);
+ Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
+ Partial_Reveal_Object *p_referent = *p_referent_field;
+
+ if(!p_referent){ // referent field has been cleared
+ *iter = NULL;
+ continue;
+ }
+ if(!obj_is_dead(collector, p_referent)){ // referent is alive
+ if(obj_need_move(collector, p_referent))
+ finalizer_weakref_repset_add_entry(gc, p_referent_field);
+ *iter = NULL;
+ continue;
+ }
+ *p_referent_field = NULL; /* referent is softly reachable: clear the referent field */
+ }
+ }
+ finalizer_weakref_put_repset(gc);
+
+ finalizer_weakref_repset_add_entry_from_pool(collector, softref_set_pool);
+ return;
+}
+
+static void process_weak_references(Collector *collector)
+{
+ GC *gc = collector->gc;
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+ Pool *weakref_set_pool = metadata->weakref_set_pool;
+
+ finalizer_weakref_reset_repset(gc);
+ pool_iterator_init(weakref_set_pool);
+ while(Vector_Block *block = pool_iterator_next(weakref_set_pool)){
+ unsigned int *iter = vector_block_iterator_init(block);
+ for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
+ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+ assert(p_obj);
+ Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
+ Partial_Reveal_Object *p_referent = *p_referent_field;
+
+ if(!p_referent){ // referent field has been cleared
+ *iter = NULL;
+ continue;
+ }
+ if(!obj_is_dead(collector, p_referent)){ // referent is alive
+ if(obj_need_move(collector, p_referent))
+ finalizer_weakref_repset_add_entry(gc, p_referent_field);
+ *iter = NULL;
+ continue;
+ }
+ *p_referent_field = NULL; /* referent is weakly reachable: clear the referent field */
+ }
+ }
+ finalizer_weakref_put_repset(gc);
+
+ finalizer_weakref_repset_add_entry_from_pool(collector, weakref_set_pool);
+ return;
+}
+
+static void process_phantom_references(Collector *collector)
+{
+ GC *gc = collector->gc;
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+ Pool *phanref_set_pool = metadata->phanref_set_pool;
+
+ finalizer_weakref_reset_repset(gc);
+// collector_reset_repset(collector);
+ pool_iterator_init(phanref_set_pool);
+ while(Vector_Block *block = pool_iterator_next(phanref_set_pool)){
+ unsigned int *iter = vector_block_iterator_init(block);
+ for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
+ Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
+ assert(p_obj);
+ Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
+ Partial_Reveal_Object *p_referent = *p_referent_field;
+
+ if(!p_referent){ // referent field has been cleared
+ *iter = NULL;
+ continue;
+ }
+ if(!obj_is_dead(collector, p_referent)){ // referent is alive
+ if(obj_need_move(collector, p_referent))
+ finalizer_weakref_repset_add_entry(gc, p_referent_field);
+ *iter = NULL;
+ continue;
+ }
+ *p_referent_field = NULL;
+ /* Phantom status: for future use
+ * if((unsigned int)p_referent & PHANTOM_REF_ENQUEUE_STATUS_MASK){
+ * // enqueued but not explicitly cleared OR pending for enqueueing
+ * *iter = NULL;
+ * }
+ * resurrect_obj_tree(collector, p_referent_field);
+ */
+ }
+ }
+// collector_put_repset(collector);
+ finalizer_weakref_put_repset(gc);
+
+ finalizer_weakref_repset_add_entry_from_pool(collector, phanref_set_pool);
+ return;
+}
+
+static inline void post_process_special_reference_pool(GC *gc, Pool *reference_pool)
+{
+ Pool *free_pool = gc->finalizer_weakref_metadata->free_pool;
+
+ while(Vector_Block *block = pool_get_entry(reference_pool)){
+ unsigned int *iter = vector_block_iterator_init(block);
+ while(!vector_block_iterator_end(block, iter)){
+ Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter;
+ if(p_obj)
+ vm_enqueue_reference(p_obj);
+ iter = vector_block_iterator_advance(block, iter);
+ }
+ vector_block_clear(block);
+ pool_put_entry(free_pool, block);
+ }
+}
+
+static void post_process_special_references(GC *gc)
+{
+ if(softref_set_pool_is_empty(gc)
+ && weakref_set_pool_is_empty(gc)
+ && phanref_set_pool_is_empty(gc)){
+ gc_clear_special_reference_pools(gc);
+ return;
+ }
+
+ gc->finalizer_weakref_metadata->pending_weak_references = TRUE;
+
+ Pool *softref_set_pool = gc->finalizer_weakref_metadata->softref_set_pool;
+ Pool *weakref_set_pool = gc->finalizer_weakref_metadata->weakref_set_pool;
+ Pool *phanref_set_pool = gc->finalizer_weakref_metadata->phanref_set_pool;
+ Pool *free_pool = gc->finalizer_weakref_metadata->free_pool;
+
+ post_process_special_reference_pool(gc, softref_set_pool);
+ post_process_special_reference_pool(gc, weakref_set_pool);
+ post_process_special_reference_pool(gc, phanref_set_pool);
+}
+
+void collector_process_finalizer_weakref(Collector *collector)
+{
+ GC *gc = collector->gc;
+
+ gc_set_weakref_sets(gc);
+ process_soft_references(collector);
+ process_weak_references(collector);
+ process_objects_with_finalizer(collector);
+ process_phantom_references(collector);
+}
+
+void gc_post_process_finalizer_weakref(GC *gc)
+{
+ post_process_special_references(gc);
+ post_process_finalizable_objects(gc);
+}
+
+void process_objects_with_finalizer_on_exit(GC *gc)
+{
+ Pool *objects_with_finalizer_pool = gc->finalizer_weakref_metadata->objects_with_finalizer_pool;
+ Pool *free_pool = gc->finalizer_weakref_metadata->free_pool;
+
+ vm_gc_lock_enum();
+ /* FIXME: holding the gc lock is not enough; there may still be mutators allocating objects with finalizers.
+ * This could be fixed as follows:
+ * in fspace_alloc() and lspace_alloc(), hold the gc lock across
+ * allocating the memory and adding the object with a finalizer to the pool
+ */
+ lock(gc->mutator_list_lock);
+ gc_set_objects_with_finalizer(gc);
+ unlock(gc->mutator_list_lock);
+ while(Vector_Block *block = pool_get_entry(objects_with_finalizer_pool)){
+ unsigned int *iter = vector_block_iterator_init(block);
+ while(!vector_block_iterator_end(block, iter)){
+ Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter;
+ if(p_obj)
+ vm_finalize_object(p_obj);
+ iter = vector_block_iterator_advance(block, iter);
+ }
+ vector_block_clear(block);
+ pool_put_entry(free_pool, block);
+ }
+ vm_gc_unlock_enum();
+}
+
+void gc_update_finalizer_weakref_repointed_refs(GC* gc)
+{
+ Finalizer_Weakref_Metadata* metadata = gc->finalizer_weakref_metadata;
+ Pool *repset_pool = metadata->repset_pool;
+
+ /* NOTE:: this is destructive to the root sets. */
+ Vector_Block* root_set = pool_get_entry(repset_pool);
+
+ while(root_set){
+ unsigned int* iter = vector_block_iterator_init(root_set);
+ while(!vector_block_iterator_end(root_set,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+ iter = vector_block_iterator_advance(root_set,iter);
+
+ Partial_Reveal_Object* p_obj = *p_ref;
+ /* For repset, this check is unnecessary, since all slots are repointed; otherwise
+ they will not be recorded. For root set, it is possible to point to LOS or other
+ non-moved space. */
+#ifdef _DEBUG
+ if( !gc_requires_barriers() || gc->collect_kind == MAJOR_COLLECTION ){
+ assert(obj_is_forwarded_in_obj_info(p_obj));
+ } else
+ assert(obj_is_forwarded_in_vt(p_obj));
+#endif
+ Partial_Reveal_Object* p_target_obj;
+ if( !gc_requires_barriers() || gc->collect_kind == MAJOR_COLLECTION )
+ p_target_obj = get_forwarding_pointer_in_obj_info(p_obj);
+ else
+ p_target_obj = obj_get_forwarding_pointer_in_vt(p_obj);
+ *p_ref = p_target_obj;
+ }
+ vector_block_clear(root_set);
+ pool_put_entry(metadata->free_pool, root_set);
+ root_set = pool_get_entry(repset_pool);
+ }
+
+ return;
+}
+
+void gc_activate_finalizer_weakref_threads(GC *gc)
+{
+ Finalizer_Weakref_Metadata* metadata = gc->finalizer_weakref_metadata;
+
+ if(metadata->pending_finalizers || metadata->pending_weak_references){
+ metadata->pending_finalizers = FALSE;
+ metadata->pending_weak_references = FALSE;
+ vm_hint_finalize();
+ }
+}
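
process_soft_references(), process_weak_references() and process_phantom_references() above differ only in which pool they drain and in the minor-collection shortcut for soft references; the per-entry decision is identical. Factored out as a hypothetical helper (not part of the patch), using only functions the patch defines:

    /* returns TRUE if the entry can be dropped from the reference set */
    static Boolean referent_decision_sketch(Collector *collector, GC *gc,
                                            Partial_Reveal_Object *p_ref_obj)
    {
      Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_ref_obj);
      Partial_Reveal_Object *p_referent = *p_referent_field;

      if(!p_referent)                            /* referent field already cleared */
        return TRUE;
      if(!obj_is_dead(collector, p_referent)){   /* referent survived the collection */
        if(obj_need_move(collector, p_referent))
          finalizer_weakref_repset_add_entry(gc, p_referent_field);
        return TRUE;
      }
      *p_referent_field = NULL;   /* only softly/weakly/phantom reachable: clear it */
      return FALSE;               /* keep entry so post-processing can enqueue it */
    }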
diff -ruN oldtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h newtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
--- oldtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h 1970-01-01 08:00:00.000000000 +0800
+++ newtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h 2006-12-09 16:05:00.000000000 +0800
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Li-Gang Wang, 2006/11/30
+ */
+
+#ifndef _FINALIZER_WEAKREF_H_
+#define _FINALIZER_WEAKREF_H_
+
+#include "finalizer_weakref_metadata.h"
+#include "../thread/collector.h"
+
+/* Phantom status: for future use
+ * #define PHANTOM_REF_ENQUEUE_STATUS_MASK 0x3
+ * #define PHANTOM_REF_ENQUEUED_MASK 0x1
+ * #define PHANTOM_REF_PENDING_MASK 0x2
+ *
+ * inline Partial_Reveal_Object *get_reference_pointer(Partial_Reveal_Object *p_obj)
+ * {
+ * return (Partial_Reveal_Object *)((unsigned int)(p_obj)&(~PHANTOM_REF_ENQUEUE_STATUS_MASK));
+ * }
+ * inline void update_reference_pointer(Partial_Reveal_Object **p_ref, Partial_Reveal_Object *p_target_obj)
+ * {
+ * unsigned int temp = (unsigned int)*p_ref;
+ *
+ * temp &= PHANTOM_REF_ENQUEUE_STATUS_MASK;
+ * temp |= (unsigned int)p_target_obj;
+ * *p_ref = (Partial_Reveal_Object *)temp;
+ * }
+ */
+
+inline Partial_Reveal_Object **obj_get_referent_field(Partial_Reveal_Object *p_obj)
+{
+ assert(p_obj);
+ return (Partial_Reveal_Object **)(( Byte*)p_obj+get_gc_referent_offset());
+}
+
+typedef void (* Scan_Slot_Func)(Collector *collector, Partial_Reveal_Object **p_ref);
+inline void scan_weak_reference(Collector *collector, Partial_Reveal_Object *p_obj, Scan_Slot_Func scan_slot)
+{
+ WeakReferenceType type = special_reference_type(p_obj);
+ if(type == NOT_REFERENCE)
+ return;
+ unsigned int collect_kind = collector->gc->collect_kind;
+ Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj);
+ Partial_Reveal_Object *p_referent = *p_referent_field;
+ if (!p_referent) return;
+ switch(type){
+ case SOFT_REFERENCE :
+ if(collect_kind==MINOR_COLLECTION)
+ scan_slot(collector, p_referent_field);
+ else
+ collector_softref_set_add_entry(collector, p_obj);
+ break;
+ case WEAK_REFERENCE :
+ collector_weakref_set_add_entry(collector, p_obj);
+ break;
+ case PHANTOM_REFERENCE :
+ collector_phanref_set_add_entry(collector, p_obj);
+ break;
+ default :
+ assert(0);
+ break;
+ }
+}
+
+
+extern void mutator_reset_objects_with_finalizer(Mutator *mutator);
+extern void gc_set_objects_with_finalizer(GC *gc);
+extern void collector_reset_weakref_sets(Collector *collector);
+
+extern void collector_process_finalizer_weakref(Collector *collector);
+extern void gc_post_process_finalizer_weakref(GC *gc);
+extern void process_objects_with_finalizer_on_exit(GC *gc);
+
+extern void gc_update_finalizer_weakref_repointed_refs(GC* gc);
+extern void gc_activate_finalizer_weakref_threads(GC *gc);
+
+#endif // _FINALIZER_WEAKREF_H_
diff -ruN oldtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp newtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
--- oldtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp 1970-01-01 08:00:00.000000000 +0800
+++ newtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp 2006-12-09 16:06:48.000000000 +0800
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Li-Gang Wang, 2006/11/29
+ */
+
+#include "finalizer_weakref_metadata.h"
+#include "../thread/mutator.h"
+#include "../thread/collector.h"
+
+#define POOL_SEGMENT_SIZE_BIT_SHIFT 20
+#define POOL_SEGMENT_SIZE_BYTES (1 << POOL_SEGMENT_SIZE_BIT_SHIFT)
+
+#define METADATA_BLOCK_SIZE_BIT_SHIFT 10
+#define METADATA_BLOCK_SIZE_BYTES (1<<METADATA_BLOCK_SIZE_BIT_SHIFT)
+
+static Finalizer_Weakref_Metadata finalizer_weakref_metadata;
+
+unsigned int get_gc_referent_offset(void)
+{ return finalizer_weakref_metadata.gc_referent_offset; }
+
+void set_gc_referent_offset(unsigned int offset)
+{ finalizer_weakref_metadata.gc_referent_offset = offset; }
+
+void gc_finalizer_weakref_metadata_initialize(GC *gc)
+{
+ void *pool_segment = STD_MALLOC(POOL_SEGMENT_SIZE_BYTES);
+ memset(pool_segment, 0, POOL_SEGMENT_SIZE_BYTES);
+ finalizer_weakref_metadata.pool_segments[0] = pool_segment;
+ finalizer_weakref_metadata.next_segment_pos = 1;
+
+ finalizer_weakref_metadata.free_pool = sync_pool_create();
+ finalizer_weakref_metadata.objects_with_finalizer_pool = sync_pool_create();
+ finalizer_weakref_metadata.finalizable_objects_pool = sync_pool_create();
+ finalizer_weakref_metadata.softref_set_pool = sync_pool_create();
+ finalizer_weakref_metadata.weakref_set_pool = sync_pool_create();
+ finalizer_weakref_metadata.phanref_set_pool = sync_pool_create();
+ finalizer_weakref_metadata.repset_pool = sync_pool_create();
+
+ unsigned int num_blocks = POOL_SEGMENT_SIZE_BYTES >> METADATA_BLOCK_SIZE_BIT_SHIFT;
+ for(unsigned int i=0; i<num_blocks; i++){
+ Vector_Block *block = (Vector_Block *)((unsigned int)pool_segment + i*METADATA_BLOCK_SIZE_BYTES);
+ vector_block_init(block, METADATA_BLOCK_SIZE_BYTES);
+ pool_put_entry(finalizer_weakref_metadata.free_pool, block);
+ }
+
+ finalizer_weakref_metadata.finalizable_objects = NULL;
+ finalizer_weakref_metadata.repset = NULL;
+ finalizer_weakref_metadata.pending_finalizers = FALSE;
+ finalizer_weakref_metadata.pending_weak_references = FALSE;
+ finalizer_weakref_metadata.gc_referent_offset = 0;
+
+ gc->finalizer_weakref_metadata = &finalizer_weakref_metadata;
+ return;
+}
+
+void gc_finalizer_weakref_metadata_destruct(GC *gc)
+{
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+
+ sync_pool_destruct(metadata->free_pool);
+ sync_pool_destruct(metadata->objects_with_finalizer_pool);
+ sync_pool_destruct(metadata->finalizable_objects_pool);
+ sync_pool_destruct(metadata->softref_set_pool);
+ sync_pool_destruct(metadata->weakref_set_pool);
+ sync_pool_destruct(metadata->phanref_set_pool);
+ sync_pool_destruct(metadata->repset_pool);
+
+ metadata->finalizable_objects = NULL;
+ metadata->repset = NULL;
+
+ for(unsigned int i=0; i<metadata->next_segment_pos; i++){
+ assert(metadata->pool_segments[i]);
+ STD_FREE(metadata->pool_segments[i]);
+ }
+
+ gc->finalizer_weakref_metadata = NULL;
+}
+
+void gc_finalizer_weakref_metadata_verify(GC *gc, Boolean is_before_gc)
+{
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+
+ assert(pool_is_empty(metadata->finalizable_objects_pool));
+ assert(pool_is_empty(metadata->softref_set_pool));
+ assert(pool_is_empty(metadata->weakref_set_pool));
+ assert(pool_is_empty(metadata->phanref_set_pool));
+ assert(pool_is_empty(metadata->repset_pool));
+ assert(metadata->finalizable_objects == NULL);
+ assert(metadata->repset == NULL);
+
+ return;
+}
+
+void gc_reset_finalizer_weakref_metadata(GC *gc)
+{
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+ Pool *objects_with_finalizer_pool = metadata->objects_with_finalizer_pool;
+ Pool *finalizable_objects_pool = metadata->finalizable_objects_pool;
+
+ assert(pool_is_empty(finalizable_objects_pool));
+ assert(pool_is_empty(metadata->softref_set_pool));
+ assert(pool_is_empty(metadata->weakref_set_pool));
+ assert(pool_is_empty(metadata->phanref_set_pool));
+ assert(pool_is_empty(metadata->repset_pool));
+ assert(metadata->finalizable_objects == NULL);
+ assert(metadata->repset == NULL);
+
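+ /* blocks emptied during this collection go back to the free pool; blocks still
+ * holding live registered objects are parked in finalizable_objects_pool, then the
+ * two pool pointers are swapped below so the survivors become next cycle's
+ * objects_with_finalizer_pool */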
+ while(Vector_Block *block = pool_get_entry(objects_with_finalizer_pool)){
+ unsigned int *iter = vector_block_iterator_init(block);
+ if(vector_block_iterator_end(block, iter)){
+ vector_block_clear(block);
+ pool_put_entry(metadata->free_pool, block);
+ } else {
+ pool_put_entry(finalizable_objects_pool, block);
+ }
+ }
+ assert(pool_is_empty(objects_with_finalizer_pool));
+ metadata->objects_with_finalizer_pool = finalizable_objects_pool;
+ metadata->finalizable_objects_pool = objects_with_finalizer_pool;
+}
+
+/* called when there is no Vector_Block in finalizer_weakref_metadata->free_pool
+ * extend the pool by a pool segment
+ */
+static void gc_finalizer_weakref_metadata_extend(void)
+{
+ Finalizer_Weakref_Metadata &metadata = finalizer_weakref_metadata;
+
+ unsigned int segment_pos = metadata.next_segment_pos;
+ while(segment_pos < POOL_SEGMENT_NUM){
+ unsigned int next_segment_pos = segment_pos + 1;
+ unsigned int temp = (unsigned int)atomic_cas32((volatile unsigned int *)&metadata.next_segment_pos, next_segment_pos, segment_pos);
+ if(temp == segment_pos)
+ break;
+ segment_pos = metadata.next_segment_pos;
+ }
+ if(segment_pos >= POOL_SEGMENT_NUM)
+ return;
+
+ void *pool_segment = STD_MALLOC(POOL_SEGMENT_SIZE_BYTES);
+ memset(pool_segment, 0, POOL_SEGMENT_SIZE_BYTES);
+ metadata.pool_segments[segment_pos] = pool_segment;
+
+ unsigned int num_blocks = POOL_SEGMENT_SIZE_BYTES >> METADATA_BLOCK_SIZE_BIT_SHIFT;
+ for(unsigned int i=0; i<num_blocks; i++){
+ Vector_Block *block = (Vector_Block *)((unsigned int)pool_segment + i*METADATA_BLOCK_SIZE_BYTES);
+ vector_block_init(block, METADATA_BLOCK_SIZE_BYTES);
+ pool_put_entry(metadata.free_pool, block);
+ }
+ return;
+}
+
+Vector_Block *finalizer_weakref_get_free_block(void)
+{
+ Vector_Block *block;
+
+ while(!(block = pool_get_entry(finalizer_weakref_metadata.free_pool)))
+ gc_finalizer_weakref_metadata_extend();
+ return block;
+}
+
+/* called when there are too many free Vector_Blocks in gc->finalizer_weakref_metadata
+ * shrink the free pool by half
+ */
+void gc_finalizer_weakref_metadata_shrink(GC *gc)
+{
+}
+
+static inline void finalizer_weakref_metadata_general_add_entry(Vector_Block* &vector_block_in_use, Pool *pool, Partial_Reveal_Object *ref)
+{
+ assert(vector_block_in_use);
+ assert(ref);
+
+ Vector_Block* block = vector_block_in_use;
+ vector_block_add_entry(block, (unsigned int)ref);
+
+ if(!vector_block_is_full(block)) return;
+
+ pool_put_entry(pool, block);
+ vector_block_in_use = finalizer_weakref_get_free_block();
+}
+
+void mutator_finalizer_add_entry(Mutator *mutator, Partial_Reveal_Object *ref)
+{
+ finalizer_weakref_metadata_general_add_entry(mutator->objects_with_finalizer, finalizer_weakref_metadata.objects_with_finalizer_pool, ref);
+}
+
+void gc_finalizable_objects_add_entry(GC *gc, Partial_Reveal_Object *ref)
+{
+ finalizer_weakref_metadata_general_add_entry(finalizer_weakref_metadata.finalizable_objects, finalizer_weakref_metadata.finalizable_objects_pool, ref);
+}
+
+void collector_softref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref)
+{
+ finalizer_weakref_metadata_general_add_entry(collector->softref_set, finalizer_weakref_metadata.softref_set_pool, ref);
+}
+
+void collector_weakref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref)
+{
+ finalizer_weakref_metadata_general_add_entry(collector->weakref_set, finalizer_weakref_metadata.weakref_set_pool, ref);
+}
+
+void collector_phanref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref)
+{
+ finalizer_weakref_metadata_general_add_entry(collector->phanref_set, finalizer_weakref_metadata.phanref_set_pool, ref);
+}
+
+void finalizer_weakref_repset_add_entry(GC *gc, Partial_Reveal_Object **p_ref)
+{
+ assert(*p_ref);
+ finalizer_weakref_metadata_general_add_entry(finalizer_weakref_metadata.repset, finalizer_weakref_metadata.repset_pool, (Partial_Reveal_Object *)p_ref);
+}
+
+static inline Boolean pool_has_no_reference(Pool *pool)
+{
+ if(pool_is_empty(pool))
+ return TRUE;
+ pool_iterator_init(pool);
+ while(Vector_Block *block = pool_iterator_next(pool)){
+ unsigned int *iter = vector_block_iterator_init(block);
+ while(!vector_block_iterator_end(block, iter)){
+ if(*iter)
+ return FALSE;
+ iter = vector_block_iterator_advance(block, iter);
+ }
+ }
+ return TRUE;
+}
+
+Boolean objects_with_finalizer_pool_is_empty(GC *gc)
+{
+ return pool_has_no_reference(gc->finalizer_weakref_metadata->objects_with_finalizer_pool);
+}
+
+Boolean finalizable_objects_pool_is_empty(GC *gc)
+{
+ return pool_has_no_reference(gc->finalizer_weakref_metadata->finalizable_objects_pool);
+}
+
+Boolean softref_set_pool_is_empty(GC *gc)
+{
+ return pool_has_no_reference(gc->finalizer_weakref_metadata->softref_set_pool);
+}
+
+Boolean weakref_set_pool_is_empty(GC *gc)
+{
+ return pool_has_no_reference(gc->finalizer_weakref_metadata->weakref_set_pool);
+}
+
+Boolean phanref_set_pool_is_empty(GC *gc)
+{
+ return pool_has_no_reference(gc->finalizer_weakref_metadata->phanref_set_pool);
+}
+
+Boolean finalizer_weakref_repset_pool_is_empty(GC *gc)
+{
+ return pool_has_no_reference(gc->finalizer_weakref_metadata->repset_pool);
+}
+
+static inline void finalizer_weakref_metadata_clear_pool(Pool *pool)
+{
+ while(Vector_Block* block = pool_get_entry(pool))
+ {
+ vector_block_clear(block);
+ pool_put_entry(finalizer_weakref_metadata.free_pool, block);
+ }
+}
+
+void gc_clear_special_reference_pools(GC *gc)
+{
+ finalizer_weakref_metadata_clear_pool(gc->finalizer_weakref_metadata->softref_set_pool);
+ finalizer_weakref_metadata_clear_pool(gc->finalizer_weakref_metadata->weakref_set_pool);
+ finalizer_weakref_metadata_clear_pool(gc->finalizer_weakref_metadata->phanref_set_pool);
+}
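
The constants at the top of this file fix the metadata geometry: 1 MB segments (POOL_SEGMENT_SIZE_BIT_SHIFT = 20) diced into 1 KB Vector_Blocks (METADATA_BLOCK_SIZE_BIT_SHIFT = 10), with at most POOL_SEGMENT_NUM = 256 segments ever malloc'd. A standalone sanity check of what that implies:

    #include <cassert>

    int main()
    {
      const unsigned long long segment_bytes = 1ull << 20;  /* POOL_SEGMENT_SIZE_BYTES   */
      const unsigned long long block_bytes   = 1ull << 10;  /* METADATA_BLOCK_SIZE_BYTES */
      const unsigned long long segment_num   = 256;         /* POOL_SEGMENT_NUM          */

      assert(segment_bytes / block_bytes == 1024);          /* blocks per segment */
      /* hard cap on finalizer/weakref metadata memory: 256 segments * 1 MB = 256 MB */
      assert(segment_num * segment_bytes == 256ull << 20);
      return 0;
    }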
diff -ruN oldtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h newtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h
--- oldtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h 1970-01-01 08:00:00.000000000 +0800
+++ newtrunk/vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h 2006-12-09 16:05:06.000000000 +0800
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Li-Gang Wang, 2006/11/29
+ */
+
+#ifndef _FINALIZER_WEAKREF_METADATA_H_
+#define _FINALIZER_WEAKREF_METADATA_H_
+
+#include "../common/gc_common.h"
+#include "../utils/vector_block.h"
+#include "../utils/sync_pool.h"
+
+#define POOL_SEGMENT_NUM 256
+
+typedef struct Finalizer_Weakref_Metadata{
+ void *pool_segments[POOL_SEGMENT_NUM]; // malloced free pool segments' addresses array
+ unsigned int next_segment_pos; // next available position in pool_segments array
+
+ Pool *free_pool; // list of free buffers for the five pools below
+
+ Pool *objects_with_finalizer_pool; // list of objects that have finalizers;
+ // objects are added when they are allocated
+ Pool *finalizable_objects_pool; // temporary buffer for finalizable objects identified during one single GC
+
+ Pool *softref_set_pool; // temporary buffer for soft references identified during one single GC
+ Pool *weakref_set_pool; // temporary buffer for weak references identified during one single GC
+ Pool *phanref_set_pool; // temporary buffer for phantom references identified during one single GC
+
+ Pool *repset_pool; // repointed reference slot sets
+
+ Vector_Block *finalizable_objects; // buffer for finalizable_objects_pool
+ Vector_Block *repset; // buffer for repset_pool
+
+ Boolean pending_finalizers; // there are objects waiting to be finalized
+ Boolean pending_weak_references; // there are weak references waiting to be enqueued
+
+ unsigned int gc_referent_offset; // the referent field's offset in Reference Class
+}Finalizer_Weakref_Metadata;
+
+extern unsigned int get_gc_referent_offset(void);
+extern void set_gc_referent_offset(unsigned int offset);
+
+extern void gc_finalizer_weakref_metadata_initialize(GC *gc);
+extern void gc_finalizer_weakref_metadata_destruct(GC *gc);
+extern void gc_finalizer_weakref_metadata_verify(GC *gc, Boolean is_before_gc);
+extern void gc_reset_finalizer_weakref_metadata(GC *gc);
+extern Vector_Block *finalizer_weakref_get_free_block(void);
+extern void gc_finalizer_weakref_metadata_shrink(GC *gc);
+
+extern void mutator_finalizer_add_entry(Mutator *mutator, Partial_Reveal_Object *ref);
+extern void gc_finalizable_objects_add_entry(GC *gc, Partial_Reveal_Object *ref);
+extern void collector_softref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref);
+extern void collector_weakref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref);
+extern void collector_phanref_set_add_entry(Collector *collector, Partial_Reveal_Object *ref);
+extern void finalizer_weakref_repset_add_entry(GC *gc, Partial_Reveal_Object **ref);
+
+extern Boolean objects_with_finalizer_pool_is_empty(GC *gc);
+extern Boolean finalizable_objects_pool_is_empty(GC *gc);
+extern Boolean softref_set_pool_is_empty(GC *gc);
+extern Boolean weakref_set_pool_is_empty(GC *gc);
+extern Boolean phanref_set_pool_is_empty(GC *gc);
+extern Boolean finalizer_weakref_repset_pool_is_empty(GC *gc);
+
+extern void gc_clear_special_reference_pools(GC *gc);
+
+
+/* called before loop of recording finalizable objects */
+inline void gc_reset_finalizable_objects(GC *gc)
+{
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+
+ assert(!metadata->finalizable_objects);
+ metadata->finalizable_objects = pool_get_entry(metadata->free_pool);
+}
+/* called after loop of recording finalizable objects */
+inline void gc_put_finalizable_objects(GC *gc)
+{
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+
+ pool_put_entry(metadata->finalizable_objects_pool, metadata->finalizable_objects);
+ metadata->finalizable_objects = NULL;
+}
+
+/* called before loop of recording repointed reference */
+inline void finalizer_weakref_reset_repset(GC *gc)
+{
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+
+ assert(!metadata->repset);
+ metadata->repset = pool_get_entry(metadata->free_pool);
+}
+/* called after loop of recording repointed reference */
+inline void finalizer_weakref_put_repset(GC *gc)
+{
+ Finalizer_Weakref_Metadata *metadata = gc->finalizer_weakref_metadata;
+
+ pool_put_entry(metadata->repset_pool, metadata->repset);
+ metadata->repset = NULL;
+}
+
+#endif // _FINALIZER_WEAKREF_METADATA_H_
diff -ruN oldtrunk/vm/gc_gen/src/gen/gen.cpp newtrunk/vm/gc_gen/src/gen/gen.cpp
--- oldtrunk/vm/gc_gen/src/gen/gen.cpp 2006-12-09 01:16:04.000000000 +0800
+++ newtrunk/vm/gc_gen/src/gen/gen.cpp 2006-12-09 16:08:30.000000000 +0800
@@ -100,6 +100,10 @@
gc_gen->committed_heap_size = space_committed_size((Space*)gc_gen->nos) +
space_committed_size((Space*)gc_gen->mos) +
space_committed_size((Space*)gc_gen->los);
+
+ set_native_finalizer_thread_flag(TRUE);
+ set_native_ref_enqueue_thread_flag(TRUE);
+
return;
}
diff -ruN oldtrunk/vm/gc_gen/src/gen/gen.h newtrunk/vm/gc_gen/src/gen/gen.h
--- oldtrunk/vm/gc_gen/src/gen/gen.h 2006-12-04 05:54:32.000000000 +0800
+++ newtrunk/vm/gc_gen/src/gen/gen.h 2006-12-09 16:08:34.000000000 +0800
@@ -26,6 +26,7 @@
#include "../trace_forward/fspace.h"
#include "../mark_compact/mspace.h"
#include "../mark_sweep/lspace.h"
+#include "../finalizer_weakref/finalizer_weakref_metadata.h"
enum Write_Barrier_Kind{
WRITE_BARRIER_NIL,
@@ -64,6 +65,7 @@
/* metadata is the pool for rootset, markstack, etc. */
GC_Metadata* metadata;
+ Finalizer_Weakref_Metadata *finalizer_weakref_metadata;
unsigned int collect_kind; /* MAJOR or MINOR */
/* FIXME:: this is wrong! root_set belongs to mutator */
Vector_Block* root_set;
diff -ruN oldtrunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp newtrunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
--- oldtrunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp 2006-12-03 23:49:30.000000000 +0800
+++ newtrunk/vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp 2006-12-09 16:10:06.000000000 +0800
@@ -21,6 +21,7 @@
#include "mspace.h"
#include "../thread/collector.h"
#include "../trace_forward/fspace.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
struct GC_Gen;
Space* gc_get_nos(GC_Gen* gc);
@@ -331,6 +332,9 @@
/* last collector's world here */
/* prepare for next phase */
gc_init_block_for_collectors(gc, mspace);
+
+ collector_process_finalizer_weakref(collector);
+
/* let other collectors go */
num_marking_collectors++;
}
@@ -360,6 +364,8 @@
/* Pass 3: update all references whose objects are to be moved */
gc_update_repointed_refs(collector);
+
+ gc_post_process_finalizer_weakref(gc);
/* Pass 4: do the compaction and reset blocks */
next_block_for_compact = mspace_get_first_compact_block(mspace);
diff -ruN oldtrunk/vm/gc_gen/src/thread/collector.cpp newtrunk/vm/gc_gen/src/thread/collector.cpp
--- oldtrunk/vm/gc_gen/src/thread/collector.cpp 2006-12-03 23:49:30.000000000 +0800
+++ newtrunk/vm/gc_gen/src/thread/collector.cpp 2006-12-09 16:11:40.000000000 +0800
@@ -22,6 +22,7 @@
#include "collector.h"
#include "../mark_compact/mspace.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
static void collector_restore_obj_info(Collector* collector)
@@ -70,6 +71,8 @@
assert(collector->rem_set==NULL);
collector->rem_set = pool_get_entry(metadata->free_set_pool);
}
+
+ collector_reset_weakref_sets(collector);
collector->result = TRUE;
return;
diff -ruN oldtrunk/vm/gc_gen/src/thread/collector.h newtrunk/vm/gc_gen/src/thread/collector.h
--- oldtrunk/vm/gc_gen/src/thread/collector.h 2006-12-03 23:49:30.000000000 +0800
+++ newtrunk/vm/gc_gen/src/thread/collector.h 2006-12-09 16:11:44.000000000 +0800
@@ -42,6 +42,10 @@
Vector_Block* rep_set; /* repointed set */
Vector_Block* rem_set;
+ Vector_Block *softref_set;
+ Vector_Block *weakref_set;
+ Vector_Block *phanref_set;
+
VmEventHandle task_assigned_event;
VmEventHandle task_finished_event;
diff -ruN oldtrunk/vm/gc_gen/src/thread/mutator_alloc.cpp newtrunk/vm/gc_gen/src/thread/mutator_alloc.cpp
--- oldtrunk/vm/gc_gen/src/thread/mutator_alloc.cpp 2006-12-03 23:49:30.000000000 +0800
+++ newtrunk/vm/gc_gen/src/thread/mutator_alloc.cpp 2006-12-09 16:12:02.000000000 +0800
@@ -22,6 +22,8 @@
#include "../gen/gen.h"
+#include "../finalizer_weakref/finalizer_weakref_metadata.h"
+
/* classloader sometimes sets the bit for finalizable objects (?) */
inline unsigned int get_instance_data_size (unsigned int encoded_size)
{ return (encoded_size & NEXT_TO_HIGH_BIT_CLEAR_MASK); }
@@ -37,15 +39,19 @@
/* FIXME:: this is outdated actually */
size = get_instance_data_size(size);
- Allocator* allocator = (Allocator*)gc_get_tls();
+ Mutator* mutator = (Mutator*)gc_get_tls();
if ( size > GC_OBJ_SIZE_THRESHOLD )
- p_obj = (Managed_Object_Handle)los_alloc(size, allocator);
+ p_obj = (Managed_Object_Handle)los_alloc(size, (Allocator*)mutator);
else
- p_obj = (Managed_Object_Handle)nos_alloc(size, allocator);
+ p_obj = (Managed_Object_Handle)nos_alloc(size, (Allocator*)mutator);
- assert(p_obj);
+ if( p_obj == NULL ) return NULL;
+
obj_set_vt((Partial_Reveal_Object*)p_obj, ah);
+
+ if(type_has_finalizer((Partial_Reveal_VTable *)ah))
+ mutator_finalizer_add_entry(mutator, (Partial_Reveal_Object*)p_obj);
return (Managed_Object_Handle)p_obj;
}
@@ -57,6 +63,9 @@
assert((size % GC_OBJECT_ALIGNMENT) == 0);
assert(ah);
+ if(type_has_finalizer((Partial_Reveal_VTable *)ah))
+ return NULL;
+
/* object should be handled specially */
if ( size > GC_OBJ_SIZE_THRESHOLD ) return NULL;
diff -ruN oldtrunk/vm/gc_gen/src/thread/mutator.cpp newtrunk/vm/gc_gen/src/thread/mutator.cpp
--- oldtrunk/vm/gc_gen/src/thread/mutator.cpp 2006-12-04 02:02:24.000000000 +0800
+++ newtrunk/vm/gc_gen/src/thread/mutator.cpp 2006-12-09 16:11:56.000000000 +0800
@@ -20,6 +20,7 @@
#include "mutator.h"
#include "../trace_forward/fspace.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
struct GC_Gen;
Space* gc_get_nos(GC_Gen* gc);
@@ -37,6 +38,8 @@
mutator->rem_set = pool_get_entry(gc->metadata->free_set_pool);
assert(vector_block_is_empty(mutator->rem_set));
}
+
+ mutator->objects_with_finalizer = finalizer_weakref_get_free_block();
lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
@@ -61,6 +64,11 @@
pool_put_entry(gc->metadata->mutator_remset_pool, mutator->rem_set);
mutator->rem_set = NULL;
}
+
+ if(mutator->objects_with_finalizer){
+ pool_put_entry(gc->finalizer_weakref_metadata->objects_with_finalizer_pool, mutator->objects_with_finalizer);
+ mutator->objects_with_finalizer = NULL;
+ }
lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
@@ -90,6 +98,7 @@
while (mutator) {
mutator->rem_set = pool_get_entry(gc->metadata->free_set_pool);
alloc_context_reset((Allocator*)mutator);
+ mutator_reset_objects_with_finalizer(mutator);
mutator = mutator->next;
}
return;
diff -ruN oldtrunk/vm/gc_gen/src/thread/mutator.h newtrunk/vm/gc_gen/src/thread/mutator.h
--- oldtrunk/vm/gc_gen/src/thread/mutator.h 2006-12-03 23:49:30.000000000 +0800
+++ newtrunk/vm/gc_gen/src/thread/mutator.h 2006-12-09 16:12:00.000000000 +0800
@@ -35,6 +35,7 @@
/* END of Allocator --> */
Vector_Block* rem_set;
+ Vector_Block* objects_with_finalizer;
Mutator* next; /* The gc info area associated with the next active thread. */
} Mutator;
diff -ruN oldtrunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp newtrunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp
--- oldtrunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp 2006-12-03 23:49:30.000000000 +0800
+++ newtrunk/vm/gc_gen/src/trace_forward/fspace_collect_copy.cpp 2006-12-09 16:13:18.000000000 +0800
@@ -22,6 +22,7 @@
#include "../mark_compact/mspace.h"
#include "../mark_sweep/lspace.h"
#include "../thread/collector.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
static volatile Block_Header* current_copy_block;
static volatile Block_Header* current_target_block;
@@ -179,6 +180,9 @@
/* world for single thread, e.g., verification of last phase, and preparation of next phase */
current_copy_block = fspace_get_first_copy_block(fspace);
current_target_block = mspace_get_first_target_block_for_nos(mspace);
+
+ collector_process_finalizer_weakref(collector);
+
/* let other collectors go */
num_marking_collectors++;
}
@@ -203,6 +207,8 @@
if( collector->thread_handle != 0 ) return;
gc_update_repointed_refs(collector);
+
+ gc_post_process_finalizer_weakref(gc);
/* FIXME:: Pass 2 and 3 can be merged into one pass */
/* Pass 3: copy live fspace object to new location */
diff -ruN oldtrunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp newtrunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp
--- oldtrunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp 2006-12-06 17:04:50.000000000 +0800
+++ newtrunk/vm/gc_gen/src/trace_forward/fspace_collect_forward.cpp 2006-12-12 19:08:01.000000000 +0800
@@ -22,6 +22,7 @@
#include "fspace.h"
#include "../thread/collector.h"
#include "../common/gc_metadata.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
static Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
{
@@ -71,6 +72,8 @@
offset_scanner = offset_next_ref(offset_scanner);
}
+ scan_weak_reference(collector, p_obj, scan_slot);
+
return;
}
@@ -95,7 +98,7 @@
/* Fastpath: object has already been forwarded, update the ref slot */
if(obj_is_forwarded_in_vt(p_obj)) {
- *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
+ *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
return;
}
@@ -122,11 +125,11 @@
is set in the atomic instruction, which requires rolling back the mos_alloced
space. That is easy for thread local block allocation cancellation. */
if( p_target_obj == NULL ){
- *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
- return;
+ *p_ref = obj_get_forwarding_pointer_in_vt(p_obj);
+ return;
}
/* otherwise, we successfully forwarded */
- *p_ref = p_target_obj;
+ *p_ref = p_target_obj;
/* we forwarded it, we need remember it for verification. */
if(verify_live_heap) {
@@ -240,9 +243,60 @@
/* the rest work is not enough for parallelization, so let only one thread go */
if( collector->thread_handle != 0 ) return;
+ collector_process_finalizer_weakref(collector);
+
gc_update_repointed_refs(collector);
+
+ gc_post_process_finalizer_weakref(gc);
+
reset_fspace_for_allocation(space);
return;
}
+
+Boolean obj_is_dead_in_minor_forward_collection(Collector *collector, Partial_Reveal_Object *p_obj)
+{
+ Space *space = collector->collect_space;
+ Boolean belong_to_nos = obj_belongs_to_space(p_obj, space);
+
+ if(!belong_to_nos)
+ return FALSE;
+
+ Boolean space_to_be_forwarded = fspace_object_to_be_forwarded(p_obj, (Fspace*)space);
+ Boolean forwarded = obj_is_forwarded_in_vt(p_obj);
+ Boolean marked = obj_is_marked_in_vt(p_obj);
+
+ return (space_to_be_forwarded && !forwarded) || (!space_to_be_forwarded && !marked);
+}
+
+void resurrect_obj_tree_after_trace(Collector *collector, Partial_Reveal_Object **p_ref)
+{
+ GC *gc = collector->gc;
+ GC_Metadata* metadata = gc->metadata;
+
+ collector->trace_stack = pool_get_entry(metadata->free_task_pool);
+ collector_tracestack_push(collector, p_ref);
+ pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
+
+//collector->rep_set = pool_get_entry(metadata->free_set_pool); /* the caller has already acquired collector->rep_set */
+ collector->trace_stack = pool_get_entry(metadata->free_task_pool);
+ Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
+ while(trace_task){
+ unsigned int* iter = vector_block_iterator_init(trace_task);
+ while(!vector_block_iterator_end(trace_task,iter)){
+ Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+ iter = vector_block_iterator_advance(trace_task,iter);
+ assert(*p_ref);
+ trace_object(collector, p_ref);
+ }
+ vector_stack_clear(trace_task);
+ pool_put_entry(metadata->free_task_pool, trace_task);
+ trace_task = pool_get_entry(metadata->mark_task_pool);
+ }
+
+ trace_task = (Vector_Block*)collector->trace_stack;
+ vector_stack_clear(trace_task);
+ pool_put_entry(metadata->free_task_pool, trace_task);
+ collector->trace_stack = NULL;
+}
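
obj_is_dead_in_minor_forward_collection folds the two NOS modes into one test: in the forwarded part an object is live only if its vtable word carries a forwarding pointer, in the marked part only if its mark bit is set, and anything outside NOS is never reclaimed by a minor collection. resurrect_obj_tree_after_trace then re-runs the ordinary trace loop over a single root, so a dead finalizable object, and everything it still references, gets forwarded after all. A hedged usage sketch (the real caller belongs to the finalizer_weakref module, which is not part of this hunk; rep_set disposal after the call is elided):

    static void resurrect_if_dead(Collector *collector,
                                  Partial_Reveal_Object **p_slot)
    {
        if(!obj_is_dead_in_minor_forward_collection(collector, *p_slot))
            return;                                /* still reachable */
        /* the function above expects rep_set to be held already */
        collector->rep_set = pool_get_entry(collector->gc->metadata->free_set_pool);
        resurrect_obj_tree_after_trace(collector, p_slot);
    }
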
diff -ruN oldtrunk/vm/include/open/vm_gc.h newtrunk/vm/include/open/vm_gc.h
--- oldtrunk/vm/include/open/vm_gc.h 2006-12-09 19:17:40.000000000 +0800
+++ newtrunk/vm/include/open/vm_gc.h 2006-12-09 15:49:12.000000000 +0800
@@ -176,12 +176,16 @@
*/
VMEXPORT void vm_finalize_object(Managed_Object_Handle p_obj);
+VMEXPORT void set_native_finalizer_thread_flag(Boolean flag);
+
/**
* GC should call this function when a phantom reference object
* is to be enqueued, i.e. when the reference is not reachable anymore.
*/
VMEXPORT void vm_enqueue_reference(Managed_Object_Handle p_obj);
+VMEXPORT void set_native_ref_enqueue_thread_flag(Boolean flag);
+
enum WeakReferenceType {
NOT_REFERENCE = 0,
WEAK_REFERENCE,
diff -ruN oldtrunk/vm/vmcore/include/finalize.h newtrunk/vm/vmcore/include/finalize.h
--- oldtrunk/vm/vmcore/include/finalize.h 2006-12-09 19:18:09.000000000 +0800
+++ newtrunk/vm/vmcore/include/finalize.h 2006-12-09 15:50:16.000000000 +0800
@@ -50,5 +50,8 @@
void vm_enqueue_references();
+void vm_ref_enqueue_func(void); // added for NATIVE REFERENCE ENQUEUE THREAD
+
+Boolean get_native_finalizer_thread_flag(); // added for NATIVE FINALIZER THREAD
#endif
diff -ruN oldtrunk/vm/vmcore/include/finalizer_thread.h newtrunk/vm/vmcore/include/finalizer_thread.h
--- oldtrunk/vm/vmcore/include/finalizer_thread.h 1970-01-01 08:00:00.000000000 +0800
+++ newtrunk/vm/vmcore/include/finalizer_thread.h 2006-12-09 14:23:16.000000000 +0800
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Li-Gang Wang, 2006/11/15
+ */
+
+#ifndef _FINALIZER_THREAD_H_
+#define _FINALIZER_THREAD_H_
+
+#include "jni_types.h"
+#include "open/hythread_ext.h"
+#include <assert.h>
+#include "open/types.h"
+#include <apr_atomic.h>
+
+#ifndef _FINALIZER_WEAKREF_PLATFORM_
+#define _FINALIZER_WEAKREF_PLATFORM_
+
+#define VmEventHandle hysem_t
+
+inline int vm_wait_event(VmEventHandle event)
+{
+ IDATA stat = hysem_wait(event);
+ assert(stat == TM_ERROR_NONE); return stat;
+}
+inline int vm_set_event(VmEventHandle event, IDATA count)
+{
+ IDATA stat = hysem_set(event, count);
+ assert(stat == TM_ERROR_NONE); return stat;
+}
+inline int vm_post_event(VmEventHandle event)
+{
+ IDATA stat = hysem_set(event, 1);
+ assert(stat == TM_ERROR_NONE); return stat;
+}
+inline int vm_create_event(VmEventHandle* event, unsigned int initial_count, unsigned int max_count)
+{
+ return hysem_create(event, initial_count, max_count);
+}
+
+
+typedef volatile unsigned int SpinLock;
+enum Lock_State{
+ FREE_LOCK,
+ LOCKED
+};
+
+#define gc_try_lock(x) (!apr_atomic_cas32(&(x), LOCKED, FREE_LOCK))
+#define gc_lock(x) while( !gc_try_lock(x)){ while( x==LOCKED );}
+#define gc_unlock(x) do{ x = FREE_LOCK;}while(0)
+
+#endif
+
+#define FINALIZER_THREAD_PRIORITY (HYTHREAD_PRIORITY_USER_MAX - 3)
+
+struct finalizer_thread_info {
+ SpinLock lock;
+ int thread_num;
+ int working_thread_num;
+ Boolean shutdown;
+ Boolean on_exit;
+ VmEventHandle finalizer_pending_event;
+ VmEventHandle finalization_end_event;
+};
+
+
+extern Boolean get_finalizer_shutdown_flag();
+extern Boolean get_finalizer_on_exit_flag();
+extern void finalizer_threads_init(JavaVM *java_vm, JNIEnv *jni_env);
+extern void finalizer_shutdown(Boolean start_finalization_on_exit);
+extern void activate_finalizer_threads(Boolean wait);
+extern void vmmemory_manager_runfinalization(void);
+
+#endif // _FINALIZER_THREAD_H_
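
The spinlock defined above is a plain CAS loop: gc_try_lock succeeds when apr_atomic_cas32 swaps FREE_LOCK for LOCKED (the call returns the old value, so 0 means success), and gc_lock spins on the cheap read x == LOCKED between CAS attempts. A minimal usage sketch:

    static SpinLock example_lock = FREE_LOCK;   /* hypothetical lock */

    static void update_shared_counter(int *counter)
    {
        gc_lock(example_lock);     /* spin until CAS FREE_LOCK -> LOCKED */
        ++*counter;                /* critical section */
        gc_unlock(example_lock);   /* plain store of FREE_LOCK */
    }
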
diff -ruN oldtrunk/vm/vmcore/include/ref_enqueue_thread.h newtrunk/vm/vmcore/include/ref_enqueue_thread.h
--- oldtrunk/vm/vmcore/include/ref_enqueue_thread.h 1970-01-01 08:00:00.000000000 +0800
+++ newtrunk/vm/vmcore/include/ref_enqueue_thread.h 2006-12-09 14:23:40.000000000 +0800
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Li-Gang Wang, 2006/11/15
+ */
+
+#ifndef _REF_ENQUEUE_THREAD_H_
+#define _REF_ENQUEUE_THREAD_H_
+
+#include "jni_types.h"
+#include "open/hythread_ext.h"
+#include <assert.h>
+#include "open/types.h"
+#include <apr_atomic.h>
+
+#ifndef _FINALIZER_WEAKREF_PLATFORM_
+#define _FINALIZER_WEAKREF_PLATFORM_
+
+#define VmEventHandle hysem_t
+
+inline int vm_wait_event(VmEventHandle event)
+{
+ IDATA stat = hysem_wait(event);
+ assert(stat == TM_ERROR_NONE); return stat;
+}
+inline int vm_set_event(VmEventHandle event, IDATA count)
+{
+ IDATA stat = hysem_set(event, count);
+ assert(stat == TM_ERROR_NONE); return stat;
+}
+inline int vm_post_event(VmEventHandle event)
+{
+ IDATA stat = hysem_set(event, 1);
+ assert(stat == TM_ERROR_NONE); return stat;
+}
+inline int vm_create_event(VmEventHandle* event, unsigned int initial_count, unsigned int max_count)
+{
+ return hysem_create(event, initial_count, max_count);
+}
+
+
+typedef volatile unsigned int SpinLock;
+enum Lock_State{
+ FREE_LOCK,
+ LOCKED
+};
+
+#define gc_try_lock(x) (!apr_atomic_cas32(&(x), LOCKED, FREE_LOCK))
+#define gc_lock(x) while( !gc_try_lock(x)){ while( x==LOCKED );}
+#define gc_unlock(x) do{ x = FREE_LOCK;}while(0)
+
+#endif
+
+#define REF_ENQUEUE_THREAD_PRIORITY (HYTHREAD_PRIORITY_USER_MAX - 1)
+
+struct ref_enqueue_thread_info {
+ SpinLock lock;
+ VmEventHandle reference_pending_event;
+ Boolean shutdown;
+};
+
+extern Boolean get_native_ref_enqueue_thread_flag();
+extern void ref_enqueue_thread_init(JavaVM *java_vm, JNIEnv *jni_env);
+extern void ref_enqueue_shutdown(void);
+extern void activate_ref_enqueue_thread(void);
+
+#endif // _REF_ENQUEUE_THREAD_H_
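
Note that the event and spinlock block above is a verbatim copy of the one in finalizer_thread.h, and both sit under the same _FINALIZER_WEAKREF_PLATFORM_ guard, so a translation unit may include the two headers in either order:

    #include "finalizer_thread.h"   /* first include defines the block */
    #include "ref_enqueue_thread.h" /* guard suppresses the duplicate  */
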
diff -ruN oldtrunk/vm/vmcore/src/init/finalize.cpp newtrunk/vm/vmcore/src/init/finalize.cpp
--- oldtrunk/vm/vmcore/src/init/finalize.cpp 2006-12-09 19:18:22.000000000 +0800
+++ newtrunk/vm/vmcore/src/init/finalize.cpp 2006-12-09 15:53:44.000000000 +0800
@@ -37,6 +37,9 @@
#include "jit_runtime_support.h"
#include "finalize.h"
+#include "finalizer_thread.h" /* added for NATIVE FINALIZER THREAD */
+#include "ref_enqueue_thread.h" /* added for NATIVE REFERENCE ENQUEUE THREAD */
+
#define LOG_DOMAIN "vm.object_queue"
#include "classloader.h"
@@ -343,8 +346,14 @@
} //Objects_To_Finalize::run_finalizers
int Objects_To_Finalize::do_finalization(int quantity) {
+ /* BEGIN: added for NATIVE FINALIZER THREAD */
+ Boolean native_finalizer_thread_flag = get_native_finalizer_thread_flag();
+ Boolean native_finalizer_shutdown, native_finalizer_on_exit;
+ /* END: added for NATIVE FINALIZER THREAD */
+
//SetThreadPriority(GetCurrentThread(),THREAD_PRIORITY_HIGHEST);
- p_TLS_vmthread->finalize_thread_flags = FINALIZER_THREAD;
+ if(!native_finalizer_thread_flag) // added for NATIVE FINALIZER THREAD
+ p_TLS_vmthread->finalize_thread_flags = FINALIZER_THREAD;
int i;
tmn_suspend_disable();
@@ -353,18 +362,28 @@
jvalue args[1];
args[0].l = (jobject) handle;
- assert(VM_Global_State::loader_env->finalizer_thread);
- jboolean* finalizer_shutdown = VM_Global_State::loader_env->finalizer_shutdown;
- assert(finalizer_shutdown);
- jboolean* finalizer_on_exit = VM_Global_State::loader_env->finalizer_on_exit;
- assert(finalizer_on_exit);
+ /* BEGIN: modified for NATIVE FINALIZER THREAD */
+ jboolean *finalizer_shutdown, *finalizer_on_exit;
+ if(!native_finalizer_thread_flag){
+ assert(VM_Global_State::loader_env->finalizer_thread);
+ finalizer_shutdown = VM_Global_State::loader_env->finalizer_shutdown;
+ assert(finalizer_shutdown);
+ finalizer_on_exit = VM_Global_State::loader_env->finalizer_on_exit;
+ assert(finalizer_on_exit);
+ native_finalizer_shutdown = (Boolean)*finalizer_shutdown;
+ native_finalizer_on_exit = (Boolean)*finalizer_on_exit;
+ }
+ /* END: modified for NATIVE FINALIZER THREAD */
for (i=0; ((i<quantity)||(quantity<=0)); i++) {
...
Class *clss = handle->object->vt()->clss;
assert(clss);
- if ((*finalizer_on_exit) && is_class_ignored(clss)) {
+ /* BEGIN: modified for NATIVE FINALIZER THREAD */
+ if(native_finalizer_thread_flag)
+ native_finalizer_on_exit = get_finalizer_on_exit_flag();
+ if (native_finalizer_on_exit && is_class_ignored(clss)) {
tmn_suspend_enable();
continue;
}
+ /* END: modified for NATIVE FINALIZER THREAD */
Method *finalize = class_lookup_method_recursive(clss,
VM_Global_State::loader_env->FinalizeName_String,
@@ -467,7 +490,12 @@
{
NativeObjectHandles nhs;
assert(hythread_is_suspend_enabled());
- objects_to_finalize.run_finalizers();
+ /* BEGIN: modified for NATIVE FINALIZER THREAD */
+ if(get_native_finalizer_thread_flag())
+ activate_finalizer_threads(FALSE);
+ else
+ objects_to_finalize.run_finalizers();
+ /* END: modified for NATIVE FINALIZER THREAD */
} //vm_run_pending_finalizers
int vm_do_finalization(int quantity)
@@ -510,5 +538,16 @@
void vm_enqueue_references()
{
- references_to_enqueue.enqueue_references();
+ /* BEGIN: modified for NATIVE REFERENCE ENQUEUE THREAD */
+ if(get_native_ref_enqueue_thread_flag())
+ activate_ref_enqueue_thread();
+ else
+ references_to_enqueue.enqueue_references();
+ /* END: modified for NATIVE REFERENCE ENQUEUE THREAD */
} //vm_enqueue_references
+
+/* added for NATIVE REFERENCE ENQUEUE THREAD */
+void vm_ref_enqueue_func(void)
+{
+ references_to_enqueue.enqueue_references();
+}
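
After this change vm_run_pending_finalizers and vm_enqueue_references are pure dispatchers: with the native flags set they only signal the dedicated threads, otherwise they do the work inline as before, so callers need not care which mode is active. A hedged sketch of the resulting contract (the wrapper function is hypothetical):

    /* drain both queues from an arbitrary attached thread; whether the
       work runs here or in the native threads is hidden behind the two
       calls */
    static void drain_pending_queues(void)
    {
        vm_enqueue_references();      /* java.lang.ref enqueueing */
        vm_run_pending_finalizers();  /* finalize() execution     */
    }
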
diff -ruN oldtrunk/vm/vmcore/src/init/finalizer_thread.cpp newtrunk/vm/vmcore/src/init/finalizer_thread.cpp
--- oldtrunk/vm/vmcore/src/init/finalizer_thread.cpp 1970-01-01 08:00:00.000000000 +0800
+++ newtrunk/vm/vmcore/src/init/finalizer_thread.cpp 2006-12-12 19:05:56.000000000 +0800
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Li-Gang Wang, 2006/11/15
+ */
+
+#include "finalizer_thread.h"
+#include "ref_enqueue_thread.h"
+#include "open/gc.h"
+#include "port_sysinfo.h"
+#include "finalize.h"
+#include "vm_threads.h"
+#include "../../../thread/src/thread_private.h"
+
+static Boolean native_finalizer_thread_flag = FALSE;
+static struct finalizer_thread_info *finalizer_thread_info = NULL;
+
+
+Boolean get_native_finalizer_thread_flag()
+{
+ return native_finalizer_thread_flag;
+}
+
+void set_native_finalizer_thread_flag(Boolean flag)
+{
+ native_finalizer_thread_flag = flag;
+}
+
+Boolean get_finalizer_shutdown_flag()
+{
+ return finalizer_thread_info->shutdown;
+}
+
+Boolean get_finalizer_on_exit_flag()
+{
+ return finalizer_thread_info->on_exit;
+}
+
+static int finalizer_thread_func(void **args);
+
+void finalizer_threads_init(JavaVM *java_vm, JNIEnv *jni_env)
+{
+ if(!native_finalizer_thread_flag)
+ return;
+
+ finalizer_thread_info = (struct finalizer_thread_info *)STD_MALLOC(sizeof(struct finalizer_thread_info));
+ finalizer_thread_info->lock = FREE_LOCK;
+ finalizer_thread_info->thread_num = port_CPUs_number();
+ finalizer_thread_info->working_thread_num = 0;
+ finalizer_thread_info->shutdown = FALSE;
+ finalizer_thread_info->on_exit = FALSE;
+
+ int status = vm_create_event(&finalizer_thread_info->finalizer_pending_event, 0, finalizer_thread_info->thread_num);
+ assert(status == TM_ERROR_NONE);
+ status = vm_create_event(&finalizer_thread_info->finalization_end_event, 0, 1);
+ assert(status == TM_ERROR_NONE);
+
+ void **args = (void **)STD_MALLOC(sizeof(void *)*2);
+ args[0] = (void *)java_vm;
+ args[1] = (void *)jni_env;
+ for(int i = 0; i < finalizer_thread_info->thread_num; i++){
+ status = (unsigned int)hythread_create(NULL, 0, 0, 0, (hythread_entrypoint_t)finalizer_thread_func, args);
+ assert(status == TM_ERROR_NONE);
+ }
+}
+
+void finalizer_shutdown(Boolean start_finalization_on_exit)
+{
+ if(start_finalization_on_exit){
+ tmn_suspend_disable();
+ gc_force_gc();
+ tmn_suspend_enable();
+ activate_finalizer_threads(TRUE);
+ tmn_suspend_disable();
+ gc_finalize_on_exit();
+ tmn_suspend_enable();
+ gc_lock(finalizer_thread_info->lock);
+ finalizer_thread_info->on_exit = TRUE;
+ gc_unlock(finalizer_thread_info->lock);
+ activate_finalizer_threads(TRUE);
+ }
+ gc_lock(finalizer_thread_info->lock);
+ finalizer_thread_info->shutdown = TRUE;
+ gc_unlock(finalizer_thread_info->lock);
+ ref_enqueue_shutdown();
+ activate_finalizer_threads(FALSE);
+}
+
+static void wait_finalization_end(void)
+{
+ vm_wait_event(finalizer_thread_info->finalization_end_event);
+}
+
+void activate_finalizer_threads(Boolean wait)
+{
+ gc_lock(finalizer_thread_info->lock);
+ vm_set_event(finalizer_thread_info->finalizer_pending_event,
+ finalizer_thread_info->thread_num - finalizer_thread_info->working_thread_num);
+ gc_unlock(finalizer_thread_info->lock);
+
+ if(wait)
+ wait_finalization_end();
+}
+
+void vmmemory_manager_runfinalization(void)
+{
+ activate_finalizer_threads(TRUE);
+}
+
+
+static int do_finalization_func(void)
+{
+ return vm_do_finalization(0);
+}
+
+static void wait_pending_finalizer(void)
+{
+ vm_wait_event(finalizer_thread_info->finalizer_pending_event);
+}
+
+static void notify_finalization_end(void)
+{
+ vm_post_event(finalizer_thread_info->finalization_end_event);
+}
+
+static void finalizer_notify_work_done(void)
+{
+ gc_lock(finalizer_thread_info->lock);
+ --finalizer_thread_info->working_thread_num;
+ if(finalizer_thread_info->working_thread_num == 0)
+ notify_finalization_end();
+ gc_unlock(finalizer_thread_info->lock);
+}
+
+static int finalizer_thread_func(void **args)
+{
+ JavaVM *java_vm = (JavaVM *)args[0];
+ JNIEnv *jni_env = (JNIEnv *)args[1];
+
+ IDATA status = vm_attach(java_vm, &jni_env);
+ if(status != TM_ERROR_NONE)
+ return status;
+
+ while(true){
+ /* Waiting for pending finalizers */
+ wait_pending_finalizer();
+
+ gc_lock(finalizer_thread_info->lock);
+ ++finalizer_thread_info->working_thread_num;
+ gc_unlock(finalizer_thread_info->lock);
+
+ /* do the real finalization work */
+ do_finalization_func();
+
+ finalizer_notify_work_done();
+
+ gc_lock(finalizer_thread_info->lock);
+ if(finalizer_thread_info->shutdown){
+ gc_unlock(finalizer_thread_info->lock);
+ break;
+ }
+ gc_unlock(finalizer_thread_info->lock);
+ }
+
+ return TM_ERROR_NONE;
+}
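
activate_finalizer_threads posts exactly thread_num - working_thread_num permits, so each idle worker wakes once and already-busy workers are not over-signalled; the last worker to leave the working set posts finalization_end_event, which releases wait_finalization_end. A worked instance of the arithmetic (a restatement, not new logic):

    /* thread_num == 4 (one per CPU). If one worker is already running,
       working_thread_num == 1, so the caller posts 4 - 1 = 3 permits:
       the three idle workers wake, the counter climbs to 4, and
       finalization_end_event fires only when it drops back to 0. */
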
diff -ruN oldtrunk/vm/vmcore/src/init/ref_enqueue_thread.cpp newtrunk/vm/vmcore/src/init/ref_enqueue_thread.cpp
--- oldtrunk/vm/vmcore/src/init/ref_enqueue_thread.cpp 1970-01-01 08:00:00.000000000 +0800
+++ newtrunk/vm/vmcore/src/init/ref_enqueue_thread.cpp 2006-12-11 06:30:12.000000000 +0800
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @author Li-Gang Wang, 2006/11/15
+ */
+
+#include "ref_enqueue_thread.h"
+#include "finalize.h"
+#include "vm_threads.h"
+#include "../../../thread/src/thread_private.h"
+
+static Boolean native_ref_enqueue_thread_flag = FALSE;
+static struct ref_enqueue_thread_info *ref_enqueue_thread_info = NULL;
+
+
+Boolean get_native_ref_enqueue_thread_flag()
+{
+ return native_ref_enqueue_thread_flag;
+}
+
+void set_native_ref_enqueue_thread_flag(Boolean flag)
+{
+ native_ref_enqueue_thread_flag = flag;
+}
+
+static int ref_enqueue_thread_func(void **args);
+
+void ref_enqueue_thread_init(JavaVM *java_vm, JNIEnv *jni_env)
+{
+ if(!get_native_ref_enqueue_thread_flag())
+ return;
+
+ ref_enqueue_thread_info = (struct ref_enqueue_thread_info *)STD_MALLOC(sizeof(struct ref_enqueue_thread_info));
+ ref_enqueue_thread_info->lock = FREE_LOCK;
+ ref_enqueue_thread_info->shutdown = FALSE;
+
+ int status = vm_create_event(&ref_enqueue_thread_info->reference_pending_event, 0, 1);
+ assert(status == TM_ERROR_NONE);
+
+ void **args = (void **)STD_MALLOC(sizeof(void *)*2);
+ args[0] = (void *)java_vm;
+ args[1] = (void *)jni_env;
+ status = (unsigned int)hythread_create(NULL, 0, REF_ENQUEUE_THREAD_PRIORITY, 0, (hythread_entrypoint_t)ref_enqueue_thread_func, args);
+ assert(status == TM_ERROR_NONE);
+}
+
+void ref_enqueue_shutdown(void)
+{
+ gc_lock(ref_enqueue_thread_info->lock);
+ ref_enqueue_thread_info->shutdown = TRUE;
+ gc_unlock(ref_enqueue_thread_info->lock);
+ activate_ref_enqueue_thread();
+}
+
+void activate_ref_enqueue_thread(void)
+{
+ vm_post_event(ref_enqueue_thread_info->reference_pending_event);
+}
+
+
+static int ref_enqueue_func(void)
+{
+ vm_ref_enqueue_func();
+ return 0;
+}
+
+static void wait_pending_reference(void)
+{
+ vm_wait_event(ref_enqueue_thread_info->reference_pending_event);
+}
+
+
+static int ref_enqueue_thread_func(void **args)
+{
+ JavaVM *java_vm = (JavaVM *)args[0];
+ JNIEnv *jni_env = (JNIEnv *)args[1];
+
+ IDATA status = vm_attach(java_vm, &jni_env);
+ if(status != TM_ERROR_NONE)
+ return status;
+
+ while(true){
+ /* Waiting for pending weak references */
+ wait_pending_reference();
+
+ /* do the real reference enqueue work */
+ ref_enqueue_func();
+
+ gc_lock(ref_enqueue_thread_info->lock);
+ if(ref_enqueue_thread_info->shutdown){
+ gc_unlock(ref_enqueue_thread_info->lock);
+ break;
+ }
+ gc_unlock(ref_enqueue_thread_info->lock);
+ }
+
+ return TM_ERROR_NONE;
+}
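
ref_enqueue_shutdown uses a set-flag-then-signal handshake: shutdown is written under the spinlock before the pending event is posted, and the worker re-checks the flag under the same lock after every enqueue pass, so the final wakeup cannot be lost. In outline:

    /* shutdown caller                  ref enqueue thread
       ---------------                  ------------------
       gc_lock; shutdown = TRUE;        wait_pending_reference()  -- blocked
       gc_unlock;
       vm_post_event(pending_event)     wakes, ref_enqueue_func()
                                        gc_lock; sees shutdown; gc_unlock
                                        break; return TM_ERROR_NONE */
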
diff -ruN oldtrunk/vm/vmcore/src/init/vm_init.cpp newtrunk/vm/vmcore/src/init/vm_init.cpp
--- oldtrunk/vm/vmcore/src/init/vm_init.cpp 2006-12-09 19:18:22.000000000 +0800
+++ newtrunk/vm/vmcore/src/init/vm_init.cpp 2006-12-09 15:53:56.000000000 +0800
@@ -46,6 +46,8 @@
#include "vm_strings.h"
#include "slot.h"
#include "classpath_const.h"
+#include "finalizer_thread.h" /* added for NATIVE FINALIZER THREAD */
+#include "ref_enqueue_thread.h" /* added for NATIVE REFERENCE ENQUEUE THREAD */
#ifdef PLATFORM_NT
// 20040427 Used to turn on heap checking on every allocation
@@ -676,6 +678,9 @@
status = vm_attach(java_vm, &jni_env);
if (status != JNI_OK) return status;
+ finalizer_threads_init(java_vm, jni_env); /* added for NATIVE FINALIZER THREAD */
+ ref_enqueue_thread_init(java_vm, jni_env); /* added for NATIVE REFERENCE ENQUEUE THREAD */
+
// "Tool Interface" initialization
status = vm_env->TI->Init(java_vm);
if (status != JNI_OK) {
diff -ruN oldtrunk/vm/vmcore/src/kernel_classes/javasrc/java/lang/FinalizerThread.java newtrunk/vm/vmcore/src/kernel_classes/javasrc/java/lang/FinalizerThread.java
--- oldtrunk/vm/vmcore/src/kernel_classes/javasrc/java/lang/FinalizerThread.java 2006-12-09 19:18:44.000000000 +0800
+++ newtrunk/vm/vmcore/src/kernel_classes/javasrc/java/lang/FinalizerThread.java 2006-12-09 15:52:00.000000000 +0800
@@ -51,7 +51,12 @@
* VM calls this thread from Runtime.runFinalization().
*/
public static void runFinalization() {
- startFinalization(true);
+ /* BEGIN: added for NATIVE FINALIZER THREAD */
+ if(NATIVE_FINALIZER_THREAD)
+ runFinalizationInNativeFinalizerThreads();
+ else
+ /* END: added for NATIVE FINALIZER THREAD */
+ startFinalization(true);
}
/*
@@ -62,6 +67,11 @@
* Initializes finalization system. Starts permanent thread.
*/
static void initialize() {
+ /* BEGIN: added for NATIVE FINALIZER THREAD */
+ if (NATIVE_FINALIZER_THREAD)
+ return;
+ /* END: added for NATIVE FINALIZER THREAD */
+
if (TRACE) {
trace("FinalizerThread: static initialization started");
}
@@ -87,6 +97,12 @@
* VM calls this method to request finalizer thread shutdown.
*/
static void shutdown(boolean startFinalizationOnExit) {
+ /* BEGIN: added for NATIVE FINALIZER THREAD */
+ if(NATIVE_FINALIZER_THREAD) {
+ finalizerShutDown(startFinalizationOnExit);
+ return;
+ }
+ /* END: added for NATIVE FINALIZER THREAD */
if (TRACE) {
trace("shutting down finalizer thread");
}
@@ -101,6 +117,29 @@
}
}
+ /* added for NATIVE FINALIZER THREAD
+ * A flag to indicate whether the finalizer threads are native threads or Java threads.
+ */
+ private static final boolean NATIVE_FINALIZER_THREAD = getNativeFinalizerThreadFlagFromVM();
+
+ /* BEGIN: These three methods are added for NATIVE FINALIZER THREAD */
+ /**
+ * This method gets the flag that indicates
+ * whether VM uses native finalizer threads or Java finalizer threads.
+ */
+ private static native boolean getNativeFinalizerThreadFlagFromVM();
+
+ /**
+ * This method implements runFinalization() method in native finalizer threads.
+ */
+ private static native void runFinalizationInNativeFinalizerThreads();
+
+ /**
+ * This method does finalization work related to VM shutdown in native finalizer threads.
+ */
+ private static native void finalizerShutDown(boolean finalizeOnExit);
+ /* END: These three methods are added for NATIVE FINALIZER THREAD */
+
/*
* Static private part
*/
diff -ruN oldtrunk/vm/vmcore/src/kernel_classes/native/java_lang_FinalizerThread.cpp newtrunk/vm/vmcore/src/kernel_classes/native/java_lang_FinalizerThread.cpp
--- oldtrunk/vm/vmcore/src/kernel_classes/native/java_lang_FinalizerThread.cpp 2006-12-09 19:18:39.000000000 +0800
+++ newtrunk/vm/vmcore/src/kernel_classes/native/java_lang_FinalizerThread.cpp 2006-12-09 15:54:18.000000000 +0800
@@ -34,6 +34,9 @@
#include "port_sysinfo.h"
#include "vm_threads.h"
+/* added for NATIVE FINALIZER THREAD */
+#include "finalizer_thread.h"
+
/**
* Implements getObject(..) method.
* For details see kernel classes component documentation.
@@ -90,3 +93,41 @@
gc_finalize_on_exit();
tmn_suspend_enable();
}
+
+/* BEGIN: These three methods are added for NATIVE FINALIZER THREAD */
+/*
+ * Class: java_lang_FinalizerThread
+ * Method: getNativeFinalizerThreadFlagFromVM
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_java_lang_FinalizerThread_getNativeFinalizerThreadFlagFromVM
+ (JNIEnv *, jclass)
+{
+ return (jboolean)get_native_finalizer_thread_flag();
+}
+
+/*
+ * Class: java_lang_FinalizerThread
+ * Method: runFinalizationInNativeFinalizerThreads
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_java_lang_FinalizerThread_runFinalizationInNativeFinalizerThreads
+ (JNIEnv *, jclass)
+{
+ vm_enqueue_references();
+
+ // Do finalization in dedicated native finalizer threads.
+ vmmemory_manager_runfinalization();
+}
+
+/*
+ * Class: java_lang_FinalizerThread
+ * Method: finalizerShutDown
* Signature: (Z)V
+ */
+JNIEXPORT void JNICALL Java_java_lang_FinalizerThread_finalizerShutDown
+ (JNIEnv *, jclass, jboolean value)
+{
+ finalizer_shutdown(value);
+}
+/* END: These three methods are added for NATIVE FINALIZER THREAD */
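
With the native flag on, Runtime.runFinalization() ends up in the JNI method above, which first enqueues pending references and then blocks until the native workers drain the finalization queue. The full call path, assembled from the hunks above:

    /* java.lang.Runtime.runFinalization()
         -> FinalizerThread.runFinalization()           (Java)
         -> runFinalizationInNativeFinalizerThreads()   (JNI, above)
              -> vm_enqueue_references()                -- references first
              -> vmmemory_manager_runfinalization()
                   -> activate_finalizer_threads(TRUE) -- waits on
                      finalization_end_event */
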
diff -ruN oldtrunk/vm/vmcore/src/kernel_classes/native/java_lang_FinalizerThread.h newtrunk/vm/vmcore/src/kernel_classes/native/java_lang_FinalizerThread.h
--- oldtrunk/vm/vmcore/src/kernel_classes/native/java_lang_FinalizerThread.h 2006-12-09 19:18:39.000000000 +0800
+++ newtrunk/vm/vmcore/src/kernel_classes/native/java_lang_FinalizerThread.h 2006-12-09 15:54:36.000000000 +0800
@@ -81,6 +81,26 @@
JNIEXPORT void JNICALL
Java_java_lang_FinalizerThread_fillFinalizationQueueOnExit(JNIEnv *, jclass);
+/* BEGIN: These three methods are added for NATIVE FINALIZER THREAD */
+/*
+ * Method: java.lang.FinalizerThread.getNativeFinalizerThreadFlagFromVM()Z
+ */
+JNIEXPORT jboolean JNICALL
+Java_java_lang_FinalizerThread_getNativeFinalizerThreadFlagFromVM(JNIEnv *, jclass);
+
+/*
+ * Method: java_lang_FinalizerThread.runFinalizationInNativeFinalizerThreads()V
+ */
+JNIEXPORT void JNICALL
+Java_java_lang_FinalizerThread_runFinalizationInNativeFinalizerThreads(JNIEnv *, jclass);
+
+/*
* Method: java_lang_FinalizerThread.finalizerShutDown(Z)V
+ */
+JNIEXPORT void JNICALL
+Java_java_lang_FinalizerThread_finalizerShutDown(JNIEnv *, jclass, jboolean);
+/* END: These three methods are added for NATIVE FINALIZER THREAD */
+
#ifdef __cplusplus
}