Index: make/vm/kernel.xml
===================================================================
--- make/vm/kernel.xml (revision 747844)
+++ make/vm/kernel.xml (working copy)
@@ -59,7 +59,7 @@
-
+
Index: vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java
===================================================================
--- vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java (revision 747844)
+++ vm/gc_gen/javasrc/org/apache/harmony/drlvm/gc_gen/GCHelper.java (working copy)
@@ -345,6 +345,16 @@
private static native int getArrayFirstElemOffsetInGCVT();
private static native int getGCAllocatedSizeOffsetInGCVT();
+
+ //not used yet! will be used by VMReferenceManager
+ @Inline
+ public static void get_barrier(Object weakref) {
+ //if (weakref == null) return;
+ //TODO: check that the argument really is a weak reference!
+ VMHelper.weakReferenceGetReferent(weakref);
+ }
+
+
}
Index: vm/gc_gen/src/common/concurrent_collection_scheduler.cpp
===================================================================
--- vm/gc_gen/src/common/concurrent_collection_scheduler.cpp (revision 747844)
+++ vm/gc_gen/src/common/concurrent_collection_scheduler.cpp (working copy)
@@ -24,6 +24,7 @@
#include "gc_concurrent.h"
#include "../thread/conclctor.h"
#include "../verify/verify_live_heap.h"
+#include "../finalizer_weakref/finalizer_weakref.h"
#define NUM_TRIAL_COLLECTION 2
#define MIN_DELAY_TIME 0x0
@@ -281,62 +282,63 @@
concurrent collection entry function, it may start proper phase according to the current state.
*/
Boolean gc_con_perform_collection( GC* gc ) {
- int disable_count;
- int64 pause_start;
- Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
- switch( gc->gc_concurrent_status ) {
+ int disable_count;
+ int64 pause_start;
+ Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
+ switch( gc->gc_concurrent_status ) {
case GC_CON_NIL :
- if( !gc_con_start_condition(gc) )
- return FALSE;
- if( !state_transformation( gc, GC_CON_NIL, GC_CON_STW_ENUM ) )
- return FALSE;
-
- gc->num_collections++;
- gc->cause = GC_CAUSE_CONCURRENT_GC;
+ if( !gc_con_start_condition(gc) )
+ return FALSE;
+ if( !state_transformation( gc, GC_CON_NIL, GC_CON_STW_ENUM ) )
+ return FALSE;
- con_collection_stat->gc_start_time = time_now();
- disable_count = hythread_reset_suspend_disable();
-
- gc_start_con_enumeration(gc); //now, it is a stw enumeration
- con_collection_stat->marking_start_time = time_now();
- state_transformation( gc, GC_CON_STW_ENUM, GC_CON_START_MARKERS );
- gc_start_con_marking(gc);
+ gc->num_collections++;
+ gc->cause = GC_CAUSE_CONCURRENT_GC;
- INFO2("gc.con.time","[ER] start con pause, ERSM="<<((unsigned int)(time_now()-con_collection_stat->gc_start_time))<<" us "); // ERSM means enumerate rootset and start concurrent marking
- vm_resume_threads_after();
- hythread_set_suspend_disable(disable_count);
- break;
-
+ con_collection_stat->gc_start_time = time_now();
+ disable_count = hythread_reset_suspend_disable();
+
+ gc_start_con_enumeration(gc); //now, it is a stw enumeration
+ con_collection_stat->marking_start_time = time_now();
+ state_transformation( gc, GC_CON_STW_ENUM, GC_CON_START_MARKERS );
+ gc_start_con_marking(gc);
+
+ INFO2("gc.con.time","[ER] start con pause, ERSM="<<((unsigned int)(time_now()-con_collection_stat->gc_start_time))<<" us "); // ERSM means enumerate rootset and start concurrent marking
+ vm_resume_threads_after();
+ hythread_set_suspend_disable(disable_count);
+ break;
+
case GC_CON_BEFORE_SWEEP :
- if(!gc_is_specify_con_sweep())
- return FALSE;
- if( !state_transformation( gc, GC_CON_BEFORE_SWEEP, GC_CON_SWEEPING ) )
- return FALSE;
- gc_ms_start_con_sweep((GC_MS*)gc, gc_get_sweeper_numer(gc));
- break;
-
-
+ if(!gc_is_specify_con_sweep())
+ return FALSE;
+ if(!state_transformation(gc, GC_CON_BEFORE_SWEEP, GC_CON_SWEEPING))
+ return FALSE;
+ if(!IGNORE_FINREF)
+ gc_process_finphanref_STW(gc);
+ gc_ms_start_con_sweep((GC_MS*)gc, gc_get_sweeper_numer(gc));
+ break;
+
+
case GC_CON_BEFORE_FINISH :
- if( !state_transformation( gc, GC_CON_BEFORE_FINISH, GC_CON_RESET ) )
- return FALSE;
+ if( !state_transformation( gc, GC_CON_BEFORE_FINISH, GC_CON_RESET ) )
+ return FALSE;
/* thread should be suspended before the state transformation,
- it is for the case that the heap is exhausted in the reset state, although it is almost impossible */
+ it covers the case where the heap is exhausted in the reset state, although that is almost impossible */
disable_count = vm_suspend_all_threads();
- pause_start = time_now();
-
+ pause_start = time_now();
gc_merge_free_list_global(gc);
gc_reset_after_con_collection(gc);
state_transformation( gc, GC_CON_RESET, GC_CON_NIL );
- pause_time = time_now()-pause_start;
-
+ pause_time = time_now()-pause_start;
+
vm_resume_all_threads(disable_count);
- gc_con_stat_information_out(gc);
- INFO2("gc.con.time","[GC][Con]pause(reset collection): CRST="<<((unsigned int)pause_time)<<" us ");
+ gc_con_stat_information_out(gc);
+ INFO2("gc.con.time","[GC][Con]pause(reset collection): CRST="<<((unsigned int)pause_time)<<" us ");
+ break;
+
+ default:
+ assert( gc->gc_concurrent_status == GC_CON_NIL );
}
Index: vm/gc_gen/src/common/gc_concurrent.h
===================================================================
--- vm/gc_gen/src/common/gc_concurrent.h (revision 747844)
+++ vm/gc_gen/src/common/gc_concurrent.h (working copy)
+inline void thread_sleep_slice() {
+ apr_sleep(SLEEP_TIME);
+}
/* gc start enumeration phase, now, it is in a stop-the-world manner */
void gc_start_con_enumeration(GC * gc);
@@ -146,10 +149,10 @@
/* gc start marking phase */
void gc_start_con_marking(GC *gc);
-
-/* prepare for sweeping */
-void gc_prepare_sweeping(GC *gc);
-
+void gc_con_process_weakref(Conclctor *conclctor);
+void gc_con_process_finphanref(Conclctor *conclctor);
+void gc_process_finphanref_STW(GC *gc);
+
/* gc start sweeping phase */
void gc_start_con_sweeping(GC *gc);
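The scheduler above drives every phase change through state_transformation(). A minimal standalone model of that protocol, assuming the helper is a compare-and-swap on gc->gc_concurrent_status (its FALSE-on-failure use in gc_con_perform_collection suggests this; the real DRLVM helper may differ):

    #include <atomic>
    #include <cstdio>

    // a few of the phases named above; the real enum has more states
    enum ConState { GC_CON_NIL, GC_CON_STW_ENUM, GC_CON_START_MARKERS };

    static std::atomic<int> gc_concurrent_status(GC_CON_NIL);

    // assumed CAS semantics: exactly one caller wins each transition
    static bool state_transformation(int from, int to) {
        return gc_concurrent_status.compare_exchange_strong(from, to);
    }

    int main() {
        if (state_transformation(GC_CON_NIL, GC_CON_STW_ENUM))
            std::puts("won the transition: begin STW enumeration");
        // a concurrent second attempt from GC_CON_NIL now fails, so
        // gc_con_perform_collection() simply returns FALSE on that thread
        std::printf("second attempt: %d\n",
                    (int)state_transformation(GC_CON_NIL, GC_CON_STW_ENUM));
        return 0;
    }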
Index: vm/gc_gen/src/common/gc_for_barrier.cpp
===================================================================
--- vm/gc_gen/src/common/gc_for_barrier.cpp (revision 747844)
+++ vm/gc_gen/src/common/gc_for_barrier.cpp (working copy)
@@ -415,6 +415,40 @@
/*Concurrent Mark & Generational Mode:
Global objects are roots. After root set enumeration, this objects will be touched by GC. No barrier here.
*/
-
*p_slot = p_target;
}
+
+//[tick weakref]
+void gc_weak_ref_get_barrier(Managed_Object_Handle weak_ref_obj) {
+
+ if(write_barrier_function==WB_REM_NIL)
+ return;
+ Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)weak_ref_obj;
+ REF *p_referent_field = obj_get_referent_field(p_obj);
+ Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
+
+ if(!p_referent)
+ return;
+
+ Partial_Reveal_VTable *vt = decode_vt(obj_get_vt(p_obj));
+ const char *class_name = vtable_get_gcvt(vt)->gc_class_name;
+ INFO2( "gc.weakref", "weak reference object is class " << class_name << " reference addr=" << p_obj);
+ vt = decode_vt(obj_get_vt(p_referent));
+ INFO2( "gc.weakref", "weak referent object is class " << vtable_get_gcvt(vt)->gc_class_name << " referent addr=" << p_referent);
+
+ switch(write_barrier_function) {
+ case WB_REM_SOURCE_OBJ:
+ if(!referent_need_remember_MostlyCon(p_referent))
+ return;
+ break;
+ case WB_REM_OBJ_SNAPSHOT:
+ case WB_REM_OLD_VAR:
+ if(!referent_need_remember_SATB(p_referent))
+ return;
+ break;
+ }
+ Mutator *mutator = (Mutator *)gc_get_tls();
+ mutator_dirtyset_add_entry(mutator, p_referent);
+ //return (Managed_Object_Handle)p_referent;
+}
+
\ No newline at end of file
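A minimal standalone model (simplified stand-in types, not DRLVM code) of the decision gc_weak_ref_get_barrier makes above: read the referent, then remember it in the mutator-local dirty set only when the active write-barrier mode says concurrent marking could otherwise miss it.

    #include <cstdio>
    #include <vector>

    // mode names mirror the switch above; the Obj fields stand in for the
    // mark and dirty bits kept in the block tables
    enum WBMode { WB_REM_NIL, WB_REM_SOURCE_OBJ, WB_REM_OBJ_SNAPSHOT, WB_REM_OLD_VAR };
    struct Obj { bool gray, black, dirty; };

    static std::vector<Obj*> mutator_dirty_set;  // stands in for mutator_dirtyset_add_entry()

    static void weak_ref_get_barrier(WBMode mode, Obj *referent) {
        if (mode == WB_REM_NIL || referent == 0) return;  // no concurrent marking running
        bool remember = false;
        switch (mode) {
        case WB_REM_SOURCE_OBJ:                 // mostly concurrent: dirty referents
            remember = !referent->gray && !referent->black && !referent->dirty;
            break;                              // will be rescanned anyway
        case WB_REM_OBJ_SNAPSHOT:
        case WB_REM_OLD_VAR:                    // SATB: remember any unreached referent
            remember = !referent->gray && !referent->black;
            break;
        default:
            break;
        }
        if (remember) mutator_dirty_set.push_back(referent);
    }

    int main() {
        Obj white = { false, false, false };
        weak_ref_get_barrier(WB_REM_OBJ_SNAPSHOT, &white);
        std::printf("remembered %u referent(s)\n", (unsigned)mutator_dirty_set.size());
        return 0;
    }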
Index: vm/gc_gen/src/common/gc_for_vm.cpp
===================================================================
--- vm/gc_gen/src/common/gc_for_vm.cpp (revision 747844)
+++ vm/gc_gen/src/common/gc_for_vm.cpp (working copy)
@@ -63,6 +63,7 @@
vm_helper_register_magic_helper(VM_RT_NEW_VECTOR_USING_VTABLE, "org/apache/harmony/drlvm/gc_gen/GCHelper", "allocArray");
vm_helper_register_magic_helper(VM_RT_GC_HEAP_WRITE_REF, "org/apache/harmony/drlvm/gc_gen/GCHelper", "write_barrier_slot_rem");
vm_helper_register_magic_helper(VM_RT_GET_IDENTITY_HASHCODE, "org/apache/harmony/drlvm/gc_gen/GCHelper", "get_hashcode");
+ //vm_helper_register_magic_helper(VM_RT_WEAK_REFERENCE_GET, "org/apache/harmony/drlvm/gc_gen/GCHelper", "get_barrier");
}
int gc_init()
Index: vm/gc_gen/src/common/gc_metadata.h
===================================================================
--- vm/gc_gen/src/common/gc_metadata.h (revision 747844)
+++ vm/gc_gen/src/common/gc_metadata.h (working copy)
@@ -146,8 +146,15 @@
if( !vector_block_is_full(dirty_set) ) {
return;
}
-
- lock(mutator->dirty_set_lock);
+
+ /*
+ The vector block is mutator local here, but it is not thread safe in concurrent GC.
+ We take the lock to guard against the block having already been put into the global
+ dirty pool while conclctors are checking the convergence condition. If so, its entries
+ may already have been read out by conclctors (the head pointer advanced). Only when
+ head and tail meet (the block is empty AND full at once) may we clear it for reuse;
+ otherwise we replace it with a new one.
+ */
+ lock(mutator->dirty_set_lock);
if( vector_block_is_empty(dirty_set) ) {
vector_block_clear(dirty_set);
unlock(mutator->dirty_set_lock);
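A standalone sketch (hypothetical head/tail model, much simplified) of the reuse rule in the comment above: a dirty-set vector block is recycled only when readers have drained everything writers appended, i.e. it is empty AND full at once.

    #include <cstdio>

    struct VectorBlock {
        enum { CAP = 4 };
        int head;   // next slot conclctors read
        int tail;   // next slot the mutator writes
        bool full()  const { return tail == CAP; }
        bool empty() const { return head == tail; }
    };

    int main() {
        VectorBlock b = { VectorBlock::CAP, VectorBlock::CAP };  // filled, then fully read out
        if (b.full() && b.empty())        // the pointers met: safe to clear and reuse
            b.head = b.tail = 0;
        std::printf("reused: head=%d tail=%d\n", b.head, b.tail);
        return 0;
    }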
Index: vm/gc_gen/src/common/gc_options.cpp
===================================================================
--- vm/gc_gen/src/common/gc_options.cpp (revision 747844)
+++ vm/gc_gen/src/common/gc_options.cpp (working copy)
@@ -359,7 +359,6 @@
#endif
gc_specify_con_mark();
gc->generate_barrier = TRUE;
- IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/
}
}
@@ -372,7 +371,6 @@
LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
gc_specify_con_sweep();
- IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/
}
}
Index: vm/gc_gen/src/common/large_pages.cpp
===================================================================
--- vm/gc_gen/src/common/large_pages.cpp (revision 747844)
+++ vm/gc_gen/src/common/large_pages.cpp (working copy)
@@ -125,14 +125,13 @@
LWARN(52, "GC large_page: Large pages are not supported by kernel.\nGC large_page: CONFIG_HUGETLB_PAGE and CONFIG_HUGETLBFS needs to be enabled.");
} else if (proc_huge_pages_total == 0){
LWARN(53, "GC large_page: No large pages reserved, Use the following command: echo num> /proc/sys/vm/nr_hugepages.\nGC large_page: Do it just after kernel boot before huge pages become fragmented.");
- } else {
- //compute required huge page number
- size_t required = (required_size+proc_huge_page_size-1)/proc_huge_page_size;
- if (proc_huge_pages_total < required) {
- LWARN(54, "GC large_page: required size exceeds total large page size.");
- } else if (proc_huge_pages_free < required) {
- LWARN(55, "GC large_page: required size exceeds free large page size.");
+ } else if (proc_huge_pages_free * proc_huge_page_size < required_size) {
+ if (proc_huge_pages_total * proc_huge_page_size >= required_size) {
+ LWARN(54, "GC large_page: Not enough free large pages, some of reserved space is already busy.");
+ } else {
+ LWARN(54, "GC large_page: Not enough large pages reserved, required size exceeds the total reserved space.");
}
+ LWARN(55, "GC large_page: Large pages can only be allocated.");
}
}
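A worked check (standalone; the 4MB huge-page size is an assumption for the example) of the byte comparisons introduced above: the patch compares free/total pages times the page size directly against required_size instead of first rounding the requirement up to whole pages.

    #include <cstdio>

    int main() {
        const unsigned long page = 4UL << 20;                        // assume 4MB per huge page
        unsigned long required_size = 10UL << 20;                    // 10MB request
        unsigned long free_pages = 2, total_pages = 4;
        bool free_short      = free_pages  * page < required_size;   // 8MB  < 10MB -> warn
        bool reserved_enough = total_pages * page >= required_size;  // 16MB >= 10MB -> "busy" case
        std::printf("free short: %d, reserved but busy: %d\n", free_short, reserved_enough);
        return 0;
    }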
Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
===================================================================
--- vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (revision 747844)
+++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (working copy)
@@ -68,9 +68,8 @@
}
}
-static void identify_finalizable_objects(Collector *collector)
+static void identify_finalizable_objects(GC *gc)
{
- GC *gc = collector->gc;
Finref_Metadata *metadata = gc->finref_metadata;
Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
@@ -118,9 +117,9 @@
extern void trace_obj_in_ms_fallback_marking(Collector *collector, void *p_ref);
extern void trace_obj_in_space_tune_marking(Collector *collector, void *p_obj);
extern void trace_obj_in_ms_marking(Collector *collector, void *p_obj);
-extern void trace_obj_in_ms_concurrent_mark(Collector *collector, void *p_obj);
+extern void trace_obj_in_ms_concurrent_mark(Conclctor *collector, void *p_obj);
+extern void trace_obj_in_ms_mostly_concurrent_mark(Conclctor *collector, void *p_obj);
-
typedef void (* Trace_Object_Func)(Collector *collector, void *p_ref_or_obj);
// Resurrect the obj tree whose root is the obj which p_ref points to
@@ -180,8 +179,10 @@
p_ref_or_obj = p_obj;
if( gc->gc_concurrent_status == GC_CON_NIL )
trace_object = trace_obj_in_ms_marking;
- else
- trace_object = trace_obj_in_ms_concurrent_mark;
+ else if(gc_is_kind(ALGO_CON_MOSTLY))
+ trace_object = (Trace_Object_Func)trace_obj_in_ms_mostly_concurrent_mark;
+ else
+ trace_object = (Trace_Object_Func)trace_obj_in_ms_concurrent_mark;
}
collector->trace_stack = free_task_pool_get_entry(metadata);
@@ -330,9 +331,8 @@
}
}
-static void identify_dead_softrefs(Collector *collector)
+static void identify_dead_softrefs(GC *gc)
{
- GC *gc = collector->gc;
if(collect_is_minor()){
assert(softref_pool_is_empty(gc));
return;
@@ -342,9 +342,8 @@
identify_dead_refs(gc, softref_pool);
}
-static void identify_dead_weakrefs(Collector *collector)
+static void identify_dead_weakrefs(GC *gc)
{
- GC *gc = collector->gc;
Pool *weakref_pool = gc->finref_metadata->weakref_pool;
identify_dead_refs(gc, weakref_pool);
@@ -354,9 +353,8 @@
* The reason why we don't use identify_dead_refs() to implement this function is
* that we will differentiate phanref from weakref in the future.
*/
-static void identify_dead_phanrefs(Collector *collector)
+static void identify_dead_phanrefs(GC *gc)
{
- GC *gc = collector->gc;
Finref_Metadata *metadata = gc->finref_metadata;
Pool *phanref_pool = metadata->phanref_pool;
@@ -446,11 +444,11 @@
Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter);
if(p_obj)
vm_enqueue_reference(p_obj);
- iter = vector_block_iterator_advance(block, iter);
- }
- vector_block_clear(block);
- pool_put_entry(free_pool, block);
- block = pool_get_entry(ref_pool);
+ iter = vector_block_iterator_advance(block, iter);
+ }
+ vector_block_clear(block);
+ pool_put_entry(free_pool, block);
+ block = pool_get_entry(ref_pool);
}
}
@@ -583,9 +581,9 @@
GC *gc = collector->gc;
gc_set_weakref_sets(gc);
- identify_dead_softrefs(collector);
- identify_dead_weakrefs(collector);
- identify_finalizable_objects(collector);
+ identify_dead_softrefs(gc);
+ identify_dead_weakrefs(gc);
+ identify_finalizable_objects(gc);
resurrect_finalizable_objects(collector);
gc->collect_result = gc_collection_result(gc);
if(!gc->collect_result){
@@ -593,9 +591,32 @@
resurrection_fallback_handler(gc);
return;
}
- identify_dead_phanrefs(collector);
+ identify_dead_phanrefs(gc);
}
+void gc_process_weakref(GC *gc)
+{
+ //process soft references
+ Pool *softref_pool = gc->finref_metadata->softref_pool;
+ identify_dead_refs(gc, softref_pool);
+
+ //process weak references
+ Pool *weakref_pool = gc->finref_metadata->weakref_pool;
+ identify_dead_refs(gc, weakref_pool);
+
+}
+
+void gc_process_finphanref(Conclctor *conclctor)
+{
+ GC *gc = conclctor->gc;
+ //processing finalizable objects
+ identify_finalizable_objects(gc);
+ resurrect_finalizable_objects((Collector *)conclctor);
+
+ //process phantom references
+ identify_dead_phanrefs(gc);
+}
+
void fallback_finref_cleanup(GC *gc)
{
gc_set_weakref_sets(gc);
Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
===================================================================
--- vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (revision 747844)
+++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (working copy)
@@ -26,6 +26,7 @@
#include "finalizer_weakref_metadata.h"
#include "../thread/collector.h"
+#include "../thread/conclctor.h"
extern Boolean IGNORE_FINREF;
@@ -88,6 +89,37 @@
}
}
+
+//[tick weakref]
+inline void scan_weak_reference_con(Conclctor *conclctor, Partial_Reveal_Object *p_obj, Scan_Slot_Func scan_slot)
+{
+
+ WeakReferenceType type = special_reference_type(p_obj);
+ if(type == NOT_REFERENCE)
+ return;
+ REF *p_referent_field = obj_get_referent_field(p_obj);
+ REF p_referent = *p_referent_field;
+ if (!p_referent) return;
+
+ switch(type){
+ case SOFT_REFERENCE :
+ if(TRUE) //TODO: add the condition that decides when soft references are treated as strong
+ scan_slot((Collector *)conclctor, p_referent_field);
+ else
+ conclctor_add_softref(conclctor, p_obj);
+ break;
+ case WEAK_REFERENCE :
+ conclctor_add_weakref(conclctor, p_obj);
+ break;
+ case PHANTOM_REFERENCE :
+ conclctor_add_phanref(conclctor, p_obj);
+ break;
+ default :
+ assert(0);
+ break;
+ }
+}
+
inline void scan_weak_reference_direct(Collector *collector, Partial_Reveal_Object *p_obj, Scan_Slot_Func scan_slot)
{
WeakReferenceType type = special_reference_type(p_obj);
@@ -111,6 +143,11 @@
extern void gc_update_weakref_ignore_finref(GC *gc);
extern void collector_identify_finref(Collector *collector);
+
+extern void gc_process_finphanref(Conclctor *conclctor);
+extern void gc_process_weakref(GC *gc);
+//extern void conclctor_identify_phanref(Conclctor *conclctor);
+
extern void fallback_finref_cleanup(GC *gc);
extern void gc_put_finref_to_vm(GC *gc);
extern void put_all_fin_on_exit(GC *gc);
Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
===================================================================
--- vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (revision 747844)
+++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (working copy)
@@ -20,6 +20,7 @@
*/
#include "finalizer_weakref_metadata.h"
+#include "../common/gc_concurrent.h"
#include "../thread/mutator.h"
#include "../thread/collector.h"
#include "../thread/conclctor.h"
@@ -207,38 +208,51 @@
collector->phanref_set= finref_get_free_block(gc);
}
-#include "../common/gc_concurrent.h"
+/* reset the weak reference vector blocks of each conclctor */
+void conclctor_reset_weakref_sets(Conclctor *conclctor)
+{
+ GC *gc = conclctor->gc;
+ assert(conclctor->softref_set == NULL);
+ assert(conclctor->weakref_set == NULL);
+ assert(conclctor->phanref_set == NULL);
+ conclctor->softref_set = finref_get_free_block(gc);
+ conclctor->weakref_set = finref_get_free_block(gc);
+ conclctor->phanref_set= finref_get_free_block(gc);
+}
+
+
/* put back last weak references block of each collector */
void gc_set_weakref_sets(GC *gc)
{
- Finref_Metadata *metadata = gc->finref_metadata;
+ Finref_Metadata *metadata = gc->finref_metadata;
- unsigned int num_active_collectors = gc->num_active_collectors;
- for(unsigned int i = 0; i < num_active_collectors; i++)
- {
- Collector *collector = gc->collectors[i];
- if(!vector_block_is_empty(collector->softref_set))
- pool_put_entry(metadata->softref_pool, collector->softref_set);
- else
- pool_put_entry(metadata->free_pool, collector->softref_set);
+ unsigned int num_active_collectors = gc->num_active_collectors;
+ for(unsigned int i = 0; i < num_active_collectors; i++)
+ {
+ Collector *collector = gc->collectors[i];
+ if(!vector_block_is_empty(collector->softref_set))
+ pool_put_entry(metadata->softref_pool, collector->softref_set);
+ else
+ pool_put_entry(metadata->free_pool, collector->softref_set);
- if(!vector_block_is_empty(collector->weakref_set))
- pool_put_entry(metadata->weakref_pool, collector->weakref_set);
- else
- pool_put_entry(metadata->free_pool, collector->weakref_set);
+ if(!vector_block_is_empty(collector->weakref_set))
+ pool_put_entry(metadata->weakref_pool, collector->weakref_set);
+ else
+ pool_put_entry(metadata->free_pool, collector->weakref_set);
- if(!vector_block_is_empty(collector->phanref_set))
- pool_put_entry(metadata->phanref_pool, collector->phanref_set);
- else
- pool_put_entry(metadata->free_pool, collector->phanref_set);
+ if(!vector_block_is_empty(collector->phanref_set))
+ pool_put_entry(metadata->phanref_pool, collector->phanref_set);
+ else
+ pool_put_entry(metadata->free_pool, collector->phanref_set);
- collector->softref_set = NULL;
- collector->weakref_set= NULL;
- collector->phanref_set= NULL;
- }
+ collector->softref_set = NULL;
+ collector->weakref_set= NULL;
+ collector->phanref_set= NULL;
+ }
return;
}
+
void gc_reset_finref_metadata(GC *gc)
{
Finref_Metadata *metadata = gc->finref_metadata;
@@ -333,6 +347,31 @@
collector->phanref_set = finref_metadata_add_entry(gc, collector->phanref_set, metadata->phanref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
}
+
+//[tick weakref]
+void conclctor_add_softref(Conclctor *conclctor, Partial_Reveal_Object *ref)
+{
+ GC *gc = conclctor->gc;
+ Finref_Metadata *metadata = gc->finref_metadata;
+ conclctor->softref_set = finref_metadata_add_entry(gc, conclctor->softref_set, metadata->softref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
+}
+
+void conclctor_add_weakref(Conclctor *conclctor, Partial_Reveal_Object *ref)
+{
+ GC *gc = conclctor->gc;
+ Finref_Metadata *metadata = gc->finref_metadata;
+ conclctor->weakref_set = finref_metadata_add_entry(gc, conclctor->weakref_set, metadata->weakref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
+}
+
+void conclctor_add_phanref(Conclctor *conclctor, Partial_Reveal_Object *ref)
+{
+ GC *gc = conclctor->gc;
+ Finref_Metadata *metadata = gc->finref_metadata;
+ conclctor->phanref_set = finref_metadata_add_entry(gc, conclctor->phanref_set, metadata->phanref_pool, (POINTER_SIZE_INT)obj_ptr_to_ref(ref));
+}
+
+
+
void finref_repset_add_entry(GC *gc, REF* p_ref)
{
assert(*p_ref);
Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h
===================================================================
--- vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (revision 747844)
+++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (working copy)
@@ -67,7 +67,9 @@
extern void gc_set_obj_with_fin(GC *gc);
extern void collector_reset_weakref_sets(Collector *collector);
+extern void conclctor_reset_weakref_sets(Conclctor *conclctor);
extern void gc_set_weakref_sets(GC *gc);
+
extern void gc_reset_finref_metadata(GC *gc);
extern void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref);
@@ -76,6 +78,11 @@
extern void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref);
extern void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref);
extern void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref);
+
+extern void conclctor_add_softref(Conclctor *conclctor, Partial_Reveal_Object *ref);
+extern void conclctor_add_weakref(Conclctor *conclctor, Partial_Reveal_Object *ref);
+extern void conclctor_add_phanref(Conclctor *conclctor, Partial_Reveal_Object *ref);
+
extern void finref_repset_add_entry(GC *gc, REF* ref);
extern Vector_Block *finref_add_fallback_ref(GC *gc, Vector_Block *vector_block_in_use, Partial_Reveal_Object *p_ref);
Index: vm/gc_gen/src/mark_sweep/gc_ms.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/gc_ms.cpp (revision 747844)
+++ vm/gc_gen/src/mark_sweep/gc_ms.cpp (working copy)
@@ -71,13 +71,9 @@
void gc_ms_reclaim_heap(GC_MS *gc)
{
//if(verify_live_heap) gc_verify_heap((GC*)gc, TRUE);
-
Wspace *wspace = gc_ms_get_wspace(gc);
-
wspace_collection(wspace);
-
wspace_reset_after_collection(wspace);
-
//if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE);
}
@@ -87,10 +83,9 @@
void gc_ms_start_con_mark(GC_MS* gc, unsigned int num_markers)
{
- if(gc->num_active_markers == 0)
- pool_iterator_init(gc->metadata->gc_rootset_pool);
+ pool_iterator_init(gc->metadata->gc_rootset_pool);
- set_marker_final_func( (TaskType)wspace_last_otf_marker_work );
+ set_marker_final_func((TaskType)wspace_last_otf_marker_work);
conclctor_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_concurrent,(Space*)gc->wspace, num_markers, CONCLCTOR_ROLE_MARKER);
}
@@ -99,27 +94,23 @@
void gc_ms_start_mostly_con_mark(GC_MS* gc, unsigned int num_markers)
{
- if(gc->num_active_markers == 0)
- pool_iterator_init(gc->metadata->gc_rootset_pool);
+ pool_iterator_init(gc->metadata->gc_rootset_pool);
- set_marker_final_func( (TaskType)wspace_last_mc_marker_work );
+ set_marker_final_func((TaskType)wspace_last_mc_marker_work);
conclctor_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_mostly_concurrent,(Space*)gc->wspace, num_markers, CONCLCTOR_ROLE_MARKER);
}
-void wspace_final_mark_scan_mostly_concurrent( Conclctor *marker );
-void conclctor_execute_task_synchronized(GC* gc, TaskType task_func, Space* space, unsigned int num_markers, unsigned int role);
-
+void wspace_final_mark_scan_mostly_concurrent(Conclctor *marker);
+void wspace_final_mark_last_marker_work(Conclctor *marker);
+
void gc_ms_start_mostly_con_final_mark(GC_MS* gc, unsigned int num_markers)
{
pool_iterator_init(gc->metadata->gc_rootset_pool);
-
- conclctor_execute_task_synchronized( (GC*)gc,(TaskType)wspace_final_mark_scan_mostly_concurrent,(Space*)gc->wspace, num_markers, CONCLCTOR_ROLE_MARKER );
-
- /*
- collector_execute_task( (GC*)gc,(TaskType)wspace_mark_scan_mostly_concurrent,(Space*)gc->wspace );
- collector_set_weakref_sets( (GC*)gc );
- */
+
+ set_marker_final_func((TaskType)wspace_final_mark_last_marker_work);
+ conclctor_execute_task_concurrent((GC*)gc,(TaskType)wspace_final_mark_scan_mostly_concurrent,(Space*)gc->wspace, num_markers, CONCLCTOR_ROLE_MARKER);
+
}
/*FIXME: move this function out of this file.*/
@@ -148,6 +139,7 @@
mem_fence();
gc_check_mutator_allocation((GC*)gc);
gc_disable_alloc_obj_live((GC*)gc);
+ gc_init_chunk_for_sweep((GC *)gc, gc->wspace);
//just debugging
//gc_con_print_stat_heap_utilization_rate((GC*)gc);
//INFO2("gc.scheduler", "=== Start Con Sweeping ===");
Index: vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp (revision 747844)
+++ vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp (working copy)
@@ -18,6 +18,7 @@
#include "wspace_mark_sweep.h"
#include "../finalizer_weakref/finalizer_weakref.h"
#include "../thread/conclctor.h"
+#include "../thread/collector.h"
#include "gc_ms.h"
volatile Boolean need_terminate_mostly_con_mark;
@@ -69,9 +70,12 @@
scan_slot((Collector*)marker, p_ref);
}
+ if(!IGNORE_FINREF )
+ scan_weak_reference_con(marker, p_obj, scan_slot);
#ifndef BUILD_IN_REFERENT
- //scan_weak_reference((Collector*)marker, p_obj, scan_slot);
- scan_weak_reference_direct((Collector*)marker, p_obj, scan_slot);
+ else {
+ scan_weak_reference_direct((Collector *)marker, p_obj, scan_slot);
+ }
#endif
}
@@ -219,7 +223,7 @@
} else if( current_thread_id >= mostly_con_long_marker_num ) {
break;
}
- apr_sleep(15000);
+ thread_sleep_slice();
}
/*
@@ -246,7 +250,6 @@
return;
}
-
void wspace_final_mark_scan_mostly_concurrent(Conclctor* marker)
{
@@ -337,7 +340,7 @@
-void wspace_last_mc_marker_work( Conclctor *last_marker ) {
+void wspace_last_mc_marker_work(Conclctor *last_marker) {
GC *gc = last_marker->gc;
if( gc->gc_concurrent_status != GC_CON_TRACING )
@@ -350,36 +353,59 @@
int64 con_marking_time = con_collection_stat->marking_end_time - con_collection_stat->marking_start_time;
INFO2("gc.scheduler", "[MOSTLY_CON] con marking time=" << con_marking_time << " us");
+ /* the STW marking thread may reuse a conclctor that just finished in the concurrent marking phase */
+ conclctors_put_weakref_sets(gc);
+ conclctors_put_phanref_sets(gc);
+
state_transformation( gc, GC_CON_TRACING, GC_CON_TRACE_DONE );
//INFO2("gc.con.info", " first marking thread finished its job, GC is waiting for all the marking threads finish, current marker num is [" << gc->num_active_markers << "]" );
}
+
+void wspace_final_mark_last_marker_work(Conclctor *last_final_marker)
+{
+ GC *gc = last_final_marker->gc;
+ int64 time_start = time_now();
+ conclctors_put_weakref_sets(gc);
+ conclctors_put_phanref_sets(gc);
+ if(!IGNORE_FINREF ) {
+ //processing soft/weak references can be done in the concurrent phase; TODO: process weak references in parallel
+ gc_con_process_weakref(last_final_marker);
+ //processing finalizable objects and phantom references can NOT be done in the concurrent phase
+ gc_con_process_finphanref(last_final_marker);
+ INFO2("gc.con", "weak/soft references, finalizable objects and phantom references are processed in final mark");
+ }
+ gc_init_chunk_for_sweep(gc, ((GC_MS *)gc)->wspace);
+ //gc_identify_dead_weak_roots(gc);
+ int64 time_end = time_now();
+ INFO2("gc.con", "references processing time="<<(unsigned int)(time_end-time_start));
+ state_transformation( gc, GC_CON_TRACE_DONE, GC_CON_BEFORE_SWEEP );
+}
+
+
+
void gc_mostly_con_update_stat_after_final_marking(GC *gc);
void wspace_mostly_con_final_mark( GC *gc ) {
+ /*prepare dirty object*/
+ gc_prepare_dirty_set(gc);
- /*init the root set pool*/
- pool_iterator_init(gc->metadata->gc_rootset_pool);
- /*prepare dirty object*/
- gc_prepare_dirty_set(gc);
- /*new asssign thread may reuse the one just finished in the same phase*/
- conclctor_set_weakref_sets(gc);
-
/*start final mostly concurrent mark */
gc_ms_start_mostly_con_final_mark((GC_MS*)gc, mostly_con_final_marker_num);
-
mostly_con_mark_terminate_reset();
+
+ while(gc->gc_concurrent_status!=GC_CON_BEFORE_SWEEP) {
+ vm_thread_yield();
+ }
gc_mostly_con_update_stat_after_final_marking(gc);
-
gc_reset_dirty_set(gc);
gc_clear_rootset(gc);
- gc_prepare_sweeping(gc);
- state_transformation( gc, GC_CON_TRACE_DONE, GC_CON_BEFORE_SWEEP );
+
}
-void trace_obj_in_ms_mostly_concurrent_mark(Collector *collector, void *p_obj)
+void trace_obj_in_ms_mostly_concurrent_mark(Conclctor *collector, void *p_obj)
{
obj_mark_gray_in_table((Partial_Reveal_Object*)p_obj);
- trace_object((Conclctor*)collector, (Partial_Reveal_Object *)p_obj);
+ trace_object(collector, (Partial_Reveal_Object *)p_obj);
}
Index: vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp (revision 747844)
+++ vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp (working copy)
@@ -17,6 +17,7 @@
#include "wspace_mark_sweep.h"
#include "../finalizer_weakref/finalizer_weakref.h"
#include "../thread/conclctor.h"
+#include "../thread/collector.h"
#include "gc_ms.h"
struct GC_MS;
struct Wspace;
@@ -69,8 +70,12 @@
scan_slot((Collector*)marker, p_ref);
}
+ if(!IGNORE_FINREF )
+ scan_weak_reference_con(marker, p_obj, scan_slot);
#ifndef BUILD_IN_REFERENT
- scan_weak_reference_direct((Collector*)marker, p_obj, scan_slot);
+ else {
+ scan_weak_reference_direct((Collector *)marker, p_obj, scan_slot);
+ }
#endif
}
@@ -207,7 +212,7 @@
atomic_inc32(&num_active_markers);
goto retry;
}
- apr_sleep(15000);
+ thread_sleep_slice();
}
state_transformation( gc, GC_CON_TRACING, GC_CON_TRACE_DONE );
@@ -223,26 +228,46 @@
return;
}
-void wspace_last_otf_marker_work( Conclctor *last_marker ) {
+void wspace_last_otf_marker_work(Conclctor *last_marker) {
GC *gc = last_marker->gc;
-
+
+ gc_set_barrier_function(WB_REM_NIL);
gc_reset_dirty_set(gc);
- gc_set_barrier_function(WB_REM_NIL);
//INFO2("gc.con.info", "all markers finish ");
gc_con_update_stat_after_marking(gc); //calculate marked size
gc_clear_rootset(gc);
+ INFO2("gc.con.info", "Concurrent collection, current collection = " << gc->num_collections );
- gc_prepare_sweeping(gc);
+ //process soft/weak references in the concurrent phase; TODO: process weak references in parallel
+ conclctors_put_weakref_sets(gc);
+ conclctors_put_phanref_sets(gc);
+ if(!IGNORE_FINREF)
+ gc_con_process_weakref(last_marker);
+ INFO2("gc.con", "weak/soft references are processed");
+ //transform to start sweeping
state_transformation( gc, GC_CON_TRACE_DONE, GC_CON_BEFORE_SWEEP );
}
-void trace_obj_in_ms_concurrent_mark(Collector *collector, void *p_obj)
+void conclctor_execute_task_synchronized(GC* gc, TaskType task_func, Space* space, unsigned int num_markers, unsigned int role);
+void gc_con_process_finphanref(Conclctor *conclctor);
+
+/*
+ this method should be called in an STW phase, after concurrent marking finishes and before sweeping actually starts
+*/
+void wspace_otf_weakref_processing_work(GC* gc) {
+ set_marker_final_func(NULL);
+ conclctor_execute_task_synchronized(gc, (TaskType)gc_con_process_finphanref, (Space*)((GC_MS *)gc)->wspace, 1, CONCLCTOR_ROLE_MARKER);
+ INFO2("gc.con", "finablizable objects and phantom references are processed");
+}
+
+
+void trace_obj_in_ms_concurrent_mark(Conclctor *collector, void *p_obj)
{
obj_mark_gray_in_table((Partial_Reveal_Object*)p_obj);
- trace_object((Conclctor*)collector, (Partial_Reveal_Object *)p_obj);
+ trace_object(collector, (Partial_Reveal_Object *)p_obj);
}
Index: vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp (revision 747844)
+++ vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp (working copy)
@@ -265,41 +265,40 @@
nos = gc_get_nos((GC_Gen*)gc);
unsigned int num_active_collectors = gc->num_active_collectors;
-
+
+ unsigned int old_num;
/* Pass 1: **************************************************
Mark all live objects in heap ****************************/
- atomic_cas32(&num_marking_collectors, 0, num_active_collectors+1);
+ if( gc_con_is_in_STW(gc) ) { //if mark has been done in a concurrent manner, skip this mark
+ atomic_cas32(&num_marking_collectors, 0, num_active_collectors+1);
- //if mark has been done in a concurrent manner, skip this mark
- if( gc_con_is_in_STW(gc) ) {
- if(collect_is_fallback())
- wspace_fallback_mark_scan(collector, wspace);
- else
- wspace_mark_scan(collector, wspace);
- }
-
- unsigned int old_num = atomic_inc32(&num_marking_collectors);
- if( ++old_num == num_active_collectors ){
- /* last collector's world here */
+ if(collect_is_fallback())
+ wspace_fallback_mark_scan(collector, wspace);
+ else
+ wspace_mark_scan(collector, wspace);
+
+ old_num = atomic_inc32(&num_marking_collectors);
+ if( ++old_num == num_active_collectors ){
+ /* last collector's world here */
#ifdef SSPACE_TIME
- wspace_mark_time(FALSE);
+ wspace_mark_time(FALSE);
#endif
- if(!IGNORE_FINREF )
- collector_identify_finref(collector);
+ if(!IGNORE_FINREF )
+ collector_identify_finref(collector);
#ifndef BUILD_IN_REFERENT
- else {
- gc_set_weakref_sets(gc);
- gc_update_weakref_ignore_finref(gc);
- }
+ else {
+ gc_set_weakref_sets(gc);
+ gc_update_weakref_ignore_finref(gc);
+ }
#endif
- gc_identify_dead_weak_roots(gc);
- gc_init_chunk_for_sweep(gc, wspace);
-
- /* let other collectors go */
- num_marking_collectors++;
+ gc_identify_dead_weak_roots(gc);
+ gc_init_chunk_for_sweep(gc, wspace);
+
+ /* let other collectors go */
+ num_marking_collectors++;
+ }
+ while(num_marking_collectors != num_active_collectors + 1);
}
- while(num_marking_collectors != num_active_collectors + 1);
-
/* Pass 2: **************************************************
Sweep dead objects ***************************************/
atomic_cas32( &num_sweeping_collectors, 0, num_active_collectors+1);
Index: vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h (revision 747844)
+++ vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h (working copy)
@@ -553,6 +553,17 @@
return !obj_is_mark_gray_in_table(obj) && !obj_is_mark_black_in_table(obj);
}
+//[tick weakref]
+FORCE_INLINE Boolean referent_need_remember_SATB(Partial_Reveal_Object *referent)
+{
+ return !obj_is_mark_gray_in_table(referent) && !obj_is_mark_black_in_table(referent);
+}
+
+FORCE_INLINE Boolean referent_need_remember_MostlyCon(Partial_Reveal_Object *referent)
+{
+ return !obj_is_mark_gray_in_table(referent) && !obj_is_mark_black_in_table(referent) && !obj_is_dirty_in_table(referent);
+}
+
inline void collector_add_free_chunk(Collector *collector, Free_Chunk *chunk)
{
Free_Chunk_List *list = collector->free_chunk_list;
Index: vm/gc_gen/src/thread/collector.cpp
===================================================================
--- vm/gc_gen/src/thread/collector.cpp (revision 747844)
+++ vm/gc_gen/src/thread/collector.cpp (working copy)
@@ -365,5 +365,3 @@
}
-
-
Index: vm/gc_gen/src/thread/collector.h
===================================================================
--- vm/gc_gen/src/thread/collector.h (revision 747844)
+++ vm/gc_gen/src/thread/collector.h (working copy)
@@ -118,7 +118,7 @@
Boolean is_collector_finished(GC* gc);
void wait_collection_finish(GC* gc);
int64 gc_get_collector_time(GC* gc);
-
+
inline Boolean gc_collection_result(GC* gc)
{
Boolean result = TRUE;
Index: vm/gc_gen/src/thread/conclctor.cpp
===================================================================
--- vm/gc_gen/src/thread/conclctor.cpp (revision 747844)
+++ vm/gc_gen/src/thread/conclctor.cpp (working copy)
@@ -40,10 +40,8 @@
static inline void conclctor_reset_thread(Conclctor *conclctor)
{
conclctor->task_func = NULL;
-#ifndef BUILD_IN_REFERENT
if(conclctor->role == CONCLCTOR_ROLE_MARKER) //only marker use weakref sets
conclctor_reset_weakref_sets(conclctor);
-#endif
return;
}
@@ -56,7 +54,7 @@
case CONCLCTOR_ROLE_MARKER:
if(apr_atomic_dec32(&gc->num_active_markers) == 0 ) {
if(marker_final_func!=NULL)
- marker_final_func( conclctor );
+ marker_final_func( conclctor );
}
break;
case CONCLCTOR_ROLE_SWEEPER:
@@ -95,7 +93,6 @@
static void assign_conclctor_with_task(GC* gc, TaskType task_func, Space* space, unsigned int num_conclctors, unsigned int role )
{
-
unsigned int num_assign = round_conclctor_num(gc, num_conclctors);
if( num_assign < num_conclctors ) {
INFO2( "gc.con.info", " There is no free conclctors" );
@@ -120,12 +117,11 @@
Conclctor* conclctor = gc->conclctors[i];
if( conclctor->status != CONCLCTOR_NIL )
continue;
+ conclctor->role = role;
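+ /* the role must be set before conclctor_reset_thread(), which now resets weakref sets only for markers */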
conclctor_reset_thread(conclctor);
conclctor->task_func = task_func;
conclctor->con_space = space;
- conclctor->role = role;
conclctor->status = CONCLCTOR_ACTIVE;
- //assign_event_info( role, i );
notify_conclctor_to_work(conclctor);
if( ++j >= num_assign) break;
}
@@ -147,25 +143,13 @@
TaskType task_func = conclctor->task_func;
if(task_func == NULL) {
atomic_dec32(&live_conclctor_num);
- conclctor->status = CONCLCTOR_DEAD;
+ conclctor->status = CONCLCTOR_DEAD;
//INFO2( "gc.con.info", "CONCLCTOR DEAD");
return 1;
}
conclctor->time_measurement_start = time_now();
task_func(conclctor);
-
- /*
- if( conclctor->role == CONCLCTOR_ROLE_MARKER ) {
- int64 marking_time = conclctor->time_measurement_end - conclctor->time_measurement_start;
- double marking_rate = conclctor->num_dirty_slots_traced;
- if( marking_time != 0 )
- marking_rate = (double)conclctor->num_dirty_slots_traced/(marking_time>>10);
- lock( print_lock );
- INFO2( "gc.con.info", "[MR] Marking Time=" << (unsigned int)marking_time << ", Dirty Slots Traced=" << conclctor->num_dirty_slots_traced << ", Trace Rate=" << marking_rate << "/ms" );
- unlock( print_lock );
- }*/
-
conclctor_finish(conclctor);
conclctor->time_measurement_end = time_now();
conclctor->status = CONCLCTOR_NIL;
@@ -346,7 +330,7 @@
gc->num_active_markers = 0;
}
-void conclctor_set_weakref_sets(GC* gc)// now only marker uses this
+void conclctors_put_phanref_sets(GC *gc)
{
unsigned int req_role = CONCLCTOR_ROLE_MARKER;
Finref_Metadata *metadata = gc->finref_metadata;
@@ -354,26 +338,37 @@
unsigned int i = 0;
for(; i<num_conclctors; i++){
Conclctor *conclctor = gc->conclctors[i];
- if( conclctor->role != req_role )
- continue;
- //check_ref_pool(conclctor);
- /* for mostly concurrent, some conclctors's weak sets have already been reclaimed, so the NOT NULL check is need here */
- if( conclctor->softref_set != NULL ) {
- pool_put_entry(metadata->softref_pool, conclctor->softref_set);
- conclctor->softref_set = NULL;
+ if( conclctor->role != req_role )
+ continue;
+ if( conclctor->phanref_set != NULL ) {
+ pool_put_entry(metadata->phanref_pool, conclctor->phanref_set);
+ conclctor->phanref_set = NULL;
+ }
}
+}
- if( conclctor->weakref_set != NULL ) {
- pool_put_entry(metadata->weakref_pool, conclctor->weakref_set);
- conclctor->weakref_set = NULL;
- }
+void conclctors_put_weakref_sets(GC* gc)// now only marker uses this
+{
+ unsigned int req_role = CONCLCTOR_ROLE_MARKER;
+ Finref_Metadata *metadata = gc->finref_metadata;
+ unsigned int num_conclctors = gc->num_conclctors;
+ unsigned int i = 0;
+ for(; i<num_conclctors; i++){
+ Conclctor *conclctor = gc->conclctors[i];
+ if( conclctor->role != req_role )
+ continue;
+ //check_ref_pool(conclctor);
+ /* for mostly concurrent, some conclctors' weak sets have already been reclaimed, so the NOT NULL check is needed here */
+ if( conclctor->softref_set != NULL ) {
+ pool_put_entry(metadata->softref_pool, conclctor->softref_set);
+ conclctor->softref_set = NULL;
+ }
- if( conclctor->phanref_set != NULL ) {
- pool_put_entry(metadata->phanref_pool, conclctor->phanref_set);
- conclctor->phanref_set = NULL;
+ if( conclctor->weakref_set != NULL ) {
+ pool_put_entry(metadata->weakref_pool, conclctor->weakref_set);
+ conclctor->weakref_set = NULL;
+ }
}
-
- }
}
@@ -398,14 +393,4 @@
}
-/* reset weak references vetctor block of each conclctor */
-void conclctor_reset_weakref_sets(Conclctor *conclctor)
-{
- GC *gc = conclctor->gc;
- assert(conclctor->softref_set == NULL);
- assert(conclctor->weakref_set == NULL);
- assert(conclctor->phanref_set == NULL);
- conclctor->softref_set = finref_get_free_block(gc);
- conclctor->weakref_set = finref_get_free_block(gc);
- conclctor->phanref_set= finref_get_free_block(gc);
-}
+
Index: vm/gc_gen/src/thread/conclctor.h
===================================================================
--- vm/gc_gen/src/thread/conclctor.h (revision 747844)
+++ vm/gc_gen/src/thread/conclctor.h (working copy)
@@ -113,9 +113,9 @@
void conclctor_execute_task_concurrent(GC* gc, TaskType task_func, Space* space, unsigned int num_conclctors, unsigned int role);
int64 gc_get_conclctor_time(GC* gc, unsigned int req_role);
void gc_clear_conclctor_role(GC *gc);
-void conclctor_set_weakref_sets(GC* gc);
+void conclctors_put_phanref_sets(GC *gc);
+void conclctors_put_weakref_sets(GC* gc);
void conclctor_release_weakref_sets(GC* gc);
-void conclctor_reset_weakref_sets(Conclctor *conclctor);
//void conclctor_release_weakref_sets(GC* gc, unsigned int num_conclctor);
//void conclctor_restore_obj_info(Collector* collector);
Index: vm/include/open/gc.h
===================================================================
--- vm/include/open/gc.h (revision 747844)
+++ vm/include/open/gc.h (working copy)
@@ -633,6 +633,9 @@
* The below variables are used in the runtime dynamic linking of
* garbage collector with virtual machine executable.
*/
+
+ //[tick weakref]
+extern Boolean (*gc_is_concurrent_mode)();
extern Boolean (*gc_supports_compressed_references)();
@@ -652,6 +655,9 @@
extern void (*gc_heap_wrote_object)(Managed_Object_Handle p_base_of_object_just_written);
extern Boolean (*gc_heap_copy_object_array)(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length);
+
+//[tick weakref]
+extern void (*gc_weak_ref_get_barrier)(Managed_Object_Handle weak_ref_obj);
/*
* The variables below are exported by the VM so other DLLs modules
* may use them. dll_gc.cpp initializes them to the addresses exported
@@ -672,6 +678,9 @@
*/
//@{
+ //[tick weakref]
+GCExport Boolean gc_is_concurrent_mode();
+
/**
* @return TRUE if references within objects and vector
* elements are to be treated as offsets rather than raw pointers.
@@ -701,7 +710,15 @@
* */
GCExport Boolean gc_heap_copy_object_array(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length);
+//[tick weakref]
/**
+ * By calling this function the VM notifies the GC that the application is calling a weak
+ * reference's get() method to obtain its referent.
+ *
+ * This function is a read (get) barrier for weak reference processing in concurrent GC.
+ */
+GCExport void gc_weak_ref_get_barrier(Managed_Object_Handle weak_ref_obj);
+
+/**
* By calling this function VM notifies GC that a heap reference was written to
* global slot.
*
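A self-contained sketch (stub function pointers, hypothetical names; not DRLVM's loader) of how the two new optional entry points are meant to be used together: callers probe gc_is_concurrent_mode() once and invoke the get barrier only in concurrent mode, matching the Reference.get() change later in this patch.

    #include <cstdio>

    typedef int Boolean;
    typedef void *Managed_Object_Handle;

    /* stand-ins for the symbols dll_gc.cpp resolves from the GC DLL */
    static Boolean stub_is_concurrent_mode() { return 1; }
    static void stub_weak_ref_get_barrier(Managed_Object_Handle) {
        std::puts("weak-ref get barrier invoked");
    }

    Boolean (*gc_is_concurrent_mode)() = stub_is_concurrent_mode;
    void (*gc_weak_ref_get_barrier)(Managed_Object_Handle) = stub_weak_ref_get_barrier;

    int main() {
        Managed_Object_Handle weak_ref_obj = 0;   /* placeholder reference object */
        if (gc_is_concurrent_mode())              /* cached as VMHelper.GC_Concurrent_Mode */
            gc_weak_ref_get_barrier(weak_ref_obj);
        return 0;
    }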
Index: vm/include/open/rt_helpers.h
===================================================================
--- vm/include/open/rt_helpers.h (revision 747844)
+++ vm/include/open/rt_helpers.h (working copy)
@@ -513,6 +513,9 @@
/////
VM_RT_GET_IDENTITY_HASHCODE,
+
+ //[tick weakref]
+ VM_RT_WEAK_REFERENCE_GET,
/**
* @param The parameters are the following:
* arg\ Object reference for the source array. Must be non-null and refer to an array
Index: vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp
===================================================================
--- vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp (revision 747844)
+++ vm/jitrino/src/codegenerator/ia32/Ia32InstCodeSelector.cpp (working copy)
@@ -1201,12 +1201,32 @@
//_______________________________________________________________________________________________________________
// Shift left and add
-
+#define LEA_EXP_LIMIT 3
CG_OpndHandle* InstCodeSelector::shladd(IntegerOp::Types opType,
CG_OpndHandle* value,
U_32 imm,
CG_OpndHandle* addto)
{
+ if ((opType == IntegerOp::I4) && (imm <= LEA_EXP_LIMIT)) {
+ Type * dstType = irManager.getTypeFromTag(Type::Int32);
+ bool addtoIsImm = false;
+ if (((Opnd*)addto)->isPlacedIn(OpndKind_Imm))
+ addtoIsImm = true;
+ Opnd *res;
+ if (addtoIsImm) {
+ res = irManager.newMemOpnd(dstType, NULL, (Opnd*)value,
+ irManager.newImmOpnd(typeManager.getInt32Type(), 1<<imm), (Opnd*)addto);
+ } else {
+ res = irManager.newMemOpnd(dstType, (Opnd*)addto, (Opnd*)value,
+ irManager.newImmOpnd(typeManager.getInt32Type(), 1<<imm), NULL);
+ }
+ res->setMemOpndKind(MemOpndKind_LEA);
+ Opnd *dst = irManager.newOpnd(dstType);
+ Inst *newInst = irManager.newInstEx(Mnemonic_LEA, 1, dst, res);
+ appendInsts(newInst);
+ return dst;
+ }
+
Opnd * shiftDest = (Opnd *)shl(opType, value, irManager.newImmOpnd(typeManager.getUInt8Type(), imm));
ArithmeticOp::Types atype;
switch (opType) {
@@ -2941,6 +2961,8 @@
case VM_RT_MULTIANEWARRAY_RESOLVED:
case VM_RT_GET_IDENTITY_HASHCODE:
+ //[tick weakref]
+ case VM_RT_WEAK_REFERENCE_GET:
{
dstOpnd = retType==NULL ? NULL: irManager.newOpnd(retType);
CallInst * callInst=irManager.newRuntimeHelperCallInst(callId, numArgs, (Opnd**)args, dstOpnd);
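To see what the LEA path above computes: shladd(value, imm, addto) is addto + (value << imm), and for imm <= LEA_EXP_LIMIT (3) the scale 1<<imm fits x86 LEA's legal 1/2/4/8 scales, so the whole operation folds into one instruction. A standalone arithmetic check:

    #include <cassert>

    // shladd semantics: one "lea dst, [addto + value * (1 << imm)]" instruction
    static int shladd(int value, unsigned imm, int addto) {
        return addto + (value << imm);
    }

    int main() {
        assert(shladd(5, 3, 7) == 7 + 5 * 8);   // scale 1<<3 == 8, the LEA maximum
        assert(shladd(5, 0, 7) == 12);          // scale 1
        return 0;
    }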
Index: vm/jitrino/src/optimizer/Opcode.h
===================================================================
--- vm/jitrino/src/optimizer/Opcode.h (revision 747844)
+++ vm/jitrino/src/optimizer/Opcode.h (working copy)
@@ -454,7 +454,7 @@
// prefixes: unaligned, volatile, tail,
Op_IdentHC,
-
+ Op_WRGet,
NumOpcodes,
};
Index: vm/jitrino/src/translator/java/JavaByteCodeTranslator.cpp
===================================================================
--- vm/jitrino/src/translator/java/JavaByteCodeTranslator.cpp (revision 747844)
+++ vm/jitrino/src/translator/java/JavaByteCodeTranslator.cpp (working copy)
@@ -3025,6 +3025,15 @@
return true;
}
+ //[tick weakref]
+ if (!strcmp(mname,"weakReferenceGetReferent")) {
+ assert(numArgs == 1);
+ irBuilder.genVMHelperCall(VM_RT_WEAK_REFERENCE_GET, resType, numArgs, srcOpnds);
+ //Opnd* res = irBuilder.genVMHelperCall(VM_RT_WEAK_REFERENCE_GET, resType, numArgs, srcOpnds);
+ //pushOpnd(res);
+ return true;
+ }
+
if (!strcmp(mname, "memset0"))
{
assert(numArgs == 2);
Index: vm/vmcore/src/gc/dll_gc.cpp
===================================================================
--- vm/vmcore/src/gc/dll_gc.cpp (revision 747844)
+++ vm/vmcore/src/gc/dll_gc.cpp (working copy)
@@ -41,6 +41,9 @@
static int64 default_gc_max_memory();
static void default_gc_wrapup();
static Boolean default_gc_requires_barriers();
+//[tick weakref]
+static Boolean default_gc_is_concurrent_mode();
+
static Boolean default_gc_supports_compressed_references();
static void default_gc_heap_slot_write_ref(Managed_Object_Handle p_base_of_object_with_slot,
Managed_Object_Handle *p_slot,
@@ -53,6 +56,8 @@
static void default_gc_heap_write_global_slot_compressed(U_32 *p_slot,
Managed_Object_Handle value);
static void default_gc_heap_wrote_object(Managed_Object_Handle p_base_of_object_just_written);
+static Boolean default_gc_heap_copy_object_array(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length);
+static void default_gc_weak_ref_get_barrier(Managed_Object_Handle weak_ref_obj);
static void default_gc_add_compressed_root_set_entry(U_32 *ref);
static void default_gc_add_root_set_entry_managed_pointer(void **slot,
Boolean is_pinned);
@@ -67,6 +72,9 @@
static Boolean default_gc_supports_class_unloading();
+ //[tick weakref]
+Boolean (*gc_is_concurrent_mode)() = 0;
+
Boolean (*gc_supports_compressed_references)() = 0;
void (*gc_add_root_set_entry)(Managed_Object_Handle *ref, Boolean is_pinned) = 0;
void (*gc_add_weak_root_set_entry)(Managed_Object_Handle *ref, Boolean is_pinned,Boolean is_short_weak) = 0;
@@ -92,6 +100,8 @@
unsigned offset,
Managed_Object_Handle value) = 0;
Boolean (*gc_heap_copy_object_array)(Managed_Object_Handle src_array, unsigned int src_start, Managed_Object_Handle dst_array, unsigned int dst_start, unsigned int length)=0;
+//[tick weakref]
+void (*gc_weak_ref_get_barrier)(Managed_Object_Handle weak_ref_obj) = 0;
void (*gc_heap_wrote_object)(Managed_Object_Handle p_base_of_object_just_written) = 0;
int (*gc_init)() = 0;
Boolean (*gc_is_object_pinned)(Managed_Object_Handle obj) = 0;
@@ -172,6 +182,13 @@
}
+ //[tick weakref]
+ gc_is_concurrent_mode = (Boolean (*)())
+ getFunctionOptional(handle,
+ "gc_is_concurrent_mode",
+ dllName,
+ (apr_dso_handle_sym_t)default_gc_is_concurrent_mode);
+
gc_supports_compressed_references = (Boolean (*)())
getFunctionOptional(handle,
"gc_supports_compressed_references",
@@ -231,7 +248,13 @@
getFunctionOptional(handle,
"gc_heap_copy_object_array",
dllName,
- (apr_dso_handle_sym_t)default_gc_heap_wrote_object);
+ (apr_dso_handle_sym_t)default_gc_heap_copy_object_array);
+ //[tick weakref]
+ gc_weak_ref_get_barrier = (void (*)(Managed_Object_Handle weak_ref_obj))
+ getFunctionOptional(handle,
+ "gc_weak_ref_get_barrier",
+ dllName,
+ (apr_dso_handle_sym_t)default_gc_weak_ref_get_barrier);
gc_heap_wrote_object = (void (*)(Managed_Object_Handle p_base_of_object_just_written))
getFunctionOptional(handle,
"gc_heap_wrote_object",
@@ -363,6 +386,10 @@
{
} //default_gc_wrapup
+static Boolean default_gc_is_concurrent_mode()
+{
+ return FALSE;
+}
static Boolean default_gc_supports_compressed_references()
{
@@ -434,8 +461,20 @@
{
} //default_gc_heap_wrote_object
+static Boolean default_gc_heap_copy_object_array(Managed_Object_Handle UNREF src_array, unsigned int src_start, Managed_Object_Handle UNREF dst_array, unsigned int dst_start, unsigned int length)
+{
+ LDIE(7, "Fatal GC error: native array copy not supported");
+ return FALSE;
+}
+//[tick weakref]
+static void default_gc_weak_ref_get_barrier(Managed_Object_Handle UNREF weak_ref_obj)
+{
+ LDIE(7, "Fatal GC error: weak reference get barrier not supported");
+ //return NULL;
+} //default_gc_weak_ref_get_barrier
+
static void default_gc_add_compressed_root_set_entry(U_32 * UNREF ref)
{
LDIE(7, "Fatal GC error: compressed references are not supported.");
Index: vm/vmcore/src/jit/rt_helper_info.cpp
===================================================================
--- vm/vmcore/src/jit/rt_helper_info.cpp (revision 747844)
+++ vm/vmcore/src/jit/rt_helper_info.cpp (working copy)
@@ -74,9 +74,8 @@
{VM_RT_GET_IDENTITY_HASHCODE, "VM_RT_GET_IDENTITY_HASHCODE",
INTERRUPTIBLE_ALWAYS, CALLING_CONVENTION_STDCALL, 1,
- NULL, NULL, "(Ljava/lang/Object;)I", NULL},
+ NULL, NULL, "(Ljava/lang/Object;)I", NULL},
-
{VM_RT_MONITOR_ENTER, "VM_RT_MONITOR_ENTER",
INTERRUPTIBLE_SOMETIMES, CALLING_CONVENTION_STDCALL, 1,
"org/apache/harmony/drlvm/thread/ThreadHelper", "monitorEnterUseReservation",
@@ -121,6 +120,12 @@
{VM_RT_GC_HEAP_WRITE_REF, "VM_RT_GC_HEAP_WRITE_REF",
INTERRUPTIBLE_NEVER, CALLING_CONVENTION_CDECL, 3,
NULL, NULL, "(Lorg/vmmagic/unboxed/Address;Lorg/vmmagic/unboxed/Address;Lorg/vmmagic/unboxed/Address;)V", NULL},
+
+ //[tick weakref] the parameter/result are (Object)Object for now; they need to change to (Address)Address in the fast path
+ {VM_RT_WEAK_REFERENCE_GET, "VM_RT_WEAK_REFERENCE_GET",
+ INTERRUPTIBLE_NEVER, CALLING_CONVENTION_CDECL, 1,
+ NULL, NULL, "(Ljava/lang/Object;)V", NULL},
+
{VM_RT_GC_SAFE_POINT, "VM_RT_GC_SAFE_POINT",
INTERRUPTIBLE_ALWAYS, CALLING_CONVENTION_STDCALL, 0,
NULL, NULL, NULL, NULL},
Index: vm/vmcore/src/kernel_classes/javasrc/java/lang/ref/Reference.java
===================================================================
--- vm/vmcore/src/kernel_classes/javasrc/java/lang/ref/Reference.java (revision 747844)
+++ vm/vmcore/src/kernel_classes/javasrc/java/lang/ref/Reference.java (working copy)
@@ -1,4 +1,4 @@
-/*
+ /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
@@ -21,6 +21,8 @@
package java.lang.ref;
+import org.apache.harmony.drlvm.VMHelper;
+
/**
* @com.intel.drl.spec_ref
*/
@@ -64,7 +66,14 @@
* @com.intel.drl.spec_ref
*/
public T get() {
- return referent;
+
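+        // weak reference get barrier: notify the concurrent collector that the referent is being read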
+ if (VMHelper.GC_Concurrent_Mode) {
+ if(VMHelper.isVMMagicPackageSupported())
+ VMHelper.weakReferenceGetReferent(this);
+ else
+ VMReferenceManager.weakReferenceGet(this);
+ }
+ return referent;
}
/**
Index: vm/vmcore/src/kernel_classes/javasrc/java/lang/VMMemoryManager.java
===================================================================
--- vm/vmcore/src/kernel_classes/javasrc/java/lang/VMMemoryManager.java (revision 747844)
+++ vm/vmcore/src/kernel_classes/javasrc/java/lang/VMMemoryManager.java (working copy)
@@ -36,6 +36,7 @@
*/
private VMMemoryManager() {}
+
/**
* This method satisfies the requirements of the specification for the
* {@link System#arraycopy(java.lang.Object, int, java.lang.Object, int, int)
Index: vm/vmcore/src/kernel_classes/javasrc/org/apache/harmony/drlvm/VMHelper.java
===================================================================
--- vm/vmcore/src/kernel_classes/javasrc/org/apache/harmony/drlvm/VMHelper.java (revision 747844)
+++ vm/vmcore/src/kernel_classes/javasrc/org/apache/harmony/drlvm/VMHelper.java (working copy)
@@ -23,6 +23,7 @@
import org.vmmagic.unboxed.*;
import org.vmmagic.pragma.*;
+
/**
Core class for DRLVM's vmmagic based helpers.
Resolved and initilized during VM startup
@@ -56,7 +57,10 @@
public static final int OBJ_INFO_OFFSET = 4;
public static final int CLASS_JLC_HANDLE_OFFSET = getClassJLCHanldeOffset();
-
+
+ //[tick weakref]
+ public static final boolean GC_Concurrent_Mode = isGCConcurrentMode();
+
// preload @Inline vmmagic class
static final Class pragmaInline = org.vmmagic.pragma.Inline.class;
static final Class threadHelper = org.apache.harmony.drlvm.thread.ThreadHelper.class;
@@ -88,9 +92,9 @@
public static boolean instanceOf(Object obj, Address castTypePtr) {fail(); return false;}
+ //[tick weakref]
+ public static void weakReferenceGetReferent(Object weakrefObj) {fail();}
-
-
//utility magics supported by JIT
public static boolean isVMMagicPackageSupported() {return false;}
@@ -176,6 +180,14 @@
/** @return managed object field offset in vtable*/
private static native int getClassJLCHanldeOffset();
+ //[tick weakref]
+ /** @return whether the GC is running in concurrent mode */
+ private static native boolean isGCConcurrentMode();
+
+ public static boolean needWeakReferenceGetBarrier() {
+ return GC_Concurrent_Mode;
+ }
+
}
Index: vm/vmcore/src/kernel_classes/native/org_apache_harmony_drlvm_VMHelper.cpp
===================================================================
--- vm/vmcore/src/kernel_classes/native/org_apache_harmony_drlvm_VMHelper.cpp (revision 747844)
+++ vm/vmcore/src/kernel_classes/native/org_apache_harmony_drlvm_VMHelper.cpp (working copy)
@@ -16,6 +16,7 @@
*/
#include "org_apache_harmony_drlvm_VMHelper.h"
+#include "open/gc.h"
#include "open/vm.h"
#include "open/vm_ee.h"
#include "open/vm_util.h"
@@ -75,6 +76,9 @@
return static_cast<jint>(reinterpret_cast<POINTER_SIZE_INT>(&((VTable*)0)->clss));
}
+JNIEXPORT jboolean JNICALL Java_org_apache_harmony_drlvm_VMHelper_isGCConcurrentMode(JNIEnv *e, jclass c)
+{
+ return (jboolean)gc_is_concurrent_mode();
+}
-
Index: vm/vmcore/src/kernel_classes/native/org_apache_harmony_drlvm_VMHelper.h
===================================================================
--- vm/vmcore/src/kernel_classes/native/org_apache_harmony_drlvm_VMHelper.h (revision 747844)
+++ vm/vmcore/src/kernel_classes/native/org_apache_harmony_drlvm_VMHelper.h (working copy)
@@ -57,6 +57,8 @@
JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_VMHelper_getVtableClassOffset
(JNIEnv *, jclass);
+JNIEXPORT jboolean JNICALL Java_org_apache_harmony_drlvm_VMHelper_isGCConcurrentMode
+ (JNIEnv *, jclass);
#ifdef __cplusplus
}
Index: vm/vmcore/src/util/ia32/base/jit_runtime_support_ia32.cpp
===================================================================
--- vm/vmcore/src/util/ia32/base/jit_runtime_support_ia32.cpp (revision 747844)
+++ vm/vmcore/src/util/ia32/base/jit_runtime_support_ia32.cpp (working copy)
@@ -1083,10 +1083,14 @@
case VM_RT_GC_HEAP_WRITE_REF:
return (void*)gc_heap_slot_write_ref;
-
+
+ //[tick weakref]
+ case VM_RT_WEAK_REFERENCE_GET:
+ return (void*)gc_weak_ref_get_barrier;
+
case VM_RT_GET_IDENTITY_HASHCODE:
return getaddress__vm_gethashcode_java_object_resolved_using_gethashcode_naked();
-
+
default:
LDIE(50, "Unexpected helper id {0}" << f);
return 0;