Index: src/common/collection_scheduler.cpp =================================================================== --- src/common/collection_scheduler.cpp (revision 650025) +++ src/common/collection_scheduler.cpp (working copy) @@ -16,263 +16,38 @@ */ #include "gc_common.h" -#include "../gen/gen.h" -#include "../mark_sweep/gc_ms.h" -#include "../mark_sweep/wspace.h" #include "collection_scheduler.h" +#include "concurrent_collection_scheduler.h" #include "gc_concurrent.h" -#include "../thread/marker.h" -#include "../verify/verify_live_heap.h" -#define NUM_TRIAL_COLLECTION 10 -#define MAX_DELAY_TIME 0x7fFfFfFf -#define MAX_TRACING_RATE 2 - -static int64 time_delay_to_start_mark = MAX_DELAY_TIME; - void collection_scheduler_initialize(GC* gc) { - - Collection_Scheduler* collection_scheduler = (Collection_Scheduler*) STD_MALLOC(sizeof(Collection_Scheduler)); - assert(collection_scheduler); - memset(collection_scheduler, 0, sizeof(Collection_Scheduler)); - - collection_scheduler->gc = gc; - gc->collection_scheduler = collection_scheduler; - time_delay_to_start_mark = MAX_DELAY_TIME; - + if(gc_is_specify_con_gc()) con_collection_scheduler_initialize(gc); return; } void collection_scheduler_destruct(GC* gc) { - STD_FREE(gc->collection_scheduler); + if(gc_is_specify_con_gc()) con_collection_scheduler_destruct(gc); + return; } -Boolean gc_need_start_concurrent_mark(GC* gc) +void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection) { - if(!USE_CONCURRENT_MARK) return FALSE; - //FIXME: GEN mode also needs the support of starting mark after thread resume. -#ifdef USE_UNIQUE_MARK_SWEEP_GC - if(gc_is_concurrent_mark_phase() || gc_mark_is_concurrent()) return FALSE; - - int64 time_current = time_now(); - if( time_current - get_collection_end_time() > time_delay_to_start_mark) - return TRUE; - else - return FALSE; -#else - /*FIXME: concurrent mark is not supported in GC_GEN*/ - assert(0); - return FALSE; -#endif -} - -Boolean gc_need_start_concurrent_sweep(GC* gc) -{ - if(!USE_CONCURRENT_SWEEP) return FALSE; - - if(gc_sweep_is_concurrent()) return FALSE; - - /*if mark is concurrent and STW GC has not started, we should start concurrent sweep*/ - if(gc_mark_is_concurrent() && !gc_is_concurrent_mark_phase(gc)) - return TRUE; - else - return FALSE; -} - -Boolean gc_need_reset_status(GC* gc) -{ - if(gc_sweep_is_concurrent() && !gc_is_concurrent_sweep_phase(gc)) - return TRUE; - else - return FALSE; -} - -Boolean gc_need_prepare_rootset(GC* gc) -{ - /*TODO: support on-the-fly root set enumeration.*/ - return FALSE; -} - -void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_mark) -{ - //FIXME: support GEN GC. 
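The start-point computation that the removed block below carried out (and that con_collection_scheduler_update_start_point re-implements later in this patch) amounts to comparing the expected time for mutators to exhaust the free space against the expected time for markers to trace the live set, then delaying the next concurrent mark by a corrected fraction of the difference. A condensed, self-contained sketch of that arithmetic; the helper names are illustrative, only the formula and the 0.65 OTF correction factor come from the patch:

#include <cstdint>

// Expected time until allocation exhausts the usable free space, given an
// averaged allocation rate (bytes per time unit).  Rates are assumed positive;
// the patch substitutes MIN/MAX bounds before dividing.
static int64_t expected_alloc_time(double free_bytes, double util_ratio, double alloc_rate)
{ return (int64_t)((free_bytes * util_ratio) / alloc_rate); }

// Expected time to trace the live objects at an averaged tracing rate.
static int64_t expected_trace_time(double num_live_obj, double trace_rate)
{ return (int64_t)(num_live_obj / trace_rate); }

// Delay before starting the next concurrent mark: leave just enough head room
// that tracing finishes about when the free space would run out.  0.65 is the
// correction the patch applies for the on-the-fly (OTF) algorithms; the
// mostly-concurrent algorithm instead uses half of the last mutator time.
static int64_t mark_start_delay(int64_t t_alloc, int64_t t_trace)
{
  const double TIME_CORRECTION_OTF_MARK = 0.65;
  if (t_alloc <= t_trace) return 0;            // already behind: start marking at once
  return (int64_t)((t_alloc - t_trace) * TIME_CORRECTION_OTF_MARK);
}

In the patch both rates are averaged over a five-slot sampling window (STAT_SAMPLE_WINDOW_SIZE) that is refilled after every collection.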
-#ifdef USE_UNIQUE_MARK_SWEEP_GC - - Collection_Scheduler* collection_scheduler = gc->collection_scheduler; - Space* space = NULL; - - space = (Space*) gc_get_wspace(gc); - - Space_Statistics* space_stat = space->space_statistic; - - unsigned int slot_index = collection_scheduler->last_slot_index_in_window; - unsigned int num_slot = collection_scheduler->num_slot_in_window; - - collection_scheduler->num_obj_traced_window[slot_index] = space_stat->num_live_obj; - collection_scheduler->size_alloced_window[slot_index] = space_stat->size_new_obj; - collection_scheduler->space_utilization_rate[slot_index] = space_stat->space_utilization_ratio; - - collection_scheduler->last_mutator_time = time_mutator; - collection_scheduler->last_collector_time = time_mark; - INFO2("gc.con","last_size_free_space"<<(space_stat->last_size_free_space)<<" new obj num "<size_alloced_window[slot_index]<<" "); - if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION) - return; - INFO2("gc.con","num_live_obj "<<(space_stat->num_live_obj)<<" last_size_free_space"<<(space_stat->last_size_free_space)<<" "); - - collection_scheduler->alloc_rate_window[slot_index] - = time_mutator == 0 ? 0 : (float)collection_scheduler->size_alloced_window[slot_index] / time_mutator; - - collection_scheduler->trace_rate_window[slot_index] - = time_mark == 0 ? MAX_TRACING_RATE : (float)collection_scheduler->num_obj_traced_window[slot_index] / time_mark; - - INFO2("gc.con","mutator time "<<(time_mutator>>10)<<" collection time "<<(time_mark>>10)<<" "); - - collection_scheduler->num_slot_in_window = num_slot >= STATISTICS_SAMPLING_WINDOW_SIZE ? num_slot : (++num_slot); - collection_scheduler->last_slot_index_in_window = (++slot_index)% STATISTICS_SAMPLING_WINDOW_SIZE; - - float sum_alloc_rate = 0; - float sum_trace_rate = 0; - float sum_space_util_ratio = 0; - - unsigned int i; - for(i = 0; i < collection_scheduler->num_slot_in_window; i++){ - sum_alloc_rate += collection_scheduler->alloc_rate_window[i]; - sum_trace_rate += collection_scheduler->trace_rate_window[i]; - sum_space_util_ratio += collection_scheduler->space_utilization_rate[i]; + if(gc_is_specify_con_gc()){ + gc_update_con_collection_scheduler(gc, time_mutator, time_collection); } - - TRACE2("gc.con","Allocation Rate: "); - for(i = 0; i < collection_scheduler->num_slot_in_window; i++){ - TRACE2("gc.con",i+1<<" "<alloc_rate_window[i]); - } - - TRACE2("gc.con","Tracing Rate: "); - - for(i = 0; i < collection_scheduler->num_slot_in_window; i++){ - TRACE2("gc.con",i+1<<" "<trace_rate_window[i]); - } - - float average_alloc_rate = sum_alloc_rate / collection_scheduler->num_slot_in_window; - float average_trace_rate = sum_trace_rate / collection_scheduler->num_slot_in_window; - float average_space_util_ratio = sum_space_util_ratio / collection_scheduler->num_slot_in_window; - - INFO2("gc.con","averAllocRate: "<size_free_space * average_space_util_ratio) / average_alloc_rate; - float time_trace_expected = space_stat->num_live_obj / average_trace_rate; - - INFO2("gc.con","[Concurrent GC] expected alloc time "< time_trace_expected){ - if(gc_concurrent_match_algorithm(OTF_REM_OBJ_SNAPSHOT_ALGO)||gc_concurrent_match_algorithm(OTF_REM_NEW_TARGET_ALGO)){ - collection_scheduler->time_delay_to_start_mark = (int64)((time_alloc_expected - time_trace_expected)*0.65); - }else if(gc_concurrent_match_algorithm(MOSTLY_CONCURRENT_ALGO)){ - collection_scheduler->time_delay_to_start_mark = (int64)(time_mutator* 0.5); - } - - }else{ - collection_scheduler->time_delay_to_start_mark = 0; 
- } - - time_delay_to_start_mark = collection_scheduler->time_delay_to_start_mark; - } - INFO2("gc.con","[Concurrent GC] concurrent marking will delay "<<(unsigned int)(time_delay_to_start_mark>>10)<<" ms "); - //[DEBUG] set to 0 for debugging. - //time_delay_to_start_mark = 0; -#endif return; - } -unsigned int gc_decide_marker_number(GC* gc) +Boolean gc_sched_collection(GC* gc, unsigned int gc_cause) { - unsigned int num_active_marker; - Collection_Scheduler* collection_scheduler = gc->collection_scheduler; - - if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION){ - /*Start trial cycle, collection set to 1 in trial cycle and */ - num_active_marker = 1; + /*collection scheduler only schedules concurrent collection now.*/ + if(GC_CAUSE_CONCURRENT_GC == gc_cause){ + assert(gc_is_specify_con_gc()); + return gc_sched_con_collection(gc, gc_cause); }else{ - num_active_marker = collection_scheduler->last_marker_num; - int64 c_time = collection_scheduler->last_collector_time; - int64 m_time = collection_scheduler->last_mutator_time; - int64 d_time = collection_scheduler->time_delay_to_start_mark; - - if(num_active_marker == 0) num_active_marker = 1; - - if((c_time + d_time) > m_time || (float)d_time < (m_time * 0.25)){ - INFO2("gc.con","[Concurrent GC] increase marker number."); - num_active_marker ++; - if(num_active_marker > gc->num_markers) num_active_marker = gc->num_markers; - }else if((float)d_time > (m_time * 0.6)){ - INFO2("gc.con","[Concurrent GC] decrease marker number."); - num_active_marker --; - if(num_active_marker == 0) num_active_marker = 1; - } - - INFO2("gc.con","[Concurrent GC] ctime "<<(unsigned)(c_time>>10)<<" mtime "<<(unsigned)(m_time>>10)<<" dtime "<<(unsigned)(d_time>>10)); - INFO2("gc.con","[Concurrent GC] marker num : "<last_marker_num = num_active_marker; - return num_active_marker; } -Boolean gc_try_schedule_collection(GC* gc, unsigned int gc_cause) -{ - if(!try_lock(gc->collection_scheduler_lock)) return FALSE; - - gc_check_concurrent_phase(gc); - - if(gc_need_prepare_rootset(gc)){ - /*TODO:Enable concurrent rootset enumeration.*/ - assert(0); - } - - if(gc_need_start_concurrent_mark(gc)){ - vm_gc_lock_enum(); - int64 pause_start = time_now(); - INFO2("gc.con", "[Concurrent GC] concurrent mark start ..."); - gc_start_concurrent_mark(gc); - vm_gc_unlock_enum(); - INFO2("gc.con","[Concurrent GC] pause time of concurrent enumeration: "<<((unsigned int)((time_now()-pause_start)>>10))<<" ms \n"); - unlock(gc->collection_scheduler_lock); - return TRUE; - } - - if(gc_need_start_concurrent_sweep(gc)){ - gc->num_collections++; - INFO2("gc.con", "[Concurrent GC] collection number:"<< gc->num_collections<<" "); - gc_start_concurrent_sweep(gc); - unlock(gc->collection_scheduler_lock); - return TRUE; - } - - if(gc_need_reset_status(gc)){ - int64 pause_start = time_now(); - vm_gc_lock_enum(); - int disable_count = hythread_reset_suspend_disable(); - gc_prepare_rootset(gc); - gc_reset_after_concurrent_collection(gc); - gc_start_mutator_time_measurement(gc); - set_collection_end_time(); - vm_resume_threads_after(); - hythread_set_suspend_disable(disable_count); - vm_gc_unlock_enum(); - INFO2("gc.con","[Concurrent GC] pause time after concurrent GC: "<<((unsigned int)((time_now()-pause_start)>>10))<<" ms \n"); - unlock(gc->collection_scheduler_lock); - return TRUE; - } - unlock(gc->collection_scheduler_lock); - return FALSE; - -} - - Index: src/common/collection_scheduler.h =================================================================== --- 
src/common/collection_scheduler.h (revision 650025) +++ src/common/collection_scheduler.h (working copy) @@ -18,40 +18,19 @@ #ifndef _COLLECTION_SCHEDULER_H_ #define _COLLECTION_SCHEDULER_H_ -#define STATISTICS_SAMPLING_WINDOW_SIZE 5 - typedef struct Collection_Scheduler { /*common field*/ - GC* gc; - - /*mark schedule */ - int64 time_delay_to_start_mark; - - int64 last_mutator_time; - int64 last_collector_time; - - unsigned int last_marker_num; - - unsigned int num_slot_in_window; - unsigned int last_slot_index_in_window; - - float alloc_rate_window[STATISTICS_SAMPLING_WINDOW_SIZE]; - float trace_rate_window[STATISTICS_SAMPLING_WINDOW_SIZE]; - float space_utilization_rate[STATISTICS_SAMPLING_WINDOW_SIZE]; - POINTER_SIZE_INT num_obj_traced_window[STATISTICS_SAMPLING_WINDOW_SIZE]; - POINTER_SIZE_INT size_alloced_window[STATISTICS_SAMPLING_WINDOW_SIZE]; + GC* gc; } Collection_Scheduler; void collection_scheduler_initialize(GC* gc); void collection_scheduler_destruct(GC* gc); -void gc_update_collection_scheduler(GC* gc, int64 mutator_time, int64 mark_time); -Boolean gc_try_schedule_collection(GC* gc, unsigned int gc_cause); -Boolean gc_need_start_concurrent_mark(GC* gc); -unsigned int gc_decide_marker_number(GC* gc); +void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection); +Boolean gc_sched_collection(GC* gc, unsigned int gc_cause); - #endif + Index: src/common/concurrent_collection_scheduler.cpp =================================================================== --- src/common/concurrent_collection_scheduler.cpp (revision 0) +++ src/common/concurrent_collection_scheduler.cpp (revision 0) @@ -0,0 +1,373 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "gc_common.h" +#include "../gen/gen.h" +#include "../mark_sweep/gc_ms.h" +#include "../mark_sweep/wspace.h" +#include "collection_scheduler.h" +#include "concurrent_collection_scheduler.h" +#include "gc_concurrent.h" +#include "../thread/marker.h" +#include "../verify/verify_live_heap.h" + +#define NUM_TRIAL_COLLECTION 2 +#define MIN_DELAY_TIME 0x0 +#define MAX_DELAY_TIME 0x7fFfFfFf +#define MAX_TRACING_RATE 100 +#define MIN_TRACING_RATE 1 +#define MAX_SPACE_THRESHOLD (POINTER_SIZE_INT)((POINTER_SIZE_INT)1<<(BITS_OF_POINTER_SIZE_INT-1)) +#define MIN_SPACE_THRESHOLD 0 + +enum CC_Scheduler_Kind{ + SCHEDULER_NIL = 0x00, + TIME_BASED_SCHEDULER = 0x01, + SPACE_BASED_SCHEDULER = 0x02 +}; + +static unsigned int cc_scheduler_kind = SCHEDULER_NIL; + +void gc_enable_time_scheduler() +{ cc_scheduler_kind |= TIME_BASED_SCHEDULER; } + +void gc_enable_space_scheduler() +{ cc_scheduler_kind |= SPACE_BASED_SCHEDULER; } + +Boolean gc_use_time_scheduler() +{ return cc_scheduler_kind & TIME_BASED_SCHEDULER; } + +Boolean gc_use_space_scheduler() +{ return cc_scheduler_kind & SPACE_BASED_SCHEDULER; } + +static int64 time_delay_to_start_mark = MAX_DELAY_TIME; +static POINTER_SIZE_INT space_threshold_to_start_mark = MAX_SPACE_THRESHOLD; + +void con_collection_scheduler_initialize(GC* gc) +{ + Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*) STD_MALLOC(sizeof(Con_Collection_Scheduler)); + assert(cc_scheduler); + memset(cc_scheduler, 0, sizeof(Con_Collection_Scheduler)); + + cc_scheduler->gc = gc; + gc->collection_scheduler = (Collection_Scheduler*)cc_scheduler; + time_delay_to_start_mark = MAX_DELAY_TIME; + space_threshold_to_start_mark = MAX_SPACE_THRESHOLD; + + return; +} + +void con_collection_scheduler_destruct(GC* gc) +{ + STD_FREE(gc->collection_scheduler); +} + +void gc_decide_cc_scheduler_kind(char* cc_scheduler) +{ + string_to_upper(cc_scheduler); + if(!strcmp(cc_scheduler, "time")){ + gc_enable_time_scheduler(); + }else if(!strcmp(cc_scheduler, "space")){ + gc_enable_space_scheduler(); + }else if(!strcmp(cc_scheduler, "all")){ + gc_enable_time_scheduler(); + gc_enable_space_scheduler(); + } +} + +void gc_set_default_cc_scheduler_kind() +{ + gc_enable_time_scheduler(); +} + +static Boolean time_to_start_mark(GC* gc) +{ + if(!gc_use_time_scheduler()) return FALSE; + + int64 time_current = time_now(); + return (time_current - get_collection_end_time()) > time_delay_to_start_mark; +} + +static Boolean space_to_start_mark(GC* gc) +{ + if(!gc_use_space_scheduler()) return FALSE; + + POINTER_SIZE_INT size_new_obj = gc_get_new_object_size(gc,FALSE); + return (size_new_obj > space_threshold_to_start_mark); +} + +static Boolean gc_need_start_con_mark(GC* gc) +{ + if(!gc_is_specify_con_mark() || gc_mark_is_concurrent()) return FALSE; + + if(time_to_start_mark(gc) || space_to_start_mark(gc)) + return TRUE; + else + return FALSE; +} + +static Boolean gc_need_start_con_sweep(GC* gc) +{ + if(!gc_is_specify_con_sweep() || gc_sweep_is_concurrent()) return FALSE; + + /*if mark is concurrent and STW GC has not started, we should start concurrent sweep*/ + if(gc_mark_is_concurrent() && !gc_con_is_in_marking(gc)) + return TRUE; + else + return FALSE; +} + +static Boolean gc_need_reset_after_con_collect(GC* gc) +{ + if(gc_sweep_is_concurrent() && !gc_con_is_in_sweeping(gc)) + return TRUE; + else + return FALSE; +} + +static Boolean gc_need_start_con_enum(GC* gc) +{ + /*TODO: support on-the-fly root set enumeration.*/ + return FALSE; +} + +#define SPACE_UTIL_RATIO_CORRETION 0.2f +#define 
TIME_CORRECTION_OTF_MARK 0.65f +#define TIME_CORRECTION_OTF_MARK_SWEEP 1.0f +#define TIME_CORRECTION_MOSTLY_MARK 0.5f + +static void con_collection_scheduler_update_stat(GC* gc, int64 time_mutator, int64 time_collection) +{ + Space* space = NULL; + Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler; + +#ifdef USE_UNIQUE_MARK_SWEEP_GC + space = (Space*) gc_get_wspace(gc); +#endif + if(!space) return; + + Space_Statistics* space_stat = space->space_statistic; + + unsigned int slot_index = cc_scheduler->last_window_index; + unsigned int num_slot = cc_scheduler->num_window_slots; + + cc_scheduler->trace_load_window[slot_index] = space_stat->num_live_obj; + cc_scheduler->alloc_load_window[slot_index] = space_stat->size_new_obj; + cc_scheduler->space_utilization_ratio[slot_index] = space_stat->space_utilization_ratio; + + cc_scheduler->last_mutator_time = time_mutator; + cc_scheduler->last_collector_time = time_collection; + + if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION) + return; + + cc_scheduler->alloc_rate_window[slot_index] + = time_mutator == 0 ? 0 : (float)cc_scheduler->alloc_load_window[slot_index] / time_mutator; + + if(gc_mark_is_concurrent()){ + cc_scheduler->trace_rate_window[slot_index] + = time_collection == 0 ? MAX_TRACING_RATE : (float)cc_scheduler->trace_load_window[slot_index] / time_collection; + }else{ + cc_scheduler->trace_rate_window[slot_index] = MIN_TRACING_RATE; + } + + cc_scheduler->num_window_slots = num_slot >= STAT_SAMPLE_WINDOW_SIZE ? num_slot : (++num_slot); + cc_scheduler->last_window_index = (++slot_index)% STAT_SAMPLE_WINDOW_SIZE; +} + +static void con_collection_scheduler_update_start_point(GC* gc, int64 time_mutator, int64 time_collection) +{ + if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION) + return; + + Space* space = NULL; +#ifdef USE_UNIQUE_MARK_SWEEP_GC + space = (Space*) gc_get_wspace(gc); +#endif + if(!space) return; + + Space_Statistics* space_stat = space->space_statistic; + + float sum_alloc_rate = 0; + float sum_trace_rate = 0; + float sum_space_util_ratio = 0; + + Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler; + + int64 time_this_collection_correction = 0; +#if 0 + float space_util_ratio = space_stat->space_utilization_ratio; + if(space_util_ratio > (1-SPACE_UTIL_RATIO_CORRETION)){ + time_this_collection_correction = 0; + }else{ + time_this_collection_correction + = (int64)(((1 - space_util_ratio - SPACE_UTIL_RATIO_CORRETION)/(space_util_ratio))* time_mutator); + } +#endif + + unsigned int i; + for(i = 0; i < cc_scheduler->num_window_slots; i++){ + sum_alloc_rate += cc_scheduler->alloc_rate_window[i]; + sum_trace_rate += cc_scheduler->trace_rate_window[i]; + sum_space_util_ratio += cc_scheduler->space_utilization_ratio[i]; + } + + TRACE2("gc.con.cs","Allocation Rate: "); + for(i = 0; i < cc_scheduler->num_window_slots; i++){ + TRACE2("gc.con.cs",i+1<<"--"<alloc_rate_window[i]); + } + + TRACE2("gc.con.cs","Tracing Rate: "); + for(i = 0; i < cc_scheduler->num_window_slots; i++){ + TRACE2("gc.con.cs",i+1<<"--"<trace_rate_window[i]); + } + + float average_alloc_rate = sum_alloc_rate / cc_scheduler->num_window_slots; + float average_trace_rate = sum_trace_rate / cc_scheduler->num_window_slots; + float average_space_util_ratio = sum_space_util_ratio / cc_scheduler->num_window_slots; + + TRACE2("gc.con.cs","averAllocRate: "<size_free_space * average_space_util_ratio) / average_alloc_rate; + float 
time_trace_expected = space_stat->num_live_obj / average_trace_rate; + TRACE2("gc.con.cs","[GC][Con] expected alloc time "< time_trace_expected){ + if(gc_is_kind(ALGO_CON_OTF_OBJ)||gc_is_kind(ALGO_CON_OTF_REF)){ + float time_correction = gc_sweep_is_concurrent()? TIME_CORRECTION_OTF_MARK_SWEEP : TIME_CORRECTION_OTF_MARK; + cc_scheduler->time_delay_to_start_mark = (int64)((time_alloc_expected - time_trace_expected)*time_correction); + }else if(gc_is_kind(ALGO_CON_MOSTLY)){ + cc_scheduler->time_delay_to_start_mark = (int64)(time_mutator* TIME_CORRECTION_MOSTLY_MARK); + } + }else{ + cc_scheduler->time_delay_to_start_mark = MIN_DELAY_TIME; + } + + cc_scheduler->space_threshold_to_start_mark = + (POINTER_SIZE_INT)(space_stat->size_free_space * ((time_alloc_expected - time_trace_expected) / time_alloc_expected)); + + time_delay_to_start_mark = cc_scheduler->time_delay_to_start_mark + time_this_collection_correction; + space_threshold_to_start_mark = cc_scheduler->space_threshold_to_start_mark; + } + TRACE2("gc.con.cs","[GC][Con] concurrent marking will delay "<<(unsigned int)(time_delay_to_start_mark>>10)<<" ms "); + TRACE2("gc.con.cs","[GC][Con] time correction "<<(unsigned int)(time_this_collection_correction>>10)<<" ms "); + +} + +void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection) +{ + assert(gc_is_specify_con_gc()); + if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) return; + + con_collection_scheduler_update_stat(gc, time_mutator, time_collection); + con_collection_scheduler_update_start_point(gc, time_mutator, time_collection); + + return; +} + +Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause) +{ + if(!try_lock(gc->lock_collect_sched)) return FALSE; + vm_gc_lock_enum(); + + gc_try_finish_con_phase(gc); + + if(gc_need_start_con_enum(gc)){ + /*TODO:Concurrent rootset enumeration.*/ + assert(0); + } + + if(gc_need_start_con_mark(gc)){ + INFO2("gc.con.info", "[GC][Con] concurrent mark start ..."); + gc_start_con_mark(gc); + vm_gc_unlock_enum(); + unlock(gc->lock_collect_sched); + return TRUE; + } + + if(gc_need_start_con_sweep(gc)){ + gc->num_collections++; + INFO2("gc.con.info", "[GC][Con] collection number:"<< gc->num_collections<<" "); + gc_start_con_sweep(gc); + vm_gc_unlock_enum(); + unlock(gc->lock_collect_sched); + return TRUE; + } + + if(gc_need_reset_after_con_collect(gc)){ + int64 pause_start = time_now(); + int disable_count = vm_suspend_all_threads(); + gc_reset_after_con_collect(gc); + gc_start_mutator_time_measure(gc); + set_collection_end_time(); + vm_resume_all_threads(disable_count); + vm_gc_unlock_enum(); + INFO2("gc.con.time","[GC][Con]pause(reset collection): "<<((unsigned int)((time_now()-pause_start)>>10))<<" ms "); + unlock(gc->lock_collect_sched); + return TRUE; + } + vm_gc_unlock_enum(); + unlock(gc->lock_collect_sched); + return FALSE; +} + +extern unsigned int NUM_MARKERS; + +unsigned int gc_decide_marker_number(GC* gc) +{ + unsigned int num_active_marker; + Con_Collection_Scheduler* cc_scheduler = (Con_Collection_Scheduler*)gc->collection_scheduler; + + /*If the number of markers is specfied, just return the specified value.*/ + if(NUM_MARKERS != 0) return NUM_MARKERS; + + /*If the number of markers isn't specified, we decide the value dynamically.*/ + if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION){ + /*Start trial cycle, collection set to 1 in trial cycle and */ + num_active_marker = 1; + }else{ + num_active_marker = cc_scheduler->last_marker_num; + int64 c_time = 
cc_scheduler->last_collector_time; + int64 m_time = cc_scheduler->last_mutator_time; + int64 d_time = cc_scheduler->time_delay_to_start_mark; + + if(num_active_marker == 0) num_active_marker = 1; + + if((c_time + d_time) > m_time || (float)d_time < (m_time * 0.25)){ + TRACE2("gc.con.cs","[GC][Con] increase marker number."); + num_active_marker ++; + if(num_active_marker > gc->num_markers) num_active_marker = gc->num_markers; + }else if((float)d_time > (m_time * 0.6)){ + TRACE2("gc.con.cs","[GC][Con] decrease marker number."); + num_active_marker --; + if(num_active_marker == 0) num_active_marker = 1; + } + + TRACE2("gc.con.cs","[GC][Con] ctime "<<(unsigned)(c_time>>10)<<" mtime "<<(unsigned)(m_time>>10)<<" dtime "<<(unsigned)(d_time>>10)); + TRACE2("gc.con.cs","[GC][Con] marker num : "<last_marker_num = num_active_marker; + return num_active_marker; +} + Index: src/common/concurrent_collection_scheduler.h =================================================================== --- src/common/concurrent_collection_scheduler.h (revision 0) +++ src/common/concurrent_collection_scheduler.h (revision 0) @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CONCURRENT_COLLECTION_SCHEDULER_H_ +#define __CONCURRENT_COLLECTION_SCHEDULER_H_ + +#define STAT_SAMPLE_WINDOW_SIZE 5 + +typedef struct Con_Collection_Scheduler { + /*common field*/ + GC* gc; + + /*concurrent scheduler */ + int64 time_delay_to_start_mark; + POINTER_SIZE_INT space_threshold_to_start_mark; + + int64 last_mutator_time; + int64 last_collector_time; + + unsigned int last_marker_num; + + unsigned int num_window_slots; + unsigned int last_window_index; + + float alloc_rate_window[STAT_SAMPLE_WINDOW_SIZE]; + float trace_rate_window[STAT_SAMPLE_WINDOW_SIZE]; + float space_utilization_ratio[STAT_SAMPLE_WINDOW_SIZE]; + POINTER_SIZE_INT trace_load_window[STAT_SAMPLE_WINDOW_SIZE]; + POINTER_SIZE_INT alloc_load_window[STAT_SAMPLE_WINDOW_SIZE]; +} Con_Collection_Scheduler; + +void con_collection_scheduler_initialize(GC* gc); +void con_collection_scheduler_destruct(GC* gc); + +Boolean gc_sched_con_collection(GC* gc, unsigned int gc_cause); +void gc_update_con_collection_scheduler(GC* gc, int64 time_mutator, int64 time_collection); + +void gc_decide_cc_scheduler_kind(char* cc_scheduler); +void gc_set_default_cc_scheduler_kind(); +#endif + Index: src/common/gc_common.cpp =================================================================== --- src/common/gc_common.cpp (revision 650025) +++ src/common/gc_common.cpp (working copy) @@ -36,6 +36,7 @@ unsigned int Cur_Forward_Bit = 0x2; unsigned int SPACE_ALLOC_UNIT; +Boolean IGNORE_FORCE_GC = FALSE; void gc_assign_free_area_to_mutators(GC* gc) { @@ -92,141 +93,137 @@ } +void gc_update_space_stat(GC_MS* gc) +{ +#ifdef USE_UNIQUE_MARK_SWEEP_GC + gc_ms_update_space_stat((GC_MS*)gc); +#endif +} + +void gc_reset_space_stat(GC_MS* gc) +{ +#ifdef USE_UNIQUE_MARK_SWEEP_GC + gc_ms_reset_space_stat((GC_MS*)gc); +#endif +} + void gc_prepare_rootset(GC* gc) { /* Stop the threads and collect the roots. */ - lock(gc->enumerate_rootset_lock); INFO2("gc.process", "GC: stop the threads and enumerate rootset ...\n"); gc_clear_rootset(gc); gc_reset_rootset(gc); vm_enumerate_root_set_all_threads(); gc_copy_interior_pointer_table_to_rootset(); gc_set_rootset(gc); - unlock(gc->enumerate_rootset_lock); +} +void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection) +{ + if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc); + + /* Clear rootset pools here rather than in each collection algorithm */ + gc_clear_rootset(gc); + + if(!gc_is_specify_con_gc()) gc_metadata_verify(gc, FALSE); + + if(!IGNORE_FINREF ){ + INFO2("gc.process", "GC: finref process after collection ...\n"); + gc_put_finref_to_vm(gc); + gc_reset_finref_metadata(gc); + gc_activate_finref_threads((GC*)gc); +#ifndef BUILD_IN_REFERENT + } else { + gc_clear_weakref_pools(gc); + gc_clear_finref_repset_pool(gc); +#endif + } + + gc_update_space_stat((GC_MS*)gc); + + gc_update_collection_scheduler(gc, time_mutator, time_collection); + + gc_reset_space_stat((GC_MS*)gc); + + gc_reset_collector_state(gc); + + gc_clear_dirty_set(gc); + + vm_reclaim_native_objs(); + gc->in_collection = FALSE; + } void gc_reclaim_heap(GC* gc, unsigned int gc_cause) -{ +{ INFO2("gc.process", "\nGC: GC start ...\n"); - + collection_start_time = time_now(); - int64 mutator_time = collection_start_time - collection_end_time; + int64 time_mutator = collection_start_time - collection_end_time; - /* FIXME:: before mutators suspended, the ops below should be very careful - to avoid racing with mutators. 
*/ gc->num_collections++; gc->cause = gc_cause; + if(gc_is_specify_con_gc()){ + gc_finish_con_GC(gc, time_mutator); + collection_end_time = time_now(); + INFO2("gc.process", "GC: GC end\n"); + return; + } + + /* FIXME:: before mutators suspended, the ops below should be very careful + to avoid racing with mutators. */ + gc_decide_collection_kind(gc, gc_cause); #ifdef MARK_BIT_FLIPPING if(collect_is_minor()) mark_bit_flip(); #endif - if(!USE_CONCURRENT_GC){ - gc_metadata_verify(gc, TRUE); + gc_metadata_verify(gc, TRUE); #ifndef BUILD_IN_REFERENT - gc_finref_metadata_verify((GC*)gc, TRUE); + gc_finref_metadata_verify((GC*)gc, TRUE); #endif - } - int disable_count = hythread_reset_suspend_disable(); + /* Stop the threads and collect the roots. */ + lock(gc->lock_enum); + int disable_count = hythread_reset_suspend_disable(); + gc_set_rootset_type(ROOTSET_IS_REF); gc_prepare_rootset(gc); + unlock(gc->lock_enum); + + gc->in_collection = TRUE; - if(USE_CONCURRENT_GC && gc_sweep_is_concurrent()){ - if(gc_is_concurrent_sweep_phase()) - gc_finish_concurrent_sweep(gc); - }else{ - if(USE_CONCURRENT_GC && gc_is_concurrent_mark_phase()){ - gc_finish_concurrent_mark(gc, TRUE); - } + /* this has to be done after all mutators are suspended */ + gc_reset_mutator_context(gc); - gc->in_collection = TRUE; - - /* this has to be done after all mutators are suspended */ - gc_reset_mutator_context(gc); - - if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc); + if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc); #if defined(USE_UNIQUE_MARK_SWEEP_GC) - gc_ms_reclaim_heap((GC_MS*)gc); + gc_ms_reclaim_heap((GC_MS*)gc); #elif defined(USE_UNIQUE_MOVE_COMPACT_GC) - gc_mc_reclaim_heap((GC_MC*)gc); + gc_mc_reclaim_heap((GC_MC*)gc); #else - gc_gen_reclaim_heap((GC_Gen*)gc, collection_start_time); + gc_gen_reclaim_heap((GC_Gen*)gc, collection_start_time); #endif - } - collection_end_time = time_now(); + int64 time_collection = collection_end_time - collection_start_time; + #if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC) - gc_gen_collection_verbose_info((GC_Gen*)gc, collection_end_time - collection_start_time, mutator_time); + gc_gen_collection_verbose_info((GC_Gen*)gc, time_collection, time_mutator); gc_gen_space_verbose_info((GC_Gen*)gc); #endif - if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc); - - int64 collection_time = 0; - if(USE_CONCURRENT_GC && gc_mark_is_concurrent()){ - collection_time = gc_get_concurrent_mark_time(gc); - gc_reset_concurrent_mark(gc); - }else{ - collection_time = time_now()-collection_start_time; - } + gc_reset_after_collection(gc, time_mutator, time_collection); - if(USE_CONCURRENT_GC && gc_sweep_is_concurrent()){ - gc_reset_concurrent_sweep(gc); - } - -#if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC) - if(USE_CONCURRENT_GC && gc_need_start_concurrent_mark(gc)) - gc_start_concurrent_mark(gc); -#endif - - /* Clear rootset pools here rather than in each collection algorithm */ - gc_clear_rootset(gc); - - gc_metadata_verify(gc, FALSE); - - if(!IGNORE_FINREF ){ - INFO2("gc.process", "GC: finref process after collection ...\n"); - gc_put_finref_to_vm(gc); - gc_reset_finref_metadata(gc); - gc_activate_finref_threads((GC*)gc); -#ifndef BUILD_IN_REFERENT - } else { - gc_clear_weakref_pools(gc); - gc_clear_finref_repset_pool(gc); -#endif - } - -#ifdef USE_UNIQUE_MARK_SWEEP_GC - gc_ms_update_space_statistics((GC_MS*)gc); -#endif - gc_assign_free_area_to_mutators(gc); - if(USE_CONCURRENT_GC) gc_update_collection_scheduler(gc, mutator_time, collection_time); - 
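With this hunk the STW entry point short-circuits into the concurrent machinery when concurrent GC is configured, and the post-collection bookkeeping that used to be duplicated is funnelled through the new gc_reset_after_collection(). A toy skeleton of that control flow, kept compilable by replacing the real GC structure and helpers with local stand-ins (the trailing underscore marks a stub for something the patch defines elsewhere; only the call order mirrors the patch):

#include <cstdint>

struct GCState {                       // stand-in for the real GC structure
  int64_t  collection_end_time = 0;
  unsigned cause = 0;
  unsigned num_collections = 0;
};

static bool    gc_is_specify_con_gc_(const GCState&)                  { return false; }
static int64_t time_now_()                                            { return 0; }
static void    gc_finish_con_GC_(GCState&, int64_t)                   {}
static void    stw_prepare_rootset_(GCState&)                         {}
static void    stw_reclaim_heap_(GCState&)                            {}
static void    gc_reset_after_collection_(GCState&, int64_t, int64_t) {}

void gc_reclaim_heap_sketch(GCState& gc, unsigned gc_cause)
{
  int64_t start = time_now_();
  int64_t time_mutator = start - gc.collection_end_time;
  gc.num_collections++;
  gc.cause = gc_cause;

  if (gc_is_specify_con_gc_(gc)) {         // concurrent GC configured:
    gc_finish_con_GC_(gc, time_mutator);   // finish whatever concurrent phase is pending
    gc.collection_end_time = time_now_();
    return;                                // no STW reclamation on this path
  }

  stw_prepare_rootset_(gc);                // stop the world, enumerate roots
  stw_reclaim_heap_(gc);                   // mark-sweep / move-compact / gen reclaim
  gc.collection_end_time = time_now_();
  int64_t time_collection = gc.collection_end_time - start;
  gc_reset_after_collection_(gc, time_mutator, time_collection);  // shared epilogue
}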
-#ifdef USE_UNIQUE_MARK_SWEEP_GC - gc_ms_reset_space_statistics((GC_MS*)gc); -#endif - - vm_reclaim_native_objs(); - gc->in_collection = FALSE; - - gc_reset_collector_state(gc); - - gc_clear_dirty_set(gc); - vm_resume_threads_after(); assert(hythread_is_suspend_enabled()); hythread_set_suspend_disable(disable_count); INFO2("gc.process", "GC: GC end\n"); - int64 pause_time = time_now()-collection_start_time; - INFO2("gc.con","pause time: "<<((unsigned int)(pause_time>>10))<<" ms \n"); return; } @@ -234,3 +231,4 @@ + Index: src/common/gc_common.h =================================================================== --- src/common/gc_common.h (revision 650025) +++ src/common/gc_common.h (working copy) @@ -427,10 +427,10 @@ unsigned int gc_concurrent_status; /*concurrent GC status: only support CONCURRENT_MARK_PHASE now*/ Collection_Scheduler* collection_scheduler; - SpinLock concurrent_mark_lock; - SpinLock enumerate_rootset_lock; - SpinLock concurrent_sweep_lock; - SpinLock collection_scheduler_lock; + SpinLock lock_con_mark; + SpinLock lock_enum; + SpinLock lock_con_sweep; + SpinLock lock_collect_sched; /* system info */ unsigned int _system_alloc_unit; Index: src/common/gc_concurrent.cpp =================================================================== --- src/common/gc_concurrent.cpp (revision 650025) +++ src/common/gc_concurrent.cpp (working copy) @@ -27,160 +27,105 @@ #include "gc_concurrent.h" #include "../common/gc_for_barrier.h" -Boolean USE_CONCURRENT_GC = FALSE; -Boolean USE_CONCURRENT_ENUMERATION = FALSE; -Boolean USE_CONCURRENT_MARK = FALSE; -Boolean USE_CONCURRENT_SWEEP = FALSE; - -volatile Boolean concurrent_mark_phase = FALSE; +volatile Boolean concurrent_in_marking = FALSE; +volatile Boolean concurrent_in_sweeping = FALSE; volatile Boolean mark_is_concurrent = FALSE; -volatile Boolean concurrent_sweep_phase = FALSE; volatile Boolean sweep_is_concurrent = FALSE; -volatile Boolean gc_sweeping_global_normal_chunk = FALSE; +volatile Boolean gc_sweep_global_normal_chunk = FALSE; -unsigned int CONCURRENT_ALGO = 0; - -static void gc_check_concurrent_mark(GC* gc) +static void gc_check_con_mark(GC* gc) { if(!is_mark_finished(gc)){ - lock(gc->concurrent_mark_lock); -#ifndef USE_UNIQUE_MARK_SWEEP_GC - gc_gen_start_concurrent_mark((GC_Gen*)gc); -#else - if(gc_concurrent_match_algorithm(OTF_REM_OBJ_SNAPSHOT_ALGO)){ - gc_ms_start_concurrent_mark((GC_MS*)gc, MIN_NUM_MARKERS); - }else if(gc_concurrent_match_algorithm(OTF_REM_NEW_TARGET_ALGO)){ - gc_ms_start_concurrent_mark((GC_MS*)gc, MIN_NUM_MARKERS); - }else if(gc_concurrent_match_algorithm(MOSTLY_CONCURRENT_ALGO)){ - //ignore. + lock(gc->lock_con_mark); + if(gc_is_kind(ALGO_CON_OTF_OBJ)){ + gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS); + }else if(gc_is_kind(ALGO_CON_OTF_REF)){ + gc_ms_start_con_mark((GC_MS*)gc, MIN_NUM_MARKERS); + }else if(gc_is_kind(ALGO_CON_MOSTLY)){ + //ignore. } -#endif - unlock(gc->concurrent_mark_lock); + unlock(gc->lock_con_mark); } } -void gc_check_mutator_barrier(GC* gc){ - lock(gc->mutator_list_lock); - - Mutator *mutator = gc->mutator_list; - while(mutator){ - wait_mutator_signal(mutator, MUTATOR_ENTER_BARRIER); - mutator = mutator->next; - } - - unlock(gc->mutator_list_lock); -} - -static void gc_wait_concurrent_mark_finish(GC* gc) +static void gc_wait_con_mark_finish(GC* gc) { wait_mark_finish(gc); - gc_set_barrier_function(WRITE_BARRIER_REM_NIL); - //mem_fence(); we do not need memory fence here. 
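Throughout the gc_concurrent.cpp changes, each concurrent-mark algorithm is paired with one write barrier when marking starts, and the barrier is dropped back to the null one in gc_wait_con_mark_finish(). A minimal sketch of that pairing; the WB_* values are the ones this patch introduces in gc_for_barrier.h, while the Con_Algo enum below is a simplification of the ALGO_CON_* bits tested through gc_is_kind():

// Barrier chosen per concurrent-mark algorithm, mirroring gc_start_con_mark().
enum Write_Barrier_Function {
  WB_REM_NIL = 0x00, WB_REM_SOURCE_OBJ = 0x01, WB_REM_SOURCE_REF = 0x02,
  WB_REM_OLD_VAR = 0x03, WB_REM_NEW_VAR = 0x04, WB_REM_OBJ_SNAPSHOT = 0x05
};

enum Con_Algo { ALGO_CON_OTF_OBJ, ALGO_CON_OTF_REF, ALGO_CON_MOSTLY };  // simplified

static Write_Barrier_Function barrier_for(Con_Algo algo)
{
  switch (algo) {
    case ALGO_CON_OTF_OBJ: return WB_REM_OBJ_SNAPSHOT; // snapshot the holding object's references
    case ALGO_CON_OTF_REF: return WB_REM_OLD_VAR;      // remember the overwritten reference
    case ALGO_CON_MOSTLY:  return WB_REM_SOURCE_OBJ;   // dirty the source object, rescan in the final pause
  }
  return WB_REM_NIL;                                   // no concurrent mark running
}

Once wait_mark_finish() returns, the patch installs WB_REM_NIL again before clearing the concurrent status.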
- gc_check_mutator_barrier(gc); - gc_set_concurrent_status(gc,GC_CONCURRENT_STATUS_NIL); + gc_set_barrier_function(WB_REM_NIL); + gc_set_concurrent_status(gc,GC_CON_STATUS_NIL); } -void gc_start_concurrent_mark(GC* gc) +unsigned int gc_decide_marker_number(GC* gc); + +void gc_start_con_mark(GC* gc) { int disable_count; unsigned int num_marker; - if(!try_lock(gc->concurrent_mark_lock) || gc_mark_is_concurrent()) return; + if(!try_lock(gc->lock_con_mark) || gc_mark_is_concurrent()) return; - /*prepare rootset*/ - if(TRUE){ - lock(gc->enumerate_rootset_lock); - gc_metadata_verify(gc, TRUE); - gc_reset_rootset(gc); - disable_count = hythread_reset_suspend_disable(); - vm_enumerate_root_set_all_threads(); - gc_copy_interior_pointer_table_to_rootset(); - gc_set_rootset(gc); - }else{ - gc_clear_remset((GC*)gc); - if(!IGNORE_FINREF){ - gc_copy_finaliable_obj_to_rootset(gc); - } - gc->root_set = NULL; - } - gc_set_concurrent_status(gc, GC_CONCURRENT_MARK_PHASE); + lock(gc->lock_enum); + disable_count = hythread_reset_suspend_disable(); + int64 pause_start = time_now(); + gc_set_rootset_type(ROOTSET_IS_OBJ); + gc_prepare_rootset(gc); + + gc_set_concurrent_status(gc, GC_CON_MARK_PHASE); -#ifndef USE_UNIQUE_MARK_SWEEP_GC - gc_decide_collection_kind((GC*)gc, GC_CAUSE_NIL); -#endif - num_marker = gc_decide_marker_number(gc); /*start concurrent mark*/ -#ifndef USE_UNIQUE_MARK_SWEEP_GC - gc_gen_start_concurrent_mark((GC_Gen*)gc); -#else - if(gc_concurrent_match_algorithm(OTF_REM_OBJ_SNAPSHOT_ALGO)){ - gc_set_barrier_function(WRITE_BARRIER_REM_OBJ_SNAPSHOT); - gc_check_mutator_barrier(gc); - gc_ms_start_concurrent_mark((GC_MS*)gc, num_marker); - }else if(gc_concurrent_match_algorithm(MOSTLY_CONCURRENT_ALGO)){ - gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_OBJ); - gc_check_mutator_barrier(gc); - gc_ms_start_most_concurrent_mark((GC_MS*)gc, num_marker); - }else if(gc_concurrent_match_algorithm(OTF_REM_NEW_TARGET_ALGO)){ - gc_set_barrier_function(WRITE_BARRIER_REM_OLD_VAR); - gc_check_mutator_barrier(gc); - gc_ms_start_concurrent_mark((GC_MS*)gc, num_marker); + if(gc_is_kind(ALGO_CON_OTF_OBJ)){ + gc_set_barrier_function(WB_REM_OBJ_SNAPSHOT); + gc_ms_start_con_mark((GC_MS*)gc, num_marker); + }else if(gc_is_kind(ALGO_CON_MOSTLY)){ + gc_set_barrier_function(WB_REM_SOURCE_OBJ); + gc_ms_start_mostly_con_mark((GC_MS*)gc, num_marker); + }else if(gc_is_kind(ALGO_CON_OTF_REF)){ + gc_set_barrier_function(WB_REM_OLD_VAR); + gc_ms_start_con_mark((GC_MS*)gc, num_marker); } -#endif - if(TRUE){ - unlock(gc->enumerate_rootset_lock); - vm_resume_threads_after(); - assert(hythread_is_suspend_enabled()); - hythread_set_suspend_disable(disable_count); - } + unlock(gc->lock_enum); + INFO2("gc.con.time","[GC][Con]pause(enumeration root): "<<((unsigned int)((time_now()-pause_start)>>10))<<" ms "); + vm_resume_threads_after(); + assert(hythread_is_suspend_enabled()); + hythread_set_suspend_disable(disable_count); - unlock(gc->concurrent_mark_lock); + unlock(gc->lock_con_mark); } -void wspace_mark_scan_mostly_concurrent_reset(); -void wspace_mark_scan_mostly_concurrent_terminate(); +void mostly_con_mark_terminate_reset(); +void terminate_mostly_con_mark(); -void gc_finish_concurrent_mark(GC* gc, Boolean is_STW) +void gc_finish_con_mark(GC* gc, Boolean need_STW) { - gc_check_concurrent_mark(gc); + gc_check_con_mark(gc); - if(gc_concurrent_match_algorithm(MOSTLY_CONCURRENT_ALGO)) - wspace_mark_scan_mostly_concurrent_terminate(); + if(gc_is_kind(ALGO_CON_MOSTLY)) + terminate_mostly_con_mark(); - gc_wait_concurrent_mark_finish(gc); + 
gc_wait_con_mark_finish(gc); int disable_count; - if(!is_STW){ + if(need_STW){ /*suspend the mutators.*/ - lock(gc->enumerate_rootset_lock); - gc_clear_rootset(gc); - gc_metadata_verify(gc, TRUE); - gc_reset_rootset(gc); - disable_count = hythread_reset_suspend_disable(); - vm_enumerate_root_set_all_threads(); - gc_copy_interior_pointer_table_to_rootset(); - gc_set_rootset(gc); + lock(gc->lock_enum); + if(gc_is_kind(ALGO_CON_MOSTLY)){ + /*In mostly concurrent algorithm, there's a final marking pause. + Prepare root set for final marking.*/ + disable_count = hythread_reset_suspend_disable(); + gc_set_rootset_type(ROOTSET_IS_OBJ); + gc_prepare_rootset(gc); + }else{ + disable_count = vm_suspend_all_threads(); + } } - - if(gc_concurrent_match_algorithm(MOSTLY_CONCURRENT_ALGO)){ - /*If gc use mostly concurrent algorithm, there's a final marking pause. + if(gc_is_kind(ALGO_CON_MOSTLY)){ + /*In mostly concurrent algorithm, there's a final marking pause. Suspend the mutators once again and finish the marking phase.*/ -// int disable_count; -// if(!is_STW){ -// /*suspend the mutators.*/ -// lock(gc->enumerate_rootset_lock); -// gc_metadata_verify(gc, TRUE); -// gc_reset_rootset(gc); -// disable_count = hythread_reset_suspend_disable(); -// vm_enumerate_root_set_all_threads(); -// gc_copy_interior_pointer_table_to_rootset(); -// gc_set_rootset(gc); -// } /*prepare dirty object*/ gc_prepare_dirty_set(gc); @@ -188,40 +133,34 @@ gc_set_weakref_sets(gc); /*start STW mark*/ -#ifndef USE_UNIQUE_MARK_SWEEP_GC - assert(0); -#else - gc_ms_start_final_mark_after_concurrent((GC_MS*)gc, MIN_NUM_MARKERS); -#endif + gc_ms_start_mostly_con_final_mark((GC_MS*)gc, MIN_NUM_MARKERS); - wspace_mark_scan_mostly_concurrent_reset(); + mostly_con_mark_terminate_reset(); gc_clear_dirty_set(gc); -// if(!is_STW){ -// unlock(gc->enumerate_rootset_lock); -// vm_resume_threads_after(); -// assert(hythread_is_suspend_enabled()); -// hythread_set_suspend_disable(disable_count); -// } } + gc_reset_dirty_set(gc); - if(!is_STW){ - unlock(gc->enumerate_rootset_lock); - vm_resume_threads_after(); - assert(hythread_is_suspend_enabled()); - hythread_set_suspend_disable(disable_count); + if(need_STW){ + unlock(gc->lock_enum); + if(gc_is_kind(ALGO_CON_MOSTLY)){ + vm_resume_threads_after(); + assert(hythread_is_suspend_enabled()); + hythread_set_suspend_disable(disable_count); + }else{ + vm_resume_all_threads(disable_count); + } } - gc_reset_dirty_set(gc); } -void gc_reset_concurrent_mark(GC* gc) +void gc_reset_con_mark(GC* gc) { gc->num_active_markers = 0; gc_mark_unset_concurrent(); } -int64 gc_get_concurrent_mark_time(GC* gc) +int64 gc_get_con_mark_time(GC* gc) { int64 time_mark = 0; Marker** markers = gc->markers; @@ -236,11 +175,10 @@ return time_mark; } -void gc_start_concurrent_sweep(GC* gc) +void gc_start_con_sweep(GC* gc) { + if(!try_lock(gc->lock_con_sweep) || gc_sweep_is_concurrent()) return; - if(!try_lock(gc->concurrent_sweep_lock) || gc_sweep_is_concurrent()) return; - /*FIXME: enable finref*/ if(!IGNORE_FINREF ){ gc_set_obj_with_fin(gc); @@ -253,7 +191,7 @@ #endif } - gc_set_concurrent_status(gc, GC_CONCURRENT_SWEEP_PHASE); + gc_set_concurrent_status(gc, GC_CON_SWEEP_PHASE); gc_set_weakref_sets(gc); @@ -262,120 +200,142 @@ gc_identify_dead_weak_roots(gc); /*start concurrent mark*/ -#ifndef USE_UNIQUE_MARK_SWEEP_GC - assert(0); -#else - gc_ms_start_concurrent_sweep((GC_MS*)gc, MIN_NUM_MARKERS); -#endif + gc_ms_start_con_sweep((GC_MS*)gc, MIN_NUM_MARKERS); - unlock(gc->concurrent_sweep_lock); + unlock(gc->lock_con_sweep); } 
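Seen end to end, the new scheduler drives a simple phase progression: idle, then concurrent mark once the time-based or space-based trigger fires, then concurrent sweep once marking has finished (when concurrent sweep is enabled), then a short STW reset. A self-contained model of the decisions gc_sched_con_collection() makes, with the heap state reduced to a few plain fields; the struct and function names below are illustrative, the conditions follow the gc_need_start_con_* predicates in concurrent_collection_scheduler.cpp:

#include <cstdint>

struct ConSchedState {
  bool     mark_is_concurrent  = false;  // concurrent mark has been started this cycle
  bool     in_marking          = false;  // markers still running
  bool     sweep_is_concurrent = false;  // concurrent sweep has been started this cycle
  bool     in_sweeping         = false;  // sweepers still running
  int64_t  time_since_last_gc  = 0;      // mutator time since the last collection end
  int64_t  delay_to_start_mark = 0;      // time-based threshold from the scheduler
  uint64_t new_obj_bytes       = 0;      // allocated since the last collection
  uint64_t space_threshold     = ~0ull;  // space-based threshold from the scheduler
};

enum SchedAction { SCHED_NONE, SCHED_START_MARK, SCHED_START_SWEEP, SCHED_RESET };

static SchedAction next_action(const ConSchedState& s)
{
  // 1. start concurrent mark when either trigger fires
  if (!s.mark_is_concurrent &&
      (s.time_since_last_gc > s.delay_to_start_mark ||
       s.new_obj_bytes > s.space_threshold))
    return SCHED_START_MARK;
  // 2. start concurrent sweep once marking has finished
  if (s.mark_is_concurrent && !s.in_marking && !s.sweep_is_concurrent)
    return SCHED_START_SWEEP;
  // 3. after sweeping finishes, do the short STW reset of the cycle
  if (s.sweep_is_concurrent && !s.in_sweeping)
    return SCHED_RESET;
  return SCHED_NONE;
}

In the patch these checks run with gc->lock_collect_sched held, and the two thresholds are refreshed by gc_update_con_collection_scheduler() at the end of every cycle.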
-void gc_reset_concurrent_sweep(GC* gc) +void gc_reset_con_sweep(GC* gc) { gc->num_active_collectors = 0; gc_sweep_unset_concurrent(); } -void gc_wait_concurrent_sweep_finish(GC* gc) +void gc_wait_con_sweep_finish(GC* gc) { wait_collection_finish(gc); - gc_set_concurrent_status(gc,GC_CONCURRENT_STATUS_NIL); + gc_set_concurrent_status(gc,GC_CON_STATUS_NIL); } -void gc_finish_concurrent_sweep(GC * gc) +void gc_finish_con_sweep(GC * gc) { - gc_wait_concurrent_sweep_finish(gc); + gc_wait_con_sweep_finish(gc); } -void gc_check_concurrent_phase(GC * gc) +void gc_try_finish_con_phase(GC * gc) { /*Note: we do not finish concurrent mark here if we do not want to start concurrent sweep.*/ - if(gc_is_concurrent_mark_phase(gc) && is_mark_finished(gc) && USE_CONCURRENT_SWEEP){ + if(gc_con_is_in_marking(gc) && is_mark_finished(gc)){ /*Although all conditions above are satisfied, we can not guarantee concurrent marking is finished. Because, sometimes, the concurrent marking has not started yet. We check the concurrent mark lock here to guarantee this occasional case.*/ - if(try_lock(gc->concurrent_mark_lock)){ - unlock(gc->concurrent_mark_lock); - gc_finish_concurrent_mark(gc, FALSE); + if(try_lock(gc->lock_con_mark)){ + unlock(gc->lock_con_mark); + gc_finish_con_mark(gc, TRUE); } } - if(gc_is_concurrent_sweep_phase(gc) && is_collector_finished(gc)){ + if(gc_con_is_in_sweeping(gc) && is_collector_finished(gc)){ //The reason is same as concurrent mark above. - if(try_lock(gc->concurrent_sweep_lock)){ - unlock(gc->concurrent_sweep_lock); - gc_finish_concurrent_sweep(gc); + if(try_lock(gc->lock_con_sweep)){ + unlock(gc->lock_con_sweep); + gc_finish_con_sweep(gc); } } } +void gc_reset_after_collection(GC* gc, int64 time_mutator, int64 time_collection); -void gc_reset_after_concurrent_collection(GC* gc) +void gc_reset_after_con_collect(GC* gc) { + assert(gc_is_specify_con_gc()); + + int64 time_mutator = gc_get_mutator_time(gc); + int64 time_collection = gc_get_collector_time(gc) + gc_get_marker_time(gc); - int64 mutator_time = gc_get_mutator_time(gc); - int64 collection_time = gc_get_collector_time(gc) + gc_get_marker_time(gc); - - /*FIXME: enable concurrent GEN mode.*/ gc_reset_interior_pointer_table(); - if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc); + + gc_reset_after_collection(gc, time_mutator, time_collection); - /* Clear rootset pools here rather than in each collection algorithm */ - gc_clear_rootset(gc); - - if(!IGNORE_FINREF ){ - INFO2("gc.process", "GC: finref process after collection ...\n"); - gc_put_finref_to_vm(gc); - gc_reset_finref_metadata(gc); - gc_activate_finref_threads((GC*)gc); -#ifndef BUILD_IN_REFERENT - } else { - gc_clear_weakref_pools(gc); - gc_clear_finref_repset_pool(gc); -#endif + if(gc_mark_is_concurrent()){ + gc_reset_con_mark(gc); } -#ifdef USE_UNIQUE_MARK_SWEEP_GC - gc_ms_update_space_statistics((GC_MS*)gc); -#endif + if(gc_sweep_is_concurrent()){ + gc_reset_con_sweep(gc); + } +} - gc_reset_collector_state(gc); +void gc_finish_con_GC(GC* gc, int64 time_mutator) +{ + int64 time_collection_start = time_now(); + + gc->num_collections++; - gc_clear_dirty_set(gc); + lock(gc->lock_enum); - vm_reclaim_native_objs(); - gc->in_collection = FALSE; + int disable_count = hythread_reset_suspend_disable(); + gc_set_rootset_type(ROOTSET_IS_REF); + gc_prepare_rootset(gc); + unlock(gc->lock_enum); - if(USE_CONCURRENT_GC && gc_mark_is_concurrent()){ - gc_reset_concurrent_mark(gc); + if(gc_sweep_is_concurrent()){ + if(gc_con_is_in_sweeping()) + gc_finish_con_sweep(gc); + }else{ + 
if(gc_con_is_in_marking()){ + gc_finish_con_mark(gc, FALSE); + } + gc->in_collection = TRUE; + gc_reset_mutator_context(gc); + if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc); + gc_ms_reclaim_heap((GC_MS*)gc); } + + int64 time_collection = 0; + if(gc_mark_is_concurrent()){ + time_collection = gc_get_con_mark_time(gc); + gc_reset_con_mark(gc); + }else{ + time_collection = time_now()-time_collection_start; + } - if(USE_CONCURRENT_GC && gc_sweep_is_concurrent()){ - gc_reset_concurrent_sweep(gc); + if(gc_sweep_is_concurrent()){ + gc_reset_con_sweep(gc); } + + gc_reset_after_collection(gc, time_mutator, time_collection); - gc_update_collection_scheduler(gc, mutator_time, collection_time); + gc_start_mutator_time_measure(gc); + + vm_resume_threads_after(); + assert(hythread_is_suspend_enabled()); + hythread_set_suspend_disable(disable_count); + int64 pause_time = time_now()-time_collection_start; -#ifdef USE_UNIQUE_MARK_SWEEP_GC - gc_ms_reset_space_statistics((GC_MS*)gc); -#endif + if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause){ + INFO2("gc.con.time","[GC][Con]pause( Forcing GC ): "<<((unsigned int)(pause_time>>10))<<" ms "); + }else{ + INFO2("gc.con.time","[GC][Con]pause( Heap exhuasted ): "<<((unsigned int)(pause_time>>10))<<" ms "); + } + return; } -void gc_decide_concurrent_algorithm(char* concurrent_algo) +void gc_set_default_con_algo() { - if(!concurrent_algo){ - CONCURRENT_ALGO = OTF_REM_OBJ_SNAPSHOT_ALGO; - }else{ - string_to_upper(concurrent_algo); - - if(!strcmp(concurrent_algo, "OTF_OBJ")){ - CONCURRENT_ALGO = OTF_REM_OBJ_SNAPSHOT_ALGO; - - }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){ - CONCURRENT_ALGO = MOSTLY_CONCURRENT_ALGO; - }else if(!strcmp(concurrent_algo, "OTF_SLOT")){ - CONCURRENT_ALGO = OTF_REM_NEW_TARGET_ALGO; - } + assert((GC_PROP & ALGO_CON_MASK) == 0); + GC_PROP |= ALGO_CON_OTF_OBJ; +} + +void gc_decide_con_algo(char* concurrent_algo) +{ + string_to_upper(concurrent_algo); + GC_PROP &= ~ALGO_CON_MASK; + if(!strcmp(concurrent_algo, "OTF_OBJ")){ + GC_PROP |= ALGO_CON_OTF_OBJ; + }else if(!strcmp(concurrent_algo, "MOSTLY_CON")){ + GC_PROP |= ALGO_CON_MOSTLY; + }else if(!strcmp(concurrent_algo, "OTF_SLOT")){ + GC_PROP |= ALGO_CON_OTF_REF; } } Index: src/common/gc_concurrent.h =================================================================== --- src/common/gc_concurrent.h (revision 650025) +++ src/common/gc_concurrent.h (working copy) @@ -20,52 +20,49 @@ #include "gc_common.h" enum GC_CONCURRENT_STATUS{ - GC_CONCURRENT_STATUS_NIL = 0x00, - GC_CONCURRENT_MARK_PHASE = 0x01, - GC_CONCURRENT_MARK_FINAL_PAUSE_PHASE = 0x11, // for mostly concurrent only. - GC_CONCURRENT_SWEEP_PHASE = 0x02 + GC_CON_STATUS_NIL = 0x00, + GC_CON_MARK_PHASE = 0x01, + GC_MOSTLY_CON_FINAL_MARK_PHASE = 0x11, // for mostly concurrent only. 
+ GC_CON_SWEEP_PHASE = 0x02 }; enum HANDSHAKE_SINGAL{ - HANDSHAKE_NIL = 0x00, - - /*mutator to collector*/ - ENABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS = 0x01, - DISABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS = 0x02, + HSIG_MUTATOR_SAFE = 0x0, - - ENABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS = 0x03, - DISABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS = 0x04, + HSIG_DISABLE_SWEEP_LOCAL_CHUNKS = 0x01, + HSIG_DISABLE_SWEEP_GLOBAL_CHUNKS = 0x02, + HSIG_MUTATOR_ENTER_ALLOC_MARK = 0x03, +}; - MUTATOR_ENTER_BARRIER = 0x05, - MUTATOR_EXIT_BARRIER = 0x06, +inline void gc_set_con_gc(unsigned int con_phase) +{ GC_PROP |= con_phase; } - MUTATOR_ENTER_ALLOCATION_MARK = 0x07, - MUTATOR_EXIT_ALLOCATION_MARK = 0x08 -}; +inline void gc_specify_con_enum() +{ gc_set_con_gc(ALGO_CON_ENUM); } -extern Boolean USE_CONCURRENT_GC; -extern Boolean USE_CONCURRENT_ENUMERATION; -extern Boolean USE_CONCURRENT_MARK; -extern Boolean USE_CONCURRENT_SWEEP; +inline void gc_specify_con_mark() +{ gc_set_con_gc(ALGO_CON_MARK); } -extern volatile Boolean concurrent_mark_phase; -extern volatile Boolean mark_is_concurrent; -extern volatile Boolean concurrent_sweep_phase; -extern volatile Boolean sweep_is_concurrent; -extern unsigned int CONCURRENT_ALGO; +inline void gc_specify_con_sweep() +{ gc_set_con_gc(ALGO_CON_SWEEP); } -enum CONCURRENT_MARK_ALGORITHM{ - OTF_REM_OBJ_SNAPSHOT_ALGO = 0x01, - OTF_REM_NEW_TARGET_ALGO = 0x02, - MOSTLY_CONCURRENT_ALGO = 0x03 -}; +inline Boolean gc_is_specify_con_gc() +{ return (GC_PROP & ALGO_CON) != 0; } -inline Boolean gc_concurrent_match_algorithm(unsigned int concurrent_algo) -{ - return CONCURRENT_ALGO == concurrent_algo; -} +inline Boolean gc_is_specify_con_enum() +{ return (GC_PROP & ALGO_CON_ENUM) == ALGO_CON_ENUM; } +inline Boolean gc_is_specify_con_mark() +{ return (GC_PROP & ALGO_CON_MARK) == ALGO_CON_MARK; } + +inline Boolean gc_is_specify_con_sweep() +{ return (GC_PROP & ALGO_CON_SWEEP) == ALGO_CON_SWEEP; } + +extern volatile Boolean concurrent_in_marking; +extern volatile Boolean concurrent_in_sweeping; +extern volatile Boolean mark_is_concurrent; +extern volatile Boolean sweep_is_concurrent; + inline Boolean gc_mark_is_concurrent() { return mark_is_concurrent; @@ -73,8 +70,7 @@ inline void gc_mark_set_concurrent() { - if(gc_concurrent_match_algorithm(OTF_REM_OBJ_SNAPSHOT_ALGO) - ||gc_concurrent_match_algorithm(OTF_REM_NEW_TARGET_ALGO)) + if(gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF)) gc_enable_alloc_obj_live(); mark_is_concurrent = TRUE; } @@ -85,14 +81,14 @@ mark_is_concurrent = FALSE; } -inline Boolean gc_is_concurrent_mark_phase() +inline Boolean gc_con_is_in_marking() { - return concurrent_mark_phase; + return concurrent_in_marking; } -inline Boolean gc_is_concurrent_mark_phase(GC* gc) +inline Boolean gc_con_is_in_marking(GC* gc) { - return gc->gc_concurrent_status == GC_CONCURRENT_MARK_PHASE; + return gc->gc_concurrent_status == GC_CON_MARK_PHASE; } inline Boolean gc_sweep_is_concurrent() @@ -110,62 +106,65 @@ sweep_is_concurrent = FALSE; } -inline Boolean gc_is_concurrent_sweep_phase() +inline Boolean gc_con_is_in_sweeping() { - return concurrent_sweep_phase; + return concurrent_in_sweeping; } -inline Boolean gc_is_concurrent_sweep_phase(GC* gc) +inline Boolean gc_con_is_in_sweeping(GC* gc) { - return gc->gc_concurrent_status == GC_CONCURRENT_SWEEP_PHASE; + return gc->gc_concurrent_status == GC_CON_SWEEP_PHASE; } inline void gc_set_concurrent_status(GC*gc, unsigned int status) { /*Reset status*/ - concurrent_mark_phase = FALSE; - concurrent_sweep_phase = FALSE; + concurrent_in_marking = FALSE; + 
concurrent_in_sweeping = FALSE; gc->gc_concurrent_status = status; switch(status){ - case GC_CONCURRENT_MARK_PHASE: + case GC_CON_MARK_PHASE: gc_mark_set_concurrent(); - concurrent_mark_phase = TRUE; + concurrent_in_marking = TRUE; break; - case GC_CONCURRENT_SWEEP_PHASE: + case GC_CON_SWEEP_PHASE: gc_sweep_set_concurrent(); - concurrent_sweep_phase = TRUE; + concurrent_in_sweeping = TRUE; break; default: - assert(!concurrent_mark_phase && !concurrent_sweep_phase); + assert(!concurrent_in_marking && !concurrent_in_sweeping); } return; } -void gc_reset_concurrent_mark(GC* gc); -void gc_start_concurrent_mark(GC* gc); -void gc_finish_concurrent_mark(GC* gc, Boolean is_STW); -int64 gc_get_concurrent_mark_time(GC* gc); +void gc_reset_con_mark(GC* gc); +void gc_start_con_mark(GC* gc); +void gc_finish_con_mark(GC* gc, Boolean need_STW); +int64 gc_get_con_mark_time(GC* gc); -void gc_start_concurrent_sweep(GC* gc); -void gc_finish_concurrent_sweep(GC * gc); +void gc_start_con_sweep(GC* gc); +void gc_finish_con_sweep(GC * gc); -void gc_reset_after_concurrent_collection(GC* gc); -void gc_check_concurrent_phase(GC * gc); +void gc_reset_after_con_collect(GC* gc); +void gc_try_finish_con_phase(GC * gc); -void gc_decide_concurrent_algorithm(char* concurrent_algo); +void gc_decide_con_algo(char* concurrent_algo); +void gc_set_default_con_algo(); -void gc_reset_concurrent_sweep(GC* gc); +void gc_reset_con_sweep(GC* gc); -extern volatile Boolean gc_sweeping_global_normal_chunk; +void gc_finish_con_GC(GC* gc, int64 time_mutator); -inline Boolean gc_is_sweeping_global_normal_chunk() -{ return gc_sweeping_global_normal_chunk; } +extern volatile Boolean gc_sweep_global_normal_chunk; -inline void gc_set_sweeping_global_normal_chunk() -{ gc_sweeping_global_normal_chunk = TRUE; } +inline Boolean gc_is_sweep_global_normal_chunk() +{ return gc_sweep_global_normal_chunk; } -inline void gc_unset_sweeping_global_normal_chunk() -{ gc_sweeping_global_normal_chunk = FALSE; } +inline void gc_set_sweep_global_normal_chunk() +{ gc_sweep_global_normal_chunk = TRUE; } + +inline void gc_unset_sweep_global_normal_chunk() +{ gc_sweep_global_normal_chunk = FALSE; } #endif Index: src/common/gc_for_barrier.cpp =================================================================== --- src/common/gc_for_barrier.cpp (revision 650025) +++ src/common/gc_for_barrier.cpp (working copy) @@ -229,12 +229,11 @@ { /*Concurrent Mark: Since object clone and array copy do not modify object slots, we treat it as an new object. It has already been marked when dest object was created. - We use WRITE_BARRIER_REM_SOURCE_OBJ function here to debug. + We use WB_REM_SOURCE_OBJ function here to debug. 
*/ - Mutator *mutator = (Mutator *)gc_get_tls(); - mutator_post_signal(mutator,MUTATOR_ENTER_BARRIER); - if(WRITE_BARRIER_REM_SOURCE_OBJ == write_barrier_function){ + if(WB_REM_SOURCE_OBJ == write_barrier_function){ + Mutator *mutator = (Mutator *)gc_get_tls(); lock(mutator->dirty_set_lock); obj_dirty_in_table((Partial_Reveal_Object *) p_obj_written); @@ -242,7 +241,6 @@ unlock(mutator->dirty_set_lock); } - mutator_post_signal(mutator,MUTATOR_EXIT_BARRIER); if( !gc_is_gen_mode() || !object_has_ref_field((Partial_Reveal_Object*)p_obj_written)) return; @@ -261,14 +259,11 @@ /* FIXME:: this is not the right interface for write barrier */ void gc_heap_slot_write_ref (Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target) { - //Mutator *mutator = (Mutator *)gc_get_tls(); - //mutator_post_signal(mutator,MUTATOR_ENTER_BARRIER); - switch(write_barrier_function){ - case WRITE_BARRIER_REM_NIL: + case WB_REM_NIL: *p_slot = p_target; break; - case WRITE_BARRIER_REM_SOURCE_REF: + case WB_REM_SOURCE_REF: *p_slot = p_target; #ifdef USE_REM_SLOTS gen_write_barrier_rem_slot(p_slot, p_target); @@ -276,15 +271,15 @@ gen_write_barrier_rem_obj(p_obj_holding_ref, p_target); #endif break; - case WRITE_BARRIER_REM_SOURCE_OBJ: + case WB_REM_SOURCE_OBJ: *p_slot = p_target; write_barrier_rem_source_obj(p_obj_holding_ref); break; - case WRITE_BARRIER_REM_OBJ_SNAPSHOT: + case WB_REM_OBJ_SNAPSHOT: write_barrier_rem_obj_snapshot(p_obj_holding_ref); *p_slot = p_target; break; - case WRITE_BARRIER_REM_OLD_VAR: + case WB_REM_OLD_VAR: write_barrier_rem_slot_oldvar(p_slot); *p_slot = p_target; break; @@ -292,8 +287,6 @@ assert(0); return; } - - //mutator_post_signal(mutator,MUTATOR_EXIT_BARRIER); return; } Index: src/common/gc_for_barrier.h =================================================================== --- src/common/gc_for_barrier.h (revision 650025) +++ src/common/gc_for_barrier.h (working copy) @@ -27,12 +27,12 @@ extern volatile unsigned int write_barrier_function; enum Write_Barrier_Function{ - WRITE_BARRIER_REM_NIL = 0x00, - WRITE_BARRIER_REM_SOURCE_OBJ = 0x01, - WRITE_BARRIER_REM_SOURCE_REF = 0x02, - WRITE_BARRIER_REM_OLD_VAR = 0x03, - WRITE_BARRIER_REM_NEW_VAR = 0x04, - WRITE_BARRIER_REM_OBJ_SNAPSHOT = 0x05 + WB_REM_NIL = 0x00, + WB_REM_SOURCE_OBJ = 0x01, + WB_REM_SOURCE_REF = 0x02, + WB_REM_OLD_VAR = 0x03, + WB_REM_NEW_VAR = 0x04, + WB_REM_OBJ_SNAPSHOT = 0x05 }; inline void gc_set_barrier_function(unsigned int wb_function) @@ -42,3 +42,4 @@ #endif /* _GC_FOR_BARRIER_H_ */ + Index: src/common/gc_for_class.cpp =================================================================== --- src/common/gc_for_class.cpp (revision 650025) +++ src/common/gc_for_class.cpp (working copy) @@ -159,7 +159,7 @@ Class_Handle array_element_class = class_get_array_element_class(ch); gc_set_prop_array(gcvt); - gcvt->array_elem_size = class_get_array_element_size(ch); + gcvt->array_elem_size = class_get_array_element_size(ch); unsigned int the_offset = vector_first_element_offset_unboxed(array_element_class); gcvt->array_first_elem_offset = the_offset; @@ -177,7 +177,7 @@ WeakReferenceType type = class_is_reference(ch); gc_set_prop_reference(gcvt, type); - unsigned int size = class_get_object_size(ch); + unsigned int size = class_get_object_size(ch); gcvt->gc_allocated_size = size; gcvt->gc_class_name = class_get_name(ch); @@ -202,3 +202,4 @@ + Index: src/common/gc_for_vm.cpp =================================================================== --- src/common/gc_for_vm.cpp (revision 650025) 
+++ src/common/gc_for_vm.cpp (working copy) @@ -21,11 +21,11 @@ #include #include "open/vm_properties.h" +#include "open/vm_properties.h" #include "port_sysinfo.h" #include "vm_threads.h" #include "jit_runtime_support.h" #include "compressed_ref.h" - #include "../gen/gen.h" #include "../mark_sweep/gc_ms.h" #include "../move_compact/gc_mc.h" @@ -111,8 +111,10 @@ #ifndef BUILD_IN_REFERENT gc_finref_metadata_initialize(gc); #endif - if(USE_CONCURRENT_GC){ - collection_scheduler_initialize(gc); + + collection_scheduler_initialize(gc); + + if(gc_is_specify_con_gc()){ marker_initialize(gc); } @@ -231,13 +233,16 @@ gc_weak_rootset_add_entry(p_global_gc, p_ref, is_short_weak); } +extern Boolean IGNORE_FORCE_GC; + /* VM to force GC */ void gc_force_gc() { vm_gc_lock_enum(); + + if(!IGNORE_FORCE_GC) + gc_reclaim_heap(p_global_gc, GC_CAUSE_RUNTIME_FORCE_GC); - gc_reclaim_heap(p_global_gc, GC_CAUSE_RUNTIME_FORCE_GC); - vm_gc_unlock_enum(); } Index: src/common/gc_metadata.cpp =================================================================== --- src/common/gc_metadata.cpp (revision 650025) +++ src/common/gc_metadata.cpp (working copy) @@ -33,6 +33,7 @@ #define METADATA_BLOCK_SIZE_BYTES VECTOR_BLOCK_DATA_SIZE_BYTES GC_Metadata gc_metadata; +unsigned int rootset_type; void gc_metadata_initialize(GC* gc) { @@ -283,17 +284,6 @@ mutator->rem_set = NULL; mutator = mutator->next; } - - /* put back last remset block of each collector (saved in last collection) */ - unsigned int num_active_collectors = gc->num_active_collectors; - for(unsigned int i=0; i<num_active_collectors; i++){ - Collector* collector = gc->collectors[i]; - /* 1. in the first time GC, rem_set is NULL. 2. it should be NULL when NOS is forwarding_all */ - if(collector->rem_set == NULL) continue; - pool_put_entry(metadata->collector_remset_pool, collector->rem_set); - collector->rem_set = NULL; - } assert( collect_is_major_normal() || collect_is_minor()); if( collect_is_major_normal() ){ @@ -608,10 +598,7 @@ pool_put_entry(metadata->free_set_pool,dirty_set); dirty_set = pool_get_entry(global_dirty_set_pool); } - } - - - + } } void gc_prepare_dirty_set(GC* gc) @@ -652,5 +639,19 @@ { pool_put_entry(metadata->free_set_pool, block); } +void gc_reset_collectors_rem_set(GC *gc) +{ + /* put back last remset block of each collector (saved in last collection) */ + GC_Metadata* metadata = gc->metadata; + unsigned int num_active_collectors = gc->num_active_collectors; + for(unsigned int i=0; i<num_active_collectors; i++){ + Collector* collector = gc->collectors[i]; + /* 1. in the first time GC, rem_set is NULL. 2.
it should be NULL when NOS is forwarding_all */ + if(collector->rem_set == NULL) continue; + pool_put_entry(metadata->collector_remset_pool, collector->rem_set); + collector->rem_set = NULL; + } +} Index: src/common/gc_metadata.h =================================================================== --- src/common/gc_metadata.h (revision 650025) +++ src/common/gc_metadata.h (working copy) @@ -72,8 +72,8 @@ void gc_identify_dead_weak_roots(GC *gc); void gc_update_weak_roots(GC *gc, Boolean double_fix); +void gc_reset_collectors_rem_set(GC *gc); - inline void gc_task_pool_clear(Pool* task_pool) { Vector_Block* task = pool_get_entry(task_pool); @@ -223,6 +223,19 @@ assert(gc->weakroot_set); } +extern unsigned int rootset_type; + +enum ROOTSET_TYPE{ + ROOTSET_IS_OBJ = 0x01, + ROOTSET_IS_REF = 0x02 +}; + + +inline void gc_set_rootset_type(unsigned int rs_type) +{ + rootset_type = rs_type; +} + #ifdef COMPRESS_REFERENCE inline void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref) @@ -239,7 +252,10 @@ /* construct an Uncompressed_Root */ vector_block_add_entry(uncompressed_root_set, (POINTER_SIZE_INT)p_ref); assert(!vector_block_is_full(uncompressed_root_set)); - vector_block_add_entry(uncompressed_root_set, (POINTER_SIZE_INT)ref); + if(rootset_type == ROOTSET_IS_REF) + vector_block_add_entry(uncompressed_root_set, (POINTER_SIZE_INT)ref); + else if(rootset_type == ROOTSET_IS_OBJ) + vector_block_add_entry(uncompressed_root_set, (POINTER_SIZE_INT)p_obj); if(!vector_block_is_full(uncompressed_root_set)) return; @@ -252,9 +268,15 @@ inline void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref) { assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address()); + + Partial_Reveal_Object *p_obj = *p_ref; - Vector_Block* root_set = gc->root_set; - vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_ref); + Vector_Block* root_set = gc->root_set; + + if(rootset_type == ROOTSET_IS_REF) + vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_ref); + else if(rootset_type == ROOTSET_IS_OBJ) + vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_obj); if( !vector_block_is_full(root_set)) return; Index: src/common/gc_options.cpp =================================================================== --- src/common/gc_options.cpp (revision 650025) +++ src/common/gc_options.cpp (working copy) @@ -17,6 +17,8 @@ #define LOG_DOMAIN "gc.base" #include "gc_common.h" #include "open/vm_properties.h" +#include "gc_concurrent.h" +#include "concurrent_collection_scheduler.h" /* FIXME:: need refactoring this function to distribute the options interpretation to their respective modules. 
*/ @@ -44,13 +46,8 @@ extern Boolean IGNORE_FINREF; extern Boolean JVMTI_HEAP_ITERATION ; +extern Boolean IGNORE_FORCE_GC; -extern Boolean USE_CONCURRENT_GC; -extern Boolean USE_CONCURRENT_ENUMERATION; -extern Boolean USE_CONCURRENT_MARK; -extern Boolean USE_CONCURRENT_SWEEP; - - POINTER_SIZE_INT HEAP_SIZE_DEFAULT = 256 * MB; POINTER_SIZE_INT min_heap_size_bytes = 16 * MB; POINTER_SIZE_INT max_heap_size_bytes = 0; @@ -125,7 +122,7 @@ return vm_property_get_size(property_name, 0, VM_PROPERTIES); } -void gc_decide_concurrent_algorithm(char* concurrent_algo); +void gc_decide_con_algo(char* concurrent_algo); GC* gc_gen_decide_collection_algo(char* minor_algo, char* major_algo, Boolean has_los); void gc_set_gen_mode(Boolean status); @@ -149,7 +146,7 @@ major_algo = vm_properties_get_value("gc.major_algorithm", VM_PROPERTIES); } - if (vm_property_is_set("gc.uniqe_algorithm", VM_PROPERTIES) == 1) { + if (vm_property_is_set("gc.unique_algorithm", VM_PROPERTIES) == 1) { unique_algo = vm_properties_get_value("gc.unique_algorithm", VM_PROPERTIES); } @@ -308,47 +305,77 @@ if (vm_property_is_set("gc.share_los_boundary", VM_PROPERTIES) == 1){ share_los_boundary = vm_property_get_boolean("gc.share_los_boundary"); } + + if (vm_property_is_set("gc.ignore_force_gc", VM_PROPERTIES) == 1){ + IGNORE_FORCE_GC = vm_property_get_boolean("gc.ignore_force_gc"); + } + if (vm_property_is_set("gc.concurrent_gc", VM_PROPERTIES) == 1){ Boolean use_all_concurrent_phase= vm_property_get_boolean("gc.concurrent_gc"); if(use_all_concurrent_phase){ - USE_CONCURRENT_ENUMERATION = TRUE; - USE_CONCURRENT_MARK = TRUE; - USE_CONCURRENT_SWEEP = TRUE; +#ifndef USE_UNIQUE_MARK_SWEEP_GC + DIE(( "Please define USE_UNIQUE_MARK_SWEEP_GC macro.")); +#endif + gc_specify_con_enum(); + gc_specify_con_mark(); + gc_specify_con_sweep(); gc->generate_barrier = TRUE; } } if (vm_property_is_set("gc.concurrent_enumeration", VM_PROPERTIES) == 1){ - USE_CONCURRENT_ENUMERATION= vm_property_get_boolean("gc.concurrent_enumeration"); + Boolean USE_CONCURRENT_ENUMERATION = vm_property_get_boolean("gc.concurrent_enumeration"); if(USE_CONCURRENT_ENUMERATION){ - USE_CONCURRENT_GC = TRUE; +#ifndef USE_UNIQUE_MARK_SWEEP_GC + DIE(("Please define USE_UNIQUE_MARK_SWEEP_GC macro.")); +#endif + gc_specify_con_enum(); gc->generate_barrier = TRUE; } } if (vm_property_is_set("gc.concurrent_mark", VM_PROPERTIES) == 1){ - USE_CONCURRENT_MARK= vm_property_get_boolean("gc.concurrent_mark"); + Boolean USE_CONCURRENT_MARK = vm_property_get_boolean("gc.concurrent_mark"); if(USE_CONCURRENT_MARK){ - USE_CONCURRENT_GC = TRUE; +#ifndef USE_UNIQUE_MARK_SWEEP_GC + DIE(("Please define USE_UNIQUE_MARK_SWEEP_GC macro.")); +#endif + gc_specify_con_mark(); gc->generate_barrier = TRUE; + IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/ } } if (vm_property_is_set("gc.concurrent_sweep", VM_PROPERTIES) == 1){ - USE_CONCURRENT_SWEEP= vm_property_get_boolean("gc.concurrent_sweep"); + Boolean USE_CONCURRENT_SWEEP= vm_property_get_boolean("gc.concurrent_sweep"); if(USE_CONCURRENT_SWEEP){ - USE_CONCURRENT_GC = TRUE; + /*currently, concurrent sweeping only starts after concurrent marking.*/ + assert(gc_is_specify_con_mark()); +#ifndef USE_UNIQUE_MARK_SWEEP_GC + DIE(("Please define USE_UNIQUE_MARK_SWEEP_GC macro.")); +#endif + gc_specify_con_sweep(); + IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/ } } char* concurrent_algo = NULL; if (vm_property_is_set("gc.concurrent_algorithm", VM_PROPERTIES) == 1) { - concurrent_algo = vm_properties_get_value("gc.concurrent_algorithm", 
VM_PROPERTIES); + concurrent_algo = vm_properties_get_value("gc.concurrent_algorithm", VM_PROPERTIES); + gc_decide_con_algo(concurrent_algo); + }else if(gc_is_specify_con_gc()){ + gc_set_default_con_algo(); } - - gc_decide_concurrent_algorithm(concurrent_algo); + char* cc_scheduler = NULL; + if (vm_property_is_set("gc.cc_scheduler", VM_PROPERTIES) == 1) { + cc_scheduler = vm_properties_get_value("gc.cc_scheduler", VM_PROPERTIES); + gc_decide_cc_scheduler_kind(cc_scheduler); + }else if(gc_is_specify_con_gc()){ + gc_set_default_cc_scheduler_kind(); + } + #if defined(ALLOC_ZEROING) && defined(ALLOC_PREFETCH) if(vm_property_is_set("gc.prefetch",VM_PROPERTIES) ==1) { PREFETCH_ENABLED = vm_property_get_boolean("gc.prefetch"); @@ -384,3 +411,4 @@ + Index: src/common/gc_platform.h =================================================================== --- src/common/gc_platform.h (revision 650025) +++ src/common/gc_platform.h (working copy) @@ -146,6 +146,21 @@ return (int)hythread_is_suspend_enabled(); } +inline int vm_suspend_all_threads() +{ + int disable_count = hythread_reset_suspend_disable(); + hythread_suspend_all(NULL, NULL); + hythread_suspend_disable(); + return disable_count; +} + +inline void vm_resume_all_threads(int disable_count) +{ + hythread_suspend_enable(); + hythread_resume_all(NULL); + hythread_set_suspend_disable(disable_count); +} + inline void *atomic_casptr(volatile void **mem, void *with, const void *cmp) { return apr_atomic_casptr(mem, with, cmp); } Index: src/common/gc_properties.h =================================================================== --- src/common/gc_properties.h (revision 650025) +++ src/common/gc_properties.h (working copy) @@ -23,7 +23,8 @@ GC_CAUSE_NOS_IS_FULL, GC_CAUSE_LOS_IS_FULL, GC_CAUSE_MOS_IS_FULL, - GC_CAUSE_RUNTIME_FORCE_GC + GC_CAUSE_RUNTIME_FORCE_GC, + GC_CAUSE_CONCURRENT_GC }; extern unsigned int GC_PROP; @@ -85,6 +86,10 @@ ALGO_CON_SWEEP = 0x5000000, /* ALGO_CON|0x4000000 */ ALGO_CON_ENUM = 0x9000000, /* ALGO_CON|0x8000000 */ + ALGO_CON_OTF_OBJ = 0x10000000, + ALGO_CON_OTF_REF = 0x20000000, + ALGO_CON_MOSTLY = 0x40000000, + ALGO_CON_MASK = 0x70000000, }; FORCE_INLINE Boolean gc_is_kind(unsigned int kind) @@ -223,7 +228,7 @@ root slots after collection in an extra phase. 
i.e., collect_mark_and_move */ FORCE_INLINE Boolean collect_need_update_repset() { - return (gc_is_kind(ALGO_MAJOR) || gc_is_kind(ALGO_MS_COMPACT)); + return (gc_is_kind(ALGO_MAJOR) || gc_is_kind(ALGO_MS_COMPACT) || !gc_has_nos()); } #endif /* #ifndef _GC_PROPERTIES */ Index: src/common/hashcode.h =================================================================== --- src/common/hashcode.h (revision 650025) +++ src/common/hashcode.h (working copy) @@ -177,6 +177,34 @@ return; } +inline void hashcode_buf_update(Partial_Reveal_Object* p_obj, int32 hashcode, Hashcode_Buf* hashcode_buf) +{ + POINTER_SIZE_INT obj_addr = (POINTER_SIZE_INT)p_obj; + lock(hashcode_buf->lock); + Seq_List* list = hashcode_buf->list; + seq_list_iterate_init(list); + while(seq_list_has_next(list)){ + Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list); + POINTER_SIZE_INT *iter = vector_block_iterator_init(curr_block); + + while(!vector_block_iterator_end(curr_block, iter)){ + POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter; + if(obj_addr != addr){ + iter = vector_block_iterator_advance(curr_block, iter); + iter = vector_block_iterator_advance(curr_block, iter); + }else{ + iter = vector_block_iterator_advance(curr_block, iter); + *iter = (POINTER_SIZE_INT)hashcode; + iter = vector_block_iterator_advance(curr_block, iter); + unlock(hashcode_buf->lock); + return; + } + } + } + unlock(hashcode_buf->lock); + hashcode_buf_add(p_obj, hashcode, hashcode_buf); +} + inline void hashcode_buf_refresh_all(Hashcode_Buf* hashcode_buf, POINTER_SIZE_INT dist) { Seq_List* list = hashcode_buf->list; @@ -288,6 +316,7 @@ } int obj_lookup_hashcode_in_buf(Partial_Reveal_Object *p_obj); +int obj_lookup_hashcode_in_chunk_buf(Partial_Reveal_Object *p_obj); inline int hashcode_lookup(Partial_Reveal_Object* p_obj,Obj_Info_Type obj_info) { @@ -297,7 +326,11 @@ unsigned char* pos = (unsigned char *)p_obj; hash = *(int*) (pos + offset); }else if(hashcode_is_buffered(p_obj)){ +#ifdef USE_UNIQUE_MARK_SWEEP_GC + hash = obj_lookup_hashcode_in_chunk_buf(p_obj); +#else hash = obj_lookup_hashcode_in_buf(p_obj); +#endif } return hash; } Index: src/finalizer_weakref/finalizer_weakref.cpp =================================================================== --- src/finalizer_weakref/finalizer_weakref.cpp (revision 650025) +++ src/finalizer_weakref/finalizer_weakref.cpp (working copy) @@ -151,7 +151,7 @@ else assert(0); } - } else if(collect_is_major_normal()){ + } else if(collect_is_major_normal() || !gc_has_nos()){ p_ref_or_obj = p_obj; if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){ trace_object = trace_obj_in_space_tune_marking; @@ -165,7 +165,7 @@ } else { collector->los_live_obj_size += round_up_to_size(obj_size, KB); } - } else if(major_is_marksweep()){ + } else if(!gc_has_nos()){ trace_object = trace_obj_in_ms_marking; } else { trace_object = trace_obj_in_normal_marking; @@ -195,7 +195,7 @@ while(!vector_block_iterator_end(task_block, iter)){ void *p_ref_or_obj = (void*)*iter; assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj) - || ((collect_is_major_normal()||major_is_marksweep()) && p_ref_or_obj)); + || ((collect_is_major_normal()||major_is_marksweep()||!gc_has_nos()) && p_ref_or_obj)); trace_object(collector, p_ref_or_obj); if(collector->result == FALSE) break; /* Resurrection fallback happens; force return */ @@ -855,3 +855,4 @@ + Index: src/gen/gen.cpp =================================================================== --- src/gen/gen.cpp (revision 650025) +++ 
src/gen/gen.cpp (working copy) @@ -60,10 +60,10 @@ { if(status){ gc_set_gen_flag(); - gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_REF); + gc_set_barrier_function(WB_REM_SOURCE_REF); }else{ gc_clear_gen_flag(); - gc_set_barrier_function(WRITE_BARRIER_REM_NIL); + gc_set_barrier_function(WB_REM_NIL); } HelperClass_set_GenMode(status); @@ -780,7 +780,6 @@ gc->collect_result = TRUE; #ifdef GC_GEN_STATS gc_gen_stats_reset_before_collection(gc); - gc_gen_collector_stats_reset(gc); #endif nos_prepare_for_collection(nos); @@ -814,7 +813,6 @@ #ifdef GC_GEN_STATS gc->stats->num_minor_collections++; #endif - if(LOS_ADJUST_BOUNDARY) gc->tuner->kind=TRANS_NOTHING; los_collection(los); } @@ -912,10 +910,14 @@ } gc_gen_update_space_info_after_gc(gc); - + if(gc_is_gen_mode()) { + gc_reset_collectors_rem_set((GC*)gc); + } + #ifdef GC_GEN_STATS gc_gen_stats_update_after_collection(gc); gc_gen_stats_verbose(gc); + gc_gen_collector_stats_reset(gc); #endif INFO2("gc.process", "GC: end of GC_Gen\n"); @@ -987,12 +989,12 @@ } } -void gc_gen_collection_verbose_info(GC_Gen *gc, int64 pause_time, int64 mutator_time) +void gc_gen_collection_verbose_info(GC_Gen *gc, int64 pause_time, int64 time_mutator) { #ifdef GC_GEN_STATS GC_Gen_Stats* stats = ((GC_Gen*)gc)->stats; - stats->total_mutator_time += mutator_time; + stats->total_mutator_time += time_mutator; stats->total_pause_time += pause_time; #endif @@ -1035,7 +1037,7 @@ } INFO2("gc.collect","GC: pause time: "<<(pause_time>>10)<<"ms" - <<"\nGC: mutator time from last collection: "<<(mutator_time>>10)<<"ms\n"); + <<"\nGC: mutator time from last collection: "<<(time_mutator>>10)<<"ms\n"); } Index: src/gen/gen.h =================================================================== --- src/gen/gen.h (revision 650025) +++ src/gen/gen.h (working copy) @@ -102,10 +102,10 @@ unsigned int gc_concurrent_status; Collection_Scheduler* collection_scheduler; - SpinLock concurrent_mark_lock; - SpinLock enumerate_rootset_lock; - SpinLock concurrent_sweep_lock; - SpinLock collection_scheduler_lock; + SpinLock lock_con_mark; + SpinLock lock_enum; + SpinLock lock_con_sweep; + SpinLock lock_collect_sched; /* system info */ @@ -133,7 +133,7 @@ void gc_gen_initialize(GC_Gen *gc, POINTER_SIZE_INT initial_heap_size, POINTER_SIZE_INT final_heap_size); void gc_gen_destruct(GC_Gen *gc); -void gc_gen_collection_verbose_info(GC_Gen *gc, int64 pause_time, int64 mutator_time); +void gc_gen_collection_verbose_info(GC_Gen *gc, int64 pause_time, int64 time_mutator); void gc_gen_space_verbose_info(GC_Gen *gc); void gc_gen_init_verbose(GC_Gen *gc); void gc_gen_wrapup_verbose(GC_Gen* gc); @@ -218,3 +218,4 @@ + Index: src/gen/gen_stats.cpp =================================================================== --- src/gen/gen_stats.cpp (revision 650025) +++ src/gen/gen_stats.cpp (working copy) @@ -143,7 +143,7 @@ { Collector** collector = gc->collectors; GC_Gen_Collector_Stats* stats; - for (unsigned int i=0; i<gc->num_collectors; i++){ + for (unsigned int i=0; i<gc->num_active_collectors; i++){ stats = (GC_Gen_Collector_Stats*)collector[i]->stats; memset(stats, 0, sizeof(GC_Gen_Collector_Stats)); } Index: src/los/lspace.h =================================================================== --- src/los/lspace.h (revision 650025) +++ src/los/lspace.h (working copy) @@ -90,7 +90,7 @@ { if(!lspace) return 0; /* FIXME:: */ - assert(lspace->committed_heap_size > (POINTER_SIZE_INT)lspace->last_surviving_size + (POINTER_SIZE_INT)lspace->last_alloced_size); + assert(lspace->committed_heap_size >=
(POINTER_SIZE_INT)lspace->last_surviving_size + (POINTER_SIZE_INT)lspace->last_alloced_size); return (lspace->committed_heap_size - (POINTER_SIZE_INT)lspace->last_surviving_size - (POINTER_SIZE_INT)lspace->last_alloced_size); } Index: src/los/lspace_alloc_collect.cpp =================================================================== --- src/los/lspace_alloc_collect.cpp (revision 650025) +++ src/los/lspace_alloc_collect.cpp (working copy) @@ -205,8 +205,6 @@ POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size); Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc); Free_Area_Pool* pool = lspace->free_pool; - - gc_try_schedule_collection(allocator->gc, GC_CAUSE_NIL); while( try_count < 2 ){ if(p_result = lspace_try_alloc(lspace, alloc_size)) @@ -367,26 +365,26 @@ blocked_space_shrink((Blocked_Space*)nos, trans_size); } else { POINTER_SIZE_INT mos_free_size= blocked_space_free_mem_size((Blocked_Space*)mos); - void *uncommit_base=(void*)((POINTER_SIZE_INT)nos->heap_end-trans_size); - vm_decommit_mem(uncommit_base,trans_size); + void *decommit_base=(void*)((POINTER_SIZE_INT)nos->heap_end-trans_size); + vm_decommit_mem(decommit_base,trans_size); unsigned int reduced_mos_size = trans_size - nos->committed_heap_size; unsigned int size=round_down_to_size(mos_free_size-reduced_mos_size,SPACE_ALLOC_UNIT); - unsigned int nos_size=(unsigned int )(size*nos->survive_ratio/(nos->survive_ratio+mos->survive_ratio)); + unsigned int nos_size= mos_free_size - reduced_mos_size ; if(nos_sizenum_managed_blocks-=((nos_size+reduced_mos_size)>>GC_BLOCK_SHIFT_COUNT); + mos->num_managed_blocks -= (( mos_free_size )>>GC_BLOCK_SHIFT_COUNT); mos->num_used_blocks = mos->free_block_idx-mos->first_block_idx; mos->num_total_blocks=mos->num_managed_blocks; - mos->ceiling_block_idx-=((nos_size+reduced_mos_size)>>GC_BLOCK_SHIFT_COUNT); + mos->ceiling_block_idx -= (( mos_free_size )>>GC_BLOCK_SHIFT_COUNT); assert(mos->num_used_blocks<=mos->num_managed_blocks); void *start_address=(void*)&(mos->blocks[mos->num_managed_blocks]); - assert(start_address< uncommit_base); + assert(start_address< decommit_base); mos->heap_end = start_address; mos->committed_heap_size = (POINTER_SIZE_INT) start_address - (POINTER_SIZE_INT) mos->heap_start; - nos->heap_start = start_address; - nos->heap_end = uncommit_base; - nos->committed_heap_size=nos->reserved_heap_size = (POINTER_SIZE_INT)uncommit_base- (POINTER_SIZE_INT) start_address; - nos->num_total_blocks=nos->num_managed_blocks=nos_size>>GC_BLOCK_SHIFT_COUNT; + nos_boundary = nos->heap_start = start_address; + nos->heap_end = decommit_base; + nos->committed_heap_size = nos->reserved_heap_size = (POINTER_SIZE_INT)decommit_base- (POINTER_SIZE_INT) start_address; + nos->num_total_blocks = nos->num_managed_blocks = nos_size>>GC_BLOCK_SHIFT_COUNT; nos->free_block_idx=nos->first_block_idx=GC_BLOCK_INDEX_FROM(gc->heap_start,start_address); nos->ceiling_block_idx=nos->first_block_idx+nos->num_managed_blocks-1; nos->num_used_blocks = 0; @@ -545,3 +543,4 @@ + Index: src/mark_compact/mspace_move_compact.cpp =================================================================== --- src/mark_compact/mspace_move_compact.cpp (revision 650025) +++ src/mark_compact/mspace_move_compact.cpp (working copy) @@ -224,12 +224,10 @@ #endif gc_identify_dead_weak_roots(gc); -#ifndef LOS_ADJUST_BOUNDARY #ifdef USE_32BITS_HASHCODE - if(is_fallback) + if((!LOS_ADJUST_BOUNDARY) && (is_fallback)) fallback_clear_fwd_obj_oi_init(collector); #endif -#endif debug_num_compact_blocks = 0; /* let other collectors go */ 
num_marking_collectors++; Index: src/mark_sweep/gc_ms.cpp =================================================================== --- src/mark_sweep/gc_ms.cpp (revision 650025) +++ src/mark_sweep/gc_ms.cpp (working copy) @@ -81,7 +81,7 @@ } void wspace_mark_scan_concurrent(Marker* marker); -void gc_ms_start_concurrent_mark(GC_MS* gc, unsigned int num_markers) +void gc_ms_start_con_mark(GC_MS* gc, unsigned int num_markers) { if(gc->num_active_markers == 0) pool_iterator_init(gc->metadata->gc_rootset_pool); @@ -90,7 +90,7 @@ } void wspace_mark_scan_mostly_concurrent(Marker* marker); -void gc_ms_start_most_concurrent_mark(GC_MS* gc, unsigned int num_markers) +void gc_ms_start_mostly_con_mark(GC_MS* gc, unsigned int num_markers) { if(gc->num_active_markers == 0) pool_iterator_init(gc->metadata->gc_rootset_pool); @@ -98,7 +98,7 @@ marker_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_mostly_concurrent,(Space*)gc->wspace, num_markers); } -void gc_ms_start_final_mark_after_concurrent(GC_MS* gc, unsigned int num_markers) +void gc_ms_start_mostly_con_final_mark(GC_MS* gc, unsigned int num_markers) { pool_iterator_init(gc->metadata->gc_rootset_pool); @@ -112,7 +112,7 @@ Mutator *mutator = gc->mutator_list; while(mutator){ - wait_mutator_signal(mutator, MUTATOR_ENTER_ALLOCATION_MARK); + wait_mutator_signal(mutator, HSIG_MUTATOR_SAFE); mutator = mutator->next; } @@ -120,12 +120,12 @@ } void wspace_sweep_concurrent(Collector* collector); -void gc_ms_start_concurrent_sweep(GC_MS* gc, unsigned int num_collectors) +void gc_ms_start_con_sweep(GC_MS* gc, unsigned int num_collectors) { ops_color_flip(); mem_fence(); + gc_check_mutator_allocation((GC*)gc); gc_disable_alloc_obj_live(); - gc_check_mutator_allocation((GC*)gc); wspace_init_pfc_pool_iterator(gc->wspace); collector_execute_task_concurrent((GC*)gc, (TaskType)wspace_sweep_concurrent, (Space*)gc->wspace, num_collectors); @@ -133,14 +133,14 @@ collector_release_weakref_sets((GC*)gc, num_collectors); } -void gc_ms_start_concurrent_mark(GC_MS* gc) +void gc_ms_start_con_mark(GC_MS* gc) { pool_iterator_init(gc->metadata->gc_rootset_pool); marker_execute_task_concurrent((GC*)gc,(TaskType)wspace_mark_scan_concurrent,(Space*)gc->wspace); } -void gc_ms_update_space_statistics(GC_MS* gc) +void gc_ms_update_space_stat(GC_MS* gc) { POINTER_SIZE_INT num_live_obj = 0; POINTER_SIZE_INT size_live_obj = 0; @@ -157,25 +157,25 @@ size_live_obj += collector->live_obj_size; } - lock(gc->mutator_list_lock); - Mutator* mutator = gc->mutator_list; - while (mutator) { - new_obj_size += mutator->new_obj_size; - mutator->new_obj_size = 0; - mutator = mutator->next; - } - unlock(gc->mutator_list_lock); + new_obj_size = gc_get_new_object_size((GC*)gc, TRUE); - wspace_stat->size_new_obj += new_obj_size; + wspace_stat->size_new_obj = new_obj_size; wspace_stat->num_live_obj = num_live_obj; wspace_stat->size_live_obj = size_live_obj; wspace_stat->last_size_free_space = wspace_stat->size_free_space; wspace_stat->size_free_space = gc->committed_heap_size - size_live_obj;/*TODO:inaccurate value.*/ - wspace_stat->space_utilization_ratio = (float)wspace_stat->size_new_obj / wspace_stat->last_size_free_space;; + wspace_stat->space_utilization_ratio = (float)wspace_stat->size_new_obj / wspace_stat->last_size_free_space; + + INFO2("gc.space.stat","[GC][Space Stat] num_live_obj : "<<wspace_stat->num_live_obj<<" "); + INFO2("gc.space.stat","[GC][Space Stat] size_live_obj : "<<wspace_stat->size_live_obj<<" "); + INFO2("gc.space.stat","[GC][Space Stat] size_free_space : "<<wspace_stat->size_free_space<<" "); +
INFO2("gc.space.stat","[GC][Space Stat] last_size_free_space: "<last_size_free_space<<" "); + INFO2("gc.space.stat","[GC][Space Stat] size_new_obj : "<size_new_obj<<" "); + INFO2("gc.space.stat","[GC][Space Stat] utilization_ratio : "<space_utilization_ratio<<" "); } -void gc_ms_reset_space_statistics(GC_MS* gc) +void gc_ms_reset_space_stat(GC_MS* gc) { Space_Statistics* wspace_stat = gc->wspace->space_statistic; wspace_stat->size_new_obj = 0; Index: src/mark_sweep/gc_ms.h =================================================================== --- src/mark_sweep/gc_ms.h (revision 650025) +++ src/mark_sweep/gc_ms.h (working copy) @@ -73,10 +73,10 @@ unsigned int gc_concurrent_status; Collection_Scheduler* collection_scheduler; - SpinLock concurrent_mark_lock; - SpinLock enumerate_rootset_lock; - SpinLock concurrent_sweep_lock; - SpinLock collection_scheduler_lock; + SpinLock lock_con_mark; + SpinLock lock_enum; + SpinLock lock_con_sweep; + SpinLock lock_collect_sched; /* system info */ unsigned int _system_alloc_unit; @@ -115,12 +115,12 @@ void gc_ms_reclaim_heap(GC_MS *gc); void gc_ms_iterate_heap(GC_MS *gc); -void gc_ms_start_concurrent_mark(GC_MS* gc); -void gc_ms_start_concurrent_mark(GC_MS* gc, unsigned int num_markers); -void gc_ms_update_space_statistics(GC_MS* gc); -void gc_ms_start_concurrent_sweep(GC_MS* gc, unsigned int num_collectors); -void gc_ms_start_most_concurrent_mark(GC_MS* gc, unsigned int num_markers); -void gc_ms_start_final_mark_after_concurrent(GC_MS* gc, unsigned int num_markers); -void gc_ms_reset_space_statistics(GC_MS* gc); +void gc_ms_start_con_mark(GC_MS* gc); +void gc_ms_start_con_mark(GC_MS* gc, unsigned int num_markers); +void gc_ms_update_space_stat(GC_MS* gc); +void gc_ms_start_con_sweep(GC_MS* gc, unsigned int num_collectors); +void gc_ms_start_mostly_con_mark(GC_MS* gc, unsigned int num_markers); +void gc_ms_start_mostly_con_final_mark(GC_MS* gc, unsigned int num_markers); +void gc_ms_reset_space_stat(GC_MS* gc); #endif // _GC_MS_H_ Index: src/mark_sweep/wspace.cpp =================================================================== --- src/mark_sweep/wspace.cpp (revision 650025) +++ src/mark_sweep/wspace.cpp (working copy) @@ -125,15 +125,6 @@ allocator->local_chunks = local_chunks; } -void allocator_register_new_obj_size(Allocator *allocator) -{ - Mutator* mutator = (Mutator*)allocator; - Wspace *wspace = gc_get_wspace(allocator->gc); - Space_Statistics* space_stat = wspace->space_statistic; - space_stat->size_new_obj += mutator->new_obj_size; -} - - void allocactor_destruct_local_chunks(Allocator *allocator) { Wspace *wspace = gc_get_wspace(allocator->gc); @@ -153,12 +144,12 @@ /* Put local pfc to the according pools */ for(unsigned int j = 0; j < chunk_ptr_num; ++j){ if(chunk_ptrs[j]){ - if(!USE_CONCURRENT_GC){ + if(!gc_is_specify_con_gc()){ wspace_put_pfc(wspace, chunk_ptrs[j]); }else{ Chunk_Header* chunk_to_rem = chunk_ptrs[j]; chunk_to_rem->status = CHUNK_USED | CHUNK_NORMAL; - wspace_register_used_chunk(wspace, chunk_to_rem); + wspace_reg_used_chunk(wspace, chunk_to_rem); } } } @@ -243,13 +234,13 @@ wspace_alloc_info_summary(); #endif #ifdef SSPACE_CHUNK_INFO - wspace_chunks_info(wspace, FALSE); + wspace_chunks_info(wspace, TRUE); #endif wspace_clear_used_chunk_pool(wspace); wspace_decide_compaction_need(wspace); - if(wspace->need_compact && major_is_marksweep()){ + if(wspace->need_compact && gc_is_kind(ALGO_MARKSWEEP)){ assert(!collect_move_object()); GC_PROP |= ALGO_MS_COMPACT; } @@ -279,7 +270,7 @@ #endif #ifdef SSPACE_CHUNK_INFO - 
wspace_chunks_info(wspace, FALSE); + wspace_chunks_info(wspace, TRUE); #endif } Index: src/mark_sweep/wspace.h =================================================================== --- src/mark_sweep/wspace.h (revision 650025) +++ src/mark_sweep/wspace.h (working copy) @@ -91,7 +91,6 @@ void allocator_init_local_chunks(Allocator *allocator); void allocactor_destruct_local_chunks(Allocator *allocator); void gc_init_collector_free_chunk_list(Collector *collector); -void allocator_register_new_obj_size(Allocator *allocator); POINTER_SIZE_INT wspace_free_memory_size(Wspace *wspace); Index: src/mark_sweep/wspace_alloc.cpp =================================================================== --- src/mark_sweep/wspace_alloc.cpp (revision 650025) +++ src/mark_sweep/wspace_alloc.cpp (working copy) @@ -17,7 +17,6 @@ #include "wspace.h" #include "wspace_chunk.h" -//#include "wspace_mark_sweep.h" #include "wspace_alloc.h" #include "gc_ms.h" #include "../gen/gen.h" @@ -85,25 +84,28 @@ Chunk_Header **chunks = allocator->local_chunks[seg_index]; Chunk_Header *chunk = chunks[index]; if(!chunk){ - mutator_post_signal((Mutator*) allocator,DISABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS); + mutator_post_signal((Mutator*) allocator,HSIG_DISABLE_SWEEP_LOCAL_CHUNKS); chunk = wspace_get_pfc(wspace, seg_index, index); //if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index); - if(!chunk) return NULL; + if(!chunk){ + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE); + return NULL; + } chunk->status |= CHUNK_IN_USE; chunks[index] = chunk; - mutator_post_signal((Mutator*) allocator,ENABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS); + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE); } - mutator_post_signal((Mutator*) allocator,MUTATOR_ENTER_ALLOCATION_MARK); + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_ENTER_ALLOC_MARK); void *p_obj = alloc_in_chunk(chunks[index]); - mutator_post_signal((Mutator*) allocator,MUTATOR_EXIT_ALLOCATION_MARK); + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE); if(chunk->slot_index == MAX_SLOT_INDEX){ chunk->status = CHUNK_USED | CHUNK_NORMAL; /*register to used chunk list.*/ - wspace_register_used_chunk(wspace,chunk); + wspace_reg_used_chunk(wspace,chunk); chunks[index] = NULL; chunk = NULL; } @@ -119,6 +121,7 @@ wspace_verify_alloc(p_obj, size); #endif + if(p_obj) ((Mutator*)allocator)->new_obj_size += size; return p_obj; } @@ -137,38 +140,39 @@ Chunk_Header **chunks = allocator->local_chunks[seg_index]; chunk = chunks[index]; if(!chunk){ - mutator_post_signal((Mutator*) allocator,DISABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS); + mutator_post_signal((Mutator*) allocator,HSIG_DISABLE_SWEEP_LOCAL_CHUNKS); chunk = wspace_get_pfc(wspace, seg_index, index); if(!chunk){ chunk = (Chunk_Header*)wspace_get_normal_free_chunk(wspace); if(chunk) normal_chunk_init(chunk, size); } //if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index); - if(!chunk) return NULL; + if(!chunk){ + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE); + return NULL; + } chunk->status |= CHUNK_IN_USE; chunks[index] = chunk; - mutator_post_signal((Mutator*) allocator,ENABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS); + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE); } - mutator_post_signal((Mutator*) allocator,MUTATOR_ENTER_ALLOCATION_MARK); + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_ENTER_ALLOC_MARK); p_obj = alloc_in_chunk(chunks[index]); - mutator_post_signal((Mutator*) allocator,MUTATOR_EXIT_ALLOCATION_MARK); + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE); 
if(chunk->slot_index == MAX_SLOT_INDEX){ chunk->status = CHUNK_USED | CHUNK_NORMAL; /*register to used chunk list.*/ - wspace_register_used_chunk(wspace,chunk); + wspace_reg_used_chunk(wspace,chunk); chunks[index] = NULL; } - } else { - gc_try_schedule_collection(allocator->gc, GC_CAUSE_NIL); + } else { + mutator_post_signal((Mutator*) allocator,HSIG_DISABLE_SWEEP_GLOBAL_CHUNKS); - mutator_post_signal((Mutator*) allocator,DISABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS); - - if(USE_CONCURRENT_SWEEP){ - while(gc_is_sweeping_global_normal_chunk()){ - mutator_post_signal((Mutator*) allocator,ENABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS); + if(gc_is_specify_con_sweep()){ + while(gc_is_sweep_global_normal_chunk()){ + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE); } } @@ -178,13 +182,16 @@ if(chunk) normal_chunk_init(chunk, size); } //if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index); - if(!chunk) return NULL; + if(!chunk) { + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE); + return NULL; + } p_obj = alloc_in_chunk(chunk); if(chunk->slot_index == MAX_SLOT_INDEX){ chunk->status = CHUNK_USED | CHUNK_NORMAL; /*register to used chunk list.*/ - wspace_register_used_chunk(wspace,chunk); + wspace_reg_used_chunk(wspace,chunk); chunk = NULL; } @@ -192,7 +199,7 @@ wspace_put_pfc(wspace, chunk); } - mutator_post_signal((Mutator*) allocator,ENABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS); + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE); } return p_obj; @@ -202,8 +209,6 @@ { assert(size > SUPER_OBJ_THRESHOLD); - gc_try_schedule_collection(allocator->gc, GC_CAUSE_NIL); - unsigned int chunk_size = SUPER_SIZE_ROUNDUP(size); assert(chunk_size > SUPER_OBJ_THRESHOLD); assert(!(chunk_size & CHUNK_GRANULARITY_LOW_MASK)); @@ -217,18 +222,17 @@ if(!chunk) return NULL; abnormal_chunk_init(chunk, chunk_size, size); - mutator_post_signal((Mutator*) allocator,MUTATOR_ENTER_ALLOCATION_MARK); + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_ENTER_ALLOC_MARK); if(is_obj_alloced_live()){ chunk->table[0] |= cur_mark_black_color ; } - mutator_post_signal((Mutator*) allocator,MUTATOR_EXIT_ALLOCATION_MARK); - //mem_fence(); + mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE); chunk->table[0] |= cur_alloc_color; set_super_obj_mask(chunk->base); assert(chunk->status == CHUNK_ABNORMAL); chunk->status = CHUNK_ABNORMAL| CHUNK_USED; - wspace_register_used_chunk(wspace, chunk); + wspace_reg_used_chunk(wspace, chunk); assert(get_obj_info_raw((Partial_Reveal_Object*)chunk->base) & SUPER_OBJ_MASK); return chunk->base; } @@ -251,10 +255,8 @@ #endif #ifdef WSPACE_CONCURRENT_GC_STATS - if(p_obj && gc_is_concurrent_mark_phase()) ((Partial_Reveal_Object*)p_obj)->obj_info |= NEW_OBJ_MASK; + if(p_obj && gc_con_is_in_marking()) ((Partial_Reveal_Object*)p_obj)->obj_info |= NEW_OBJ_MASK; #endif - - if(p_obj) ((Mutator*)allocator)->new_obj_size += size; return p_obj; } @@ -264,6 +266,9 @@ { void *p_obj = NULL; + if(gc_is_specify_con_gc()) + gc_sched_collection(allocator->gc, GC_CAUSE_CONCURRENT_GC); + /* First, try to allocate object from TLB (thread local chunk) */ p_obj = wspace_try_alloc(size, allocator); if(p_obj){ Index: src/mark_sweep/wspace_alloc.h =================================================================== --- src/mark_sweep/wspace_alloc.h (revision 650025) +++ src/mark_sweep/wspace_alloc.h (working copy) @@ -23,9 +23,9 @@ #include "../common/gc_concurrent.h" #include "../common/collection_scheduler.h" -extern POINTER_SIZE_INT cur_alloc_color; -extern POINTER_SIZE_INT cur_alloc_mask; -extern 
POINTER_SIZE_INT cur_mark_mask; +extern volatile POINTER_SIZE_INT cur_alloc_color; +extern volatile POINTER_SIZE_INT cur_alloc_mask; +extern volatile POINTER_SIZE_INT cur_mark_mask; inline Boolean slot_is_alloc_in_table(POINTER_SIZE_INT *table, unsigned int slot_index) @@ -186,9 +186,7 @@ /*mark black is placed here because of race condition between ops color flip. */ if(p_obj && is_obj_alloced_live()) obj_mark_black_in_table((Partial_Reveal_Object*)p_obj, chunk->slot_size); - - //mem_fence(); - + alloc_slot_in_table(table, slot_index); if(chunk->status & CHUNK_NEED_ZEROING) memset(p_obj, 0, chunk->slot_size); Index: src/mark_sweep/wspace_chunk.cpp =================================================================== --- src/mark_sweep/wspace_chunk.cpp (revision 650025) +++ src/mark_sweep/wspace_chunk.cpp (working copy) @@ -597,11 +597,37 @@ return NULL; } +static POINTER_SIZE_INT free_mem_in_used_chunk_list(Wspace *wspace, Boolean show_chunk_info) +{ + POINTER_SIZE_INT used_chunk_size = 0; + POINTER_SIZE_INT used_chunk_num = 0; + POINTER_SIZE_INT free_mem_size = 0; + Pool* used_chunk_pool = wspace->used_chunk_pool; + if(used_chunk_pool) { + pool_iterator_init(used_chunk_pool); + Chunk_Header* used_chunk = (Chunk_Header*)pool_iterator_next(used_chunk_pool); + while(used_chunk != NULL){ + used_chunk_num ++; + used_chunk_size += CHUNK_SIZE(used_chunk); + if(used_chunk->status & CHUNK_NORMAL) { + free_mem_size += (used_chunk->slot_num - used_chunk->alloc_num) * used_chunk->slot_size; + } + used_chunk = (Chunk_Header*)pool_iterator_next(used_chunk_pool); + } + } +#ifdef SSPACE_CHUNK_INFO + if(show_chunk_info) + printf("Used chunk num: %d\tTotal Size: %d\tFragmentation Ratio: %f\n", used_chunk_num, used_chunk_size, (float)free_mem_size/used_chunk_size); +#endif + return free_mem_size; +} + static POINTER_SIZE_INT free_mem_in_pfc_pools(Wspace *wspace, Boolean show_chunk_info) { Size_Segment **size_segs = wspace->size_segments; Pool ***pfc_pools = wspace->pfc_pools; POINTER_SIZE_INT free_mem_size = 0; + POINTER_SIZE_INT total_pfc_size = 0; for(unsigned int i = 0; i < SIZE_SEGMENT_NUM; ++i){ for(unsigned int j = 0; j < size_segs[i]->chunk_num; ++j){ @@ -627,9 +653,15 @@ printf("Size: %x\tchunk num: %d\tLive Ratio: %f\n", NORMAL_INDEX_TO_SIZE(j, size_segs[i]), chunk_num, (float)alloc_num/total_slot_num); #endif free_mem_size += NORMAL_INDEX_TO_SIZE(j, size_segs[i]) * (total_slot_num-alloc_num); + total_pfc_size += NORMAL_INDEX_TO_SIZE(j, size_segs[i]) * total_slot_num; assert(free_mem_size < wspace->committed_heap_size); } } + +#ifdef SSPACE_CHUNK_INFO + if(show_chunk_info) + printf("Total PFC pool Size: %d\tFragmentation Ratio: %f\n", total_pfc_size, (float)free_mem_size/total_pfc_size); +#endif return free_mem_size; } @@ -656,6 +688,11 @@ printf("Free Size: %x\tnum: %d\n", chunk_size, chunk_num); #endif } + +#ifdef SSPACE_CHUNK_INFO + if(show_chunk_info) + printf("Total Size in FreeList: %d\n", free_mem_size); +#endif return free_mem_size; } @@ -675,6 +712,11 @@ assert(free_mem_size <= wspace->committed_heap_size); chunk = chunk->next; } + +#ifdef SSPACE_CHUNK_INFO + if(show_chunk_info) + printf("Total Size in HyperFreeList: %d\n", free_mem_size); +#endif return free_mem_size; } @@ -717,6 +759,7 @@ if(!show_info) return; POINTER_SIZE_INT free_mem_size = free_mem_in_wspace(wspace, TRUE); + free_mem_size += free_mem_in_used_chunk_list(wspace, TRUE); float free_mem_ratio = (float)free_mem_size / wspace->committed_heap_size; printf("\n\nFree mem ratio: %f\n\n", free_mem_ratio); @@ -852,7 +895,7 @@ } 
unsigned int *shift_table; -unsigned short *compact_table[MAX_SLOT_SIZE_AFTER_SHIFTING]; +unsigned short *compact_table[MAX_SLOT_SIZE_AFTER_SHIFTING+1]; unsigned int mask[MAX_SLOT_SIZE_AFTER_SHIFTING]; static int already_inited = 0; void fastdiv_init() @@ -911,3 +954,4 @@ #endif + Index: src/mark_sweep/wspace_chunk.h =================================================================== --- src/mark_sweep/wspace_chunk.h (revision 650025) +++ src/mark_sweep/wspace_chunk.h (working copy) @@ -18,6 +18,10 @@ #ifndef _SSPACE_CHUNK_H_ #define _SSPACE_CHUNK_H_ +#ifdef USE_32BITS_HASHCODE +#include "hashcode.h" +#endif + #include "wspace.h" #define SSPACE_USE_FASTDIV @@ -33,7 +37,7 @@ #define INIT_ALLOC_SIZE (128 * 1024) extern unsigned int *shift_table; -extern unsigned short *compact_table[MAX_SLOT_SIZE_AFTER_SHIFTING]; +extern unsigned short *compact_table[MAX_SLOT_SIZE_AFTER_SHIFTING+1]; extern unsigned int mask[MAX_SLOT_SIZE_AFTER_SHIFTING]; extern void fastdiv_init(void); @@ -97,6 +101,9 @@ unsigned int slot_num; unsigned int slot_index; /* the index of which is the first free slot in this chunk */ unsigned int alloc_num; /* the index of which is the first free slot in this chunk */ +#ifdef USE_32BITS_HASHCODE + Hashcode_Buf* hashcode_buf; /*hash code entry list*/ +#endif POINTER_SIZE_INT table[1]; } Chunk_Header; @@ -208,6 +215,7 @@ chunk->next->prev = chunk->prev; else list->tail = chunk->prev; + --list->chunk_num; } inline void move_free_chunks_between_lists(Free_Chunk_List *to_list, Free_Chunk_List *from_list) @@ -269,7 +277,7 @@ chunk->table[last_word_index] &= depadding_mask; } -extern POINTER_SIZE_INT cur_alloc_mask; +extern volatile POINTER_SIZE_INT cur_alloc_mask; /* Used for allocating a fixed-size chunk from free area lists */ inline void normal_chunk_init(Chunk_Header *chunk, unsigned int slot_size) { @@ -285,6 +293,9 @@ chunk->slot_index = 0; chunk->alloc_num = 0; chunk->base = NORMAL_CHUNK_BASE(chunk); +#ifdef USE_32BITS_HASHCODE + chunk->hashcode_buf = NULL; +#endif memset(chunk->table, 0, NORMAL_CHUNK_TABLE_SIZE_BYTES(chunk));//memset table //chunk_pad_last_index_word(chunk, cur_alloc_mask); fastdiv_init(); @@ -304,6 +315,9 @@ chunk->slot_num = 1; chunk->slot_index = 0; chunk->base = ABNORMAL_CHUNK_BASE(chunk); +#ifdef USE_32BITS_HASHCODE + chunk->hashcode_buf = NULL; +#endif } @@ -388,7 +402,7 @@ Chunk_Header *chunk = (Chunk_Header*)pool_get_entry(pfc_pool); /*2. 
If in concurrent sweeping phase, search PFC backup pool*/ - if(!chunk && gc_is_concurrent_sweep_phase()){ + if(!chunk && gc_con_is_in_sweeping()){ pfc_pool = wspace->pfc_pools_backup[seg_index][index]; chunk = (Chunk_Header*)pool_get_entry(pfc_pool); } @@ -492,7 +506,7 @@ return; } -inline void wspace_register_used_chunk(Wspace* wspace, Chunk_Header* chunk) +inline void wspace_reg_used_chunk(Wspace* wspace, Chunk_Header* chunk) { pool_put_entry(wspace->used_chunk_pool, chunk); return; @@ -504,7 +518,7 @@ return; } -inline void wspace_register_unreusable_normal_chunk(Wspace* wspace, Chunk_Header* chunk) +inline void wspace_reg_unreusable_normal_chunk(Wspace* wspace, Chunk_Header* chunk) { pool_put_entry(wspace->unreusable_normal_chunk_pool, chunk); return; @@ -515,7 +529,7 @@ return (Chunk_Header*)pool_get_entry(wspace->unreusable_normal_chunk_pool); } -inline void wspace_register_live_abnormal_chunk(Wspace* wspace, Chunk_Header* chunk) +inline void wspace_reg_live_abnormal_chunk(Wspace* wspace, Chunk_Header* chunk) { pool_put_entry(wspace->live_abnormal_chunk_pool, chunk); return; @@ -551,4 +565,12 @@ extern void wspace_collect_free_chunks_to_list(Wspace *wspace, Free_Chunk_List *list); +#ifdef USE_32BITS_HASHCODE +inline int obj_lookup_hashcode_in_chunk_buf(Partial_Reveal_Object *p_obj) +{ + Hashcode_Buf* hashcode_buf = NORMAL_CHUNK_HEADER(p_obj)->hashcode_buf; + return hashcode_buf_lookup(p_obj,hashcode_buf); +} +#endif + #endif //#ifndef _SSPACE_CHUNK_H_ Index: src/mark_sweep/wspace_compact.cpp =================================================================== --- src/mark_sweep/wspace_compact.cpp (revision 650025) +++ src/mark_sweep/wspace_compact.cpp (working copy) @@ -20,6 +20,10 @@ #include "wspace_mark_sweep.h" #include "wspace_verify.h" +#ifdef USE_32BITS_HASHCODE +#include "../common/hashcode.h" +#endif + #define PFC_SORT_NUM 8 @@ -165,24 +169,61 @@ unsigned int alloc_num = src->alloc_num; assert(alloc_num); +#ifdef USE_32BITS_HASHCODE + Hashcode_Buf* old_hashcode_buf = src->hashcode_buf; + Hashcode_Buf* new_hashcode_buf = dest->hashcode_buf; +#endif + while(alloc_num && dest){ Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(src); - void *target = alloc_in_chunk(dest); + Partial_Reveal_Object *target = (Partial_Reveal_Object *)alloc_in_chunk(dest); if(dest->slot_index == MAX_SLOT_INDEX){ dest->status = CHUNK_USED | CHUNK_NORMAL; - wspace_register_used_chunk(wspace,dest); + wspace_reg_used_chunk(wspace,dest); dest = NULL; } assert(p_obj && target); memcpy(target, p_obj, slot_size); + +#ifdef USE_32BITS_HASHCODE + if(hashcode_is_set(p_obj)){ + int hashcode; + if(new_hashcode_buf == NULL) { + new_hashcode_buf = hashcode_buf_create(); + hashcode_buf_init(new_hashcode_buf); + dest->hashcode_buf = new_hashcode_buf; + } + if(hashcode_is_buffered(p_obj)){ + /*already buffered objects;*/ + hashcode = hashcode_buf_lookup(p_obj, old_hashcode_buf); + hashcode_buf_update(target, hashcode, new_hashcode_buf); + }else{ + /*objects need buffering.*/ + hashcode = hashcode_gen(p_obj); + hashcode_buf_update(target, hashcode, new_hashcode_buf); + Obj_Info_Type oi = get_obj_info_raw(target); + set_obj_info(target, oi | HASHCODE_BUFFERED_BIT); + } + } +#endif + + #ifdef SSPACE_VERIFY wspace_modify_mark_in_compact(target, p_obj, slot_size); #endif obj_set_fw_in_oi(p_obj, target); --alloc_num; } + +#ifdef USE_32BITS_HASHCODE + if(alloc_num == 0) { + if(old_hashcode_buf) hashcode_buf_destory(old_hashcode_buf); + src->hashcode_buf = NULL; + } +#endif + /* dest might be set to NULL, so we use 
*dest_ptr here */ assert((*dest_ptr)->alloc_num <= (*dest_ptr)->slot_num); @@ -238,3 +279,4 @@ + Index: src/mark_sweep/wspace_mark_mostly_concurrent.cpp =================================================================== --- src/mark_sweep/wspace_mark_mostly_concurrent.cpp (revision 650025) +++ src/mark_sweep/wspace_mark_mostly_concurrent.cpp (working copy) @@ -19,7 +19,7 @@ #include "../finalizer_weakref/finalizer_weakref.h" #include "../thread/marker.h" -volatile Boolean need_terminate_concurrent_mark; +volatile Boolean need_terminate_mostly_con_mark; Boolean obj_is_marked_in_table(Partial_Reveal_Object *obj); @@ -87,15 +87,15 @@ } /* for marking phase termination detection */ -void wspace_mark_scan_mostly_concurrent_reset() -{ need_terminate_concurrent_mark = FALSE; } +void mostly_con_mark_terminate_reset() +{ need_terminate_mostly_con_mark = FALSE; } -void wspace_mark_scan_mostly_concurrent_terminate() -{ need_terminate_concurrent_mark = TRUE; } +void terminate_mostly_con_mark() +{ need_terminate_mostly_con_mark = TRUE; } static Boolean concurrent_mark_need_terminating(GC* gc) { - if(need_terminate_concurrent_mark) return TRUE; + if(need_terminate_mostly_con_mark) return TRUE; GC_Metadata *metadata = gc->metadata; return pool_is_empty(metadata->gc_dirty_set_pool); @@ -122,11 +122,9 @@ while(root_set){ POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ - REF *p_ref = (REF *)*iter; + Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter; iter = vector_block_iterator_advance(root_set,iter); - Partial_Reveal_Object *p_obj = read_slot(p_ref); - /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for ALGO_MAJOR */ assert(p_obj!=NULL); assert(address_belongs_to_gc_heap(p_obj, gc)); if(obj_mark_gray_in_table(p_obj)) @@ -137,8 +135,6 @@ /* put back the last trace_stack task */ pool_put_entry(metadata->mark_task_pool, marker->trace_stack); - marker_notify_mark_root_done(marker); - marker->trace_stack = free_task_pool_get_entry(metadata); retry: @@ -226,3 +222,4 @@ + Index: src/mark_sweep/wspace_mark_otf_concurrent.cpp =================================================================== --- src/mark_sweep/wspace_mark_otf_concurrent.cpp (revision 650025) +++ src/mark_sweep/wspace_mark_otf_concurrent.cpp (working copy) @@ -96,7 +96,7 @@ void wspace_mark_scan_concurrent(Marker* marker) { - int64 time_mark_start = time_now(); + marker->time_measurement_start = time_now(); GC *gc = marker->gc; GC_Metadata *metadata = gc->metadata; @@ -111,11 +111,9 @@ while(root_set){ POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ - REF *p_ref = (REF *)*iter; + Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter; iter = vector_block_iterator_advance(root_set,iter); - Partial_Reveal_Object *p_obj = read_slot(p_ref); - /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for ALGO_MAJOR */ assert(p_obj!=NULL); assert(address_belongs_to_gc_heap(p_obj, gc)); if(obj_mark_gray_in_table(p_obj)) @@ -126,8 +124,6 @@ /* put back the last trace_stack task */ pool_put_entry(metadata->mark_task_pool, marker->trace_stack); - marker_notify_mark_root_done(marker); - marker->trace_stack = free_task_pool_get_entry(metadata); retry: @@ -219,8 +215,8 @@ marker->trace_stack = NULL; assert(pool_is_empty(metadata->gc_dirty_set_pool)); - int64 time_mark = time_now() - time_mark_start; - marker->time_mark = time_mark; + 
marker->time_measurement_end = time_now(); + marker->time_mark = marker->time_measurement_end - marker->time_measurement_start; return; } @@ -233,3 +229,4 @@ + Index: src/mark_sweep/wspace_mark_sweep.cpp =================================================================== --- src/mark_sweep/wspace_mark_sweep.cpp (revision 650025) +++ src/mark_sweep/wspace_mark_sweep.cpp (working copy) @@ -25,16 +25,31 @@ #include "../common/fix_repointed_refs.h" #include "../common/gc_concurrent.h" -POINTER_SIZE_INT cur_alloc_mask = (~MARK_MASK_IN_TABLE) & FLIP_COLOR_MASK_IN_TABLE; -POINTER_SIZE_INT cur_mark_mask = MARK_MASK_IN_TABLE; -POINTER_SIZE_INT cur_alloc_color = OBJ_COLOR_WHITE; -POINTER_SIZE_INT cur_mark_gray_color = OBJ_COLOR_GRAY; -POINTER_SIZE_INT cur_mark_black_color = OBJ_COLOR_BLACK; +volatile POINTER_SIZE_INT cur_alloc_mask = (~MARK_MASK_IN_TABLE) & FLIP_COLOR_MASK_IN_TABLE; +volatile POINTER_SIZE_INT cur_mark_mask = MARK_MASK_IN_TABLE; +volatile POINTER_SIZE_INT cur_alloc_color = OBJ_COLOR_WHITE; +volatile POINTER_SIZE_INT cur_mark_gray_color = OBJ_COLOR_GRAY; +volatile POINTER_SIZE_INT cur_mark_black_color = OBJ_COLOR_BLACK; static Chunk_Header_Basic *volatile next_chunk_for_fixing; /******************** General interfaces for Mark-Sweep-Compact ***********************/ +Boolean obj_is_mark_black_in_table(Partial_Reveal_Object *obj) +{ + POINTER_SIZE_INT *p_color_word; + unsigned int index_in_word; + p_color_word = get_color_word_in_table(obj, index_in_word); + POINTER_SIZE_INT current_word = *p_color_word; + POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word; + + if(current_word & mark_black_color) + return TRUE; + else + return FALSE; + +} + void gc_init_collector_free_chunk_list(Collector *collector) { Free_Chunk_List *list = (Free_Chunk_List*)STD_MALLOC(sizeof(Free_Chunk_List)); Index: src/mark_sweep/wspace_mark_sweep.h =================================================================== --- src/mark_sweep/wspace_mark_sweep.h (revision 650025) +++ src/mark_sweep/wspace_mark_sweep.h (working copy) @@ -52,11 +52,11 @@ //#define DIRY_MASK_IN_TABLE ((POINTER_SIZE_INT)0x44444444) #endif -extern POINTER_SIZE_INT cur_alloc_color; -extern POINTER_SIZE_INT cur_mark_gray_color; -extern POINTER_SIZE_INT cur_mark_black_color; -extern POINTER_SIZE_INT cur_alloc_mask; -extern POINTER_SIZE_INT cur_mark_mask; +extern volatile POINTER_SIZE_INT cur_alloc_color; +extern volatile POINTER_SIZE_INT cur_mark_gray_color; +extern volatile POINTER_SIZE_INT cur_mark_black_color; +extern volatile POINTER_SIZE_INT cur_alloc_mask; +extern volatile POINTER_SIZE_INT cur_mark_mask; inline Boolean is_super_obj(Partial_Reveal_Object *obj) { @@ -155,20 +155,7 @@ return FALSE; } -FORCE_INLINE Boolean obj_is_mark_black_in_table(Partial_Reveal_Object *obj) -{ - POINTER_SIZE_INT *p_color_word; - unsigned int index_in_word; - p_color_word = get_color_word_in_table(obj, index_in_word); - POINTER_SIZE_INT current_word = *p_color_word; - POINTER_SIZE_INT mark_black_color = cur_mark_black_color << index_in_word; - - if(current_word & mark_black_color) - return TRUE; - else - return FALSE; - -} +Boolean obj_is_mark_black_in_table(Partial_Reveal_Object *obj); FORCE_INLINE Boolean obj_is_mark_black_in_table(Partial_Reveal_Object *obj, unsigned int size) { @@ -431,7 +418,6 @@ { POINTER_SIZE_INT temp = cur_alloc_color; cur_alloc_color = cur_mark_black_color; - //mem_fence(); cur_mark_black_color = temp; cur_alloc_mask = (~cur_alloc_mask) & FLIP_COLOR_MASK_IN_TABLE; cur_mark_mask = (~cur_mark_mask) & 
FLIP_COLOR_MASK_IN_TABLE; Index: src/mark_sweep/wspace_sweep.cpp =================================================================== --- src/mark_sweep/wspace_sweep.cpp (revision 650025) +++ src/mark_sweep/wspace_sweep.cpp (working copy) @@ -132,21 +132,21 @@ } } assert(live_num <= slot_num); - //chunk->alloc_num = live_num; collector->live_obj_size += live_num * chunk->slot_size; collector->live_obj_num += live_num; if(!live_num){ /* all objects in this chunk are dead */ collector_add_free_chunk(collector, (Free_Chunk*)chunk); - } else if(chunk_is_reusable(chunk)){ /* most objects in this chunk are swept, add chunk to pfc list*/ - chunk->alloc_num = live_num; + } else { + chunk->alloc_num = live_num; + if(chunk_is_reusable(chunk)){ /* most objects in this chunk are swept, add chunk to pfc list*/ //chunk_pad_last_index_word((Chunk_Header*)chunk, cur_mark_mask); wspace_put_pfc(wspace, chunk); assert(chunk->next != chunk); - }else{ /* the rest: chunks with free rate < PFC_REUSABLE_RATIO. we don't use them */ - chunk->alloc_num = live_num; + }else{ /* the rest: chunks with free rate < PFC_REUSABLE_RATIO. we don't use them */ chunk->status = CHUNK_USED | CHUNK_NORMAL; - wspace_register_used_chunk(wspace,chunk); + wspace_reg_used_chunk(wspace,chunk); + } } } @@ -160,7 +160,7 @@ } else { chunk->status = CHUNK_ABNORMAL| CHUNK_USED; - wspace_register_used_chunk(wspace,chunk); + wspace_reg_used_chunk(wspace,chunk); collector->live_obj_size += CHUNK_SIZE(chunk); collector->live_obj_num++; } Index: src/mark_sweep/wspace_sweep_concurrent.cpp =================================================================== --- src/mark_sweep/wspace_sweep_concurrent.cpp (revision 650025) +++ src/mark_sweep/wspace_sweep_concurrent.cpp (working copy) @@ -4,7 +4,7 @@ #include "gc_ms.h" #include "../gen/gen.h" -static void collector_sweep_normal_chunk_concurrent(Collector *collector, Wspace *wspace, Chunk_Header *chunk) +static void collector_sweep_normal_chunk_con(Collector *collector, Wspace *wspace, Chunk_Header *chunk) { unsigned int slot_num = chunk->slot_num; unsigned int live_num = 0; @@ -27,16 +27,17 @@ if(!live_num){ /* all objects in this chunk are dead */ collector_add_free_chunk(collector, (Free_Chunk*)chunk); - } else if(!chunk_is_reusable(chunk)){ /* most objects in this chunk are swept, add chunk to pfc list*/ + } else { chunk->alloc_num = live_num; - wspace_register_unreusable_normal_chunk(wspace, chunk); + if(!chunk_is_reusable(chunk)){ /* most objects in this chunk are swept, add chunk to pfc list*/ + wspace_reg_unreusable_normal_chunk(wspace, chunk); } else { /* most objects in this chunk are swept, add chunk to pfc list*/ - chunk->alloc_num = live_num; wspace_put_pfc_backup(wspace, chunk); + } } } -static inline void collector_sweep_abnormal_chunk_concurrent(Collector *collector, Wspace *wspace, Chunk_Header *chunk) +static inline void collector_sweep_abnormal_chunk_con(Collector *collector, Wspace *wspace, Chunk_Header *chunk) { assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_USED)); POINTER_SIZE_INT *table = chunk->table; @@ -45,20 +46,20 @@ collector_add_free_chunk(collector, (Free_Chunk*)chunk); } else { - wspace_register_live_abnormal_chunk(wspace, chunk); + wspace_reg_live_abnormal_chunk(wspace, chunk); collector->live_obj_size += CHUNK_SIZE(chunk); collector->live_obj_num++; } } -static void wspace_sweep_chunk_concurrent(Wspace* wspace, Collector* collector, Chunk_Header_Basic* chunk) +static void wspace_sweep_chunk_con(Wspace* wspace, Collector* collector, Chunk_Header_Basic* chunk) { 
if(chunk->status & CHUNK_NORMAL){ /* chunk is used as a normal sized obj chunk */ assert(chunk->status == (CHUNK_NORMAL | CHUNK_USED)); - collector_sweep_normal_chunk_concurrent(collector, wspace, (Chunk_Header*)chunk); + collector_sweep_normal_chunk_con(collector, wspace, (Chunk_Header*)chunk); } else { /* chunk is used as a super obj chunk */ assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_USED)); - collector_sweep_abnormal_chunk_concurrent(collector, wspace, (Chunk_Header*)chunk); + collector_sweep_abnormal_chunk_con(collector, wspace, (Chunk_Header*)chunk); } } @@ -248,7 +249,7 @@ /* release local chunks of each mutator in unique mark-sweep GC */ Mutator *mutator = gc->mutator_list; while(mutator){ - wait_mutator_signal(mutator, DISABLE_COLLECTOR_SWEEP_LOCAL_CHUNKS); + wait_mutator_signal(mutator, HSIG_MUTATOR_SAFE); allocator_sweep_local_chunks((Allocator*)mutator); mutator = mutator->next; } @@ -257,7 +258,7 @@ #endif } -static void gc_check_mutator_local_chunks(GC *gc, unsigned int handshake_signal) +static void gc_wait_mutator_signal(GC *gc, unsigned int handshake_signal) { lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv @@ -284,6 +285,9 @@ GC *gc = collector->gc; Wspace *wspace = gc_get_wspace(gc); + collector->live_obj_size = 0; + collector->live_obj_num = 0; + unsigned int num_active_collectors = gc->num_active_collectors; atomic_cas32(&num_sweeping_collectors, 0, num_active_collectors+1); @@ -295,7 +299,7 @@ /*1. Grab chunks from used list, sweep the chunk and push back to PFC backup list & free list.*/ chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool); while(chunk_to_sweep != NULL){ - wspace_sweep_chunk_concurrent(wspace, collector, chunk_to_sweep); + wspace_sweep_chunk_con(wspace, collector, chunk_to_sweep); chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool); } @@ -308,7 +312,7 @@ while(chunk_to_sweep != NULL){ assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING)); chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED; - wspace_sweep_chunk_concurrent(wspace, collector, chunk_to_sweep); + wspace_sweep_chunk_con(wspace, collector, chunk_to_sweep); chunk_to_sweep = chunk_pool_get_chunk(pfc_pool); } } @@ -323,8 +327,8 @@ gc_sweep_mutator_local_chunks(wspace->gc); /*4. Sweep gloabl alloc normal chunks again*/ - gc_set_sweeping_global_normal_chunk(); - gc_check_mutator_local_chunks(wspace->gc, DISABLE_COLLECTOR_SWEEP_GLOBAL_CHUNKS); + gc_set_sweep_global_normal_chunk(); + gc_wait_mutator_signal(wspace->gc, HSIG_MUTATOR_SAFE); wspace_init_pfc_pool_iterator(wspace); Pool* pfc_pool = wspace_grab_next_pfc_pool(wspace); while(pfc_pool != NULL){ @@ -333,7 +337,7 @@ while(chunk_to_sweep != NULL){ assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING)); chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED; - wspace_sweep_chunk_concurrent(wspace, collector, chunk_to_sweep); + wspace_sweep_chunk_con(wspace, collector, chunk_to_sweep); chunk_to_sweep = chunk_pool_get_chunk(pfc_pool); } } @@ -344,20 +348,20 @@ /*4. Check the used list again.*/ chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool); while(chunk_to_sweep != NULL){ - wspace_sweep_chunk_concurrent(wspace, collector, chunk_to_sweep); + wspace_sweep_chunk_con(wspace, collector, chunk_to_sweep); chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool); } /*5. Switch the PFC backup list to PFC list.*/ wspace_exchange_pfc_pool(wspace); - gc_unset_sweeping_global_normal_chunk(); + gc_unset_sweep_global_normal_chunk(); /*6. 
Index: src/thread/collector.cpp
===================================================================
--- src/thread/collector.cpp (revision 650025)
+++ src/thread/collector.cpp (working copy)
@@ -145,7 +145,8 @@
     collector_reset_thread(collector);
     collector->task_func = task_func;
     collector->collect_space = space;
-    notify_collector_to_work(collector);
+    collector->collector_is_active = TRUE;
+    notify_collector_to_work(collector);
   }
   return;
 }
@@ -365,3 +366,4 @@
+

Index: src/thread/marker.cpp
===================================================================
--- src/thread/marker.cpp (revision 650025)
+++ src/thread/marker.cpp (working copy)
@@ -44,16 +44,6 @@
   vm_set_event(marker->task_finished_event);
 }
 
-void wait_marker_finish_mark_root(Marker* marker)
-{
-  vm_wait_event(marker->markroot_finished_event);
-}
-
-void marker_notify_mark_root_done(Marker* marker)
-{
-  vm_set_event(marker->markroot_finished_event);
-}
-
 static int marker_thread_func(void *arg)
 {
   Marker* marker = (Marker *)arg;
@@ -205,7 +195,8 @@
     marker_reset_thread(marker);
     marker->task_func = task_func;
-    marker->mark_space= space;
+    marker->mark_space= space;
+    marker->marker_is_active = TRUE;
     notify_marker_to_work(marker);
   }
   return;
 }
@@ -224,38 +215,15 @@
     marker_reset_thread(marker);
     marker->task_func = task_func;
     marker->mark_space= space;
-    notify_marker_to_work(marker);
+    marker->marker_is_active = TRUE;
+    notify_marker_to_work(marker);
   }
   return;
 }
 
-void wait_mark_root_finish(GC* gc)
-{
-  unsigned int num_marker = gc->num_active_markers;
-  for(unsigned int i=0; i<num_marker; i++){
-    Marker* marker = gc->markers[i];
-    wait_marker_finish_mark_root(marker);
-  }
-  return;
-}
-
-void wait_mark_root_finish(GC* gc, unsigned int num_markers)
-{
-  unsigned int num_active_marker = gc->num_active_markers;
-  unsigned int i= num_active_marker - num_markers;
-  for(; i < num_active_marker; i++)
-  {
-    Marker* marker = gc->markers[i];
-    wait_marker_finish_mark_root(marker);
-  }
-  return;
-}
-
 void marker_execute_task(GC* gc, TaskType task_func, Space* space)
 {
   assign_marker_with_task(gc, task_func, space);
-  wait_mark_root_finish(gc);
   wait_mark_finish(gc);
   return;
 }
@@ -263,7 +231,6 @@
 void marker_execute_task_concurrent(GC* gc, TaskType task_func, Space* space)
 {
   assign_marker_with_task(gc, task_func, space);
-  wait_mark_root_finish(gc);
   return;
 }
@@ -273,7 +240,6 @@
   if(num_markers > num_free_markers) num_markers = num_free_markers;
   assign_marker_with_task(gc, task_func, space,num_markers);
-  wait_mark_root_finish(gc, num_markers);
   return;
 }
@@ -293,3 +259,4 @@
+
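With the mark-root handshake removed, the two marker entry points in marker.cpp differ only in whether the caller blocks: marker_execute_task assigns the task and then waits in wait_mark_finish, while the _concurrent variants return as soon as the task is handed out. An illustrative caller (hypothetical, not part of this patch):

    /* hypothetical caller showing the blocking vs. concurrent dispatch after this change */
    static void start_marking(GC* gc, TaskType task_func, Space* space, Boolean concurrent)
    {
      if(concurrent)
        marker_execute_task_concurrent(gc, task_func, space);  /* returns right after the task is assigned */
      else
        marker_execute_task(gc, task_func, space);             /* blocks in wait_mark_finish(gc) */
    }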
Index: src/thread/marker.h
===================================================================
--- src/thread/marker.h (revision 650025)
+++ src/thread/marker.h (working copy)
@@ -94,7 +94,6 @@
 void marker_execute_task_concurrent(GC* gc, TaskType task_func, Space* space, unsigned int num_markers);
 void marker_execute_task_concurrent(GC* gc, TaskType task_func, Space* space);
-void marker_notify_mark_root_done(Marker* marker);
 void wait_mark_finish(GC* gc);
 Boolean is_mark_finished(GC* gc);
 int64 gc_get_marker_time(GC* gc);
@@ -103,3 +102,4 @@
+

Index: src/thread/mutator.cpp
===================================================================
--- src/thread/mutator.cpp (revision 650025)
+++ src/thread/mutator.cpp (working copy)
@@ -64,6 +64,8 @@
   return;
 }
 
+void mutator_register_new_obj_size(Mutator * mutator);
+
 void mutator_destruct(GC* gc, void *unused_gc_information)
 {
@@ -75,9 +77,9 @@
   lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 
 #ifdef USE_UNIQUE_MARK_SWEEP_GC
-  allocactor_destruct_local_chunks((Allocator*)mutator);
-  allocator_register_new_obj_size((Allocator*)mutator);
+  allocactor_destruct_local_chunks((Allocator*)mutator);
 #endif
+  mutator_register_new_obj_size(mutator);
 
   volatile Mutator *temp = gc->mutator_list;
   if (temp == mutator) {  /* it is at the head of the list */
@@ -176,7 +178,7 @@
   return NULL;
 }
 
-void gc_start_mutator_time_measurement(GC* gc)
+void gc_start_mutator_time_measure(GC* gc)
 {
   lock(gc->mutator_list_lock);
   Mutator* mutator = gc->mutator_list;
@@ -206,5 +208,29 @@
   return time_mutator;
 }
 
+static POINTER_SIZE_INT size_new_obj_desturcted_mutator_alloced;
+void mutator_register_new_obj_size(Mutator * mutator)
+{
+  size_new_obj_desturcted_mutator_alloced += mutator->new_obj_size;
+}
+POINTER_SIZE_INT gc_get_new_object_size(GC* gc, Boolean need_reset)
+{
+  POINTER_SIZE_INT new_obj_size = 0;
+
+  lock(gc->mutator_list_lock);
+  Mutator* mutator = gc->mutator_list;
+  while (mutator) {
+    new_obj_size += mutator->new_obj_size;
+    if(need_reset) mutator->new_obj_size = 0;
+    mutator = mutator->next;
+  }
+  unlock(gc->mutator_list_lock);
+
+  new_obj_size += size_new_obj_desturcted_mutator_alloced;
+  if(need_reset) size_new_obj_desturcted_mutator_alloced = 0;
+  return new_obj_size;
+}
+
+
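gc_get_new_object_size totals new_obj_size across the live mutators and folds in the bytes banked by mutator_register_new_obj_size when a mutator detaches, so allocation done by threads that have already exited is not lost. A hypothetical caller (illustrative only, not part of this patch) that turns the counter into an allocation rate:

    /* hypothetical sampling helper: bytes allocated since the last call, divided by mutator time */
    static float sample_alloc_rate(GC* gc, int64 time_mutator)
    {
      POINTER_SIZE_INT size_new_obj = gc_get_new_object_size(gc, TRUE);  /* TRUE resets the counters */
      return (time_mutator == 0) ? 0.0f : (float)size_new_obj / (float)time_mutator;
    }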
Index: src/thread/mutator.h
===================================================================
--- src/thread/mutator.h (revision 650025)
+++ src/thread/mutator.h (working copy)
@@ -62,19 +62,19 @@
 Boolean gc_local_dirtyset_is_empty(GC* gc);
 Vector_Block* gc_get_local_dirty_set(GC* gc, unsigned int shared_id);
 
-void gc_start_mutator_time_measurement(GC* gc);
+void gc_start_mutator_time_measure(GC* gc);
 int64 gc_get_mutator_time(GC* gc);
+POINTER_SIZE_INT gc_get_new_object_size(GC* gc, Boolean need_reset);
 
 inline void mutator_post_signal(Mutator* mutator, unsigned int handshake_signal)
 {
-  //FIXME: Need barrier here.
   mem_fence();
   mutator->handshake_signal = handshake_signal;
   mem_fence();
 }
 
 inline void wait_mutator_signal(Mutator* mutator, unsigned int handshake_signal)
-{ while(mutator->handshake_signal == handshake_signal); }
+{ while(mutator->handshake_signal != handshake_signal); }
 
 #endif /*ifndef _MUTATOR_H_ */

Index: src/trace_forward/fspace_alloc.cpp
===================================================================
--- src/trace_forward/fspace_alloc.cpp (revision 650025)
+++ src/trace_forward/fspace_alloc.cpp (working copy)
@@ -61,8 +61,6 @@
   p_return = thread_local_alloc(size, allocator);
   if (p_return) return p_return;
 
-  gc_try_schedule_collection(allocator->gc, GC_CAUSE_NIL);
-
   /* ran out local block, grab a new one*/
   Fspace* fspace = (Fspace*)allocator->alloc_space;
   int attempts = 0;
@@ -98,3 +96,4 @@
 }
+

Index: src/verify/verifier_scanner.cpp
===================================================================
--- src/verify/verifier_scanner.cpp (revision 650025)
+++ src/verify/verifier_scanner.cpp (working copy)
@@ -218,7 +218,6 @@
   GC* gc = heap_verifier->gc;
   Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
   verifier_update_info_before_resurrect(heap_verifier);
-  return;
 #ifndef BUILD_IN_REFERENT
   heap_verifier->gc_verifier->is_tracing_resurrect_obj = TRUE;
   if(heap_verifier->is_before_gc){
@@ -445,3 +444,4 @@
+

Index: src/verify/verify_live_heap.cpp
===================================================================
--- src/verify/verify_live_heap.cpp (revision 650025)
+++ src/verify/verify_live_heap.cpp (working copy)
@@ -123,6 +123,9 @@
   assert(collect_is_fallback());
   if(!heap_verifier->need_verify_gc) return;
 
+  if(!major_is_marksweep())
+    verifier_cleanup_block_info(gc);
+
   /*finish the fallbacked gc verify*/
   heap_verifier->is_before_gc = FALSE;
   verifier_set_fallback_collection(heap_verifier->gc_verifier, TRUE);
@@ -153,3 +156,4 @@
+
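The single-character change in wait_mutator_signal above is a genuine bug fix: the old loop spun while the observed signal was still equal to the requested one, so it returned immediately when the signal had not yet been posted and hung once it had. A standalone model of the corrected post/wait handshake (std::atomic is used here only for illustration; the VM code relies on mem_fence() and the Mutator fields instead, and which side posts depends on the caller):

    #include <atomic>

    /* illustrative model of the handshake, not the VM's actual types */
    static std::atomic<unsigned int> handshake{0};

    void post_signal(unsigned int sig)   /* one thread publishes the signal */
    { handshake.store(sig, std::memory_order_release); }

    void wait_signal(unsigned int sig)   /* the other spins until it observes that signal */
    { while(handshake.load(std::memory_order_acquire) != sig); }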