Index: make/vm/gc_gen.xml
===================================================================
--- make/vm/gc_gen.xml (revision 636601)
+++ make/vm/gc_gen.xml (working copy)
@@ -59,6 +59,21 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -69,6 +84,7 @@
+
Index: vm/gc_gen/src/common/collection_scheduler.cpp
===================================================================
--- vm/gc_gen/src/common/collection_scheduler.cpp (revision 636625)
+++ vm/gc_gen/src/common/collection_scheduler.cpp (working copy)
@@ -21,11 +21,14 @@
#include "../mark_sweep/wspace.h"
#include "collection_scheduler.h"
#include "gc_concurrent.h"
+#include "../thread/marker.h"
#include "../verify/verify_live_heap.h"
#define NUM_TRIAL_COLLECTION 10
+#define MAX_DELAY_TIME 0x7fFfFfFf
+#define MAX_TRACING_RATE 2
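+/* MAX_DELAY_TIME keeps concurrent marking effectively disabled until the scheduler has
+   gathered enough samples; MAX_TRACING_RATE is the fallback tracing rate recorded when
+   the measured mark time is zero. */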
-static int64 time_delay_to_start_mark = 0;
+static int64 time_delay_to_start_mark = MAX_DELAY_TIME;
void collection_scheduler_initialize(GC* gc)
{
@@ -36,7 +39,7 @@
collection_scheduler->gc = gc;
gc->collection_scheduler = collection_scheduler;
- time_delay_to_start_mark = 0;
+ time_delay_to_start_mark = MAX_DELAY_TIME;
return;
}
@@ -49,20 +52,18 @@
{
if(!USE_CONCURRENT_MARK) return FALSE;
//FIXME: GEN mode also needs the support of starting mark after thread resume.
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
if(gc_is_concurrent_mark_phase() || gc_mark_is_concurrent()) return FALSE;
int64 time_current = time_now();
if( time_current - get_collection_end_time() > time_delay_to_start_mark)
return TRUE;
- else return FALSE;
+ else
+ return FALSE;
#else
- /*FIXME: concurrent mark is not support in GC_GEN*/
+ /*FIXME: concurrent mark is not supported in GC_GEN*/
assert(0);
- if(gc_next_collection_kind((GC_Gen*)gc) == MAJOR_COLLECTION)
- return TRUE;
- else
- return FALSE;
+ return FALSE;
#endif
}
@@ -96,7 +97,7 @@
void gc_update_collection_scheduler(GC* gc, int64 time_mutator, int64 time_mark)
{
//FIXME: support GEN GC.
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
Collection_Scheduler* collection_scheduler = gc->collection_scheduler;
Space* space = NULL;
@@ -109,27 +110,36 @@
unsigned int num_slot = collection_scheduler->num_slot_in_window;
collection_scheduler->num_obj_traced_window[slot_index] = space_stat->num_live_obj;
- collection_scheduler->size_alloced_window[slot_index] = space_stat->last_size_free_space;
+ collection_scheduler->size_alloced_window[slot_index] = space_stat->size_new_obj;
+ collection_scheduler->space_utilization_rate[slot_index] = space_stat->space_utilization_ratio;
collection_scheduler->last_mutator_time = time_mutator;
collection_scheduler->last_collector_time = time_mark;
+ INFO2("gc.con","last_size_free_space"<<(space_stat->last_size_free_space)<<" new obj num "<size_alloced_window[slot_index]<<" ");
+ if(NUM_TRIAL_COLLECTION == 0 || gc->num_collections < NUM_TRIAL_COLLECTION)
+ return;
+ INFO2("gc.con","num_live_obj "<<(space_stat->num_live_obj)<<" last_size_free_space"<<(space_stat->last_size_free_space)<<" ");
collection_scheduler->alloc_rate_window[slot_index]
= time_mutator == 0 ? 0 : (float)collection_scheduler->size_alloced_window[slot_index] / time_mutator;
collection_scheduler->trace_rate_window[slot_index]
- = time_mark == 0 ? 0 : (float)collection_scheduler->num_obj_traced_window[slot_index] / time_mark;
-
+ = time_mark == 0 ? MAX_TRACING_RATE : (float)collection_scheduler->num_obj_traced_window[slot_index] / time_mark;
+
+ INFO2("gc.con","mutator time "<<(time_mutator>>10)<<" collection time "<<(time_mark>>10)<<" ");
+
collection_scheduler->num_slot_in_window = num_slot >= STATISTICS_SAMPLING_WINDOW_SIZE ? num_slot : (++num_slot);
collection_scheduler->last_slot_index_in_window = (++slot_index)% STATISTICS_SAMPLING_WINDOW_SIZE;
float sum_alloc_rate = 0;
float sum_trace_rate = 0;
+ float sum_space_util_ratio = 0;
unsigned int i;
for(i = 0; i < collection_scheduler->num_slot_in_window; i++){
sum_alloc_rate += collection_scheduler->alloc_rate_window[i];
sum_trace_rate += collection_scheduler->trace_rate_window[i];
+ sum_space_util_ratio += collection_scheduler->space_utilization_rate[i];
}
TRACE2("gc.con","Allocation Rate: ");
@@ -145,14 +155,19 @@
float average_alloc_rate = sum_alloc_rate / collection_scheduler->num_slot_in_window;
float average_trace_rate = sum_trace_rate / collection_scheduler->num_slot_in_window;
+ float average_space_util_ratio = sum_space_util_ratio / collection_scheduler->num_slot_in_window;
- if(average_alloc_rate == 0 || average_trace_rate == 0){
+ INFO2("gc.con","averAllocRate: "<size_free_space / average_alloc_rate;
+ float time_alloc_expected = (space_stat->size_free_space * average_space_util_ratio) / average_alloc_rate;
float time_trace_expected = space_stat->num_live_obj / average_trace_rate;
-
+ INFO2("gc.con","[Concurrent GC] expected alloc time "< time_trace_expected){
if(gc_concurrent_match_algorithm(OTF_REM_OBJ_SNAPSHOT_ALGO)||gc_concurrent_match_algorithm(OTF_REM_NEW_TARGET_ALGO)){
collection_scheduler->time_delay_to_start_mark = (int64)((time_alloc_expected - time_trace_expected)*0.65);
@@ -224,7 +239,7 @@
if(gc_need_start_concurrent_mark(gc)){
vm_gc_lock_enum();
int64 pause_start = time_now();
- INFO2("gc.con", "[Concurrent GC] concurrent mark start ...\n");
+ INFO2("gc.con", "[Concurrent GC] concurrent mark start ...");
gc_start_concurrent_mark(gc);
vm_gc_unlock_enum();
INFO2("gc.con","[Concurrent GC] pause time of concurrent enumeration: "<<((unsigned int)((time_now()-pause_start)>>10))<<" ms \n");
@@ -234,6 +249,7 @@
if(gc_need_start_concurrent_sweep(gc)){
gc->num_collections++;
+ INFO2("gc.con", "[Concurrent GC] collection number:"<< gc->num_collections<<" ");
gc_start_concurrent_sweep(gc);
unlock(gc->collection_scheduler_lock);
return TRUE;
@@ -245,6 +261,8 @@
int disable_count = hythread_reset_suspend_disable();
gc_prepare_rootset(gc);
gc_reset_after_concurrent_collection(gc);
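+  /* restart the mutator time measurement and record the collection end point, so the
+     scheduler can compute the delay before the next concurrent mark */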
+ gc_start_mutator_time_measurement(gc);
+ set_collection_end_time();
vm_resume_threads_after();
hythread_set_suspend_disable(disable_count);
vm_gc_unlock_enum();
Index: vm/gc_gen/src/common/collection_scheduler.h
===================================================================
--- vm/gc_gen/src/common/collection_scheduler.h (revision 636625)
+++ vm/gc_gen/src/common/collection_scheduler.h (working copy)
@@ -37,6 +37,7 @@
float alloc_rate_window[STATISTICS_SAMPLING_WINDOW_SIZE];
float trace_rate_window[STATISTICS_SAMPLING_WINDOW_SIZE];
+ float space_utilization_rate[STATISTICS_SAMPLING_WINDOW_SIZE];
POINTER_SIZE_INT num_obj_traced_window[STATISTICS_SAMPLING_WINDOW_SIZE];
POINTER_SIZE_INT size_alloced_window[STATISTICS_SAMPLING_WINDOW_SIZE];
} Collection_Scheduler;
@@ -52,3 +53,4 @@
#endif
+
Index: vm/gc_gen/src/common/compressed_ref.cpp
===================================================================
--- vm/gc_gen/src/common/compressed_ref.cpp (revision 636625)
+++ vm/gc_gen/src/common/compressed_ref.cpp (working copy)
@@ -32,7 +32,7 @@
}Compressed_Root;
POINTER_SIZE_INT vtable_base = 0;
-POINTER_SIZE_INT HEAP_NULL = 0;
+POINTER_SIZE_INT HEAP_BASE = 0;
void gc_set_uncompressed_rootset(GC *gc)
{
@@ -71,3 +71,4 @@
}
+
Index: vm/gc_gen/src/common/fix_repointed_refs.h
===================================================================
--- vm/gc_gen/src/common/fix_repointed_refs.h (revision 636625)
+++ vm/gc_gen/src/common/fix_repointed_refs.h (working copy)
@@ -24,7 +24,7 @@
#include "gc_common.h"
#include "compressed_ref.h"
-extern Boolean IS_MOVE_COMPACT;
+#include "../finalizer_weakref/finalizer_weakref.h"
extern void* los_boundary;
inline void slot_fix(REF* p_ref)
@@ -40,7 +40,7 @@
#endif
- if(IS_MOVE_COMPACT){
+ if(collect_is_compact_move()){
/* This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep. */
//if(obj_is_moved(p_obj))
/*Fixme: los_boundery ruined the modularity of gc_common.h*/
@@ -94,6 +94,13 @@
slot_fix(p_ref);
}
+#ifndef BUILD_IN_REFERENT
+ if(IGNORE_FINREF && is_reference_obj(p_obj)) {
+ REF* p_ref = obj_get_referent_field(p_obj);
+ slot_fix(p_ref);
+ }
+#endif
+
return;
}
Index: vm/gc_gen/src/common/gc_block.h
===================================================================
--- vm/gc_gen/src/common/gc_block.h (revision 636625)
+++ vm/gc_gen/src/common/gc_block.h (working copy)
@@ -47,9 +47,7 @@
void* ceiling;
void* new_free; /* used only during compaction */
unsigned int block_idx;
-#ifdef USE_UNIQUE_MOVE_COMPACT_GC
- unsigned int num_multi_block; /*number of block in large block*/
-#endif
+ unsigned int num_multi_block; /* number of blocks in large block */
volatile unsigned int status;
volatile unsigned int num_live_objs; /* for verification debugging */
@@ -63,9 +61,6 @@
Hashcode_Buf* hashcode_buf; /*hash code entry list*/
#endif
Block_Header* next;
-#ifdef USE_UNIQUE_MOVE_COMPACT_GC
- Block_Header* next_large_block; /*used to link free super large block in gc_mc*/
-#endif
POINTER_SIZE_INT table[1]; /* entry num == OFFSET_TABLE_SIZE_WORDS */
}Block_Header;
@@ -106,18 +101,15 @@
#define ADDRESS_OFFSET_TO_BLOCK_HEADER(addr) ((unsigned int)((POINTER_SIZE_INT)addr&GC_BLOCK_LOW_MASK))
#define ADDRESS_OFFSET_IN_BLOCK_BODY(addr) ((unsigned int)(ADDRESS_OFFSET_TO_BLOCK_HEADER(addr)- GC_BLOCK_HEADER_SIZE_BYTES))
-#ifdef USE_UNIQUE_MOVE_COMPACT_GC
-#define NUM_BLOCKS_PER_LARGE_OBJECT(size) (((size)+GC_BLOCK_HEADER_SIZE_BYTES+ GC_BLOCK_SIZE_BYTES-1)/GC_BLOCK_SIZE_BYTES)
-#endif
+#define NUM_BLOCKS_IN_LARGE_BLOCK_FOR_SIZE(size) ((unsigned int)(((size)+ GC_BLOCK_HEADER_SIZE_BYTES + GC_BLOCK_SIZE_BYTES - 1)>>GC_BLOCK_SHIFT_COUNT))
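+/* rounds (object size + block header) up to a whole number of GC blocks for a multi-block large object */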
+
inline void block_init(Block_Header* block)
{
block->free = (void*)((POINTER_SIZE_INT)block + GC_BLOCK_HEADER_SIZE_BYTES);
block->ceiling = (void*)((POINTER_SIZE_INT)block + GC_BLOCK_SIZE_BYTES);
block->base = block->free;
block->new_free = block->free;
-#ifdef USE_UNIQUE_MOVE_COMPACT_GC
block->num_multi_block = 0;
-#endif
block->status = BLOCK_FREE;
block->dest_counter = 0;
block->src = NULL;
@@ -282,3 +274,4 @@
+
Index: vm/gc_gen/src/common/gc_common.cpp
===================================================================
--- vm/gc_gen/src/common/gc_common.cpp (revision 636625)
+++ vm/gc_gen/src/common/gc_common.cpp (working copy)
@@ -37,338 +37,16 @@
unsigned int SPACE_ALLOC_UNIT;
-extern char* GC_VERIFY;
-
-extern POINTER_SIZE_INT NOS_SIZE;
-extern POINTER_SIZE_INT MIN_NOS_SIZE;
-extern POINTER_SIZE_INT INIT_LOS_SIZE;
-extern POINTER_SIZE_INT TOSPACE_SIZE;
-extern POINTER_SIZE_INT MOS_RESERVE_SIZE;
-
-extern Boolean FORCE_FULL_COMPACT;
-
-extern unsigned int NUM_MARKERS;
-extern unsigned int NUM_COLLECTORS;
-extern unsigned int MINOR_COLLECTORS;
-extern unsigned int MAJOR_COLLECTORS;
-
-extern Boolean IGNORE_VTABLE_TRACING;
-
-POINTER_SIZE_INT HEAP_SIZE_DEFAULT = 256 * MB;
-POINTER_SIZE_INT min_heap_size_bytes = 16 * MB;
-POINTER_SIZE_INT max_heap_size_bytes = 0;
-
-extern Boolean JVMTI_HEAP_ITERATION ;
-
-extern Boolean IS_MOVE_COMPACT;
-extern Boolean USE_CONCURRENT_GC;
-
-static int get_int_property(const char *property_name)
-{
- assert(property_name);
- char *value = get_property(property_name, VM_PROPERTIES);
- int return_value;
- if (NULL != value)
- {
- return_value = atoi(value);
- destroy_property_value(value);
- }else{
- DIE2("gc.base","Warning: property value "< max_heap_size){
- max_heap_size = min_heap_size;
- WARN2("gc.base","Warning: Max heap size is too small, reset to "<generate_barrier = gc_is_gen_mode();
-
- if( minor_algo) destroy_property_value(minor_algo);
- if( major_algo) destroy_property_value(major_algo);
-
- /* Step 2: */
- /* NOTE:: this has to stay after above!! */
- if (is_property_set("gc.force_major_collect", VM_PROPERTIES) == 1) {
- FORCE_FULL_COMPACT = get_boolean_property("gc.force_major_collect");
- if(FORCE_FULL_COMPACT){
- gc_disable_gen_mode();
- gc->generate_barrier = FALSE;
- }
- }
-
- /* Step 3: */
- /* NOTE:: this has to stay after above!! */
- if (is_property_set("gc.generate_barrier", VM_PROPERTIES) == 1) {
- Boolean generate_barrier = get_boolean_property("gc.generate_barrier");
- gc->generate_barrier = generate_barrier || gc->generate_barrier;
- }
-
- if (is_property_set("gc.tospace_size", VM_PROPERTIES) == 1) {
- TOSPACE_SIZE = get_size_property("gc.tospace_size");
- }
-
- if (is_property_set("gc.mos_reserve_size", VM_PROPERTIES) == 1) {
- MOS_RESERVE_SIZE = get_size_property("gc.mos_reserve_size");
- }
-
- if (is_property_set("gc.nos_partial_forward", VM_PROPERTIES) == 1) {
- NOS_PARTIAL_FORWARD = get_boolean_property("gc.nos_partial_forward");
- }
-
- if (is_property_set("gc.minor_collectors", VM_PROPERTIES) == 1) {
- MINOR_COLLECTORS = get_int_property("gc.minor_collectors");
- }
-
- if (is_property_set("gc.major_collectors", VM_PROPERTIES) == 1) {
- MAJOR_COLLECTORS = get_int_property("gc.major_collectors");
- }
-
- if (is_property_set("gc.ignore_finref", VM_PROPERTIES) == 1) {
- IGNORE_FINREF = get_boolean_property("gc.ignore_finref");
- }
-
- if (is_property_set("gc.verify", VM_PROPERTIES) == 1) {
- char* value = get_property("gc.verify", VM_PROPERTIES);
- GC_VERIFY = strdup(value);
- destroy_property_value(value);
- }
-
- if (is_property_set("gc.gen_nongen_switch", VM_PROPERTIES) == 1){
- GEN_NONGEN_SWITCH= get_boolean_property("gc.gen_nongen_switch");
- gc->generate_barrier = TRUE;
- }
-
- if (is_property_set("gc.heap_iteration", VM_PROPERTIES) == 1) {
- JVMTI_HEAP_ITERATION = get_boolean_property("gc.heap_iteration");
- }
-
- if (is_property_set("gc.ignore_vtable_tracing", VM_PROPERTIES) == 1) {
- IGNORE_VTABLE_TRACING = get_boolean_property("gc.ignore_vtable_tracing");
- }
-
- if (is_property_set("gc.use_large_page", VM_PROPERTIES) == 1){
- char* value = get_property("gc.use_large_page", VM_PROPERTIES);
- large_page_hint = strdup(value);
- destroy_property_value(value);
- }
-
- if (is_property_set("gc.concurrent_gc", VM_PROPERTIES) == 1){
- Boolean use_all_concurrent_phase= get_boolean_property("gc.concurrent_gc");
- if(use_all_concurrent_phase){
- USE_CONCURRENT_ENUMERATION = TRUE;
- USE_CONCURRENT_MARK = TRUE;
- USE_CONCURRENT_SWEEP = TRUE;
- gc->generate_barrier = TRUE;
- }
- }
-
- if (is_property_set("gc.concurrent_enumeration", VM_PROPERTIES) == 1){
- USE_CONCURRENT_ENUMERATION= get_boolean_property("gc.concurrent_enumeration");
- if(USE_CONCURRENT_ENUMERATION){
- USE_CONCURRENT_GC = TRUE;
- gc->generate_barrier = TRUE;
- }
- }
-
- if (is_property_set("gc.concurrent_mark", VM_PROPERTIES) == 1){
- USE_CONCURRENT_MARK= get_boolean_property("gc.concurrent_mark");
- if(USE_CONCURRENT_MARK){
- USE_CONCURRENT_GC = TRUE;
- gc->generate_barrier = TRUE;
- }
- }
-
- if (is_property_set("gc.concurrent_sweep", VM_PROPERTIES) == 1){
- USE_CONCURRENT_SWEEP= get_boolean_property("gc.concurrent_sweep");
- if(USE_CONCURRENT_SWEEP){
- USE_CONCURRENT_GC = TRUE;
- }
- }
-
- char* concurrent_algo = NULL;
-
- if (is_property_set("gc.concurrent_algorithm", VM_PROPERTIES) == 1) {
- concurrent_algo = get_property("gc.concurrent_algorithm", VM_PROPERTIES);
- }
-
- gc_decide_concurrent_algorithm(gc, concurrent_algo);
-
-#if defined(ALLOC_ZEROING) && defined(ALLOC_PREFETCH)
- if(is_property_set("gc.prefetch",VM_PROPERTIES) ==1) {
- PREFETCH_ENABLED = get_boolean_property("gc.prefetch");
- }
-
- if(is_property_set("gc.prefetch_distance",VM_PROPERTIES)==1) {
- PREFETCH_DISTANCE = get_size_property("gc.prefetch_distance");
- if(!PREFETCH_ENABLED) {
- WARN2("gc.prefetch_distance","Warning: Prefetch distance set with Prefetch disabled!");
- }
- }
-
- if(is_property_set("gc.prefetch_stride",VM_PROPERTIES)==1) {
- PREFETCH_STRIDE = get_size_property("gc.prefetch_stride");
- if(!PREFETCH_ENABLED) {
- WARN2("gc.prefetch_stride","Warning: Prefetch stride set with Prefetch disabled!");
- }
- }
-
- if(is_property_set("gc.zeroing_size",VM_PROPERTIES)==1) {
- ZEROING_SIZE = get_size_property("gc.zeroing_size");
- }
-#endif
-
-#ifdef PREFETCH_SUPPORTED
- if(is_property_set("gc.mark_prefetch",VM_PROPERTIES) ==1) {
- mark_prefetch = get_boolean_property("gc.mark_prefetch");
- }
-#endif
-
- return;
-}
-
void gc_assign_free_area_to_mutators(GC* gc)
{
-#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
gc_gen_assign_free_area_to_mutators((GC_Gen*)gc);
#endif
}
void gc_init_collector_alloc(GC* gc, Collector* collector)
{
-#ifndef USE_MARK_SWEEP_GC
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
gc_gen_init_collector_alloc((GC_Gen*)gc, collector);
#else
gc_init_collector_free_chunk_list(collector);
@@ -377,14 +55,14 @@
void gc_reset_collector_alloc(GC* gc, Collector* collector)
{
-#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
gc_gen_reset_collector_alloc((GC_Gen*)gc, collector);
#endif
}
void gc_destruct_collector_alloc(GC* gc, Collector* collector)
{
-#ifndef USE_MARK_SWEEP_GC
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
gc_gen_destruct_collector_alloc((GC_Gen*)gc, collector);
#endif
}
@@ -398,6 +76,22 @@
int64 get_collection_end_time()
{ return collection_end_time; }
+void set_collection_end_time()
+{ collection_end_time = time_now(); }
+
+void gc_decide_collection_kind(GC* gc, unsigned int cause)
+{
+ /* this is for debugging and for gen-nongen-switch. */
+ gc->last_collect_kind = GC_PROP;
+
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+
+ gc_gen_decide_collection_kind((GC_Gen*)gc, cause);
+
+#endif
+
+}
+
void gc_prepare_rootset(GC* gc)
{
/* Stop the threads and collect the roots. */
@@ -423,10 +117,11 @@
to avoid racing with mutators. */
gc->num_collections++;
gc->cause = gc_cause;
- gc_decide_collection_kind((GC_Gen*)gc, gc_cause);
+ gc_decide_collection_kind(gc, gc_cause);
+
#ifdef MARK_BIT_FLIPPING
- if(gc_match_kind(gc, MINOR_COLLECTION)) mark_bit_flip();
+ if(collect_is_minor()) mark_bit_flip();
#endif
if(!USE_CONCURRENT_GC){
@@ -454,35 +149,38 @@
if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
-#if defined(USE_MARK_SWEEP_GC)
+#if defined(USE_UNIQUE_MARK_SWEEP_GC)
gc_ms_reclaim_heap((GC_MS*)gc);
#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
gc_mc_reclaim_heap((GC_MC*)gc);
#else
gc_gen_reclaim_heap((GC_Gen*)gc, collection_start_time);
#endif
+
}
collection_end_time = time_now();
-#if !defined(USE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
gc_gen_collection_verbose_info((GC_Gen*)gc, collection_end_time - collection_start_time, mutator_time);
gc_gen_space_verbose_info((GC_Gen*)gc);
#endif
if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
- int64 mark_time = 0;
+ int64 collection_time = 0;
if(USE_CONCURRENT_GC && gc_mark_is_concurrent()){
- mark_time = gc_get_concurrent_mark_time(gc);
+ collection_time = gc_get_concurrent_mark_time(gc);
gc_reset_concurrent_mark(gc);
+ }else{
+ collection_time = time_now()-collection_start_time;
}
if(USE_CONCURRENT_GC && gc_sweep_is_concurrent()){
gc_reset_concurrent_sweep(gc);
}
-#if !defined(USE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC)&&!defined(USE_UNIQUE_MOVE_COMPACT_GC)
if(USE_CONCURRENT_GC && gc_need_start_concurrent_mark(gc))
gc_start_concurrent_mark(gc);
#endif
@@ -500,20 +198,27 @@
#ifndef BUILD_IN_REFERENT
} else {
gc_clear_weakref_pools(gc);
+ gc_clear_finref_repset_pool(gc);
#endif
}
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
gc_ms_update_space_statistics((GC_MS*)gc);
#endif
gc_assign_free_area_to_mutators(gc);
- if(USE_CONCURRENT_GC) gc_update_collection_scheduler(gc, mutator_time, mark_time);
-
+ if(USE_CONCURRENT_GC) gc_update_collection_scheduler(gc, mutator_time, collection_time);
+
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
+ gc_ms_reset_space_statistics((GC_MS*)gc);
+#endif
+
vm_reclaim_native_objs();
gc->in_collection = FALSE;
+ gc_reset_collector_state(gc);
+
gc_clear_dirty_set(gc);
vm_resume_threads_after();
@@ -527,3 +232,4 @@
+
Index: vm/gc_gen/src/common/gc_common.h
===================================================================
--- vm/gc_gen/src/common/gc_common.h (revision 636625)
+++ vm/gc_gen/src/common/gc_common.h (working copy)
@@ -35,14 +35,19 @@
#include "gc_for_class.h"
#include "gc_platform.h"
+#include "gc_properties.h"
#include "../common/gc_for_barrier.h"
/*
-#define USE_MARK_SWEEP_GC //define it to only use Mark-Sweep GC (no NOS, no LOS).
+#define USE_UNIQUE_MARK_SWEEP_GC //define it to only use Mark-Sweep GC (no NOS, no LOS).
+#define USE_UNIQUE_MOVE_COMPACT_GC //define it to only use Move-Compact GC (no NOS, no LOS).
*/
-//#define USE_UNIQUE_MOVE_COMPACT_GC //define it to only use Move-Compact GC (no NOS, no LOS).
+
#define GC_GEN_STATS
+#define USE_32BITS_HASHCODE
+#define GC_LOS_OBJ_SIZE_THRESHOLD (5*KB)
+
#define null 0
#define KB (1<<10)
@@ -77,77 +82,12 @@
#define BYTES_OF_POINTER_SIZE_INT (sizeof(POINTER_SIZE_INT))
#define BIT_SHIFT_TO_BYTES_OF_POINTER_SIZE_INT ((sizeof(POINTER_SIZE_INT)==4)? 2: 3)
-#define GC_OBJ_SIZE_THRESHOLD (5*KB)
-
-#define USE_32BITS_HASHCODE
-
-/* define it to use only mark-sweep GC for entire heap management */
-//#define USE_MARK_SWEEP_GC
-
typedef void (*TaskType)(void*);
-enum Collection_Algorithm{
- COLLECTION_ALGOR_NIL,
-
- MINOR_GEN_FORWARD_POOL,
- MINOR_NONGEN_FORWARD_POOL,
-
- MINOR_GEN_SEMISPACE_POOL,
- MINOR_NONGEN_SEMISPACE_POOL,
-
- MAJOR_COMPACT_SLIDE,
- MAJOR_COMPACT_MOVE,
- MAJOR_MARK_SWEEP
-};
+extern POINTER_SIZE_INT HEAP_BASE;
-/* Possible combinations:
- * MINOR_COLLECTION
- * NORMAL_MAJOR_COLLECTION
- * FALLBACK_COLLECTION
- * NORMAL_MAJOR_COLLECTION | EXTEND_COLLECTION
- * FALLBACK_COLLECTION | EXTEND_COLLECTION
- * MS_COLLECTION
- * MS_COMPACT_COLLECTION
- */
-enum Collection_Kind {
- /* Two main kinds: generational GC and mark-sweep GC; this is decided at compiling time */
- GEN_GC = 0x1,
- MARK_SWEEP_GC = 0x2,
- MOVE_COMPACT_NO_LOS = 0x4,
- /* Mask of bits standing for two basic kinds */
- GC_BASIC_KIND_MASK = ~(unsigned int)0x7,
-
- /* Sub-kinds of generational GC use the 4~7th LSB */
- MINOR_COLLECTION = 0x11, /* 0x10 & GEN_GC */
- MAJOR_COLLECTION = 0x21, /* 0x20 & GEN_GC */
-
- /* Sub-kinds of major collection use the 8~11th LSB */
- NORMAL_MAJOR_COLLECTION = 0x121, /* 0x100 & MAJOR_COLLECTION */
- FALLBACK_COLLECTION = 0x221, /* 0x200 & MAJOR_COLLECTION */
- EXTEND_COLLECTION = 0x421, /* 0x400 & MAJOR_COLLECTION */
-
- /* Sub-kinds of mark-sweep GC use the 12~15th LSB */
- MS_COLLECTION = 0x1002, /* 0x1000 & MARK_SWEEP_GC */
- MS_COMPACT_COLLECTION = 0x2002, /* 0x2000 & MARK_SWEEP_GC */
- MC_COLLECTION = 0x1004
-};
+//#define COMPRESS_REFERENCE // Now it's a VM-wide macro, defined in build file
-extern Boolean IS_FALLBACK_COMPACTION; /* only for mark/fw bits debugging purpose */
-
-enum GC_CAUSE{
- GC_CAUSE_NIL,
- GC_CAUSE_NOS_IS_FULL,
- GC_CAUSE_LOS_IS_FULL,
- GC_CAUSE_COS_IS_FULL,
- GC_CAUSE_WSPACE_IS_FULL,
- GC_CAUSE_RUNTIME_FORCE_GC
-};
-
-
-extern POINTER_SIZE_INT HEAP_NULL;
-
-//#define COMPRESS_REFERENCE // Now passed from outside
-
#if !defined(POINTER64) && defined(COMPRESS_REFERENCE)
#error "32-bit architecture does not support references compression"
#endif
@@ -166,13 +106,13 @@
#ifdef COMPRESS_REFERENCE
if(!p_obj){
/*Fixme: em64t: vm performs a simple compress/uncompress machenism
- i.e. just add or minus HEAP_NULL to p_obj
+ i.e. just add or minus HEAP_BASE to p_obj
But in gc we distinguish zero from other p_obj
Now only in prefetch next live object we can hit this point. */
return (REF)0;
}
else
- return (REF) ((POINTER_SIZE_INT) p_obj - HEAP_NULL);
+ return (REF) ((POINTER_SIZE_INT) p_obj - HEAP_BASE);
#else
return (REF)p_obj;
#endif
@@ -184,7 +124,7 @@
if(!ref){
return NULL;
}
- return (Partial_Reveal_Object *)(HEAP_NULL + ref);
+ return (Partial_Reveal_Object *)(HEAP_BASE + ref);
#else
return (Partial_Reveal_Object *)ref;
@@ -316,7 +256,7 @@
inline void obj_set_fw_in_oi(Partial_Reveal_Object *obj, void *dest)
{
- assert(IS_FALLBACK_COMPACTION || (!(get_obj_info_raw(obj) & FLIP_FORWARD_BIT)));
+ assert(collect_is_fallback() || (!(get_obj_info_raw(obj) & FLIP_FORWARD_BIT)));
/* This assert should always exist except it's fall back compaction. In fall-back compaction
an object can be marked in last time minor collection, which is exactly this time's fw bit,
because the failed minor collection flipped the bits. */
@@ -499,6 +439,14 @@
}GC;
+
+inline Boolean collect_last_is_minor(GC* gc)
+{
+ return (Boolean)((gc->last_collect_kind & ALGO_MAJOR) == 0);
+}
+
+/* ============================================================================ */
+
void mark_scan_pool(Collector* collector);
inline void mark_scan_heap(Collector* collector)
@@ -515,30 +463,17 @@
Boolean obj_belongs_to_gc_heap(Partial_Reveal_Object* p_obj);
-/* gc must match exactly that kind if returning TRUE */
-inline Boolean gc_match_kind(GC *gc, unsigned int kind)
-{
- assert(gc->collect_kind && kind);
- return (Boolean)((gc->collect_kind & kind) == kind);
-}
-/* multi_kinds is a combination of multi collect kinds
- * gc must match one of them.
- */
-inline Boolean gc_match_either_kind(GC *gc, unsigned int multi_kinds)
-{
- multi_kinds &= GC_BASIC_KIND_MASK;
- assert(gc->collect_kind && multi_kinds);
- return (Boolean)(gc->collect_kind & multi_kinds);
-}
+inline void gc_reset_collector_state(GC* gc){ gc->num_active_collectors = 0;}
inline unsigned int gc_get_processor_num(GC* gc) { return gc->_num_processors; }
-void gc_parse_options(GC* gc);
+GC* gc_parse_options();
void gc_reclaim_heap(GC* gc, unsigned int gc_cause);
void gc_prepare_rootset(GC* gc);
int64 get_collection_end_time();
+void set_collection_end_time();
/* generational GC related */
@@ -562,6 +497,7 @@
void gc_init_collector_alloc(GC* gc, Collector* collector);
void gc_reset_collector_alloc(GC* gc, Collector* collector);
void gc_destruct_collector_alloc(GC* gc, Collector* collector);
+void gc_decide_collection_kind(GC* gc, unsigned int cause);
FORCE_INLINE Boolean addr_belongs_to_nos(void* addr)
{ return addr >= nos_boundary; }
Index: vm/gc_gen/src/common/gc_concurrent.cpp
===================================================================
--- vm/gc_gen/src/common/gc_concurrent.cpp (revision 636625)
+++ vm/gc_gen/src/common/gc_concurrent.cpp (working copy)
@@ -45,7 +45,7 @@
{
if(!is_mark_finished(gc)){
lock(gc->concurrent_mark_lock);
-#ifndef USE_MARK_SWEEP_GC
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
gc_gen_start_concurrent_mark((GC_Gen*)gc);
#else
if(gc_concurrent_match_algorithm(OTF_REM_OBJ_SNAPSHOT_ALGO)){
@@ -76,7 +76,7 @@
{
wait_mark_finish(gc);
gc_set_barrier_function(WRITE_BARRIER_REM_NIL);
- mem_fence();
+ //mem_fence(); we do not need a memory fence here.
gc_check_mutator_barrier(gc);
gc_set_concurrent_status(gc,GC_CONCURRENT_STATUS_NIL);
}
@@ -106,12 +106,14 @@
}
gc_set_concurrent_status(gc, GC_CONCURRENT_MARK_PHASE);
- gc_decide_collection_kind((GC_Gen*)gc, GC_CAUSE_NIL);
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
+ gc_decide_collection_kind((GC*)gc, GC_CAUSE_NIL);
+#endif
num_marker = gc_decide_marker_number(gc);
/*start concurrent mark*/
-#ifndef USE_MARK_SWEEP_GC
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
gc_gen_start_concurrent_mark((GC_Gen*)gc);
#else
if(gc_concurrent_match_algorithm(OTF_REM_OBJ_SNAPSHOT_ALGO)){
@@ -150,21 +152,35 @@
wspace_mark_scan_mostly_concurrent_terminate();
gc_wait_concurrent_mark_finish(gc);
+
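+  /* root enumeration is now done for every concurrent-mark algorithm (not only the
+     mostly-concurrent one): suspend the mutators and rebuild the root set before the
+     final marking step */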
+ int disable_count;
+ if(!is_STW){
+ /*suspend the mutators.*/
+ lock(gc->enumerate_rootset_lock);
+ gc_clear_rootset(gc);
+ gc_metadata_verify(gc, TRUE);
+ gc_reset_rootset(gc);
+ disable_count = hythread_reset_suspend_disable();
+ vm_enumerate_root_set_all_threads();
+ gc_copy_interior_pointer_table_to_rootset();
+ gc_set_rootset(gc);
+ }
+
if(gc_concurrent_match_algorithm(MOSTLY_CONCURRENT_ALGO)){
/*If gc use mostly concurrent algorithm, there's a final marking pause.
Suspend the mutators once again and finish the marking phase.*/
- int disable_count;
- if(!is_STW){
- /*suspend the mutators.*/
- lock(gc->enumerate_rootset_lock);
- gc_metadata_verify(gc, TRUE);
- gc_reset_rootset(gc);
- disable_count = hythread_reset_suspend_disable();
- vm_enumerate_root_set_all_threads();
- gc_copy_interior_pointer_table_to_rootset();
- gc_set_rootset(gc);
- }
+// int disable_count;
+// if(!is_STW){
+// /*suspend the mutators.*/
+// lock(gc->enumerate_rootset_lock);
+// gc_metadata_verify(gc, TRUE);
+// gc_reset_rootset(gc);
+// disable_count = hythread_reset_suspend_disable();
+// vm_enumerate_root_set_all_threads();
+// gc_copy_interior_pointer_table_to_rootset();
+// gc_set_rootset(gc);
+// }
/*prepare dirty object*/
gc_prepare_dirty_set(gc);
@@ -172,7 +188,7 @@
gc_set_weakref_sets(gc);
/*start STW mark*/
-#ifndef USE_MARK_SWEEP_GC
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
assert(0);
#else
gc_ms_start_final_mark_after_concurrent((GC_MS*)gc, MIN_NUM_MARKERS);
@@ -180,13 +196,22 @@
wspace_mark_scan_mostly_concurrent_reset();
gc_clear_dirty_set(gc);
- if(!is_STW){
- unlock(gc->enumerate_rootset_lock);
- vm_resume_threads_after();
- assert(hythread_is_suspend_enabled());
- hythread_set_suspend_disable(disable_count);
- }
+// if(!is_STW){
+// unlock(gc->enumerate_rootset_lock);
+// vm_resume_threads_after();
+// assert(hythread_is_suspend_enabled());
+// hythread_set_suspend_disable(disable_count);
+// }
}
+
+
+ if(!is_STW){
+ unlock(gc->enumerate_rootset_lock);
+ vm_resume_threads_after();
+ assert(hythread_is_suspend_enabled());
+ hythread_set_suspend_disable(disable_count);
+ }
+
gc_reset_dirty_set(gc);
}
@@ -237,7 +262,7 @@
gc_identify_dead_weak_roots(gc);
/*start concurrent mark*/
-#ifndef USE_MARK_SWEEP_GC
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
assert(0);
#else
gc_ms_start_concurrent_sweep((GC_MS*)gc, MIN_NUM_MARKERS);
@@ -288,6 +313,10 @@
void gc_reset_after_concurrent_collection(GC* gc)
{
+
+ int64 mutator_time = gc_get_mutator_time(gc);
+ int64 collection_time = gc_get_collector_time(gc) + gc_get_marker_time(gc);
+
/*FIXME: enable concurrent GEN mode.*/
gc_reset_interior_pointer_table();
if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
@@ -302,14 +331,17 @@
gc_activate_finref_threads((GC*)gc);
#ifndef BUILD_IN_REFERENT
} else {
- gc_clear_weakref_pools(gc);
+ gc_clear_weakref_pools(gc);
+ gc_clear_finref_repset_pool(gc);
#endif
}
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
gc_ms_update_space_statistics((GC_MS*)gc);
#endif
+ gc_reset_collector_state(gc);
+
gc_clear_dirty_set(gc);
vm_reclaim_native_objs();
@@ -322,9 +354,15 @@
if(USE_CONCURRENT_GC && gc_sweep_is_concurrent()){
gc_reset_concurrent_sweep(gc);
}
+
+ gc_update_collection_scheduler(gc, mutator_time, collection_time);
+
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
+ gc_ms_reset_space_statistics((GC_MS*)gc);
+#endif
}
-void gc_decide_concurrent_algorithm(GC* gc, char* concurrent_algo)
+void gc_decide_concurrent_algorithm(char* concurrent_algo)
{
if(!concurrent_algo){
CONCURRENT_ALGO = OTF_REM_OBJ_SNAPSHOT_ALGO;
Index: vm/gc_gen/src/common/gc_concurrent.h
===================================================================
--- vm/gc_gen/src/common/gc_concurrent.h (revision 636625)
+++ vm/gc_gen/src/common/gc_concurrent.h (working copy)
@@ -154,7 +154,7 @@
void gc_reset_after_concurrent_collection(GC* gc);
void gc_check_concurrent_phase(GC * gc);
-void gc_decide_concurrent_algorithm(GC* gc, char* concurrent_algo);
+void gc_decide_concurrent_algorithm(char* concurrent_algo);
void gc_reset_concurrent_sweep(GC* gc);
Index: vm/gc_gen/src/common/gc_for_barrier.cpp
===================================================================
--- vm/gc_gen/src/common/gc_for_barrier.cpp (revision 636625)
+++ vm/gc_gen/src/common/gc_for_barrier.cpp (working copy)
@@ -261,8 +261,8 @@
/* FIXME:: this is not the right interface for write barrier */
void gc_heap_slot_write_ref (Managed_Object_Handle p_obj_holding_ref,Managed_Object_Handle *p_slot, Managed_Object_Handle p_target)
{
- Mutator *mutator = (Mutator *)gc_get_tls();
- mutator_post_signal(mutator,MUTATOR_ENTER_BARRIER);
+ //Mutator *mutator = (Mutator *)gc_get_tls();
+ //mutator_post_signal(mutator,MUTATOR_ENTER_BARRIER);
switch(write_barrier_function){
case WRITE_BARRIER_REM_NIL:
@@ -293,7 +293,7 @@
return;
}
- mutator_post_signal(mutator,MUTATOR_EXIT_BARRIER);
+ //mutator_post_signal(mutator,MUTATOR_EXIT_BARRIER);
return;
}
Index: vm/gc_gen/src/common/gc_for_vm.cpp
===================================================================
--- vm/gc_gen/src/common/gc_for_vm.cpp (revision 636625)
+++ vm/gc_gen/src/common/gc_for_vm.cpp (working copy)
@@ -58,7 +58,6 @@
static void init_gc_helpers()
{
set_property("vm.component.classpath.gc_gen", "gc_gen.jar", VM_PROPERTIES);
-
vm_helper_register_magic_helper(VM_RT_NEW_RESOLVED_USING_VTABLE_AND_SIZE, "org/apache/harmony/drlvm/gc_gen/GCHelper", "alloc");
vm_helper_register_magic_helper(VM_RT_NEW_VECTOR_USING_VTABLE, "org/apache/harmony/drlvm/gc_gen/GCHelper", "allocArray");
vm_helper_register_magic_helper(VM_RT_GC_HEAP_WRITE_REF, "org/apache/harmony/drlvm/gc_gen/GCHelper", "write_barrier_slot_rem");
@@ -71,20 +70,11 @@
vm_gc_lock_init();
-#if defined(USE_MARK_SWEEP_GC)
-unsigned int gc_struct_size = sizeof(GC_MS);
-#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
-unsigned int gc_struct_size = sizeof(GC_MC);
-#else
-unsigned int gc_struct_size = sizeof(GC_Gen);
-#endif
- GC* gc = (GC*)STD_MALLOC(gc_struct_size);
+ GC* gc = gc_parse_options();
assert(gc);
- memset(gc, 0, sizeof(GC));
+
p_global_gc = gc;
- gc_parse_options(gc);
-
#ifdef BUILD_IN_REFERENT
if( ! IGNORE_FINREF){
INFO2(" gc.init" , "finref must be ignored, since BUILD_IN_REFERENT is defined." );
@@ -104,7 +94,7 @@
gc_metadata_initialize(gc); /* root set and mark stack */
-#if defined(USE_MARK_SWEEP_GC)
+#if defined(USE_UNIQUE_MARK_SWEEP_GC)
gc_ms_initialize((GC_MS*)gc, min_heap_size_bytes, max_heap_size_bytes);
#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
gc_mc_initialize((GC_MC*)gc, min_heap_size_bytes, max_heap_size_bytes);
@@ -140,7 +130,7 @@
INFO2("gc.process", "GC: call GC wrapup ....");
GC* gc = p_global_gc;
-#if defined(USE_MARK_SWEEP_GC)
+#if defined(USE_UNIQUE_MARK_SWEEP_GC)
gc_ms_destruct((GC_MS*)gc);
#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
gc_mc_destruct((GC_MC*)gc);
@@ -186,7 +176,7 @@
FIXME:: nos_boundary is a static field in GCHelper.java for fast write barrier, not a real object reference
this should be fixed that magic Address field should not be enumerated. */
#ifdef COMPRESS_REFERENCE
- if (p_obj == (Partial_Reveal_Object*)HEAP_NULL || p_obj == NULL || p_obj == nos_boundary ) return;
+ if (p_obj == (Partial_Reveal_Object*)HEAP_BASE || p_obj == NULL || p_obj == nos_boundary ) return;
#else
if (p_obj == NULL || p_obj == nos_boundary ) return;
#endif
@@ -229,7 +219,7 @@
FIXME:: nos_boundary is a static field in GCHelper.java for fast write barrier, not a real object reference
this should be fixed that magic Address field should not be enumerated. */
#ifdef COMPRESS_REFERENCE
- if (p_obj == (Partial_Reveal_Object*)HEAP_NULL || p_obj == NULL || p_obj == nos_boundary ) return;
+ if (p_obj == (Partial_Reveal_Object*)HEAP_BASE || p_obj == NULL || p_obj == nos_boundary ) return;
#else
if (p_obj == NULL || p_obj == nos_boundary ) return;
#endif
@@ -265,7 +255,7 @@
int64 gc_free_memory()
{
-#if defined(USE_MARK_SWEEP_GC)
+#if defined(USE_UNIQUE_MARK_SWEEP_GC)
return (int64)gc_ms_free_memory_size((GC_MS*)p_global_gc);
#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
return (int64)gc_mc_free_memory_size((GC_MC*)p_global_gc);
@@ -277,7 +267,7 @@
/* java heap size.*/
int64 gc_total_memory()
{
-#if defined(USE_MARK_SWEEP_GC)
+#if defined(USE_UNIQUE_MARK_SWEEP_GC)
return (int64)((POINTER_SIZE_INT)gc_ms_total_memory_size((GC_MS*)p_global_gc));
#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
return (int64)((POINTER_SIZE_INT)gc_mc_total_memory_size((GC_MC*)p_global_gc));
@@ -288,7 +278,7 @@
int64 gc_max_memory()
{
-#if defined(USE_MARK_SWEEP_GC)
+#if defined(USE_UNIQUE_MARK_SWEEP_GC)
return (int64)((POINTER_SIZE_INT)gc_ms_total_memory_size((GC_MS*)p_global_gc));
#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
return (int64)((POINTER_SIZE_INT)gc_mc_total_memory_size((GC_MC*)p_global_gc));
@@ -361,7 +351,7 @@
#else //USE_32BITS_HASHCODE
int32 gc_get_hashcode(Managed_Object_Handle p_object)
{
-#if defined(USE_MARK_SWEEP_GC) || defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if defined(USE_UNIQUE_MARK_SWEEP_GC) || defined(USE_UNIQUE_MOVE_COMPACT_GC)
return (int32)0;//p_object;
#endif
@@ -421,7 +411,7 @@
// data structures in not consistent for heap iteration
if (!JVMTI_HEAP_ITERATION) return;
-#if defined(USE_MARK_SWEEP_GC)
+#if defined(USE_UNIQUE_MARK_SWEEP_GC)
gc_ms_iterate_heap((GC_MS*)p_global_gc);
#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
gc_mc_iterate_heap((GC_MC*)p_global_gc);
@@ -445,3 +435,4 @@
return address_belongs_to_gc_heap(p_obj, p_global_gc);
}
+
Index: vm/gc_gen/src/common/gc_metadata.cpp
===================================================================
--- vm/gc_gen/src/common/gc_metadata.cpp (revision 636625)
+++ vm/gc_gen/src/common/gc_metadata.cpp (working copy)
@@ -167,13 +167,10 @@
return block;
}
-extern Boolean IS_MOVE_COMPACT;
-
static void gc_update_repointed_sets(GC* gc, Pool* pool, Boolean double_fix)
{
GC_Metadata* metadata = gc->metadata;
- /* NOTE:: this is destructive to the root sets. */
pool_iterator_init(pool);
Vector_Block* root_set = pool_iterator_next(pool);
@@ -184,23 +181,19 @@
iter = vector_block_iterator_advance(root_set,iter);
Partial_Reveal_Object* p_obj = read_slot(p_ref);
- if(IS_MOVE_COMPACT){
- /*This condition is removed because we do los sliding compaction at every major compaction after add los minor sweep.*/
- //if(obj_is_moved(p_obj))
- /*Fixme: los_boundery ruined the modularity of gc_common.h*/
- if(p_obj < los_boundary){
- p_obj = obj_get_fw_in_oi(p_obj);
- }else{
- p_obj = obj_get_fw_in_table(p_obj);
+ if( collect_is_compact_move()){ /* move-compact uses offset table */
+      /* This condition is removed because we do los sliding compaction at every major compaction after adding los minor sweep. */
+      //if(obj_is_moved(p_obj))
+      /* Fixme: los_boundary ruined the modularity of gc_common.h */
+ if( gc_has_los() && p_obj < los_boundary){
+ p_obj = obj_get_fw_in_oi(p_obj);
+ }else{ /* this is the case with unique move_compact */
+ p_obj = obj_get_fw_in_table(p_obj);
}
write_slot(p_ref, p_obj);
-
- }else if(gc_match_kind(gc, MC_COLLECTION)){
- p_obj = obj_get_fw_in_table(p_obj);
- write_slot(p_ref, p_obj);
-
- }else{
+
+ }else{ /* this is the case of non-move-compact major collection, such as slide-compact and mark-sweep */
if(obj_is_fw_in_oi(p_obj)){
/* Condition obj_is_moved(p_obj) is for preventing mistaking previous mark bit of large obj as fw bit when fallback happens.
* Because until fallback happens, perhaps the large obj hasn't been marked. So its mark bit remains as the last time.
@@ -218,11 +211,12 @@
assert(address_belongs_to_gc_heap(p_obj, gc));
}
write_slot(p_ref, p_obj);
- }
- }
- }
+ } /* obj is forwarded */
+ } /* collect is not move-compact */
+
+ } /* while root_set has entry */
root_set = pool_iterator_next(pool);
- }
+ } /* while pool has root_set */
return;
}
@@ -233,8 +227,8 @@
gc_update_weak_roots(gc, double_fix);
- /* MINOR_COLLECTION doesn't need rootset update, but need reset */
- if( !gc_match_kind(gc, MINOR_COLLECTION)){
+ /* ALGO_MINOR doesn't need rootset update, but need reset */
+ if( !collect_is_minor()){
gc_update_repointed_sets(gc, gc->metadata->gc_rootset_pool, double_fix);
#ifndef BUILD_IN_REFERENT
gc_update_finref_repointed_refs(gc, double_fix);
@@ -302,8 +296,8 @@
collector->rem_set = NULL;
}
- assert(gc_match_either_kind(gc, MINOR_COLLECTION|NORMAL_MAJOR_COLLECTION));
- if( gc_match_kind(gc, NORMAL_MAJOR_COLLECTION )){
+ assert( collect_is_major_normal() || collect_is_minor());
+ if( collect_is_major_normal() ){
/* all the remsets are useless now */
/* clean and put back mutator remsets */
#ifdef USE_REM_SLOTS
@@ -341,7 +335,7 @@
root_set = pool_get_entry( collector_remset_pool );
}
- }else{ /* generational MINOR_COLLECTION */
+ }else{ /* generational ALGO_MINOR */
/* all the remsets are put into the shared pool */
#ifdef USE_REM_SLOTS
@@ -433,7 +427,7 @@
void gc_clear_remset(GC* gc)
{
/* this function clears all the remset before fallback */
- assert(gc_match_kind(gc, FALLBACK_COLLECTION));
+ assert(collect_is_fallback());
/* rootset pool has some entries that are actually remset, because all the remsets are put into rootset pool
before the collection. gc->root_set is a pointer pointing to the boundary between remset and rootset in the pool */
@@ -583,7 +577,7 @@
while(!vector_block_iterator_end(local_dirty_set,iter)){
Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) *iter;
iter = vector_block_iterator_advance(local_dirty_set, iter);
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
assert(obj_is_mark_black_in_table(p_obj));
#endif
}
@@ -605,7 +599,7 @@
while(!vector_block_iterator_end(dirty_set,iter)){
Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) *iter;
iter = vector_block_iterator_advance(dirty_set, iter);
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
assert(obj_is_mark_black_in_table(p_obj));
#endif
}
@@ -658,3 +652,4 @@
void free_set_pool_put_entry(Vector_Block* block, GC_Metadata *metadata)
{ pool_put_entry(metadata->free_set_pool, block); }
+
Index: vm/gc_gen/src/common/gc_options.cpp
===================================================================
--- vm/gc_gen/src/common/gc_options.cpp (revision 0)
+++ vm/gc_gen/src/common/gc_options.cpp (revision 0)
@@ -0,0 +1,418 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc_common.h"
+
+extern char* GC_VERIFY;
+extern POINTER_SIZE_INT NOS_SIZE;
+extern POINTER_SIZE_INT MIN_NOS_SIZE;
+extern POINTER_SIZE_INT INIT_LOS_SIZE;
+extern POINTER_SIZE_INT TOSPACE_SIZE;
+extern POINTER_SIZE_INT MOS_RESERVE_SIZE;
+
+extern Boolean GEN_NONGEN_SWITCH;
+
+extern Boolean FORCE_FULL_COMPACT;
+
+extern unsigned int NUM_MARKERS;
+extern unsigned int NUM_COLLECTORS;
+extern unsigned int MINOR_COLLECTORS;
+extern unsigned int MAJOR_COLLECTORS;
+
+extern Boolean IGNORE_VTABLE_TRACING;
+extern Boolean IGNORE_FINREF;
+
+extern Boolean JVMTI_HEAP_ITERATION ;
+
+extern Boolean USE_CONCURRENT_GC;
+extern Boolean USE_CONCURRENT_ENUMERATION;
+extern Boolean USE_CONCURRENT_MARK;
+extern Boolean USE_CONCURRENT_SWEEP;
+
+
+POINTER_SIZE_INT HEAP_SIZE_DEFAULT = 256 * MB;
+POINTER_SIZE_INT min_heap_size_bytes = 16 * MB;
+POINTER_SIZE_INT max_heap_size_bytes = 0;
+
+
+unsigned int GC_PROP;
+
+GC* gc_mc_create();
+GC* gc_ms_create();
+
+static GC* gc_decide_collection_algo(char* unique_algo, Boolean has_los)
+{
+ /* if unique_algo is not set, gc_gen_decide_collection_algo is called. */
+ assert(unique_algo);
+
+ GC_PROP = ALGO_POOL_SHARE | ALGO_DEPTH_FIRST;
+
+ assert(!has_los); /* currently unique GCs don't use LOS */
+ if(has_los)
+ GC_PROP |= ALGO_HAS_LOS;
+
+ Boolean use_default = FALSE;
+
+ GC* gc;
+
+ string_to_upper(unique_algo);
+
+ if(!strcmp(unique_algo, "MOVE_COMPACT")){
+ GC_PROP |= ALGO_COMPACT_MOVE;
+ gc = gc_mc_create();
+
+ }else if(!strcmp(unique_algo, "MARK_SWEEP")){
+ GC_PROP |= ALGO_MS_NORMAL;
+ gc = gc_ms_create();
+ }else{
+ WARN2("gc.base","\nWarning: GC algorithm setting incorrect. Will use default value.\n");
+ GC_PROP |= ALGO_COMPACT_MOVE;
+ gc = gc_mc_create();
+ }
+
+ return gc;
+}
+
+static int get_int_property(const char *property_name)
+{
+ assert(property_name);
+ char *value = get_property(property_name, VM_PROPERTIES);
+ int return_value;
+ if (NULL != value)
+ {
+ return_value = atoi(value);
+ destroy_property_value(value);
+ }else{
+ DIE2("gc.base","Warning: property value "<generate_barrier = gc_is_gen_mode();
+
+ if (is_property_set("gc.generate_barrier", VM_PROPERTIES) == 1) {
+ Boolean generate_barrier = get_boolean_property("gc.generate_barrier");
+ gc->generate_barrier = (generate_barrier || gc->generate_barrier);
+ }
+
+/* /////////////////////////////////////////////////// */
+
+ POINTER_SIZE_INT max_heap_size = HEAP_SIZE_DEFAULT;
+ POINTER_SIZE_INT min_heap_size = min_heap_size_bytes;
+
+ if (is_property_set("gc.mx", VM_PROPERTIES) == 1) {
+ max_heap_size = get_size_property("gc.mx");
+
+ if (max_heap_size < min_heap_size){
+ max_heap_size = min_heap_size;
+ WARN2("gc.base","Warning: Max heap size you set is too small, reset to "< max_heap_size){
+ max_heap_size = min_heap_size;
+ WARN2("gc.base","Warning: Max heap size is too small, reset to "<generate_barrier = TRUE;
+ }
+
+ if (is_property_set("gc.heap_iteration", VM_PROPERTIES) == 1) {
+ JVMTI_HEAP_ITERATION = get_boolean_property("gc.heap_iteration");
+ }
+
+ if (is_property_set("gc.ignore_vtable_tracing", VM_PROPERTIES) == 1) {
+ IGNORE_VTABLE_TRACING = get_boolean_property("gc.ignore_vtable_tracing");
+ }
+
+ if (is_property_set("gc.use_large_page", VM_PROPERTIES) == 1){
+ char* value = get_property("gc.use_large_page", VM_PROPERTIES);
+ large_page_hint = strdup(value);
+ destroy_property_value(value);
+ }
+
+ if (is_property_set("gc.concurrent_gc", VM_PROPERTIES) == 1){
+ Boolean use_all_concurrent_phase= get_boolean_property("gc.concurrent_gc");
+ if(use_all_concurrent_phase){
+ USE_CONCURRENT_ENUMERATION = TRUE;
+ USE_CONCURRENT_MARK = TRUE;
+ USE_CONCURRENT_SWEEP = TRUE;
+ gc->generate_barrier = TRUE;
+ }
+ }
+
+ if (is_property_set("gc.concurrent_enumeration", VM_PROPERTIES) == 1){
+ USE_CONCURRENT_ENUMERATION= get_boolean_property("gc.concurrent_enumeration");
+ if(USE_CONCURRENT_ENUMERATION){
+ USE_CONCURRENT_GC = TRUE;
+ gc->generate_barrier = TRUE;
+ }
+ }
+
+ if (is_property_set("gc.concurrent_mark", VM_PROPERTIES) == 1){
+ USE_CONCURRENT_MARK= get_boolean_property("gc.concurrent_mark");
+ if(USE_CONCURRENT_MARK){
+ USE_CONCURRENT_GC = TRUE;
+ gc->generate_barrier = TRUE;
+ }
+ }
+
+ if (is_property_set("gc.concurrent_sweep", VM_PROPERTIES) == 1){
+ USE_CONCURRENT_SWEEP= get_boolean_property("gc.concurrent_sweep");
+ if(USE_CONCURRENT_SWEEP){
+ USE_CONCURRENT_GC = TRUE;
+ }
+ }
+
+ char* concurrent_algo = NULL;
+
+ if (is_property_set("gc.concurrent_algorithm", VM_PROPERTIES) == 1) {
+ concurrent_algo = get_property("gc.concurrent_algorithm", VM_PROPERTIES);
+ }
+
+ gc_decide_concurrent_algorithm(concurrent_algo);
+
+#if defined(ALLOC_ZEROING) && defined(ALLOC_PREFETCH)
+ if(is_property_set("gc.prefetch",VM_PROPERTIES) ==1) {
+ PREFETCH_ENABLED = get_boolean_property("gc.prefetch");
+ }
+
+ if(is_property_set("gc.prefetch_distance",VM_PROPERTIES)==1) {
+ PREFETCH_DISTANCE = get_size_property("gc.prefetch_distance");
+ if(!PREFETCH_ENABLED) {
+ WARN2("gc.prefetch_distance","Warning: Prefetch distance set with Prefetch disabled!");
+ }
+ }
+
+ if(is_property_set("gc.prefetch_stride",VM_PROPERTIES)==1) {
+ PREFETCH_STRIDE = get_size_property("gc.prefetch_stride");
+ if(!PREFETCH_ENABLED) {
+ WARN2("gc.prefetch_stride","Warning: Prefetch stride set with Prefetch disabled!");
+ }
+ }
+
+ if(is_property_set("gc.zeroing_size",VM_PROPERTIES)==1) {
+ ZEROING_SIZE = get_size_property("gc.zeroing_size");
+ }
+#endif
+
+#ifdef PREFETCH_SUPPORTED
+ if(is_property_set("gc.mark_prefetch",VM_PROPERTIES) ==1) {
+ mark_prefetch = get_boolean_property("gc.mark_prefetch");
+ }
+#endif
+
+ return gc;
+}
+
+
Index: vm/gc_gen/src/common/gc_platform.h
===================================================================
--- vm/gc_gen/src/common/gc_platform.h (revision 636625)
+++ vm/gc_gen/src/common/gc_platform.h (working copy)
@@ -25,6 +25,7 @@
#include "port_vmem.h"
#include "port_atomic.h"
#include "port_malloc.h"
+#include "port_barriers.h"
#include
@@ -81,6 +82,10 @@
#define PREFETCH(x)
#endif
+#ifdef PREFETCH_SUPPORTED
+extern Boolean mark_prefetch;
+#endif
+
#define ABS_DIFF(x, y) (((x)>(y))?((x)-(y)):((y)-(x)))
#define USEC_PER_SEC INT64_C(1000000)
@@ -287,8 +292,7 @@
inline void mem_fence()
{
- //FIXME: enable mem fence.
- //apr_memory_rw_barrier();
+ port_rw_barrier();
}
inline int64 time_now()
Index: vm/gc_gen/src/common/gc_properties.h
===================================================================
--- vm/gc_gen/src/common/gc_properties.h (revision 0)
+++ vm/gc_gen/src/common/gc_properties.h (revision 0)
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _GC_PROPERTIES
+#define _GC_PROPERTIES
+
+enum GC_CAUSE{
+ GC_CAUSE_NIL,
+ GC_CAUSE_NOS_IS_FULL,
+ GC_CAUSE_LOS_IS_FULL,
+ GC_CAUSE_MOS_IS_FULL,
+ GC_CAUSE_RUNTIME_FORCE_GC
+};
+
+extern unsigned int GC_PROP;
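+
+/* GC_PROP is a global property word: it encodes the configured heap layout and algorithms
+   (ALGO_HAS_NOS, ALGO_HAS_LOS, ALGO_IS_GEN, ...) together with the kind of the collection
+   currently in progress; the inline predicates below test and update these bits. */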
+
+/* ============================================================================ */
+/* legal collection kinds:
+ for ALGO_HAS_NOS:
+ MINOR (i.e., !ALGO_MAJOR)
+
+ ALGO_COPY_SEMISPACE
+ ALGO_COPY_FORWARD
+
+ ALGO_MAJOR_FALLBACK
+ ALGO_MAJOR_NORMAL
+
+ ALGO_MS_NORMAL
+ ALGO_MS_COMPACT
+ ALGO_COMPACT_SLIDE
+ ALGO_COMPACT_MOVE
+
+ for !ALGO_HAS_NOS (and actually !ALGO_HAS_LOS)
+ ALGO_COMPACT_MOVE
+ ALGO_MS_NORMAL
+ ALGO_MS_COMPACT
+*/
+
+enum GC_Property{
+ ALGO_HAS_NOS = 0x1,
+ ALGO_HAS_LOS = 0x2,
+ ALGO_IS_GEN = 0x4,
+
+ ALGO_COPY_FORWARD = 0x10,
+ ALGO_COPY_SEMISPACE = 0x20,
+
+ ALGO_COMPACT_MOVE = 0x40,
+ ALGO_COMPACT_SLIDE = 0x80,
+ ALGO_COMPACT_MASK = 0xc0,
+
+ ALGO_MARKSWEEP = 0x100,
+ ALGO_MS_NORMAL = 0x300, /* ALGO_MARKSWEEP|0x200 */
+ ALGO_MS_COMPACT = 0x500, /* ALGO_MARKSWEEP|0x400 */
+ ALGO_MARKSWEEP_MASK = 0x700,
+
+ ALGO_WORK_STEAL = 0x1000,
+ ALGO_TASK_PUSH = 0x2000,
+ ALGO_POOL_SHARE = 0x4000,
+
+ ALGO_BREADTH_FIRST = 0x10000,
+ ALGO_DEPTH_FIRST = 0x20000,
+ ALGO_REORDERING = 0x40000,
+
+ ALGO_MAJOR = 0x100000,
+ ALGO_MAJOR_NORMAL = 0x300000, /* ALGO_MAJOR|0x200000 */
+ ALGO_MAJOR_FALLBACK = 0x500000, /* ALGO_MAJOR|0x400000 */
+ ALGO_MAJOR_MASK = 0x700000,
+
+ ALGO_CON = 0x1000000,
+ ALGO_CON_MARK = 0x3000000, /* ALGO_CON|0x2000000 */
+ ALGO_CON_SWEEP = 0x5000000, /* ALGO_CON|0x4000000 */
+ ALGO_CON_ENUM = 0x9000000, /* ALGO_CON|0x8000000 */
+
+};
+
+FORCE_INLINE Boolean gc_is_kind(unsigned int kind)
+{
+ return (Boolean)((GC_PROP & kind) == kind);
+}
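+/* note: composite values such as ALGO_MS_NORMAL contain their base bit (ALGO_MARKSWEEP),
+   so gc_is_kind(ALGO_MARKSWEEP) is true for every mark-sweep variant */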
+
+FORCE_INLINE void gc_set_gen_flag()
+{
+ GC_PROP |= ALGO_IS_GEN;
+}
+
+FORCE_INLINE void gc_clear_gen_flag()
+{
+ GC_PROP &= ~ALGO_IS_GEN;
+}
+
+FORCE_INLINE Boolean gc_is_gen_mode()
+{
+ return gc_is_kind(ALGO_IS_GEN);
+}
+
+FORCE_INLINE Boolean gc_has_los()
+{
+ return gc_is_kind(ALGO_HAS_LOS);
+}
+
+FORCE_INLINE Boolean gc_has_nos()
+{
+ return gc_is_kind(ALGO_HAS_NOS);
+}
+
+FORCE_INLINE Boolean collect_is_major()
+{
+ return gc_is_kind(ALGO_MAJOR);
+}
+
+FORCE_INLINE Boolean collect_is_minor()
+{
+ return gc_has_nos() && !collect_is_major();
+}
+
+FORCE_INLINE Boolean collect_is_major_normal()
+{
+ return gc_is_kind(ALGO_MAJOR_NORMAL);
+}
+
+FORCE_INLINE void collect_set_major_normal()
+{
+ GC_PROP &= ~ALGO_MAJOR_MASK;
+ GC_PROP |= ALGO_MAJOR_NORMAL;
+}
+
+FORCE_INLINE void collect_set_minor()
+{
+ assert( gc_has_nos());
+ GC_PROP &= ~ALGO_MAJOR_MASK;
+}
+
+FORCE_INLINE Boolean collect_is_fallback()
+{
+ return gc_is_kind(ALGO_MAJOR_FALLBACK);
+}
+
+FORCE_INLINE Boolean major_is_marksweep()
+{
+ return gc_is_kind(ALGO_MARKSWEEP|ALGO_HAS_NOS);
+}
+
+FORCE_INLINE Boolean major_is_compact_move()
+{
+ return gc_is_kind(ALGO_COMPACT_MOVE|ALGO_HAS_NOS);
+}
+
+FORCE_INLINE void major_set_compact_move()
+{
+ GC_PROP &= ~ALGO_COMPACT_MASK;
+ GC_PROP |= ALGO_COMPACT_MOVE;
+}
+
+FORCE_INLINE Boolean major_is_compact_slide()
+{
+ return gc_is_kind(ALGO_COMPACT_SLIDE|ALGO_HAS_NOS);
+}
+
+FORCE_INLINE void major_set_compact_slide()
+{
+ GC_PROP &= ~ALGO_COMPACT_MASK;
+ GC_PROP |= ALGO_COMPACT_SLIDE;
+}
+
+FORCE_INLINE Boolean minor_is_semispace()
+{
+ return gc_is_kind(ALGO_COPY_SEMISPACE|ALGO_HAS_NOS);
+}
+
+FORCE_INLINE Boolean minor_is_forward()
+{
+ return gc_is_kind(ALGO_COPY_FORWARD|ALGO_HAS_NOS);
+}
+
+FORCE_INLINE Boolean collect_move_object()
+{
+ if(gc_has_nos())
+ return collect_is_minor() ||
+ (collect_is_major() && !gc_is_kind(ALGO_MS_NORMAL));
+ else
+ return !gc_is_kind(ALGO_MS_NORMAL);
+}
+
+FORCE_INLINE Boolean collect_is_compact_move()
+{
+ if(gc_has_nos())
+ return collect_is_major() && gc_is_kind(ALGO_COMPACT_MOVE);
+ else
+ return gc_is_kind(ALGO_COMPACT_MOVE);
+}
+
+FORCE_INLINE Boolean collect_is_ms_compact()
+{
+ if(gc_has_nos())
+ return collect_is_major() && gc_is_kind(ALGO_MS_COMPACT);
+ else
+ return gc_is_kind(ALGO_MS_COMPACT);
+}
+
+FORCE_INLINE void collect_set_ms_normal()
+{
+ GC_PROP &= ~ALGO_MARKSWEEP_MASK;
+ GC_PROP |= ALGO_MS_NORMAL;
+
+}
+
+/* This distinguishes from the non-moving and trace-moving cases, where root slots are
+   either updated on-the-fly or need no updating. The kinds below need to update root
+   slots after collection in an extra phase, i.e., collect_mark_and_move. */
+FORCE_INLINE Boolean collect_need_update_repset()
+{
+ return (gc_is_kind(ALGO_MAJOR) || gc_is_kind(ALGO_MS_COMPACT));
+}
+
+#endif /* #ifndef _GC_PROPERTIES */
Index: vm/gc_gen/src/common/gc_space.h
===================================================================
--- vm/gc_gen/src/common/gc_space.h (revision 636625)
+++ vm/gc_gen/src/common/gc_space.h (working copy)
@@ -31,6 +31,8 @@
POINTER_SIZE_INT size_live_obj;
POINTER_SIZE_INT size_free_space;
POINTER_SIZE_INT last_size_free_space;
+ POINTER_SIZE_INT size_new_obj;
+ float space_utilization_ratio;
}Space_Statistics;
struct GC;
Index: vm/gc_gen/src/common/large_pages.cpp
===================================================================
--- vm/gc_gen/src/common/large_pages.cpp (revision 636625)
+++ vm/gc_gen/src/common/large_pages.cpp (working copy)
@@ -56,7 +56,7 @@
Boolean release_lock_memory_priv()
{
HANDLE process = GetCurrentProcess();
- return set_privilege(process, SE_LOCK_MEMORY_NAME, TRUE);
+ return set_privilege(process, SE_LOCK_MEMORY_NAME, FALSE);
}
void* alloc_large_pages(size_t size, const char* hint)
Index: vm/gc_gen/src/common/mark_scan_pool.cpp
===================================================================
--- vm/gc_gen/src/common/mark_scan_pool.cpp (revision 636625)
+++ vm/gc_gen/src/common/mark_scan_pool.cpp (working copy)
@@ -147,7 +147,7 @@
iter = vector_block_iterator_advance(root_set,iter);
Partial_Reveal_Object *p_obj = read_slot(p_ref);
- /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */
+ /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for ALGO_MAJOR */
assert(p_obj!=NULL);
/* we have to mark the object before put it into marktask, because
it is possible to have two slots containing a same object. They will
Index: vm/gc_gen/src/common/object_status.h
===================================================================
--- vm/gc_gen/src/common/object_status.h (revision 636625)
+++ vm/gc_gen/src/common/object_status.h (working copy)
@@ -55,7 +55,7 @@
return !obj_is_marked_in_vt(p_obj);
}
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
inline Boolean obj_is_dead_in_mark_sweep_gc(Partial_Reveal_Object *p_obj)
{
return !obj_is_mark_black_in_table(p_obj);
@@ -72,7 +72,7 @@
{
assert(p_obj);
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
return obj_is_dead_in_mark_sweep_gc(p_obj);
#endif
@@ -80,12 +80,12 @@
return obj_is_dead_in_move_compact_no_los_gc(p_obj);
#endif
- if(gc_match_kind(gc, MINOR_COLLECTION)){
+ if(collect_is_minor()){
if(gc_is_gen_mode())
return obj_is_dead_in_gen_minor_gc(p_obj);
else
return obj_is_dead_in_nongen_minor_gc(p_obj);
- } else if(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP){
+ } else if(major_is_marksweep()){
return obj_is_dead_in_sweep_major_gc(p_obj);
} else {
return obj_is_dead_in_compact_major_gc(p_obj);
@@ -100,7 +100,7 @@
{
/* assert(!gc_obj_is_dead(gc, p_obj)); commented out for weakroot */
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
Wspace *wspace = gc_ms_get_wspace((GC_MS*)gc);
return wspace->move_object;
#endif
@@ -109,9 +109,9 @@
Cspace *cspace = gc_mc_get_cspace((GC_MC*)gc);
return cspace->move_object;
#endif
- if(gc_match_kind(gc, MINOR_COLLECTION)){
+ if(collect_is_minor()){
if(!obj_belongs_to_nos(p_obj)) return FALSE;
- if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL)
+ if(minor_is_semispace())
return TRUE;
else if(gc_is_gen_mode())
return fspace_obj_to_be_forwarded(p_obj);
Index: vm/gc_gen/src/common/space_tuner.cpp
===================================================================
--- vm/gc_gen/src/common/space_tuner.cpp (revision 636625)
+++ vm/gc_gen/src/common/space_tuner.cpp (working copy)
@@ -96,7 +96,7 @@
* Else, we call the marking function for space tuning. */
void gc_compute_space_tune_size_before_marking(GC* gc)
{
- if(gc_match_kind(gc, MINOR_COLLECTION)) return;
+ if(collect_is_minor()) return;
gc_decide_space_tune(gc);
@@ -204,13 +204,14 @@
memset(collector->segment_live_size, 0, sizeof(POINTER_SIZE_INT) * NORMAL_SIZE_SEGMENT_NUM);
}
- //POINTER_SIZE_INT additional_non_los_size = ((collector_num * 2) << GC_BLOCK_SHIFT_COUNT) + (non_los_live_obj_size >> GC_BLOCK_SHIFT_COUNT) * (GC_OBJ_SIZE_THRESHOLD/4);
+ //POINTER_SIZE_INT additional_non_los_size = ((collector_num * 2) << GC_BLOCK_SHIFT_COUNT) + (non_los_live_obj_size >> GC_BLOCK_SHIFT_COUNT) * (GC_LOS_OBJ_SIZE_THRESHOLD/4);
double additional_non_los_size = 0;
for(unsigned int i = 0; i < NORMAL_SIZE_SEGMENT_NUM; i++) {
additional_non_los_size += (double)segment_live_size[i] * SEGMENT_INDEX_TO_SIZE(i) / non_los_live_obj_size;
}
additional_non_los_size *= 1.2; // in case of some cases worse than average one
POINTER_SIZE_INT non_los_live_block = non_los_live_obj_size / (GC_BLOCK_BODY_SIZE_BYTES-(POINTER_SIZE_INT)additional_non_los_size);
+ non_los_live_block += collector_num << 2;
non_los_live_obj_size = (non_los_live_block << GC_BLOCK_SHIFT_COUNT);
if(non_los_live_obj_size > non_los_size)
non_los_live_obj_size = non_los_size;
@@ -264,7 +265,7 @@
assert(max_heap_size_bytes >= gc->committed_heap_size);
POINTER_SIZE_INT extend_heap_size = 0;
POINTER_SIZE_INT potential_max_tuning_size = max_tuning_size + max_heap_size_bytes - gc->committed_heap_size;
- potential_max_tuning_size -= LOS_HEAD_RESERVE_FOR_HEAP_NULL;
+ potential_max_tuning_size -= LOS_HEAD_RESERVE_FOR_HEAP_BASE;
//debug_adjust
assert(!(potential_max_tuning_size % SPACE_ALLOC_UNIT));
@@ -438,7 +439,7 @@
void gc_space_tuner_reset(GC* gc)
{
Space_Tuner* tuner = gc->tuner;
- if( gc_match_kind(gc, MAJOR_COLLECTION)){
+ if( collect_is_major()){
/*Clear the fields every major collection except the wast area statistic.*/
tuner->tuning_size = 0;
tuner->interim_blocks = NULL;
@@ -537,3 +538,4 @@
return;
}
+
Index: vm/gc_gen/src/common/weak_roots.cpp
===================================================================
--- vm/gc_gen/src/common/weak_roots.cpp (revision 636625)
+++ vm/gc_gen/src/common/weak_roots.cpp (working copy)
@@ -32,9 +32,9 @@
if(!p_obj){ // reference has been cleared
continue;
}
- if(IS_FALLBACK_COMPACTION) {
+ assert(p_obj->vt_raw);
+ if(collect_is_fallback()) {
if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
- //this is unreachable for VTable->jlc(p_obj), but needed by general weak roots
assert(!obj_is_marked_in_vt(p_obj));
assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
p_obj = obj_get_fw_in_oi(p_obj);
@@ -48,8 +48,6 @@
}
}
-extern Boolean IS_MOVE_COMPACT;
-
/* parameter pointer_addr_in_pool means it is p_ref or p_obj in pool */
void gc_update_weak_roots(GC *gc, Boolean double_fix)
{
@@ -67,29 +65,33 @@
if(!p_obj || !obj_need_move(gc, p_obj)){ // reference has been cleared or not moved
continue;
}
+    /* the following code assumes p_obj's space is movable, so mark-sweep is not considered below */
+    if( collect_is_compact_move()){ /* move-compact uses the offset table */
+ if( gc_has_los() && p_obj < los_boundary){
+ p_obj = obj_get_fw_in_oi(p_obj);
+ }else{ /* this is the case with unique move_compact */
+ p_obj = obj_get_fw_in_table(p_obj);
+ }
- if(IS_MOVE_COMPACT){
- assert(space_of_addr(gc, p_obj)->move_object);
- *p_ref = obj_get_fw_in_table(p_obj);
- } else if(gc_match_kind(gc, MC_COLLECTION)){
- *p_ref = obj_get_fw_in_table(p_obj);
- } else if(gc_match_kind(gc, MS_COMPACT_COLLECTION) || gc_get_mos((GC_Gen*)gc)->collect_algorithm==MAJOR_MARK_SWEEP){
+ } else if(collect_is_ms_compact()){
+      /* ms-compact does not move all live objects, and sometimes needs a double fix */
if(obj_is_fw_in_oi(p_obj)){
p_obj = obj_get_fw_in_oi(p_obj);
/* Only major collection in MS Gen GC might need double_fix.
* Double fixing happens when both forwarding and compaction happen.
*/
if(double_fix && obj_is_fw_in_oi(p_obj)){
- assert(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP);
+ assert(major_is_marksweep());
p_obj = obj_get_fw_in_oi(p_obj);
assert(address_belongs_to_gc_heap(p_obj, gc));
}
- *p_ref = p_obj;
}
- } else {
+ } else { /* minor collection or slide major compaction */
assert(obj_is_fw_in_oi(p_obj));
- *p_ref = obj_get_fw_in_oi(p_obj);
+ p_obj = obj_get_fw_in_oi(p_obj);
}
+
+ *p_ref = p_obj;
}
}
}
Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp
===================================================================
--- vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (revision 636625)
+++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref.cpp (working copy)
@@ -56,7 +56,7 @@
static inline void fallback_update_fw_ref(REF *p_ref)
{
- assert(IS_FALLBACK_COMPACTION);
+ assert(collect_is_fallback());
Partial_Reveal_Object *p_obj = read_slot(p_ref);
if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
@@ -82,8 +82,8 @@
POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
REF *p_ref = (REF*)iter;
- if(IS_FALLBACK_COMPACTION)
- fallback_update_fw_ref(p_ref); // in case that this collection is FALLBACK_COLLECTION
+ if(collect_is_fallback())
+      fallback_update_fw_ref(p_ref); // in case this collection is ALGO_MAJOR_FALLBACK
Partial_Reveal_Object *p_obj = read_slot(p_ref);
if(!p_obj)
continue;
@@ -91,7 +91,7 @@
gc_add_finalizable_obj(gc, p_obj);
*p_ref = (REF)NULL;
} else {
- if(gc_match_kind(gc, MINOR_COLLECTION) && obj_need_move(gc, p_obj)){
+ if(collect_is_minor() && obj_need_move(gc, p_obj)){
assert(obj_is_fw_in_oi(p_obj));
write_slot(p_ref, obj_get_fw_in_oi(p_obj));
}
@@ -105,7 +105,7 @@
}
gc_put_finalizable_objects(gc);
- if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+ if(collect_need_update_repset())
finref_add_repset_from_pool(gc, obj_with_fin_pool);
}
@@ -135,23 +135,23 @@
Trace_Object_Func trace_object;
/* set trace_object() function */
- if(gc_match_kind(gc, MINOR_COLLECTION)){
- switch( MINOR_ALGO ){
- case MINOR_NONGEN_FORWARD_POOL:
- trace_object = trace_obj_in_nongen_fw;
- break;
- case MINOR_GEN_FORWARD_POOL:
- trace_object = trace_obj_in_gen_fw;
- break;
- case MINOR_NONGEN_SEMISPACE_POOL:
- trace_object = trace_obj_in_nongen_ss;
- break;
- case MINOR_GEN_SEMISPACE_POOL:
- trace_object = trace_obj_in_gen_ss;
- break;
- default: assert(0);
+ if(collect_is_minor()){
+ if(gc_is_gen_mode()){
+ if(minor_is_forward())
+ trace_object = trace_obj_in_gen_fw;
+ else if(minor_is_semispace())
+ trace_object = trace_obj_in_gen_ss;
+ else
+ assert(0);
+ }else{
+ if(minor_is_forward())
+ trace_object = trace_obj_in_nongen_fw;
+ else if(minor_is_semispace())
+ trace_object = trace_obj_in_nongen_ss;
+ else
+ assert(0);
}
- } else if(gc_match_kind(gc, NORMAL_MAJOR_COLLECTION)){
+ } else if(collect_is_major_normal()){
p_ref_or_obj = p_obj;
if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){
trace_object = trace_obj_in_space_tune_marking;
@@ -165,18 +165,18 @@
} else {
collector->los_live_obj_size += round_up_to_size(obj_size, KB);
}
- } else if(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP){
+ } else if(major_is_marksweep()){
trace_object = trace_obj_in_ms_marking;
} else {
trace_object = trace_obj_in_normal_marking;
}
- } else if(gc_match_kind(gc, FALLBACK_COLLECTION)){
- if(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP)
+ } else if(collect_is_fallback()){
+ if(major_is_marksweep())
trace_object = trace_obj_in_ms_fallback_marking;
else
trace_object = trace_obj_in_fallback_marking;
} else {
- assert(gc_match_kind(gc, MARK_SWEEP_GC));
+ assert(major_is_marksweep());
p_ref_or_obj = p_obj;
if(!gc_mark_is_concurrent())
trace_object = trace_obj_in_ms_marking;
@@ -194,8 +194,8 @@
POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
while(!vector_block_iterator_end(task_block, iter)){
void *p_ref_or_obj = (void*)*iter;
- assert((gc_match_either_kind(gc, MINOR_COLLECTION|FALLBACK_COLLECTION) && *(Partial_Reveal_Object **)p_ref_or_obj)
- || (gc_match_either_kind(gc, NORMAL_MAJOR_COLLECTION|MS_COLLECTION|MS_COMPACT_COLLECTION) && p_ref_or_obj));
+ assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj)
+ || ((collect_is_major_normal()||major_is_marksweep()) && p_ref_or_obj));
trace_object(collector, p_ref_or_obj);
if(collector->result == FALSE) break; /* Resurrection fallback happens; force return */
@@ -240,7 +240,7 @@
/* Perhaps obj has been resurrected by previous resurrections */
if(!gc_obj_is_dead(gc, p_obj)){
- if(gc_match_kind(gc, MINOR_COLLECTION) && obj_need_move(gc, p_obj))
+ if(collect_is_minor() && obj_need_move(gc, p_obj))
write_slot(p_ref, obj_get_fw_in_oi(p_obj));
continue;
}
@@ -248,7 +248,7 @@
resurrect_obj_tree(collector, p_ref);
if(collector->result == FALSE){
/* Resurrection fallback happens */
- assert(gc_match_kind(gc, MINOR_COLLECTION));
+ assert(collect_is_minor());
return; /* force return */
}
}
@@ -260,7 +260,7 @@
* Because it is outside heap, we can't update it in ref fixing.
* In minor collection p_ref of the root dead obj is automatically updated while tracing.
*/
- if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+ if(collect_need_update_repset())
finref_add_repset_from_pool(gc, finalizable_obj_pool);
metadata->pending_finalizers = TRUE;
@@ -271,7 +271,7 @@
static void identify_dead_refs(GC *gc, Pool *pool)
{
- if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+ if(collect_need_update_repset())
finref_reset_repset(gc);
pool_iterator_init(pool);
@@ -283,7 +283,7 @@
Partial_Reveal_Object *p_obj = read_slot(p_ref);
assert(p_obj);
REF *p_referent_field = obj_get_referent_field(p_obj);
- if(IS_FALLBACK_COMPACTION)
+ if(collect_is_fallback())
fallback_update_fw_ref(p_referent_field);
Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
@@ -297,7 +297,7 @@
}
if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive
if(obj_need_move(gc, p_referent)){
- if(gc_match_kind(gc, MINOR_COLLECTION)){
+ if(collect_is_minor()){
assert(obj_is_fw_in_oi(p_referent));
Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
write_slot(p_referent_field, p_new_referent);
@@ -308,7 +308,7 @@
if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field);
- } else if(!gc_match_kind(gc, MS_COLLECTION)){
+      } else{ // the if(collect_move_object()) check is redundant here because obj_need_move() has already been checked
finref_repset_add_entry(gc, p_referent_field);
}
}
@@ -324,7 +324,7 @@
block = pool_iterator_next(pool);
}
- if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION)){
+ if(collect_need_update_repset()){
finref_put_repset(gc);
finref_add_repset_from_pool(gc, pool);
}
@@ -333,7 +333,7 @@
static void identify_dead_softrefs(Collector *collector)
{
GC *gc = collector->gc;
- if(gc_match_kind(gc, MINOR_COLLECTION)){
+ if(collect_is_minor()){
assert(softref_pool_is_empty(gc));
return;
}
@@ -360,7 +360,7 @@
Finref_Metadata *metadata = gc->finref_metadata;
Pool *phanref_pool = metadata->phanref_pool;
- if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+ if(collect_need_update_repset())
finref_reset_repset(gc);
// collector_reset_repset(collector);
pool_iterator_init(phanref_pool);
@@ -372,17 +372,17 @@
Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref);
assert(p_obj);
REF *p_referent_field = obj_get_referent_field(p_obj);
- if(IS_FALLBACK_COMPACTION)
- fallback_update_fw_ref(p_referent_field);
- Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
-
+ if(collect_is_fallback())
+ fallback_update_fw_ref(p_referent_field);
+
+ Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
if(!p_referent){ // referent field has been cleared
*p_ref = NULL;
continue;
}
if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive
- if(obj_need_move(gc, p_referent))
- if(gc_match_kind(gc, MINOR_COLLECTION)){
+ if(obj_need_move(gc, p_referent)){
+ if(collect_is_minor()){
assert(obj_is_fw_in_oi(p_referent));
Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
write_slot(p_referent_field, p_new_referent);
@@ -390,9 +390,10 @@
if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field);
- } else if(!gc_match_kind(gc, MS_COLLECTION)){
+        } else{ // the if(collect_move_object()) check is redundant here because obj_need_move() has already been checked
finref_repset_add_entry(gc, p_referent_field);
}
+ }
*p_ref = (REF)NULL;
continue;
}
@@ -408,7 +409,7 @@
block = pool_iterator_next(phanref_pool);
}
// collector_put_repset(collector);
- if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION)){
+ if(collect_need_update_repset()){
finref_put_repset(gc);
finref_add_repset_from_pool(gc, phanref_pool);
}
@@ -588,7 +589,7 @@
resurrect_finalizable_objects(collector);
gc->collect_result = gc_collection_result(gc);
if(!gc->collect_result){
- assert(gc_match_kind(gc, MINOR_COLLECTION));
+ assert(collect_is_minor());
resurrection_fallback_handler(gc);
return;
}
@@ -650,17 +651,17 @@
Partial_Reveal_Object *p_obj = read_slot(p_ref);
assert(p_obj);
REF *p_referent_field = obj_get_referent_field(p_obj);
- if(IS_FALLBACK_COMPACTION)
+ if(collect_is_fallback())
fallback_update_fw_ref(p_referent_field);
- Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
-
+
+ Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
if(!p_referent){ // referent field has been cleared
*p_ref = (REF)NULL;
continue;
}
if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive
if(obj_need_move(gc, p_referent))
- if(gc_match_kind(gc, MINOR_COLLECTION)){
+ if(collect_is_minor()){
assert(obj_is_fw_in_oi(p_referent));
Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
write_slot(p_referent_field, p_new_referent);
@@ -684,14 +685,14 @@
{
Finref_Metadata *metadata = gc->finref_metadata;
- if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+ if(collect_need_update_repset())
finref_reset_repset(gc);
- if(!gc_match_kind(gc, MS_COLLECTION)){
+ if(collect_move_object()){
update_referent_field_ignore_finref(gc, metadata->softref_pool);
update_referent_field_ignore_finref(gc, metadata->weakref_pool);
update_referent_field_ignore_finref(gc, metadata->phanref_pool);
}
- if(gc_match_either_kind(gc, MAJOR_COLLECTION|MS_COMPACT_COLLECTION))
+ if(collect_need_update_repset())
finref_put_repset(gc);
}
@@ -743,7 +744,7 @@
* Double fixing happens when both forwarding and compaction happen.
*/
if(double_fix && obj_is_fw_in_oi(p_new_ref)){
- assert(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP);
+ assert(major_is_marksweep());
p_new_ref = obj_get_fw_in_oi(p_new_ref);
assert(address_belongs_to_gc_heap(p_new_ref, gc));
}
@@ -762,16 +763,14 @@
* Double fixing happens when both forwarding and compaction happen.
*/
if(double_fix && obj_is_fw_in_oi(p_obj)){
- assert(gc_get_mos((GC_Gen*)gc)->collect_algorithm == MAJOR_MARK_SWEEP);
+ assert(major_is_marksweep());
p_obj = obj_get_fw_in_oi(p_obj);
assert(address_belongs_to_gc_heap(p_obj, gc));
}
write_slot(p_ref, p_obj);
}
-extern Boolean IS_MOVE_COMPACT;
-
-/* parameter pointer_addr_in_pool means it is p_ref or p_obj in pool */
+/* Only called in non-minor collections. The parameter pointer_addr_in_pool indicates whether the pool holds p_ref or p_obj entries. */
static void nondestructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool, Boolean double_fix)
{
Finref_Metadata *metadata = gc->finref_metadata;
@@ -790,12 +789,12 @@
p_ref = (REF*)iter;
p_obj = read_slot(p_ref);
- if(IS_MOVE_COMPACT){
+    if(collect_is_compact_move()){ /* includes both unique move-compact and major move-compact */
move_compaction_update_ref(gc, p_ref);
- } else if(gc_match_kind(gc, MS_COMPACT_COLLECTION) || gc_get_mos((GC_Gen*)gc)->collect_algorithm==MAJOR_MARK_SWEEP){
+ } else if(collect_is_ms_compact()){
if(obj_is_fw_in_oi(p_obj))
moving_mark_sweep_update_ref(gc, p_ref, double_fix);
- } else {
+ } else { /* major slide compact */
assert((obj_is_marked_in_vt(p_obj) && obj_is_fw_in_oi(p_obj)));
write_slot(p_ref , obj_get_fw_in_oi(p_obj));
}
@@ -806,7 +805,7 @@
void gc_update_finref_repointed_refs(GC *gc, Boolean double_fix)
{
- assert(!gc_match_kind(gc, MINOR_COLLECTION));
+ assert(!collect_is_minor());
Finref_Metadata *metadata = gc->finref_metadata;
Pool *repset_pool = metadata->repset_pool;
@@ -814,7 +813,7 @@
nondestructively_fix_finref_pool(gc, repset_pool, TRUE, double_fix);
if(!pool_is_empty(fallback_ref_pool)){
- assert(IS_FALLBACK_COMPACTION);
+ assert(collect_is_fallback());
nondestructively_fix_finref_pool(gc, fallback_ref_pool, FALSE, double_fix);
}
}
@@ -854,3 +853,4 @@
+
Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h
===================================================================
--- vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (revision 636625)
+++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref.h (working copy)
@@ -71,7 +71,7 @@
}
switch(type){
case SOFT_REFERENCE :
- if(gc_match_kind(collector->gc, MINOR_COLLECTION))
+ if(collect_is_minor())
scan_slot(collector, p_referent_field);
else
collector_add_softref(collector, p_obj);
Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp
===================================================================
--- vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (revision 636625)
+++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.cpp (working copy)
@@ -426,6 +426,11 @@
finref_metadata_clear_pool(gc->finref_metadata->phanref_pool);
}
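+/* empty the repointed-reference set pool in the finref metadata */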
+void gc_clear_finref_repset_pool(GC* gc)
+{
+ finref_metadata_clear_pool(gc->finref_metadata->repset_pool);
+}
+
Boolean finref_copy_pool(Pool *src_pool, Pool *dest_pool, GC *gc)
{
Vector_Block *dest_block = finref_get_free_block(gc);
@@ -441,3 +446,4 @@
return TRUE;
}
+
Index: vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h
===================================================================
--- vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (revision 636625)
+++ vm/gc_gen/src/finalizer_weakref/finalizer_weakref_metadata.h (working copy)
@@ -91,6 +91,7 @@
extern void gc_clear_weakref_pools(GC *gc);
+extern void gc_clear_finref_repset_pool(GC* gc);
extern Vector_Block *finref_metadata_extend(void);
/* Every place requesting a free vector block in finref should call this function */
Index: vm/gc_gen/src/gen/gen.cpp
===================================================================
--- vm/gc_gen/src/gen/gen.cpp (revision 636625)
+++ vm/gc_gen/src/gen/gen.cpp (working copy)
@@ -42,16 +42,31 @@
POINTER_SIZE_INT MIN_NOS_SIZE = 0;
POINTER_SIZE_INT MAX_NOS_SIZE = 0;
-/* should clean up */
-unsigned int MINOR_ALGO = 0;
-unsigned int MAJOR_ALGO = 0;
-
Boolean GEN_NONGEN_SWITCH = FALSE;
Boolean JVMTI_HEAP_ITERATION = true;
-Boolean gen_mode;
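+/* allocate and zero-initialize a GC_Gen instance, returned as a generic GC pointer */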
+GC* gc_gen_create()
+{
+ GC* gc = (GC*)STD_MALLOC(sizeof(GC_Gen));
+ assert(gc);
+ memset(gc, 0, sizeof(GC_Gen));
+ return gc;
+}
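+
+/* switch generational mode: set or clear the gen flag, install the matching write barrier, and tell the Java helper class the new mode */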
+void gc_set_gen_mode(Boolean status)
+{
+ if(status){
+ gc_set_gen_flag();
+ gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_REF);
+ }else{
+ gc_clear_gen_flag();
+ gc_set_barrier_function(WRITE_BARRIER_REM_NIL);
+ }
+
+ HelperClass_set_GenMode(status);
+}
+
#ifndef STATIC_NOS_MAPPING
void* nos_boundary;
#endif
@@ -96,7 +111,7 @@
determine_min_nos_size(gc_gen, min_heap_size);
POINTER_SIZE_INT los_size = 0;
- if(MAJOR_ALGO == MAJOR_MARK_SWEEP)
+ if(major_is_marksweep())
min_los_size_bytes = 0;
else
los_size = determine_los_size(min_heap_size);
@@ -202,7 +217,7 @@
#endif /* STATIC_NOS_MAPPING else */
- HEAP_NULL = (POINTER_SIZE_INT)reserved_base;
+ HEAP_BASE = (POINTER_SIZE_INT)reserved_base;
gc_gen->physical_start = physical_start;
gc_gen->heap_start = reserved_base;
@@ -218,7 +233,7 @@
gc_gen->num_collections = 0;
gc_gen->time_collections = 0;
gc_gen->blocks = (Block*)reserved_base;
- gc_gen->force_major_collect = FALSE;
+ gc_gen->next_collect_force_major = FALSE;
gc_gen->force_gen_mode = FALSE;
max_heap_size_bytes = max_heap_size;
@@ -232,7 +247,7 @@
+ space_committed_size(gc_get_mos(gc_gen))
+ space_committed_size(gc_get_los(gc_gen));
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
+ if(!major_is_marksweep()){
Blocked_Space *nos = (Blocked_Space*)gc_get_nos(gc_gen);
Blocked_Space *mos = (Blocked_Space*)gc_get_mos(gc_gen);
/* Connect mos and nos, so that they can be compacted as one space */
@@ -264,7 +279,7 @@
gc_nos_destruct(gc_gen);
gc_mos_destruct(gc_gen);
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
+ if(!major_is_marksweep()){
los_size = (int)space_committed_size((Space*)gc_gen->los);
gc_los_destruct(gc_gen);
}
@@ -305,7 +320,7 @@
void gc_nos_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT nos_size, POINTER_SIZE_INT commit_size)
{
Space *nos;
- if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+ if(minor_is_semispace()){
nos = (Space*)sspace_initialize((GC*)gc, start, nos_size, commit_size);
nos_alloc = sspace_alloc;
}else{
@@ -314,12 +329,11 @@
}
gc_set_nos(gc, nos);
- nos->collect_algorithm = MINOR_ALGO;
}
void gc_nos_destruct(GC_Gen *gc)
{
- if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL)
+ if(minor_is_semispace())
sspace_destruct((Sspace*)gc->nos);
else
fspace_destruct((Fspace*)gc->nos);
@@ -328,7 +342,7 @@
void gc_mos_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT mos_size, POINTER_SIZE_INT commit_size)
{
Space *mos;
- if(MAJOR_ALGO == MAJOR_MARK_SWEEP){
+ if(major_is_marksweep()){
mos = (Space*)wspace_initialize((GC*)gc, start, mos_size, commit_size);
mos_alloc = wspace_alloc;
} else {
@@ -336,12 +350,11 @@
mos_alloc = mspace_alloc;
}
gc_set_mos(gc, mos);
- mos->collect_algorithm = MAJOR_ALGO;
}
void gc_mos_destruct(GC_Gen *gc)
{
- if(MAJOR_ALGO == MAJOR_MARK_SWEEP)
+ if(major_is_marksweep())
wspace_destruct((Wspace*)gc->mos);
else
mspace_destruct((Mspace*)gc->mos);
@@ -350,7 +363,7 @@
void gc_los_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT los_size)
{
Space *los;
- if(MAJOR_ALGO == MAJOR_MARK_SWEEP){
+ if(major_is_marksweep()){
assert(los_size == 0);
los = NULL;
los_alloc = wspace_alloc;
@@ -363,7 +376,7 @@
void gc_los_destruct(GC_Gen *gc)
{
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+ if(!major_is_marksweep())
lspace_destruct((Lspace*)gc->los);
}
@@ -372,102 +385,86 @@
Boolean IGNORE_VTABLE_TRACING = FALSE;
Boolean TRACE_JLC_VIA_VTABLE = FALSE;
-unsigned int gc_next_collection_kind(GC_Gen* gc)
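+/* decide the next collection kind: force a normal major collection when requested, when LOS is full, or when full compaction is forced; otherwise run a minor collection */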
+void gc_gen_decide_collection_kind(GC_Gen* gc, unsigned int cause)
{
- if(gc->force_major_collect || FORCE_FULL_COMPACT)
- return MAJOR_COLLECTION;
+ if(gc->next_collect_force_major || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT)
+ collect_set_major_normal();
else
- return MINOR_COLLECTION;
-}
-
-
-void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause)
-{
- /* this is for debugging. */
- gc->last_collect_kind = gc->collect_kind;
-#if defined(USE_MARK_SWEEP_GC)
- gc->collect_kind = MS_COLLECTION;
-#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
- gc->collect_kind = MC_COLLECTION;
-#else
- if(gc->force_major_collect || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT)
- gc->collect_kind = NORMAL_MAJOR_COLLECTION;
- else
- gc->collect_kind = MINOR_COLLECTION;
+ collect_set_minor();
- if(IGNORE_VTABLE_TRACING || (gc->collect_kind == MINOR_COLLECTION))
+ if(IGNORE_VTABLE_TRACING || collect_is_minor())
TRACE_JLC_VIA_VTABLE = FALSE;
else
TRACE_JLC_VIA_VTABLE = TRUE;
-#endif
+
return;
}
-void gc_decide_collection_algorithm(GC_Gen* gc, char* minor_algo, char* major_algo)
+GC* gc_gen_decide_collection_algo(char* minor_algo, char* major_algo, Boolean has_los)
{
- if(!minor_algo){
- MINOR_ALGO = MINOR_NONGEN_FORWARD_POOL;
- gc_disable_gen_mode();
+ GC_PROP = ALGO_POOL_SHARE | ALGO_DEPTH_FIRST;
- }else{
+ /* set default GC properties for generational GC */
+ GC_PROP |= ALGO_HAS_NOS;
+
+ /* default is has LOS */
+ GC_PROP |= ALGO_HAS_LOS;
+
+ Boolean use_default = FALSE;
+
+ if(minor_algo){
string_to_upper(minor_algo);
- if(!strcmp(minor_algo, "MINOR_NONGEN_FORWARD_POOL")){
- MINOR_ALGO = MINOR_NONGEN_FORWARD_POOL;
- gc_disable_gen_mode();
-
- }else if(!strcmp(minor_algo, "MINOR_GEN_FORWARD_POOL")){
- MINOR_ALGO = MINOR_GEN_FORWARD_POOL;
- gc_enable_gen_mode();
+ if(!strcmp(minor_algo, "PARTIAL_FORWARD")){
+ GC_PROP |= ALGO_COPY_FORWARD;
- }else if(!strcmp(minor_algo, "MINOR_NONGEN_SEMISPACE_POOL")){
- MINOR_ALGO = MINOR_NONGEN_SEMISPACE_POOL;
- gc_disable_gen_mode();
+ }else if(!strcmp(minor_algo, "SEMI_SPACE")){
+ GC_PROP |= ALGO_COPY_SEMISPACE;
- }else if(!strcmp(minor_algo, "MINOR_GEN_SEMISPACE_POOL")){
- MINOR_ALGO = MINOR_GEN_SEMISPACE_POOL;
- gc_enable_gen_mode();
-
}else {
WARN2("gc.base","\nWarning: GC algorithm setting incorrect. Will use default value.\n");
- MINOR_ALGO = MINOR_NONGEN_FORWARD_POOL;
- gc_disable_gen_mode();
+ use_default = TRUE;
}
}
+ if(!minor_algo || use_default)
+ GC_PROP |= ALGO_COPY_FORWARD;
- if(!major_algo){
- MAJOR_ALGO = MAJOR_COMPACT_MOVE;
-
- }else{
+
+ use_default = FALSE;
+
+ if(major_algo){
string_to_upper(major_algo);
- if(!strcmp(major_algo, "MAJOR_COMPACT_SLIDE")){
- MAJOR_ALGO = MAJOR_COMPACT_SLIDE;
+ if(!strcmp(major_algo, "SLIDE_COMPACT")){
+ GC_PROP |= ALGO_COMPACT_SLIDE;
- }else if(!strcmp(major_algo, "MAJOR_COMPACT_MOVE")){
- MAJOR_ALGO = MAJOR_COMPACT_MOVE;
+ }else if(!strcmp(major_algo, "MOVE_COMPACT")){
+ GC_PROP |= ALGO_COMPACT_MOVE;
+
+ }else if(!strcmp(major_algo, "MARK_SWEEP")){
+ GC_PROP |= ALGO_MARKSWEEP;
- }else if(!strcmp(major_algo, "MAJOR_MARK_SWEEP")){
- MAJOR_ALGO = MAJOR_MARK_SWEEP;
- is_collector_local_alloc = FALSE;
}else{
WARN2("gc.base","\nWarning: GC algorithm setting incorrect. Will use default value.\n");
- MAJOR_ALGO = MAJOR_COMPACT_MOVE;
-
+ use_default = TRUE;
}
}
- return;
-
+ if(!major_algo || use_default)
+ GC_PROP |= ALGO_COMPACT_MOVE;
+
+ GC* gc = gc_gen_create();
+
+ return gc;
}
-static Boolean nos_alloc_block(Space* space, Allocator* allocator)
+static Boolean nos_alloc_block(Space* nos, Allocator* allocator)
{
Boolean result;
- if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL)
- result = sspace_alloc_block((Sspace*)space, allocator);
+ if(minor_is_semispace())
+ result = sspace_alloc_block((Sspace*)nos, allocator);
else
- result = fspace_alloc_block((Fspace*)space, allocator);
+ result = fspace_alloc_block((Fspace*)nos, allocator);
return result;
}
@@ -495,9 +492,9 @@
static void gc_gen_adjust_heap_size(GC_Gen* gc)
{
- assert(gc_match_kind((GC*)gc, MAJOR_COLLECTION));
+ assert(collect_is_major());
- if(gc->committed_heap_size == max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL) return;
+ if(gc->committed_heap_size == max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_BASE) return;
Mspace* mos = (Mspace*)gc->mos;
Blocked_Space* nos = (Blocked_Space*)gc->nos;
@@ -534,8 +531,8 @@
if(new_heap_total_size <= heap_total_size) return;
/*If there is only small piece of area left not committed, we just merge it into the heap at once*/
- if(new_heap_total_size + (max_heap_size_bytes >> 5) > max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL)
- new_heap_total_size = max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
+ if(new_heap_total_size + (max_heap_size_bytes >> 5) > max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_BASE)
+ new_heap_total_size = max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_BASE;
adjust_size = new_heap_total_size - heap_total_size;
assert( !(adjust_size % SPACE_ALLOC_UNIT) );
@@ -600,7 +597,7 @@
static inline void nos_collection(Space *nos)
{
- if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL)
+ if(minor_is_semispace())
sspace_collection((Sspace*)nos);
else
fspace_collection((Fspace*)nos);
@@ -608,7 +605,7 @@
static inline void mos_collection(Space *mos)
{
- if(MAJOR_ALGO == MAJOR_MARK_SWEEP)
+ if(major_is_marksweep())
wspace_collection((Wspace*)mos);
else
mspace_collection((Mspace*)mos);
@@ -616,7 +613,7 @@
static inline void los_collection(Space *los)
{
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+ if(!major_is_marksweep())
lspace_collection((Lspace*)los);
}
@@ -636,7 +633,7 @@
mos->num_used_blocks = mos_used_space_size((Space*)mos)>> GC_BLOCK_SHIFT_COUNT;
if(los){
- assert(MAJOR_ALGO != MAJOR_MARK_SWEEP);
+ assert(!major_is_marksweep());
los->accumu_alloced_size += los->last_alloced_size;
}
}
@@ -648,12 +645,12 @@
Space *los = gc_get_los(gc);
/* Minor collection, but also can be every n minor collections, use fspace->num_collections to identify. */
- if (gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+ if (collect_is_minor()){
mos->accumu_alloced_size += mos->last_alloced_size;
/* The alloced_size reset operation of mos and nos is not necessary, because they are not accumulated.
* But los->last_alloced_size must be reset, because it is accumulated. */
if(los){
- assert(MAJOR_ALGO != MAJOR_MARK_SWEEP);
+ assert(!major_is_marksweep());
los->last_alloced_size = 0;
}
/* Major collection, but also can be every n major collections, use mspace->num_collections to identify. */
@@ -667,7 +664,7 @@
nos->accumu_alloced_size = 0;
if(los){
- assert(MAJOR_ALGO != MAJOR_MARK_SWEEP);
+ assert(!major_is_marksweep());
los->total_alloced_size += los->accumu_alloced_size;
los->last_alloced_size = 0;
los->accumu_alloced_size = 0;
@@ -677,7 +674,7 @@
static void nos_reset_after_collection(Space *nos)
{
- if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL)
+ if(minor_is_semispace())
sspace_reset_after_collection((Sspace*)nos);
else
fspace_reset_after_collection((Fspace*)nos);
@@ -685,19 +682,18 @@
static void nos_prepare_for_collection(Space *nos)
{
- if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL)
+ if(minor_is_semispace())
sspace_prepare_for_collection((Sspace*)nos);
}
static void mos_reset_after_collection(Space *mos)
{
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+ if(!major_is_marksweep())
mspace_reset_after_collection((Mspace*)mos);
else
wspace_reset_after_collection((Wspace*)mos);
}
-Boolean IS_FALLBACK_COMPACTION = FALSE; /* only for debugging, don't use it. */
void gc_gen_stats_verbose(GC_Gen* gc);
void gc_gen_reclaim_heap(GC_Gen *gc, int64 gc_start_time)
@@ -709,10 +705,10 @@
Space *los = gc->los;
- if(verify_live_heap && (MAJOR_ALGO != MAJOR_MARK_SWEEP))
+ if(verify_live_heap && (!major_is_marksweep()))
gc_verify_heap((GC*)gc, TRUE);
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
+ if(!major_is_marksweep()){
gc_gen_update_space_info_before_gc(gc);
gc_compute_space_tune_size_before_marking((GC*)gc);
}
@@ -725,7 +721,7 @@
nos_prepare_for_collection(nos);
- if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+ if(collect_is_minor()){
INFO2("gc.process", "GC: start minor collection ...\n");
@@ -734,7 +730,7 @@
/* This is for compute mos->last_alloced_size */
unsigned int mos_used_blocks_before_minor, mos_used_blocks_after_minor; /* only used for non MAJOR_MARK_SWEEP collection */
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+ if(!major_is_marksweep())
mos_used_blocks_before_minor = ((Blocked_Space*)mos)->free_block_idx - ((Blocked_Space*)mos)->first_block_idx;
nos_collection(nos);
@@ -743,7 +739,7 @@
gc_gen_collector_stats_verbose_minor_collection(gc);
#endif
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
+ if(!major_is_marksweep()){
mos_used_blocks_after_minor = ((Blocked_Space*)mos)->free_block_idx - ((Blocked_Space*)mos)->first_block_idx;
assert( mos_used_blocks_before_minor <= mos_used_blocks_after_minor );
((Blocked_Space*)mos)->last_alloced_size = GC_BLOCK_SIZE_BYTES * ( mos_used_blocks_after_minor - mos_used_blocks_before_minor );
@@ -765,13 +761,13 @@
INFO2("gc.process", "GC: start major collection ...\n");
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+ if(!major_is_marksweep())
los->move_object = TRUE;
mos_collection(mos); /* collect mos and nos together */
los_collection(los);
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+ if(!major_is_marksweep())
los->move_object = FALSE;
#ifdef GC_GEN_STATS
@@ -782,19 +778,18 @@
INFO2("gc.process", "GC: end of major collection ...\n");
}
- if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+ if(gc->collect_result == FALSE && collect_is_minor()){
INFO2("gc.process", "GC: Minor collection failed, transform to fallback collection ...");
/* runout mos in minor collection */
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP){
+ if(!major_is_marksweep()){
assert(((Blocked_Space*)mos)->free_block_idx == ((Blocked_Space*)mos)->ceiling_block_idx + 1);
((Blocked_Space*)mos)->num_used_blocks = ((Blocked_Space*)mos)->num_managed_blocks;
}
- IS_FALLBACK_COMPACTION = TRUE;
gc_reset_collect_result((GC*)gc);
- gc->collect_kind = FALLBACK_COLLECTION;
+ GC_PROP |= ALGO_MAJOR_FALLBACK;
#ifdef GC_GEN_STATS
/*since stats is changed in minor collection, we need to reset stats before fallback collection*/
@@ -805,19 +800,17 @@
if(gc_is_gen_mode())
gc_clear_remset((GC*)gc);
- if(verify_live_heap && (MAJOR_ALGO != MAJOR_MARK_SWEEP))
+ if(verify_live_heap && (!major_is_marksweep()))
event_gc_collect_kind_changed((GC*)gc);
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+ if(!major_is_marksweep())
los->move_object = TRUE;
mos_collection(mos); /* collect both mos and nos */
los_collection(los);
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+ if(!major_is_marksweep())
los->move_object = FALSE;
- IS_FALLBACK_COMPACTION = FALSE;
-
#ifdef GC_GEN_STATS
gc->stats->num_fallback_collections++;
gc_gen_collector_stats_verbose_major_collection(gc);
@@ -834,20 +827,20 @@
}
nos_reset_after_collection(nos);
- if(gc_match_kind((GC*)gc, MAJOR_COLLECTION))
+ if(collect_is_major())
mos_reset_after_collection(mos);
- if(verify_live_heap && (MAJOR_ALGO != MAJOR_MARK_SWEEP))
+ if(verify_live_heap && (!major_is_marksweep()))
gc_verify_heap((GC*)gc, FALSE);
- assert(MAJOR_ALGO == MAJOR_MARK_SWEEP || !los->move_object);
+ assert(major_is_marksweep() || !los->move_object);
int64 pause_time = time_now() - gc_start_time;
gc->time_collections += pause_time;
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP){ /* adaptations here */
+ if(!major_is_marksweep()){ /* adaptations here */
- if(gc_match_kind((GC*)gc, MAJOR_COLLECTION))
+ if(collect_is_major())
gc_gen_adjust_heap_size(gc); /* adjust committed GC heap size */
gc_gen_adapt(gc, pause_time); /* 1. decide next collection kind; 2. adjust nos_boundary */
@@ -944,24 +937,24 @@
<<"\nGC: GC id: GC["<num_collections<<"]"
<<"\nGC: current collection num: "<num_collections);
- switch(gc->collect_kind) {
- case MINOR_COLLECTION:
+ if( collect_is_minor()) {
INFO2("gc.collect","GC: collection type: minor");
#ifdef GC_GEN_STATS
INFO2("gc.collect","GC: current minor collection num: "<stats->num_minor_collections);
#endif
- break;
- case NORMAL_MAJOR_COLLECTION:
+ }else if( collect_is_major_normal() ){
INFO2("gc.collect","GC: collection type: normal major");
#ifdef GC_GEN_STATS
INFO2("gc.collect","GC: current normal major collection num: "<stats->num_major_collections);
#endif
- break;
- case FALLBACK_COLLECTION:
+
+ }else if( collect_is_fallback() ){
INFO2("gc.collect","GC: collection type: fallback");
#ifdef GC_GEN_STATS
INFO2("gc.collect","GC: current fallback collection num: "<stats->num_fallback_collections);
#endif
+ }else{
+ assert(0);
}
switch(gc->cause) {
@@ -973,6 +966,9 @@
break;
case GC_CAUSE_RUNTIME_FORCE_GC:
INFO2("gc.collect","GC: collection cause: runtime force gc");
+ break;
+ default:
+ assert(0);
}
INFO2("gc.collect","GC: pause time: "<<(pause_time>>10)<<"ms"
@@ -987,7 +983,7 @@
<<"\nGC: LOS size: "<los->committed_heap_size)<<", free size:"<los))
<<"\nGC: MOS size: "<mos->committed_heap_size)<<", free size:"<mos)) << "\n");
- if(MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+ if(minor_is_semispace()){
INFO2("gc.space",
"GC: NOS size: "<nos->committed_heap_size)
<<", tospace size:"<nos))
@@ -1008,10 +1004,10 @@
<<"\ninitial num collectors: "<num_collectors
<<"\ninitial nos size: "<nos->committed_heap_size)
<<"\nnos collection algo: "
- <<((gc->nos->collect_algorithm==MINOR_NONGEN_SEMISPACE_POOL || gc->nos->collect_algorithm==MINOR_GEN_SEMISPACE_POOL)?"semi space":"partial forward")
+ <<(minor_is_semispace()?"semi space":"partial forward")
<<"\ninitial mos size: "<mos->committed_heap_size)
<<"\nmos collection algo: "
- <<((gc->mos->collect_algorithm==MAJOR_COMPACT_MOVE)?"move compact":"slide compact")
+ <<(major_is_compact_move()?"move compact":"slide compact")
<<"\ninitial los size: "<los->committed_heap_size)<<"\n");
}
@@ -1035,14 +1031,14 @@
/* init collector alloc_space */
void gc_gen_init_collector_alloc(GC_Gen* gc, Collector* collector)
{
- if(MAJOR_ALGO == MAJOR_MARK_SWEEP){
+ if(major_is_marksweep()){
allocator_init_local_chunks((Allocator*)collector);
gc_init_collector_free_chunk_list(collector);
}
Allocator* allocator = (Allocator*)collector;
- if( MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+ if( minor_is_semispace()){
allocator->alloc_space = gc->nos;
/* init backup allocator */
unsigned int size = sizeof(Allocator);
@@ -1057,14 +1053,14 @@
void gc_gen_reset_collector_alloc(GC_Gen* gc, Collector* collector)
{
alloc_context_reset((Allocator*)collector);
- if( MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+ if( minor_is_semispace()){
alloc_context_reset(collector->backup_allocator);
}
}
void gc_gen_destruct_collector_alloc(GC_Gen* gc, Collector* collector)
{
- if( MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+ if( minor_is_semispace()){
STD_FREE(collector->backup_allocator);
}
}
Index: vm/gc_gen/src/gen/gen.h
===================================================================
--- vm/gc_gen/src/gen/gen.h (revision 636625)
+++ vm/gc_gen/src/gen/gen.h (working copy)
@@ -33,34 +33,8 @@
struct GC_Gen_Stats;
#endif
-extern Boolean gen_mode;
+void gc_set_gen_mode(Boolean status);
-inline Boolean gc_is_gen_mode()
-{ return gen_mode; }
-
-inline void gc_enable_gen_mode()
-{
- gen_mode = TRUE;
- gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_REF);
- HelperClass_set_GenMode(TRUE);
-}
-
-inline void gc_disable_gen_mode()
-{
- gen_mode = FALSE;
- gc_set_barrier_function(WRITE_BARRIER_REM_NIL);
- HelperClass_set_GenMode(FALSE);
-}
-
-inline void gc_set_gen_mode(Boolean status)
-{
- gen_mode = status;
- if(gen_mode)
- gc_set_barrier_function(WRITE_BARRIER_REM_SOURCE_REF);
- HelperClass_set_GenMode(status);
-}
-
-
/* some globals */
extern POINTER_SIZE_INT NOS_SIZE;
@@ -145,7 +119,7 @@
Space *mos;
Space *los;
- Boolean force_major_collect;
+ Boolean next_collect_force_major;
Gen_Mode_Adaptor* gen_mode_adaptor;
Boolean force_gen_mode;
@@ -164,16 +138,6 @@
void gc_gen_init_verbose(GC_Gen *gc);
void gc_gen_wrapup_verbose(GC_Gen* gc);
-inline POINTER_SIZE_INT gc_gen_free_memory_size(GC_Gen* gc)
-{ return blocked_space_free_mem_size((Blocked_Space*)gc->nos) +
- blocked_space_free_mem_size((Blocked_Space*)gc->mos) +
- lspace_free_memory_size((Lspace*)gc->los); }
-
-inline POINTER_SIZE_INT gc_gen_total_memory_size(GC_Gen* gc)
-{ return space_committed_size((Space*)gc->nos) +
- space_committed_size((Space*)gc->mos) +
- lspace_committed_size((Lspace*)gc->los); }
-
/////////////////////////////////////////////////////////////////////////////////////////
void gc_nos_initialize(GC_Gen *gc, void *start, POINTER_SIZE_INT nos_size, POINTER_SIZE_INT commit_size);
@@ -204,9 +168,8 @@
void gc_set_mos(GC_Gen* gc, Space* mos);
void gc_set_los(GC_Gen* gc, Space* los);
-void gc_decide_collection_algorithm(GC_Gen* gc, char* minor_algo, char* major_algo);
-void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause);
-unsigned int gc_next_collection_kind(GC_Gen* gc);
+GC* gc_gen_decide_collection_algo(char* minor_algo, char* major_algo, Boolean has_los);
+void gc_gen_decide_collection_kind(GC_Gen* gc, unsigned int cause);
void gc_gen_adapt(GC_Gen* gc, int64 pause_time);
@@ -235,6 +198,16 @@
POINTER_SIZE_INT mos_used_space_size(Space* mos);
POINTER_SIZE_INT nos_used_space_size(Space* nos);
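+/* NOS free size goes through nos_free_space_size() so the semi-space NOS case is handled */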
+inline POINTER_SIZE_INT gc_gen_free_memory_size(GC_Gen* gc)
+{ return nos_free_space_size((Space*)gc->nos) +
+ blocked_space_free_mem_size((Blocked_Space*)gc->mos) +
+ lspace_free_memory_size((Lspace*)gc->los); }
+
+inline POINTER_SIZE_INT gc_gen_total_memory_size(GC_Gen* gc)
+{ return space_committed_size((Space*)gc->nos) +
+ space_committed_size((Space*)gc->mos) +
+ lspace_committed_size((Lspace*)gc->los); }
+
#ifndef STATIC_NOS_MAPPING
void* nos_space_adjust(Space* space, void* new_nos_boundary, POINTER_SIZE_INT new_nos_size);
#endif
@@ -243,3 +216,4 @@
+
Index: vm/gc_gen/src/gen/gen_adapt.cpp
===================================================================
--- vm/gc_gen/src/gen/gen_adapt.cpp (revision 636625)
+++ vm/gc_gen/src/gen/gen_adapt.cpp (working copy)
@@ -103,14 +103,14 @@
POINTER_SIZE_INT nos_free_size = blocked_space_free_mem_size(nos);
POINTER_SIZE_INT total_free_size = mos_free_size + nos_free_size;
- if(gc_match_kind((GC*)gc, MAJOR_COLLECTION)) {
+ if(collect_is_major()) {
assert(!gc_is_gen_mode());
if(gen_mode_adaptor->major_survive_ratio_threshold != 0 && mos->survive_ratio > gen_mode_adaptor->major_survive_ratio_threshold){
if(gen_mode_adaptor->major_repeat_count > MAX_MAJOR_REPEAT_COUNT ){
gc->force_gen_mode = TRUE;
- gc_enable_gen_mode();
- gc->force_major_collect = FALSE;
+ gc_set_gen_mode(TRUE);
+ gc->next_collect_force_major = FALSE;
return;
}else{
gen_mode_adaptor->major_repeat_count++;
@@ -121,7 +121,7 @@
}else{
/*compute throughput*/
- if(gc->last_collect_kind != MINOR_COLLECTION){
+ if(!collect_last_is_minor((GC*)gc)){
gen_mode_adaptor->nongen_minor_throughput = 1.0f;
}
if(gc->force_gen_mode){
@@ -141,7 +141,7 @@
}
if(gen_mode_adaptor->nongen_minor_throughput <= gen_mode_adaptor->gen_minor_throughput ){
- if( gc->last_collect_kind != MINOR_COLLECTION ){
+ if( !collect_last_is_minor((GC*)gc) ){
gen_mode_adaptor->major_survive_ratio_threshold = mos->survive_ratio;
}else if( !gc->force_gen_mode ){
gc->force_gen_mode = TRUE;
@@ -149,18 +149,18 @@
}
}
- if(gc->force_major_collect && !gc->force_gen_mode){
- gc->force_major_collect = FALSE;
+ if(gc->next_collect_force_major && !gc->force_gen_mode){
+ gc->next_collect_force_major = FALSE;
gc->force_gen_mode = TRUE;
gen_mode_adaptor->gen_mode_trial_count = 2;
- }else if(gc->last_collect_kind != MINOR_COLLECTION && gc->force_gen_mode){
+ }else if( collect_last_is_minor((GC*)gc) && gc->force_gen_mode){
gen_mode_adaptor->gen_mode_trial_count = MAX_INT32;
}
if(gc->force_gen_mode && (total_free_size <= ((float)min_nos_size_bytes) * 1.3 )){
gc->force_gen_mode = FALSE;
- gc_disable_gen_mode();
- gc->force_major_collect = TRUE;
+ gc_set_gen_mode(FALSE);
+ gc->next_collect_force_major = TRUE;
gen_mode_adaptor->gen_mode_trial_count = 0;
return;
}
@@ -170,17 +170,17 @@
gen_mode_adaptor->gen_mode_trial_count --;
if( gen_mode_adaptor->gen_mode_trial_count >= 0){
- gc_enable_gen_mode();
+ gc_set_gen_mode(TRUE);
return;
}
gc->force_gen_mode = FALSE;
- gc->force_major_collect = TRUE;
+ gc->next_collect_force_major = TRUE;
gen_mode_adaptor->gen_mode_trial_count = 0;
}
}
- gc_disable_gen_mode();
+ gc_set_gen_mode(FALSE);
return;
}
@@ -206,31 +206,35 @@
POINTER_SIZE_INT nos_free_size = space_committed_size(nos);
POINTER_SIZE_INT total_free_size = mos_free_size + nos_free_size;
- if(gc_match_kind((GC*)gc, MAJOR_COLLECTION)) gc->force_gen_mode = FALSE;
+ if(collect_is_major()) gc->force_gen_mode = FALSE;
if(!gc->force_gen_mode){
/*Major collection:*/
- if(gc_match_kind((GC*)gc, MAJOR_COLLECTION)){
+ if(collect_is_major()){
mos->time_collections += pause_time;
Tslow = (float)pause_time;
SMax = total_free_size;
/*If fall back happens, and nos_boundary reaches heap_ceiling, then we force major.*/
if( nos_free_size == 0)
- gc->force_major_collect = TRUE;
- else gc->force_major_collect = FALSE;
+ gc->next_collect_force_major = TRUE;
+ else gc->next_collect_force_major = FALSE;
- /*If major is caused by LOS, or collection kind is EXTEND_COLLECTION, all survive ratio is not updated.*/
- if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (!gc_match_kind((GC*)gc, EXTEND_COLLECTION))){
+      /*If the major collection is caused by LOS, or its kind is ALGO_MAJOR_EXTEND, the survive ratio is not updated.*/
+ extern Boolean mos_extended;
+ if((gc->cause != GC_CAUSE_LOS_IS_FULL) && !mos_extended ){
survive_ratio = (float)mos->period_surviving_size/(float)mos->committed_heap_size;
mos->survive_ratio = survive_ratio;
}
+      /* reset mos_extended here: this is the only place where it is consumed */
+ mos_extended = FALSE;
+
/*If there is no minor collection at all, we must give mos expected threshold a reasonable value.*/
if((gc->tuner->kind != TRANS_NOTHING) && (nos->num_collections == 0))
mspace_set_expected_threshold_ratio((Mspace *)mos, 0.5f);
/*If this major is caused by fall back compaction, we must give nos->survive_ratio
*a conservative and reasonable number to avoid next fall back.
*In fallback compaction, the survive_ratio of mos must be 1.*/
- if(gc_match_kind((GC*)gc, FALLBACK_COLLECTION)) nos->survive_ratio = 1;
+ if(collect_is_fallback()) nos->survive_ratio = 1;
}
/*Minor collection:*/
@@ -250,7 +254,7 @@
POINTER_SIZE_INT minor_surviving_size = last_total_free_size - total_free_size;
/*If the first GC is caused by LOS, mos->last_alloced_size should be smaller than this minor_surviving_size
*Because the last_total_free_size is not accurate.*/
- extern unsigned int MINOR_ALGO;
+
if(nos->num_collections != 1){
assert(minor_surviving_size == mos->last_alloced_size);
}
@@ -267,8 +271,8 @@
/* FIXME: if the total free size is lesser than threshold, the time point might be too late!
* Have a try to test whether the backup solution is better for specjbb.
*/
- // if ((mos_free_size + nos_free_size + minor_surviving_size) < free_size_threshold) gc->force_major_collect = TRUE;
- if ((mos_free_size + nos_free_size)< free_size_threshold) gc->force_major_collect = TRUE;
+ // if ((mos_free_size + nos_free_size + minor_surviving_size) < free_size_threshold) gc->next_collect_force_major = TRUE;
+ if ((mos_free_size + nos_free_size)< free_size_threshold) gc->next_collect_force_major = TRUE;
survive_ratio = (float)minor_surviving_size/(float)space_committed_size((Space*)nos);
nos->survive_ratio = survive_ratio;
@@ -314,7 +318,7 @@
total_size = max_heap_size_bytes - space_committed_size(los);
#else
POINTER_SIZE_INT curr_heap_commit_end =
- (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size;
+ (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_BASE + gc->committed_heap_size;
assert(curr_heap_commit_end > (POINTER_SIZE_INT)mos->heap_start);
total_size = curr_heap_commit_end - (POINTER_SIZE_INT)mos->heap_start;
#endif
@@ -408,7 +412,7 @@
/* below are ajustment */
POINTER_SIZE_INT curr_heap_commit_end =
- (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size;
+ (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_BASE + gc->committed_heap_size;
void* new_nos_boundary = (void*)(curr_heap_commit_end - new_nos_size);
Index: vm/gc_gen/src/gen/gen_stats.cpp
===================================================================
--- vm/gc_gen/src/gen/gen_stats.cpp (revision 636625)
+++ vm/gc_gen/src/gen/gen_stats.cpp (working copy)
@@ -43,7 +43,7 @@
{
GC_Gen_Stats* stats = gc->stats;
- if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){
+ if(collect_is_minor()){
stats->nos_surviving_obj_num_minor = 0;
stats->nos_surviving_obj_size_minor = 0;
stats->los_suviving_obj_num = 0;
@@ -65,7 +65,7 @@
GC_Gen_Collector_Stats* collector_stats;
Boolean is_los_collected = gc_gen_stats->is_los_collected;
- if(gc_match_kind((GC*)gc, MINOR_COLLECTION)) {
+ if(collect_is_minor()) {
    for (unsigned int i=0; i<gc->num_active_collectors; i++) {
collector_stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
@@ -101,15 +101,16 @@
{
GC_Gen_Stats* stats = gc->stats;
Boolean is_los_collected = stats->is_los_collected;
- if (gc_match_kind((GC*)gc, MINOR_COLLECTION)){
- TRACE2("gc.space", "GC: Fspace Collection stats: "
- <<"\nGC: collection algo: "<<((stats->nos_collection_algo_minor==MINOR_NONGEN_FORWARD_POOL)?"nongen forward":"gen forward")
- <<"\nGC: num surviving objs: "<nos_surviving_obj_num_minor
- <<"\nGC: size surviving objs: "<nos_surviving_obj_size_minor)
- <<"\nGC: surviving ratio: "<<(int)(stats->nos_surviving_ratio_minor*100)<<"%\n");
+ if (collect_is_minor()){
+ TRACE2("gc.space", "GC: NOS Collection stats: "
+ <<"\nGC: " << (gc_is_gen_mode()?"generational":"nongenerational")
+ <<"\nGC: collection algo: " << (minor_is_semispace()?"semi-space":"partial-forward")
+ <<"\nGC: num surviving objs: " << stats->nos_surviving_obj_num_minor
+ <<"\nGC: size surviving objs: " << verbose_print_size(stats->nos_surviving_obj_size_minor)
+ <<"\nGC: surviving ratio: " << (int)(stats->nos_surviving_ratio_minor*100) << "%\n");
}else{
- TRACE2("gc.space", "GC: Mspace Collection stats: "
- <<"\nGC: collection algo: "<<((stats->nos_mos_collection_algo_major==MAJOR_COMPACT_SLIDE)?"slide compact":"move compact")
+ TRACE2("gc.space", "GC: MOS Collection stats: "
+ <<"\nGC: collection algo: " << (major_is_marksweep()?"mark-sweep":"slide compact")
<<"\nGC: num surviving objs: "<nos_mos_suviving_obj_num_major
<<"\nGC: size surviving objs: "<nos_mos_suviving_obj_size_major)
<<"\nGC: surviving ratio: "<<(int)(stats->nos_mos_suviving_ratio_major*100)<<"%\n");
@@ -117,7 +118,7 @@
if(stats->is_los_collected) { /*if los is collected, need to output los related info*/
TRACE2("gc.space", "GC: Lspace Collection stats: "
- <<"\nGC: collection algo: "<<((stats->los_collection_algo==MAJOR_COMPACT_SLIDE)?"slide compact":"mark sweep")
+ <<"\nGC: collection algo: "<<(collect_is_major()?"slide compact":"mark sweep")
<<"\nGC: num surviving objs: "<los_suviving_obj_num
<<"\nGC: size surviving objs: "<los_suviving_obj_size)
<<"\nGC: surviving ratio: "<<(int)(stats->los_surviving_ratio*100)<<"%\n");
Index: vm/gc_gen/src/gen/gen_utils.cpp
===================================================================
--- vm/gc_gen/src/gen/gen_utils.cpp (revision 636625)
+++ vm/gc_gen/src/gen/gen_utils.cpp (working copy)
@@ -24,9 +24,9 @@
#ifndef STATIC_NOS_MAPPING
void* nos_space_adjust(Space* nos, void* new_nos_boundary, POINTER_SIZE_INT new_nos_size)
{
- if(nos->collect_algorithm == MINOR_NONGEN_SEMISPACE_POOL || nos->collect_algorithm == MINOR_GEN_SEMISPACE_POOL)
+ if(minor_is_semispace())
return sspace_heap_start_adjust((Sspace*)nos, new_nos_boundary, new_nos_size);
- else if( nos->collect_algorithm == MINOR_NONGEN_FORWARD_POOL || nos->collect_algorithm == MINOR_GEN_FORWARD_POOL )
+ else if(minor_is_forward())
return fspace_heap_start_adjust((Fspace*)nos, new_nos_boundary, new_nos_size);
assert(0);
@@ -37,7 +37,7 @@
POINTER_SIZE_INT mos_free_space_size(Space* mos)
{
POINTER_SIZE_INT free_size = 0;
- if( mos->collect_algorithm != MAJOR_MARK_SWEEP )
+ if( !major_is_marksweep())
return mspace_free_space_size((Mspace*)mos);
assert(0);
@@ -47,9 +47,9 @@
POINTER_SIZE_INT nos_free_space_size(Space* nos)
{
POINTER_SIZE_INT free_size = 0;
- if(nos->collect_algorithm == MINOR_NONGEN_SEMISPACE_POOL || nos->collect_algorithm == MINOR_GEN_SEMISPACE_POOL)
+ if(minor_is_semispace())
return sspace_free_space_size((Sspace*)nos);
- else if( nos->collect_algorithm == MINOR_NONGEN_FORWARD_POOL || nos->collect_algorithm == MINOR_GEN_FORWARD_POOL)
+ else if( minor_is_forward())
return fspace_free_space_size((Fspace*)nos);
assert(0);
@@ -60,7 +60,7 @@
POINTER_SIZE_INT mos_used_space_size(Space* mos)
{
POINTER_SIZE_INT free_size = 0;
- if( mos->collect_algorithm != MAJOR_MARK_SWEEP )
+ if( !major_is_marksweep() )
return mspace_used_space_size((Mspace*)mos);
assert(0);
@@ -70,9 +70,9 @@
POINTER_SIZE_INT nos_used_space_size(Space* nos)
{
POINTER_SIZE_INT free_size = 0;
- if(nos->collect_algorithm == MINOR_NONGEN_SEMISPACE_POOL || nos->collect_algorithm == MINOR_GEN_SEMISPACE_POOL)
+ if(minor_is_semispace())
return sspace_used_space_size((Sspace*)nos);
- else if( nos->collect_algorithm == MINOR_NONGEN_FORWARD_POOL || nos->collect_algorithm == MINOR_GEN_FORWARD_POOL)
+ else if( minor_is_forward())
return fspace_used_space_size((Fspace*)nos);
assert(0);
Index: vm/gc_gen/src/jni/java_natives.cpp
===================================================================
--- vm/gc_gen/src/jni/java_natives.cpp (revision 636625)
+++ vm/gc_gen/src/jni/java_natives.cpp (working copy)
@@ -116,7 +116,7 @@
JNIEXPORT jint JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getLargeObjectSize(JNIEnv *, jclass)
{
- return (jint) GC_OBJ_SIZE_THRESHOLD;
+ return (jint) GC_LOS_OBJ_SIZE_THRESHOLD;
}
#ifdef __cplusplus
Index: vm/gc_gen/src/jni/java_support.cpp
===================================================================
--- vm/gc_gen/src/jni/java_support.cpp (revision 636625)
+++ vm/gc_gen/src/jni/java_support.cpp (working copy)
@@ -50,10 +50,10 @@
//"org.apache.harmony.drlvm.gc_gen.GCHelper"
jclass GCHelper = jni_env->FindClass("GCHelper");
- jfieldID gen_mode = jni_env->GetStaticFieldID(GCHelper, "GEN_MODE", "Z");
- assert(gen_mode);
+ jfieldID gen_mode_field = jni_env->GetStaticFieldID(GCHelper, "GEN_MODE", "Z");
+ assert(gen_mode_field);
- jni_env->SetStaticBooleanField(GCHelper, gen_mode, status?JNI_TRUE:JNI_FALSE);
+ jni_env->SetStaticBooleanField(GCHelper, gen_mode_field, status?JNI_TRUE:JNI_FALSE);
hythread_suspend_disable();
*/
Index: vm/gc_gen/src/los/free_area_pool.cpp
===================================================================
--- vm/gc_gen/src/los/free_area_pool.cpp (revision 636625)
+++ vm/gc_gen/src/los/free_area_pool.cpp (working copy)
@@ -41,7 +41,7 @@
Free_Area* free_pool_find_size_area(Free_Area_Pool* pool, POINTER_SIZE_INT size)
{
- assert(size >= GC_OBJ_SIZE_THRESHOLD);
+ assert(size >= GC_LOS_OBJ_SIZE_THRESHOLD);
size = ALIGN_UP_TO_KILO(size);
unsigned int index = pool_list_index_with_size(size);
Index: vm/gc_gen/src/los/free_area_pool.h
===================================================================
--- vm/gc_gen/src/los/free_area_pool.h (revision 636625)
+++ vm/gc_gen/src/los/free_area_pool.h (working copy)
@@ -61,7 +61,7 @@
area->next = area->prev = (Bidir_List*)area;
area->size = size;
- if( size < GC_OBJ_SIZE_THRESHOLD) return NULL;
+ if( size < GC_LOS_OBJ_SIZE_THRESHOLD) return NULL;
else return area;
}
@@ -92,7 +92,7 @@
inline unsigned int pool_list_index_with_size(POINTER_SIZE_INT size)
{
- assert(size >= GC_OBJ_SIZE_THRESHOLD);
+ assert(size >= GC_LOS_OBJ_SIZE_THRESHOLD);
unsigned int index;
index = (unsigned int) (size >> BIT_SHIFT_TO_KILO);
@@ -102,7 +102,7 @@
inline Free_Area* free_pool_add_area(Free_Area_Pool* pool, Free_Area* free_area)
{
- assert( free_area->size >= GC_OBJ_SIZE_THRESHOLD);
+ assert( free_area->size >= GC_LOS_OBJ_SIZE_THRESHOLD);
unsigned int index = pool_list_index_with_size(free_area->size);
bidir_list_add_item((Bidir_List*)&(pool->sized_area_list[index]), (Bidir_List*)free_area);
Index: vm/gc_gen/src/los/lspace.cpp
===================================================================
--- vm/gc_gen/src/los/lspace.cpp (revision 636625)
+++ vm/gc_gen/src/los/lspace.cpp (working copy)
@@ -42,10 +42,10 @@
vm_commit_mem(reserved_base, lspace_size);
memset(reserved_base, 0, lspace_size);
- min_los_size_bytes -= LOS_HEAD_RESERVE_FOR_HEAP_NULL;
- lspace->committed_heap_size = committed_size - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
- lspace->reserved_heap_size = gc->reserved_heap_size - min_none_los_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL;
- lspace->heap_start = (void*)((POINTER_SIZE_INT)reserved_base + LOS_HEAD_RESERVE_FOR_HEAP_NULL);
+ min_los_size_bytes -= LOS_HEAD_RESERVE_FOR_HEAP_BASE;
+ lspace->committed_heap_size = committed_size - LOS_HEAD_RESERVE_FOR_HEAP_BASE;
+ lspace->reserved_heap_size = gc->reserved_heap_size - min_none_los_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_BASE;
+ lspace->heap_start = (void*)((POINTER_SIZE_INT)reserved_base + LOS_HEAD_RESERVE_FOR_HEAP_BASE);
lspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + committed_size);
lspace->gc = gc;
@@ -131,3 +131,4 @@
return lspace->failure_size;
}
+
Index: vm/gc_gen/src/los/lspace.h
===================================================================
--- vm/gc_gen/src/los/lspace.h (revision 636625)
+++ vm/gc_gen/src/los/lspace.h (working copy)
@@ -29,11 +29,11 @@
#include "../common/hashcode.h"
#endif
-/*Fixme: This macro is for handling HEAP_NULL issues caused by JIT OPT*/
+/*Fixme: This macro is for handling HEAP_BASE issues caused by JIT OPT*/
#ifdef COMPRESS_REFERENCE
- #define LOS_HEAD_RESERVE_FOR_HEAP_NULL ( SPACE_ALLOC_UNIT )
+ #define LOS_HEAD_RESERVE_FOR_HEAP_BASE ( SPACE_ALLOC_UNIT )
#else
- #define LOS_HEAD_RESERVE_FOR_HEAP_NULL ( 0*KB )
+ #define LOS_HEAD_RESERVE_FOR_HEAP_BASE ( 0*KB )
#endif
typedef struct Lspace{
Index: vm/gc_gen/src/los/lspace_alloc_collect.cpp
===================================================================
--- vm/gc_gen/src/los/lspace_alloc_collect.cpp (revision 636625)
+++ vm/gc_gen/src/los/lspace_alloc_collect.cpp (working copy)
@@ -66,7 +66,7 @@
/*if the list head is not NULL, it definitely satisfies the request. */
remain_size = free_area->size - alloc_size;
assert(remain_size >= 0);
- if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
+ if( remain_size >= GC_LOS_OBJ_SIZE_THRESHOLD){
new_list_nr = pool_list_index_with_size(remain_size);
p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
if(new_list_nr == list_hint){
@@ -89,7 +89,7 @@
free_pool_unlock_nr_list(pool, list_hint);
p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
if(remain_size > 0){
- assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
+ assert((remain_size >= KB) && (remain_size < GC_LOS_OBJ_SIZE_THRESHOLD));
free_area->size = remain_size;
}
return p_result;
@@ -118,7 +118,7 @@
free_area = (Free_Area*)(head->next);
while( free_area != (Free_Area*)head ){
remain_size = free_area->size - alloc_size;
- if( remain_size >= GC_OBJ_SIZE_THRESHOLD){
+ if( remain_size >= GC_LOS_OBJ_SIZE_THRESHOLD){
new_list_nr = pool_list_index_with_size(remain_size);
p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
if(new_list_nr == MAX_LIST_INDEX){
@@ -141,7 +141,7 @@
free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
if(remain_size > 0){
- assert((remain_size >= KB) && (remain_size < GC_OBJ_SIZE_THRESHOLD));
+ assert((remain_size >= KB) && (remain_size < GC_LOS_OBJ_SIZE_THRESHOLD));
free_area->size = remain_size;
}
return p_result;
@@ -343,7 +343,7 @@
assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
- if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
+ if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
lspace->committed_heap_size += trans_size;
break;
}
@@ -358,7 +358,7 @@
assert((POINTER_SIZE_INT)lspace->scompact_fa_end > (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size);
new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start - tuner->tuning_size;
Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
- if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
+ if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
break;
}
default:{
@@ -368,7 +368,7 @@
new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
if(new_fa_size == 0) break;
Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
- if(new_fa_size >= GC_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
+ if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
break;
}
}
@@ -481,3 +481,4 @@
}
+
Index: vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp
===================================================================
--- vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp (revision 636625)
+++ vm/gc_gen/src/mark_compact/fallback_mark_scan.cpp (working copy)
@@ -120,7 +120,7 @@
GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
- assert(gc_match_kind(gc, FALLBACK_COLLECTION));
+ assert(collect_is_fallback());
/* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
unsigned int num_active_collectors = gc->num_active_collectors;
@@ -138,7 +138,7 @@
REF *p_ref = (REF *)*iter;
iter = vector_block_iterator_advance(root_set,iter);
- /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */
+ /* root ref can't be NULL (remset may have a NULL ref entry, but this function is only for ALGO_MAJOR) */
assert(*p_ref);
collector_tracestack_push(collector, p_ref);
@@ -210,7 +210,7 @@
GC* gc = collector->gc;
Blocked_Space* space = (Blocked_Space*)((GC_Gen*)gc)->nos;
- assert(gc_match_kind(gc, FALLBACK_COLLECTION));
+ assert(collect_is_fallback());
unsigned int num_active_collectors = gc->num_active_collectors;
atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
@@ -258,3 +258,4 @@
+
Index: vm/gc_gen/src/mark_compact/mspace_alloc.cpp
===================================================================
--- vm/gc_gen/src/mark_compact/mspace_alloc.cpp (revision 636625)
+++ vm/gc_gen/src/mark_compact/mspace_alloc.cpp (working copy)
@@ -55,7 +55,7 @@
/* All chunks of data requested need to be multiples of GC_OBJECT_ALIGNMENT */
assert((size % GC_OBJECT_ALIGNMENT) == 0);
- assert( size <= GC_OBJ_SIZE_THRESHOLD );
+ assert( size <= GC_LOS_OBJ_SIZE_THRESHOLD );
/* check if collector local alloc block is ok. If not, grab a new block */
p_return = thread_local_alloc(size, allocator);
@@ -73,3 +73,4 @@
}
+
Index: vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp
===================================================================
--- vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (revision 636625)
+++ vm/gc_gen/src/mark_compact/mspace_collect_compact.cpp (working copy)
@@ -22,8 +22,6 @@
#include "mspace_collect_compact.h"
-Boolean IS_MOVE_COMPACT;
-
struct GC_Gen;
Space* gc_get_nos(GC_Gen* gc);
@@ -267,69 +265,34 @@
pool_iterator_init(gc->metadata->gc_rootset_pool);
- /* dual mark bits will consume two bits in obj info, that makes current
- header hashbits only 5 bits. That's not enough. We implement on-demend
- hash field allocation in obj during moving. move_compact doesn't support it.
- Dual mark bits is used for MINOR_NONGEN_FORWARD algorithm */
-
//For_LOS_extend
if(gc->tuner->kind != TRANS_NOTHING){
+ major_set_compact_slide();
+ }else if (collect_is_fallback()){
+ major_set_compact_slide();
+ }else{
+ major_set_compact_move();
+ }
+ if(major_is_compact_slide()){
+
TRACE2("gc.process", "GC: slide compact algo start ... \n");
collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
+
+ }else if( major_is_compact_move()){
+
+ TRACE2("gc.process", "GC: move compact algo start ... \n");
+ collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
+ TRACE2("gc.process", "\nGC: end of move compact algo ... \n");
-#ifdef GC_GEN_STATS
- gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE);
-#endif
-
- }else if (gc_match_kind(gc, FALLBACK_COLLECTION)){
-
- TRACE2("gc.process", "GC: slide compact algo start ... \n");
- collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
- TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
-
-#ifdef GC_GEN_STATS
- gc_gen_stats_set_los_collected_flag((GC_Gen*)gc, true);
- gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE);
-#endif
- //IS_MOVE_COMPACT = TRUE;
- //collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
- //IS_MOVE_COMPACT = FALSE;
}else{
+ DIE2("gc.collect", "The speficied major collection algorithm doesn't exist!");
+ exit(0);
+ }
- switch(mspace->collect_algorithm){
- case MAJOR_COMPACT_SLIDE:
- TRACE2("gc.process", "GC: slide compact algo start ... \n");
- collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
- TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
-#ifdef GC_GEN_STATS
- gc_gen_stats_set_los_collected_flag((GC_Gen*)gc, true);
- gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_SLIDE);
-#endif
- break;
-
- case MAJOR_COMPACT_MOVE:
- IS_MOVE_COMPACT = TRUE;
-
- TRACE2("gc.process", "GC: move compact algo start ... \n");
- collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
- TRACE2("gc.process", "\nGC: end of move compact algo ... \n");
- IS_MOVE_COMPACT = FALSE;
-#ifdef GC_GEN_STATS
- gc_gen_stats_set_mos_algo((GC_Gen*)gc, MAJOR_COMPACT_MOVE);
-#endif
- break;
-
- default:
- DIE2("gc.collect", "The speficied major collection algorithm doesn't exist!");
- exit(0);
- break;
- }
-
- }
-
return;
}
+
Index: vm/gc_gen/src/mark_compact/mspace_collect_compact.h
===================================================================
--- vm/gc_gen/src/mark_compact/mspace_collect_compact.h (revision 636625)
+++ vm/gc_gen/src/mark_compact/mspace_collect_compact.h (working copy)
@@ -49,9 +49,8 @@
void fallback_clear_fwd_obj_oi_init(Collector* collector);
#endif
-extern Boolean IS_MOVE_COMPACT;
-
#endif /* _MSPACE_COLLECT_COMPACT_H_ */
+
Index: vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp
===================================================================
--- vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (revision 636625)
+++ vm/gc_gen/src/mark_compact/mspace_extend_compact.cpp (working copy)
@@ -27,6 +27,8 @@
static volatile Block *nos_first_free_block = NULL;
static volatile Block *first_block_to_move = NULL;
+Boolean mos_extended = FALSE;
+
static void set_first_and_end_block_to_move(Collector *collector, unsigned int mem_changed_size)
{
GC_Gen *gc_gen = (GC_Gen *)collector->gc;
@@ -205,7 +207,7 @@
static void gc_reupdate_repointed_sets(GC* gc, Pool* pool, void *start_address, void *end_address, unsigned int addr_diff)
{
GC_Metadata *metadata = gc->metadata;
- assert(gc_match_kind(gc, EXTEND_COLLECTION));
+ assert(mos_extended);
pool_iterator_init(pool);
@@ -227,8 +229,8 @@
GC *gc = collector->gc;
GC_Metadata *metadata = gc->metadata;
- /* only for MAJOR_COLLECTION and FALLBACK_COLLECTION */
- assert(gc_match_kind(gc, EXTEND_COLLECTION));
+ /* only for ALGO_MAJOR and ALGO_MAJOR_FALLBACK */
+ assert(mos_extended);
gc_reupdate_repointed_sets(gc, metadata->gc_rootset_pool, start_address, end_address, addr_diff);
@@ -272,8 +274,8 @@
Blocked_Space *mspace = (Blocked_Space *)gc_gen->mos;
Blocked_Space *nspace = (Blocked_Space *)gc_gen->nos;
- /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/
- gc_gen->collect_kind |= EXTEND_COLLECTION;
+ /*For_LOS adaptive: when doing ALGO_MAJOR_EXTEND, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/
+ mos_extended = TRUE;
unsigned int num_active_collectors = gc_gen->num_active_collectors;
unsigned int old_num;
@@ -319,8 +321,8 @@
Fspace *nspace = gc_gen->nos;
Lspace *lspace = gc_gen->los;
- /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/
- gc_gen->collect_kind |= EXTEND_COLLECTION;
+ /*For_LOS adaptive: when doing ALGO_MAJOR_EXTEND, mspace->survive_ratio should not be updated in gc_decide_next_collect()*/
+ mos_extended = TRUE;
unsigned int num_active_collectors = gc_gen->num_active_collectors;
unsigned int old_num;
Index: vm/gc_gen/src/mark_compact/mspace_move_compact.cpp
===================================================================
--- vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (revision 636625)
+++ vm/gc_gen/src/mark_compact/mspace_move_compact.cpp (working copy)
@@ -46,7 +46,7 @@
Block_Header *local_last_dest = dest_block;
void* dest_sector_addr = dest_block->base;
- Boolean is_fallback = gc_match_kind(collector->gc, FALLBACK_COLLECTION);
+ Boolean is_fallback = collect_is_fallback();
#ifdef USE_32BITS_HASHCODE
Hashcode_Buf* old_hashcode_buf = NULL;
@@ -83,10 +83,9 @@
void* src_sector_addr = p_obj;
while( p_obj ){
-
debug_num_live_obj++;
assert( obj_is_marked_in_vt(p_obj));
- /* we don't check if it's set, since only remaining objs from last NOS partial collection need it. */
+ /* we don't check if it's set, since only non-forwarded objs from last NOS partial-forward collection need it. */
obj_clear_dual_bits_in_oi(p_obj);
#ifdef GC_GEN_STATS
@@ -125,7 +124,7 @@
assert(((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) <= block_end );
- /* check if current sector has no more sector. If not, loop back. FIXME:: we should add a condition for block check */
+ /* check whether the next live object falls outside the current sector; if it is still inside, loop back and keep filling this sector. FIXME:: we should add a condition for block check (?) */
p_obj = block_get_next_marked_object(curr_block, &start_pos);
if ((p_obj != NULL) && (OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) == curr_sector))
continue;
@@ -133,7 +132,7 @@
/* current sector is done, let's move it. */
POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr;
assert((sector_distance % GC_OBJECT_ALIGNMENT) == 0);
- /* if sector_distance is zero, we don't do anything. But since block stable is never cleaned, we have to set 0 to it. */
+ /* if sector_distance is zero, we don't do anything. But since the block offset table is never cleaned, we still have to store 0 into it. */
curr_block->table[curr_sector] = sector_distance;
if(sector_distance != 0)
@@ -203,7 +202,7 @@
unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
- if(!gc_match_kind(gc, FALLBACK_COLLECTION))
+ if(!collect_is_fallback())
mark_scan_heap(collector);
else
mark_scan_heap_for_fallback(collector);
Index: vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp
===================================================================
--- vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (revision 636625)
+++ vm/gc_gen/src/mark_compact/mspace_slide_compact.cpp (working copy)
@@ -158,7 +158,7 @@
{
Block_Header *curr_block = blocked_space_block_iterator_next((Blocked_Space*)mspace);
- /* for MAJOR_COLLECTION, we must iterate over all compact blocks */
+ /* for ALGO_MAJOR, we must iterate over all compact blocks */
while( curr_block){
block_fix_ref_after_repointing(curr_block);
curr_block = blocked_space_block_iterator_next((Blocked_Space*)mspace);
@@ -420,7 +420,7 @@
unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
- if(gc_match_kind(gc, FALLBACK_COLLECTION))
+ if(collect_is_fallback())
mark_scan_heap_for_fallback(collector);
else if(gc->tuner->kind != TRANS_NOTHING)
mark_scan_heap_for_space_tune(collector);
@@ -447,7 +447,7 @@
gc_init_block_for_collectors(gc, mspace);
#ifdef USE_32BITS_HASHCODE
- if(gc_match_kind(gc, FALLBACK_COLLECTION))
+ if(collect_is_fallback())
fallback_clear_fwd_obj_oi_init(collector);
#endif
@@ -465,7 +465,7 @@
atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1);
#ifdef USE_32BITS_HASHCODE
- if(gc_match_kind(gc, FALLBACK_COLLECTION))
+ if(collect_is_fallback())
fallback_clear_fwd_obj_oi(collector);
#endif
mspace_compute_object_target(collector, mspace);
Index: vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp
===================================================================
--- vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp (revision 636625)
+++ vm/gc_gen/src/mark_compact/space_tune_mark_scan.cpp (working copy)
@@ -155,7 +155,7 @@
iter = vector_block_iterator_advance(root_set,iter);
Partial_Reveal_Object *p_obj = read_slot(p_ref);
- /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */
+ /* root ref can't be NULL (remset may have a NULL ref entry, but this function is only for ALGO_MAJOR) */
assert(p_obj!=NULL);
/* we have to mark the object before put it into marktask, because
it is possible to have two slots containing a same object. They will
Index: vm/gc_gen/src/mark_sweep/gc_ms.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/gc_ms.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/gc_ms.cpp (working copy)
@@ -17,8 +17,6 @@
#include "../common/gc_common.h"
-#ifdef USE_MARK_SWEEP_GC
-
#include "gc_ms.h"
#include "wspace_mark_sweep.h"
#include "../finalizer_weakref/finalizer_weakref.h"
@@ -29,6 +27,13 @@
#include "../common/hashcode.h"
#endif
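+/* Create a GC_MS instance: allocate the structure and zero it before initialization. */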
+GC* gc_ms_create()
+{
+ GC* gc = (GC*)STD_MALLOC(sizeof(GC_MS));
+ assert(gc);
+ memset(gc, 0, sizeof(GC_MS));
+ return gc;
+}
void gc_ms_initialize(GC_MS *gc_ms, POINTER_SIZE_INT min_heap_size, POINTER_SIZE_INT max_heap_size)
{
@@ -43,7 +48,7 @@
wspace_base = vm_reserve_mem(0, max_heap_size);
wspace_initialize((GC*)gc_ms, wspace_base, max_heap_size, max_heap_size);
- HEAP_NULL = (POINTER_SIZE_INT)wspace_base;
+ HEAP_BASE = (POINTER_SIZE_INT)wspace_base;
gc_ms->heap_start = wspace_base;
gc_ms->heap_end = (void*)((POINTER_SIZE_INT)wspace_base + max_heap_size);
@@ -138,7 +143,8 @@
void gc_ms_update_space_statistics(GC_MS* gc)
{
POINTER_SIZE_INT num_live_obj = 0;
- POINTER_SIZE_INT size_live_obj = 0;
+ POINTER_SIZE_INT size_live_obj = 0;
+ POINTER_SIZE_INT new_obj_size = 0;
Space_Statistics* wspace_stat = gc->wspace->space_statistic;
@@ -151,14 +157,33 @@
size_live_obj += collector->live_obj_size;
}
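+ /* Accumulate and reset each mutator's count of newly allocated bytes, under the mutator list lock. */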
+ lock(gc->mutator_list_lock);
+ Mutator* mutator = gc->mutator_list;
+ while (mutator) {
+ new_obj_size += mutator->new_obj_size;
+ mutator->new_obj_size = 0;
+ mutator = mutator->next;
+ }
+ unlock(gc->mutator_list_lock);
+
+ wspace_stat->size_new_obj += new_obj_size;
+
wspace_stat->num_live_obj = num_live_obj;
wspace_stat->size_live_obj = size_live_obj;
wspace_stat->last_size_free_space = wspace_stat->size_free_space;
- wspace_stat->size_free_space = gc->committed_heap_size - size_live_obj;/*TODO:inaccurate value.*/
+ wspace_stat->size_free_space = gc->committed_heap_size - size_live_obj;/*TODO:inaccurate value.*/
+ wspace_stat->space_utilization_ratio = (float)wspace_stat->size_new_obj / wspace_stat->last_size_free_space;
}
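+/* Clear the per-cycle wspace statistics so the next collection starts from zero. */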
+void gc_ms_reset_space_statistics(GC_MS* gc)
+{
+ Space_Statistics* wspace_stat = gc->wspace->space_statistic;
+ wspace_stat->size_new_obj = 0;
+ wspace_stat->num_live_obj = 0;
+ wspace_stat->size_live_obj = 0;
+ wspace_stat->space_utilization_ratio = 0;
+}
+
void gc_ms_iterate_heap(GC_MS *gc)
{
}
-
-#endif // USE_MARK_SWEEP_GC
Index: vm/gc_gen/src/mark_sweep/gc_ms.h
===================================================================
--- vm/gc_gen/src/mark_sweep/gc_ms.h (revision 636625)
+++ vm/gc_gen/src/mark_sweep/gc_ms.h (working copy)
@@ -18,8 +18,6 @@
#ifndef _GC_MS_H_
#define _GC_MS_H_
-#ifdef USE_MARK_SWEEP_GC
-
#include "wspace.h"
@@ -123,9 +121,6 @@
void gc_ms_start_concurrent_sweep(GC_MS* gc, unsigned int num_collectors);
void gc_ms_start_most_concurrent_mark(GC_MS* gc, unsigned int num_markers);
void gc_ms_start_final_mark_after_concurrent(GC_MS* gc, unsigned int num_markers);
+void gc_ms_reset_space_statistics(GC_MS* gc);
-
-
-#endif // USE_MARK_SWEEP_GC
-
#endif // _GC_MS_H_
Index: vm/gc_gen/src/mark_sweep/wspace.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace.cpp (working copy)
@@ -61,8 +61,9 @@
wspace->space_statistic = (Space_Statistics*)STD_MALLOC(sizeof(Space_Statistics));
assert(wspace->space_statistic);
memset(wspace->space_statistic, 0, sizeof(Space_Statistics));
+ wspace->space_statistic->size_free_space = commit_size;
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
gc_ms_set_wspace((GC_MS*)gc, wspace);
#else
gc_set_mos((GC_Gen*)gc, (Space*)wspace);
@@ -124,6 +125,15 @@
allocator->local_chunks = local_chunks;
}
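+/* Fold the calling mutator's newly allocated object bytes into the wspace statistics. */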
+void allocator_register_new_obj_size(Allocator *allocator)
+{
+ Mutator* mutator = (Mutator*)allocator;
+ Wspace *wspace = gc_get_wspace(allocator->gc);
+ Space_Statistics* space_stat = wspace->space_statistic;
+ space_stat->size_new_obj += mutator->new_obj_size;
+}
+
+
void allocactor_destruct_local_chunks(Allocator *allocator)
{
Wspace *wspace = gc_get_wspace(allocator->gc);
@@ -140,28 +150,26 @@
if(!chunk_ptrs){
chunk_ptrs = local_chunks[i];
-
/* Put local pfc to the according pools */
- for(unsigned int i = 0; i < chunk_ptr_num; ++i){
- if(chunk_ptrs[i]){
+ for(unsigned int j = 0; j < chunk_ptr_num; ++j){
+ if(chunk_ptrs[j]){
if(!USE_CONCURRENT_GC){
- wspace_put_pfc(wspace, chunk_ptrs[i]);
+ wspace_put_pfc(wspace, chunk_ptrs[j]);
}else{
- Chunk_Header* chunk_to_rem = chunk_ptrs[i];
+ Chunk_Header* chunk_to_rem = chunk_ptrs[j];
chunk_to_rem->status = CHUNK_USED | CHUNK_NORMAL;
wspace_register_used_chunk(wspace, chunk_to_rem);
}
}
}
-
- /* Free mem for local chunk pointers */
- STD_FREE(chunk_ptrs);
+
chunk_ptrs = NULL;
}
}
}
-
+ /* Free mem for local chunk pointers */
+ STD_FREE(*local_chunks);
/* Free mem for size segments (Chunk_Header**) */
STD_FREE(local_chunks);
}
@@ -189,7 +197,7 @@
static void gc_clear_mutator_local_chunks(GC *gc)
{
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
/* release local chunks of each mutator in unique mark-sweep GC */
Mutator *mutator = gc->mutator_list;
while(mutator){
@@ -201,14 +209,14 @@
void gc_clear_collector_local_chunks(GC *gc)
{
- if(!gc_match_kind(gc, MAJOR_COLLECTION)) return;
+ if(!collect_is_major()) return;
/* release local chunks of each collector in gen GC */
for(unsigned int i = gc->num_collectors; i--;){
allocator_clear_local_chunks((Allocator*)gc->collectors[i]);
}
}
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
void wspace_set_space_statistic(Wspace *wspace)
{
GC_MS *gc = (GC_MS*)wspace->gc;
@@ -241,11 +249,11 @@
wspace_decide_compaction_need(wspace);
- if(wspace->need_compact && gc_match_kind(gc, MARK_SWEEP_GC)){
- assert(gc_match_kind(gc, MS_COLLECTION));
- gc->collect_kind = MS_COMPACT_COLLECTION;
+ if(wspace->need_compact && major_is_marksweep()){
+ assert(!collect_move_object());
+ GC_PROP |= ALGO_MS_COMPACT;
}
- if(wspace->need_compact || gc_match_kind(gc, MAJOR_COLLECTION))
+ if(wspace->need_compact || collect_is_major())
wspace->need_fix = TRUE;
//printf("\n\n>>>>>>>>%s>>>>>>>>>>>>\n\n", wspace->need_compact ? "COMPACT" : "NO COMPACT");
@@ -263,6 +271,9 @@
collector_execute_task(gc, (TaskType)mark_sweep_wspace, (Space*)wspace);
+ /* set the collection type back to ms_normal in case it's ms_compact */
+ collect_set_ms_normal();
+
#ifdef SSPACE_TIME
wspace_gc_time(gc, FALSE);
#endif
Index: vm/gc_gen/src/mark_sweep/wspace.h
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace.h (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace.h (working copy)
@@ -75,7 +75,7 @@
POINTER_SIZE_INT surviving_obj_size;
} Wspace;
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
void wspace_set_space_statistic(Wspace *wspace);
#endif
@@ -91,11 +91,12 @@
void allocator_init_local_chunks(Allocator *allocator);
void allocactor_destruct_local_chunks(Allocator *allocator);
void gc_init_collector_free_chunk_list(Collector *collector);
+void allocator_register_new_obj_size(Allocator *allocator);
POINTER_SIZE_INT wspace_free_memory_size(Wspace *wspace);
-#ifndef USE_MARK_SWEEP_GC
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
#define gc_get_wspace(gc) ((Wspace*)gc_get_mos((GC_Gen*)(gc)))
#else
#define gc_get_wspace(gc) (gc_ms_get_wspace((GC_MS*)(gc)))
Index: vm/gc_gen/src/mark_sweep/wspace_alloc.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_alloc.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_alloc.cpp (working copy)
@@ -222,7 +222,7 @@
chunk->table[0] |= cur_mark_black_color ;
}
mutator_post_signal((Mutator*) allocator,MUTATOR_EXIT_ALLOCATION_MARK);
- mem_fence();
+ //mem_fence();
chunk->table[0] |= cur_alloc_color;
set_super_obj_mask(chunk->base);
@@ -254,6 +254,8 @@
if(p_obj && gc_is_concurrent_mark_phase()) ((Partial_Reveal_Object*)p_obj)->obj_info |= NEW_OBJ_MASK;
#endif
+ if(p_obj) ((Mutator*)allocator)->new_obj_size += size;
+
return p_obj;
}
@@ -264,7 +266,10 @@
/* First, try to allocate object from TLB (thread local chunk) */
p_obj = wspace_try_alloc(size, allocator);
- if(p_obj) return p_obj;
+ if(p_obj){
+ ((Mutator*)allocator)->new_obj_size += size;
+ return p_obj;
+ }
if(allocator->gc->in_collection) return NULL;
@@ -273,9 +278,10 @@
p_obj = wspace_try_alloc(size, allocator);
if(p_obj){
vm_gc_unlock_enum();
+ ((Mutator*)allocator)->new_obj_size += size;
return p_obj;
}
- gc_reclaim_heap(allocator->gc, GC_CAUSE_WSPACE_IS_FULL);
+ gc_reclaim_heap(allocator->gc, GC_CAUSE_MOS_IS_FULL);
vm_gc_unlock_enum();
#ifdef SSPACE_CHUNK_INFO
@@ -284,5 +290,7 @@
p_obj = wspace_try_alloc(size, allocator);
+ if(p_obj) ((Mutator*)allocator)->new_obj_size += size;
+
return p_obj;
}
Index: vm/gc_gen/src/mark_sweep/wspace_alloc.h
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_alloc.h (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_alloc.h (working copy)
@@ -187,7 +187,7 @@
if(p_obj && is_obj_alloced_live())
obj_mark_black_in_table((Partial_Reveal_Object*)p_obj, chunk->slot_size);
- mem_fence();
+ //mem_fence();
alloc_slot_in_table(table, slot_index);
if(chunk->status & CHUNK_NEED_ZEROING)
Index: vm/gc_gen/src/mark_sweep/wspace_compact.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_compact.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_compact.cpp (working copy)
@@ -28,10 +28,10 @@
POINTER_SIZE_INT free_mem_size = free_mem_in_wspace(wspace, FALSE);
float free_mem_ratio = (float)free_mem_size / wspace->committed_heap_size;
-#ifdef USE_MARK_SWEEP_GC
- if(!gc_mark_is_concurrent() && (free_mem_ratio > SSPACE_COMPACT_RATIO) && (wspace->gc->cause != GC_CAUSE_RUNTIME_FORCE_GC)){
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
+ if(!gc_mark_is_concurrent() && (free_mem_ratio > WSPACE_COMPACT_RATIO) && (wspace->gc->cause != GC_CAUSE_RUNTIME_FORCE_GC)){
#else
- if(gc_match_kind(wspace->gc, MAJOR_COLLECTION)){
+ if(collect_is_major()){
#endif
wspace->need_compact = wspace->move_object = TRUE;
} else {
@@ -156,7 +156,7 @@
return result;
}
-static inline void move_obj_between_chunks(Chunk_Header **dest_ptr, Chunk_Header *src)
+static inline void move_obj_between_chunks(Wspace *wspace, Chunk_Header **dest_ptr, Chunk_Header *src)
{
Chunk_Header *dest = *dest_ptr;
assert(dest->slot_size == src->slot_size);
@@ -170,7 +170,8 @@
void *target = alloc_in_chunk(dest);
if(dest->slot_index == MAX_SLOT_INDEX){
- dest->status = CHUNK_USED | CHUNK_NORMAL;
+ dest->status = CHUNK_USED | CHUNK_NORMAL;
+ wspace_register_used_chunk(wspace,dest);
dest = NULL;
}
@@ -209,8 +210,8 @@
while(dest && src){
if(src_is_new)
src->slot_index = 0;
- chunk_depad_last_index_word(src);
- move_obj_between_chunks(&dest, src);
+ //chunk_depad_last_index_word(src);
+ move_obj_between_chunks(wspace, &dest, src);
if(!dest)
dest = get_least_free_chunk(&least_free_chunk, &most_free_chunk);
if(!src->alloc_num){
@@ -235,3 +236,4 @@
+
Index: vm/gc_gen/src/mark_sweep/wspace_concurrent_gc_stats.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_concurrent_gc_stats.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_concurrent_gc_stats.cpp (working copy)
@@ -30,7 +30,7 @@
static void normal_chunk_scanning(Chunk_Header *chunk)
{
chunk->slot_index = 0;
- chunk_depad_last_index_word(chunk);
+ //chunk_depad_last_index_word(chunk);
unsigned int alloc_num = chunk->alloc_num;
assert(alloc_num);
@@ -139,7 +139,7 @@
static void normal_chunk_clear(Chunk_Header *chunk)
{
chunk->slot_index = 0;
- chunk_depad_last_index_word(chunk);
+ //chunk_depad_last_index_word(chunk);
unsigned int alloc_num = chunk->alloc_num;
assert(alloc_num);
@@ -199,3 +199,4 @@
}
#endif
+
Index: vm/gc_gen/src/mark_sweep/wspace_fallback_mark.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_fallback_mark.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_fallback_mark.cpp (working copy)
@@ -26,7 +26,7 @@
if(obj_belongs_to_space(obj, (Space*)wspace_in_fallback_marking)){
Boolean marked_by_self = obj_mark_black_in_table(obj);
-#ifndef USE_MARK_SWEEP_GC
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
/* When fallback happens, some objects in MOS have their fw bit set, which is actually their mark bit in the last minor gc.
* If we don't clear it, some objects that didn't be moved will be mistaken for being moved in the coming fixing phase.
*/
@@ -150,7 +150,7 @@
REF *p_ref = (REF*)*iter;
iter = vector_block_iterator_advance(root_set,iter);
- /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */
+ /* root ref can't be NULL (remset may have a NULL ref entry, but this function is only for ALGO_MAJOR) */
assert(read_slot(p_ref) != NULL);
/* we have to mark the object before putting it into marktask, because
it is possible to have two slots containing a same object. They will
Index: vm/gc_gen/src/mark_sweep/wspace_mark.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_mark.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_mark.cpp (working copy)
@@ -32,7 +32,7 @@
if(obj_belongs_to_space(obj, (Space*)wspace_in_marking)){
Boolean marked_by_self = obj_mark_black_in_table(obj);
-#ifndef USE_MARK_SWEEP_GC
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
/* When fallback happens, some objects in MOS have their fw bit set, which is actually their mark bit in the last minor gc.
* If we don't clear it, some objects that didn't be moved will be mistaken for being moved in the coming fixing phase.
*/
@@ -115,8 +115,12 @@
scan_slot(collector, p_ref);
}
+ if(!IGNORE_FINREF )
+ scan_weak_reference(collector, p_obj, scan_slot);
#ifndef BUILD_IN_REFERENT
- scan_weak_reference(collector, p_obj, scan_slot);
+ else {
+ scan_weak_reference_direct(collector, p_obj, scan_slot);
+ }
#endif
}
@@ -176,7 +180,7 @@
iter = vector_block_iterator_advance(root_set,iter);
Partial_Reveal_Object *p_obj = read_slot(p_ref);
- /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */
+ /* root ref can't be NULL (remset may have a NULL ref entry, but this function is only for ALGO_MAJOR) */
assert(p_obj != NULL);
/* we have to mark the object before putting it into marktask, because
it is possible to have two slots containing a same object. They will
Index: vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_mark_mostly_concurrent.cpp (working copy)
@@ -126,7 +126,7 @@
iter = vector_block_iterator_advance(root_set,iter);
Partial_Reveal_Object *p_obj = read_slot(p_ref);
- /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */
+ /* root ref can't be NULL (remset may have a NULL ref entry, but this function is only for ALGO_MAJOR) */
assert(p_obj!=NULL);
assert(address_belongs_to_gc_heap(p_obj, gc));
if(obj_mark_gray_in_table(p_obj))
@@ -224,3 +224,4 @@
trace_object((Marker*)collector, (Partial_Reveal_Object *)p_obj);
}
+
Index: vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_mark_otf_concurrent.cpp (working copy)
@@ -115,7 +115,7 @@
iter = vector_block_iterator_advance(root_set,iter);
Partial_Reveal_Object *p_obj = read_slot(p_ref);
- /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */
+ /* root ref can't be NULL (remset may have a NULL ref entry, but this function is only for ALGO_MAJOR) */
assert(p_obj!=NULL);
assert(address_belongs_to_gc_heap(p_obj, gc));
if(obj_mark_gray_in_table(p_obj))
@@ -195,11 +195,18 @@
*/
if(local_dirty_set != NULL){
atomic_inc32(&num_active_markers);
- while(!vector_block_is_empty(local_dirty_set) || !vector_block_not_full_set_unshared(local_dirty_set)){
- Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) vector_block_get_entry(local_dirty_set);
- if(obj_mark_gray_in_table(p_obj))
- collector_tracestack_push((Collector*)marker, p_obj);
- }
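+ /* Re-scan the local dirty set in a loop: each entry that can be marked gray is pushed onto the trace stack for tracing. */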
+ do{
+ while(!vector_block_is_empty(local_dirty_set)){ //|| !vector_block_not_full_set_unshared(local_dirty_set)){
+ Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*) vector_block_get_entry(local_dirty_set);
+ if(!obj_belongs_to_gc_heap(p_obj)) {
+ assert(0);
+ }
+
+ if(obj_mark_gray_in_table(p_obj)){
+ collector_tracestack_push((Collector*)marker, p_obj);
+ }
+ }
+ }while(!vector_block_not_full_set_unshared(local_dirty_set) && !vector_block_is_empty(local_dirty_set));
goto retry;
}
}
@@ -224,3 +231,4 @@
trace_object((Marker*)collector, (Partial_Reveal_Object *)p_obj);
}
+
Index: vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_mark_sweep.cpp (working copy)
@@ -149,13 +149,20 @@
REF * p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
slot_double_fix(p_ref);
}
+
+#ifndef BUILD_IN_REFERENT
+ if(IGNORE_FINREF && is_reference_obj(p_obj)) {
+ REF* p_ref = obj_get_referent_field(p_obj);
+ slot_double_fix(p_ref);
+ }
+#endif
}
static void normal_chunk_fix_repointed_refs(Chunk_Header *chunk, Boolean double_fix)
{
/* Init field slot_index and depad the last index word in table for fixing */
chunk->slot_index = 0;
- chunk_depad_last_index_word(chunk);
+ //chunk_depad_last_index_word(chunk);
unsigned int alloc_num = chunk->alloc_num;
assert(alloc_num);
@@ -238,7 +245,7 @@
GC *gc = collector->gc;
Wspace *wspace = gc_get_wspace(gc);
Space *nos = NULL;
- if(gc_match_kind(gc, MAJOR_COLLECTION))
+ if(collect_is_major())
nos = gc_get_nos((GC_Gen*)gc);
unsigned int num_active_collectors = gc->num_active_collectors;
@@ -248,7 +255,7 @@
atomic_cas32(&num_marking_collectors, 0, num_active_collectors+1);
if(!gc_mark_is_concurrent()){
- if(gc_match_kind(gc, FALLBACK_COLLECTION))
+ if(collect_is_fallback())
wspace_fallback_mark_scan(collector, wspace);
else
wspace_mark_scan(collector, wspace);
@@ -291,7 +298,7 @@
wspace_verify_after_sweep(gc);
#endif
- if(gc_match_kind(gc, MAJOR_COLLECTION)){
+ if(collect_is_major()){
wspace_merge_free_chunks(gc, wspace);
nos_init_block_for_forwarding((GC_Gen*)gc);
}
@@ -306,7 +313,7 @@
/* Optional Pass: *******************************************
Forward live obj in nos to mos (wspace) ******************/
- if(gc_match_kind(gc, MAJOR_COLLECTION)){
+ if(collect_is_major()){
atomic_cas32( &num_forwarding_collectors, 0, num_active_collectors+1);
collector_forward_nos_to_wspace(collector, wspace);
@@ -330,7 +337,7 @@
/* If we need forward nos to mos, i.e. in major collection, an extra fixing phase after compaction is needed. */
old_num = atomic_inc32(&num_compacting_collectors);
if( ++old_num == num_active_collectors ){
- if(gc_match_kind(gc, MAJOR_COLLECTION))
+ if(collect_is_major())
wspace_remerge_free_chunks(gc, wspace);
/* let other collectors go */
num_compacting_collectors++;
@@ -347,7 +354,7 @@
* we need double fix object slots,
* because some objects are forwarded from nos to mos and compacted into another chunk afterwards.
*/
- Boolean double_fix = gc_match_kind(gc, MAJOR_COLLECTION) && wspace->need_compact;
+ Boolean double_fix = collect_is_major() && wspace->need_compact;
wspace_fix_repointed_refs(collector, wspace, double_fix);
atomic_inc32(&num_fixing_collectors);
@@ -360,17 +367,17 @@
/* Leftover: *************************************************/
if(wspace->need_fix){
- Boolean double_fix = gc_match_kind(gc, MAJOR_COLLECTION) && wspace->need_compact;
+ Boolean double_fix = collect_is_major() && wspace->need_compact;
gc_fix_rootset(collector, double_fix);
#ifdef SSPACE_TIME
wspace_fix_time(FALSE);
#endif
}
- if(!gc_match_kind(gc, MAJOR_COLLECTION))
+ if(!collect_is_major())
wspace_merge_free_chunks(gc, wspace);
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
wspace_set_space_statistic(wspace);
#endif
Index: vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_mark_sweep.h (working copy)
@@ -22,7 +22,7 @@
#include "wspace_verify.h"
#define PFC_REUSABLE_RATIO 0.1
-#define SSPACE_COMPACT_RATIO 0.06
+#define WSPACE_COMPACT_RATIO 0.06
inline Boolean chunk_is_reusable(Chunk_Header *chunk)
{ return (float)(chunk->slot_num-chunk->alloc_num)/chunk->slot_num > PFC_REUSABLE_RATIO; }
@@ -431,7 +431,7 @@
{
POINTER_SIZE_INT temp = cur_alloc_color;
cur_alloc_color = cur_mark_black_color;
- mem_fence();
+ //mem_fence();
cur_mark_black_color = temp;
cur_alloc_mask = (~cur_alloc_mask) & FLIP_COLOR_MASK_IN_TABLE;
cur_mark_mask = (~cur_mark_mask) & FLIP_COLOR_MASK_IN_TABLE;
Index: vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_sweep_concurrent.cpp (working copy)
@@ -242,7 +242,7 @@
static void gc_sweep_mutator_local_chunks(GC *gc)
{
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
/* release local chunks of each mutator in unique mark-sweep GC */
@@ -280,6 +280,7 @@
*/
void wspace_sweep_concurrent(Collector* collector)
{
+ collector->time_measurement_start = time_now();
GC *gc = collector->gc;
Wspace *wspace = gc_get_wspace(gc);
@@ -378,6 +379,8 @@
/* let other collectors go */
num_sweeping_collectors++;
}
- while(num_sweeping_collectors != num_active_collectors + 1);
+ while(num_sweeping_collectors != num_active_collectors + 1);
+ collector->time_measurement_end = time_now();
}
+
Index: vm/gc_gen/src/mark_sweep/wspace_verify.cpp
===================================================================
--- vm/gc_gen/src/mark_sweep/wspace_verify.cpp (revision 636625)
+++ vm/gc_gen/src/mark_sweep/wspace_verify.cpp (working copy)
@@ -366,7 +366,7 @@
static void gc_verify_allocator_local_chunks(GC *gc)
{
- if(gc_match_kind(gc, MARK_SWEEP_GC)){
+ if(major_is_marksweep()){
Mutator *mutator = gc->mutator_list;
while(mutator){
allocator_verify_local_chunks((Allocator*)mutator);
@@ -374,7 +374,7 @@
}
}
- if(gc_match_kind(gc, MAJOR_COLLECTION))
+ if(collect_is_major())
for(unsigned int i = gc->num_collectors; i--;){
allocator_verify_local_chunks((Allocator*)gc->collectors[i]);
}
Index: vm/gc_gen/src/move_compact/gc_mc.cpp
===================================================================
--- vm/gc_gen/src/move_compact/gc_mc.cpp (revision 0)
+++ vm/gc_gen/src/move_compact/gc_mc.cpp (revision 0)
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc_common.h"
+#include "gc_mc.h"
+
+GC* gc_mc_create()
+{
+ assert(0);
+ return NULL;
+}
Index: vm/gc_gen/src/move_compact/gc_mc.h
===================================================================
--- vm/gc_gen/src/move_compact/gc_mc.h (revision 636625)
+++ vm/gc_gen/src/move_compact/gc_mc.h (working copy)
@@ -14,51 +14,3 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
Index: vm/gc_gen/src/semi_space/sspace.cpp
===================================================================
--- vm/gc_gen/src/semi_space/sspace.cpp (revision 636625)
+++ vm/gc_gen/src/semi_space/sspace.cpp (working copy)
@@ -19,6 +19,8 @@
#include "sspace.h"
POINTER_SIZE_INT TOSPACE_SIZE = 0;
+void* tospace_start;
+void* tospace_end;
static unsigned int sspace_compute_num_tospace_blocks(Sspace* sspace)
{
@@ -221,7 +223,7 @@
unsigned int sspace_first_idx = sspace->first_block_idx;
- Boolean is_major_collection = gc_match_kind(sspace->gc, MAJOR_COLLECTION);
+ Boolean is_major_collection = collect_is_major();
if( is_major_collection ){
/* prepare for from-space, first half */
@@ -337,7 +339,7 @@
It's troublesome to do config again after minor collection. */
/* major collection leaves no survivor area in nos */
- if( gc_match_kind(gc, MAJOR_COLLECTION)){
+ if( collect_is_major()){
/* retore the fromspace last block next pointer. It was set a moment ago in sspace_reset_after_collection. */
Block_Header* block_before_survivor_area = (Block_Header*)((Block*)(sspace->survivor_area_start) - 1);
block_before_survivor_area->next = (Block_Header*)(sspace->survivor_area_start);
@@ -385,40 +387,25 @@
{
sspace->num_collections++;
- if(gc_is_gen_mode()){
- sspace->collect_algorithm = MINOR_GEN_SEMISPACE_POOL;
- }else{
- sspace->collect_algorithm = MINOR_NONGEN_SEMISPACE_POOL;
- }
-
GC* gc = sspace->gc;
/* we should not destruct rootset structure in case we need fall back */
pool_iterator_init(gc->metadata->gc_rootset_pool);
- switch(sspace->collect_algorithm){
-
+ if(!gc_is_gen_mode()){
#ifdef MARK_BIT_FLIPPING
+ TRACE2("gc.process", "GC: nongenerational semispace algo start ... \n");
+ collector_execute_task(gc, (TaskType)nongen_ss_pool, (Space*)sspace);
+ TRACE2("gc.process", "\nGC: end of nongen semispace pool algo ... \n");
+#else
+ assert(0);
+#endif /*#ifdef MARK_BIT_FLIPPING #else */
- case MINOR_NONGEN_SEMISPACE_POOL:
- TRACE2("gc.process", "GC: nongen_semispace_pool algo start ... \n");
- collector_execute_task(gc, (TaskType)nongen_ss_pool, (Space*)sspace);
- TRACE2("gc.process", "\nGC: end of nongen semispace pool algo ... \n");
- break;
-
-#endif /*#ifdef MARK_BIT_FLIPPING */
-
- case MINOR_GEN_SEMISPACE_POOL:
- TRACE2("gc.process", "gen_semispace_pool algo start ... \n");
- collector_execute_task(gc, (TaskType)gen_ss_pool, (Space*)sspace);
- TRACE2("gc.process", "\nGC: end of gen semispace pool algo ... \n");
- break;
-
- default:
- DIE2("gc.collection","Specified minor collection algorithm doesn't exist!");
- exit(0);
- break;
- }
+ }else{
+ TRACE2("gc.process", "generational semispace algo start ... \n");
+ collector_execute_task(gc, (TaskType)gen_ss_pool, (Space*)sspace);
+ TRACE2("gc.process", "\nGC: end of gen semispace pool algo ... \n");
+ }
return;
}
Index: vm/gc_gen/src/semi_space/sspace.h
===================================================================
--- vm/gc_gen/src/semi_space/sspace.h (revision 636625)
+++ vm/gc_gen/src/semi_space/sspace.h (working copy)
@@ -19,6 +19,8 @@
#define _SEMI_SPACE_H_
#include "../thread/gc_thread.h"
+extern void* tospace_start;
+extern void* tospace_end;
typedef struct Sspace{
/* <-- first couple of fields are overloadded as Space */
@@ -93,6 +95,11 @@
return (sspace->cur_free_block != NULL);
}
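+/* Address-range test against the globally recorded to-space bounds (tospace_start/tospace_end). */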
+FORCE_INLINE Boolean obj_belongs_to_tospace(Partial_Reveal_Object* p_obj)
+{
+ return ( p_obj >= tospace_start && p_obj < tospace_end );
+}
+
FORCE_INLINE Boolean obj_belongs_to_survivor_area(Sspace* sspace, Partial_Reveal_Object* p_obj)
{
return (p_obj >= sspace->survivor_area_start &&
@@ -111,7 +118,7 @@
}
/* treat semispace alloc as thread local alloc. If it fails or p_obj is old, forward it to MOS */
-FORCE_INLINE void* semispace_forward_obj(Partial_Reveal_Object* p_obj, unsigned int size, Allocator* allocator)
+FORCE_INLINE void* semispace_copy_object(Partial_Reveal_Object* p_obj, unsigned int size, Allocator* allocator)
{
void* p_targ_obj = NULL;
Sspace* sspace = (Sspace*)allocator->alloc_space;
Index: vm/gc_gen/src/semi_space/sspace_forward.cpp
===================================================================
--- vm/gc_gen/src/semi_space/sspace_forward.cpp (revision 636625)
+++ vm/gc_gen/src/semi_space/sspace_forward.cpp (working copy)
@@ -50,7 +50,7 @@
/* All chunks of data requested need to be multiples of GC_OBJECT_ALIGNMENT */
assert((size % GC_OBJECT_ALIGNMENT) == 0);
- assert( size <= GC_OBJ_SIZE_THRESHOLD );
+ assert( size <= GC_LOS_OBJ_SIZE_THRESHOLD );
/* check if collector local alloc block is ok. If not, grab a new block */
p_return = thread_local_alloc(size, allocator);
Index: vm/gc_gen/src/semi_space/sspace_gen_ss_pool.cpp
===================================================================
--- vm/gc_gen/src/semi_space/sspace_gen_ss_pool.cpp (revision 636625)
+++ vm/gc_gen/src/semi_space/sspace_gen_ss_pool.cpp (working copy)
@@ -30,14 +30,6 @@
#include "../gen/gen_stats.h"
#endif
-static void* tospace_start;
-static void* tospace_end;
-
-static Boolean obj_belongs_to_tospace(Partial_Reveal_Object* p_obj)
-{
- return ( p_obj >= tospace_start && p_obj < tospace_end );
-}
-
static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
{
Partial_Reveal_Object *p_obj = read_slot(p_ref);
@@ -121,7 +113,7 @@
write_slot(p_ref, p_target_obj);
/* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
- if(obj_is_survivor(p_target_obj))
+ if(obj_belongs_to_tospace(p_target_obj))
if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref);
@@ -163,7 +155,7 @@
write_slot(p_ref, p_target_obj);
/* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
- if(obj_is_survivor(p_target_obj)){
+ if(obj_belongs_to_tospace(p_target_obj)){
if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref);
}
@@ -178,10 +170,18 @@
Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack;
while( !vector_stack_is_empty(trace_stack)){
p_ref = (REF *)vector_stack_pop(trace_stack);
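+ /* Peek at the next trace-stack entry and prefetch the object it refers to, hiding memory latency while the current entry is forwarded. */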
+#ifdef PREFETCH_SUPPORTED
+ /* DO PREFETCH */
+ if(mark_prefetch) {
+ if(!vector_stack_is_empty(trace_stack)) {
+ REF *pref = (REF*)vector_stack_read(trace_stack, 0);
+ PREFETCH( read_slot(pref) );
+ }
+ }
+#endif
forward_object(collector, p_ref);
trace_stack = (Vector_Block*)collector->trace_stack;
}
-
return;
}
@@ -246,6 +246,15 @@
REF *p_ref = (REF *)*iter;
iter = vector_block_iterator_advance(trace_task,iter);
assert(*p_ref); /* a task can't be NULL, it was checked before put into the task stack */
+#ifdef PREFETCH_SUPPORTED
+ /* DO PREFETCH */
+ if( mark_prefetch ) {
+ if(!vector_block_iterator_end(trace_task, iter)) {
+ REF *pref= (REF*) *iter;
+ PREFETCH( read_slot(pref));
+ }
+ }
+#endif
/* in sequential version, we only trace same object once, but we were using a local hashset for that,
which couldn't catch the repetition between multiple collectors. This is subject to more study. */
Index: vm/gc_gen/src/semi_space/sspace_nongen_ss_pool.cpp
===================================================================
--- vm/gc_gen/src/semi_space/sspace_nongen_ss_pool.cpp (revision 636625)
+++ vm/gc_gen/src/semi_space/sspace_nongen_ss_pool.cpp (working copy)
@@ -87,6 +87,8 @@
GC* gc = collector->gc;
Partial_Reveal_Object *p_obj = read_slot(p_ref);
+ if(obj_belongs_to_tospace(p_obj)) return;
+
if(!obj_belongs_to_nos(p_obj)){
if(obj_mark_in_oi(p_obj)){
#ifdef GC_GEN_STATS
@@ -150,6 +152,15 @@
Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack;
while( !vector_stack_is_empty(trace_stack)){
p_ref = (REF *)vector_stack_pop(trace_stack);
+#ifdef PREFETCH_SUPPORTED
+ /* DO PREFETCH */
+ if(mark_prefetch) {
+ if(!vector_stack_is_empty(trace_stack)) {
+ REF *pref = (REF*)vector_stack_read(trace_stack, 0);
+ PREFETCH( read_slot(pref) );
+ }
+ }
+#endif
forward_object(collector, p_ref);
trace_stack = (Vector_Block*)collector->trace_stack;
}
@@ -213,6 +224,15 @@
while(!vector_block_iterator_end(trace_task,iter)){
REF *p_ref = (REF *)*iter;
iter = vector_block_iterator_advance(trace_task, iter);
+#ifdef PREFETCH_SUPPORTED
+ /* DO PREFETCH */
+ if( mark_prefetch ) {
+ if(!vector_block_iterator_end(trace_task, iter)) {
+ REF *pref= (REF*) *iter;
+ PREFETCH( read_slot(pref));
+ }
+ }
+#endif
trace_object(collector, p_ref);
if(collector->result == FALSE) break; /* force return */
@@ -255,6 +275,11 @@
void nongen_ss_pool(Collector* collector)
{
GC* gc = collector->gc;
+
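+ /* Record the to-space address range of this collection so obj_belongs_to_tospace() can skip objects that already reside in to-space. */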
+ Sspace* sspace = (Sspace*)collector->collect_space;
+ unsigned int sspace_first_idx = sspace->first_block_idx;
+ tospace_start = (void*)&(sspace->blocks[sspace->tospace_first_idx - sspace_first_idx]);
+ tospace_end = (void*)&(sspace->blocks[sspace->ceiling_block_idx - sspace_first_idx + 1]);
collector_trace_rootsets(collector);
/* the rest work is not enough for parallelization, so let only one thread go */
Index: vm/gc_gen/src/thread/collector.cpp
===================================================================
--- vm/gc_gen/src/thread/collector.cpp (revision 636625)
+++ vm/gc_gen/src/thread/collector.cpp (working copy)
@@ -30,7 +30,6 @@
unsigned int MINOR_COLLECTORS = 0;
unsigned int MAJOR_COLLECTORS = 0;
static volatile unsigned int live_collector_num = 0;
-Boolean is_collector_local_alloc = TRUE;
void collector_restore_obj_info(Collector* collector)
{
@@ -85,8 +84,8 @@
GC_Metadata* metadata = collector->gc->metadata;
- if(gc_is_gen_mode() && gc_match_kind(collector->gc, MINOR_COLLECTION)){
- if( NOS_PARTIAL_FORWARD || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL ){
+ if(gc_is_gen_mode() && collect_is_minor()){
+ if( NOS_PARTIAL_FORWARD || minor_is_semispace() ){
assert(collector->rem_set==NULL);
collector->rem_set = free_set_pool_get_entry(metadata);
}
@@ -96,7 +95,7 @@
collector_reset_weakref_sets(collector);
#endif
-#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
/*For LOS_Shrink and LOS_Extend*/
if(gc_has_space_tuner(collector->gc) && collector->gc->tuner->kind != TRANS_NOTHING){
collector->non_los_live_obj_size = 0;
@@ -131,9 +130,9 @@
static void assign_collector_with_task(GC* gc, TaskType task_func, Space* space)
{
/* FIXME:: to adaptively identify the num_collectors_to_activate */
- if( MINOR_COLLECTORS && gc_match_kind(gc, MINOR_COLLECTION)){
+ if( MINOR_COLLECTORS && collect_is_minor()){
gc->num_active_collectors = MINOR_COLLECTORS;
- }else if ( MAJOR_COLLECTORS && gc_match_kind(gc, MAJOR_COLLECTION)){
+ }else if ( MAJOR_COLLECTORS && collect_is_major()){
gc->num_active_collectors = MAJOR_COLLECTORS;
}else{
gc->num_active_collectors = gc->num_collectors;
@@ -182,7 +181,7 @@
task_func(collector);
//conducted after collection to return last TLB in hand
- #if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+ #if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
gc_reset_collector_alloc(collector->gc, collector);
#endif
collector_notify_work_done(collector);
@@ -230,14 +229,14 @@
void collector_init_stats(Collector* collector)
{
-#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
gc_gen_collector_stats_initialize(collector);
#endif
}
void collector_destruct_stats(Collector* collector)
{
-#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
gc_gen_collector_stats_destruct(collector);
#endif
}
@@ -350,3 +349,18 @@
}
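+/* Return the longest measured collection time among the active collectors. */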
+int64 gc_get_collector_time(GC* gc)
+{
+ int64 time_collector = 0;
+ unsigned int num_active_collectors = gc->num_active_collectors;
+ unsigned int i = 0;
+ for(; i < num_active_collectors; i++){
+ Collector* collector = gc->collectors[i];
+ int64 time_measured = collector->time_measurement_end - collector->time_measurement_start;
+ if(time_measured > time_collector)
+ time_collector = time_measured;
+ }
+ return time_collector;
+}
+
+
Index: vm/gc_gen/src/thread/collector.h
===================================================================
--- vm/gc_gen/src/thread/collector.h (revision 636625)
+++ vm/gc_gen/src/thread/collector.h (working copy)
@@ -29,11 +29,9 @@
struct Chunk_Header;
struct Free_Chunk_List;
-extern Boolean is_collector_local_alloc;
-
#define NORMAL_SIZE_SEGMENT_GRANULARITY_BITS 8
#define NORMAL_SIZE_SEGMENT_GRANULARITY (1 << NORMAL_SIZE_SEGMENT_GRANULARITY_BITS)
-#define NORMAL_SIZE_SEGMENT_NUM (GC_OBJ_SIZE_THRESHOLD / NORMAL_SIZE_SEGMENT_GRANULARITY)
+#define NORMAL_SIZE_SEGMENT_NUM (GC_LOS_OBJ_SIZE_THRESHOLD / NORMAL_SIZE_SEGMENT_GRANULARITY)
#define SIZE_TO_SEGMENT_INDEX(size) ((((size) + NORMAL_SIZE_SEGMENT_GRANULARITY-1) >> NORMAL_SIZE_SEGMENT_GRANULARITY_BITS) - 1)
#define SEGMENT_INDEX_TO_SIZE(index) (((index)+1) << NORMAL_SIZE_SEGMENT_GRANULARITY_BITS)
@@ -49,6 +47,8 @@
VmThreadHandle thread_handle; /* This thread; */
unsigned int handshake_signal; /*Handshake is used in concurrent GC.*/
unsigned int num_alloc_blocks; /* the number of allocated blocks in this collection. */
+ int64 time_measurement_start;
+ int64 time_measurement_end;
/* End of Allocator --> */
/* FIXME:: for testing */
@@ -111,12 +111,13 @@
void collector_attach_hashcode(Collector *collector);
#endif
-#ifndef USE_MARK_SWEEP_GC
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
void gc_gen_hook_for_collector_init(Collector *collector);
#endif
Boolean is_collector_finished(GC* gc);
void wait_collection_finish(GC* gc);
+int64 gc_get_collector_time(GC* gc);
inline Boolean gc_collection_result(GC* gc)
{
Index: vm/gc_gen/src/thread/collector_alloc.h
===================================================================
--- vm/gc_gen/src/thread/collector_alloc.h (revision 636625)
+++ vm/gc_gen/src/thread/collector_alloc.h (working copy)
@@ -34,10 +34,7 @@
extern Space_Alloc_Func mos_alloc;
-//FIXME: MINOR_ALGO is static
-extern unsigned int MINOR_ALGO;
-
-/* NOS forward obj to other space in MINOR_COLLECTION */
+/* NOS forward obj to other space in ALGO_MINOR */
FORCE_INLINE Partial_Reveal_Object* collector_forward_object(Collector* collector, Partial_Reveal_Object* p_obj)
{
Obj_Info_Type oi = get_obj_info_raw(p_obj);
@@ -65,19 +62,17 @@
Allocator* allocator = (Allocator*)collector;
/* can also use collector->collect_space->collect_algorithm */
- if( MINOR_ALGO == MINOR_NONGEN_SEMISPACE_POOL || MINOR_ALGO == MINOR_GEN_SEMISPACE_POOL){
+ if( minor_is_semispace()){
- p_targ_obj = (Partial_Reveal_Object*)semispace_forward_obj(p_obj, size, allocator);
+ p_targ_obj = (Partial_Reveal_Object*)semispace_copy_object(p_obj, size, allocator);
if( !p_targ_obj )
allocator = ((Collector*)collector)->backup_allocator;
- }else{ /* other non-ss algorithms. FIXME:: I am going to remove this branch if it has no perf impact. */
+ } /*
+ else{ // other non-ss algorithms. can do thread_local_alloc here to speedup. I removed it for simplicity.
+ if(support thread local alloc in MOS) p_targ_obj = thread_local_alloc(size, allocator);
+ }*/
- if(is_collector_local_alloc){ /* try local alloc first if collector supports it. Marksweep doesn't. */
- p_targ_obj = thread_local_alloc(size, allocator);
- }
- }
-
if(!p_targ_obj){
p_targ_obj = (Partial_Reveal_Object*)mos_alloc(size, allocator);
}
@@ -85,7 +80,7 @@
if(p_targ_obj == NULL){
/* failed to forward an obj */
collector->result = FALSE;
- TRACE2("gc.collect", "failed to forward an obj, minor collection failed.");
+ TRACE2("gc.collect", "failed to forward an object, minor collection failed.");
return NULL;
}
Index: vm/gc_gen/src/thread/gc_thread.h
===================================================================
--- vm/gc_gen/src/thread/gc_thread.h (revision 636625)
+++ vm/gc_gen/src/thread/gc_thread.h (working copy)
@@ -27,7 +27,7 @@
#define ALLOC_ZEROING
-#ifndef _IPF_
+#ifdef PREFETCH_SUPPORTED
#define ALLOC_PREFETCH
#endif
@@ -68,7 +68,10 @@
VmThreadHandle thread_handle; /* This thread; */
unsigned int handshake_signal; /*Handshake is used in concurrent GC.*/
/* the number of allocated blocks. For collector, it reflects the load balance; for mutator, it reflects mutator activities. */
- unsigned int num_alloc_blocks;
+ unsigned int num_alloc_blocks;
+ /* Time measurement. For a collector it records the collection time; for a mutator it records the mutator run time. */
+ int64 time_measurement_start;
+ int64 time_measurement_end;
}Allocator;
inline void thread_local_unalloc(unsigned int size, Allocator* allocator)
Index: vm/gc_gen/src/thread/marker.cpp
===================================================================
--- vm/gc_gen/src/thread/marker.cpp (revision 636625)
+++ vm/gc_gen/src/thread/marker.cpp (working copy)
@@ -277,3 +277,18 @@
return;
}
+int64 gc_get_marker_time(GC* gc)
+{
+ int64 time_marker = 0;
+ unsigned int num_active_markers = gc->num_active_markers;
+ unsigned int i = 0;
+ for(; i<num_active_markers; i++){
+ Marker* marker = gc->markers[i];
+ int64 time_measured = marker->time_measurement_end - marker->time_measurement_start;
+ if(time_measured > time_marker)
+ time_marker = time_measured;
+ }
+ return time_marker;
+}
+
+
Index: vm/gc_gen/src/thread/marker.h
===================================================================
--- vm/gc_gen/src/thread/marker.h (revision 636625)
+++ vm/gc_gen/src/thread/marker.h (working copy)
@@ -33,6 +33,8 @@
VmThreadHandle thread_handle; /* This thread; */
unsigned int handshake_signal; /*Handshake is used in concurrent GC.*/
unsigned int num_alloc_blocks; /* the number of allocated blocks in this collection. */
+ int64 time_measurement_start;
+ int64 time_measurement_end;
/* End of Allocator --> */
/* FIXME:: for testing */
@@ -95,8 +97,8 @@
void marker_notify_mark_root_done(Marker* marker);
void wait_mark_finish(GC* gc);
Boolean is_mark_finished(GC* gc);
+int64 gc_get_marker_time(GC* gc);
+#endif //_MARKER_H_
-#endif //_MARKER_H_
-
Index: vm/gc_gen/src/thread/mutator.cpp
===================================================================
--- vm/gc_gen/src/thread/mutator.cpp (revision 636625)
+++ vm/gc_gen/src/thread/mutator.cpp (working copy)
@@ -21,6 +21,7 @@
#include "mutator.h"
#include "../trace_forward/fspace.h"
+#include "../mark_sweep/gc_ms.h"
#include "../mark_sweep/wspace.h"
#include "../finalizer_weakref/finalizer_weakref.h"
@@ -45,7 +46,7 @@
else
mutator->obj_with_fin = NULL;
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
allocator_init_local_chunks((Allocator*)mutator);
#endif
@@ -54,7 +55,8 @@
mutator->next = (Mutator *)gc->mutator_list;
gc->mutator_list = mutator;
gc->num_mutators++;
-
+ /* Begin measuring the mutator thread's execution time. */
+ mutator->time_measurement_start = time_now();
unlock(gc->mutator_list_lock); // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
gc_set_tls(mutator);
@@ -72,8 +74,9 @@
lock(gc->mutator_list_lock); // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
allocactor_destruct_local_chunks((Allocator*)mutator);
+ allocator_register_new_obj_size((Allocator*)mutator);
#endif
volatile Mutator *temp = gc->mutator_list;
@@ -173,4 +176,34 @@
return NULL;
}
+void gc_start_mutator_time_measurement(GC* gc)
+{
+ lock(gc->mutator_list_lock);
+ Mutator* mutator = gc->mutator_list;
+ while (mutator) {
+ mutator->time_measurement_start = time_now();
+ mutator = mutator->next;
+ }
+ unlock(gc->mutator_list_lock);
+}
+int64 gc_get_mutator_time(GC* gc)
+{
+ int64 time_mutator = 0;
+ lock(gc->mutator_list_lock);
+ Mutator* mutator = gc->mutator_list;
+ while (mutator) {
+#ifdef _DEBUG
+ mutator->time_measurement_end = time_now();
+#endif
+ int64 time_measured = time_now() - mutator->time_measurement_start;
+ if(time_measured > time_mutator){
+ time_mutator = time_measured;
+ }
+ mutator = mutator->next;
+ }
+ unlock(gc->mutator_list_lock);
+ return time_mutator;
+}
+
+
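A sketch (not part of the patch) of one plausible way scheduler-facing code could combine the new timing hooks around a collection; the wrapper name is illustrative, and the exact call sites are not shown here:

static void gc_sample_times_and_update_scheduler(GC* gc)
{
  int64 time_mutator = gc_get_mutator_time(gc);      /* longest mutator run since the last reset */
  int64 time_collector = gc_get_collector_time(gc);  /* longest collector span of the last collection */
  gc_update_collection_scheduler(gc, time_mutator, time_collector);
  gc_start_mutator_time_measurement(gc);             /* restart the mutator timing window */
}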
Index: vm/gc_gen/src/thread/mutator.h
===================================================================
--- vm/gc_gen/src/thread/mutator.h (revision 636625)
+++ vm/gc_gen/src/thread/mutator.h (working copy)
@@ -39,6 +39,8 @@
VmThreadHandle thread_handle; /* This thread; */
volatile unsigned int handshake_signal; /*Handshake is used in concurrent GC.*/
unsigned int num_alloc_blocks; /* the number of allocated blocks since last collection. */
+ int64 time_measurement_start;
+ int64 time_measurement_end;
/* END of Allocator --> */
Vector_Block* rem_set;
@@ -48,6 +50,7 @@
SpinLock dirty_set_lock;
unsigned int dirty_obj_slot_num; //only ON_THE_FLY
unsigned int dirty_obj_num; //concurrent mark
+ POINTER_SIZE_INT new_obj_size;
} Mutator;
void mutator_initialize(GC* gc, void* tls_gc_info);
@@ -59,6 +62,8 @@
Boolean gc_local_dirtyset_is_empty(GC* gc);
Vector_Block* gc_get_local_dirty_set(GC* gc, unsigned int shared_id);
+void gc_start_mutator_time_measurement(GC* gc);
+int64 gc_get_mutator_time(GC* gc);
inline void mutator_post_signal(Mutator* mutator, unsigned int handshake_signal)
{
Index: vm/gc_gen/src/thread/mutator_alloc.cpp
===================================================================
--- vm/gc_gen/src/thread/mutator_alloc.cpp (revision 636625)
+++ vm/gc_gen/src/thread/mutator_alloc.cpp (working copy)
@@ -80,25 +80,33 @@
gc_alloc_statistic_obj_distrubution(size);
#endif
-#if defined(USE_MARK_SWEEP_GC)
+#if defined(USE_UNIQUE_MARK_SWEEP_GC)
+
p_obj = (Managed_Object_Handle)gc_ms_alloc(size, allocator);
+
#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
+
p_obj = (Managed_Object_Handle)gc_mc_alloc(size, allocator);
+
#else
- if ( size > GC_OBJ_SIZE_THRESHOLD ){
+
+ if ( size > GC_LOS_OBJ_SIZE_THRESHOLD ){
p_obj = (Managed_Object_Handle)los_alloc(size, allocator);
+
#ifdef GC_GEN_STATS
if (p_obj != NULL){
GC_Gen* gc = (GC_Gen*)allocator->gc;
gc->stats->obj_num_los_alloc++;
gc->stats->total_size_los_alloc += size;
}
-#endif
+#endif /* #ifdef GC_GEN_STATS */
+
}else{
p_obj = (Managed_Object_Handle)nos_alloc(size, allocator);
}
-#endif
+#endif /* defined(USE_UNIQUE_MARK_SWEEP_GC) else */
+
if( p_obj == NULL )
return NULL;
@@ -125,21 +133,28 @@
#ifdef GC_OBJ_SIZE_STATISTIC
gc_alloc_statistic_obj_distrubution(size);
#endif
-
- /* object shoud be handled specially */
- if ( size > GC_OBJ_SIZE_THRESHOLD ) return NULL;
-
+
Allocator* allocator = (Allocator*)gc_get_tls();
/* Try to allocate an object from the current Thread Local Block */
Managed_Object_Handle p_obj;
-#if defined(USE_MARK_SWEEP_GC)
+
+#if defined(USE_UNIQUE_MARK_SWEEP_GC)
+
p_obj = (Managed_Object_Handle)gc_ms_fast_alloc(size, allocator);
+
#elif defined(USE_UNIQUE_MOVE_COMPACT_GC)
- p_obj = (Managed_Object_Handle)gc_mc_fast_alloc(size, allocator);
+
+ if ( size > GC_LARGE_OBJ_SIZE_THRESHOLD ) return NULL;
+ p_obj = (Managed_Object_Handle)thread_local_alloc(size, allocator);
+
#else
+ /* large objects should be handled specially */
+ if ( size > GC_LOS_OBJ_SIZE_THRESHOLD ) return NULL;
p_obj = (Managed_Object_Handle)thread_local_alloc(size, allocator);
+
#endif
+
if(p_obj == NULL) return NULL;
assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
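The fast path above is only expected to satisfy allocations from the thread-local block and returns NULL otherwise. A sketch of the two-level contract a caller is assumed to follow; the helper name is hypothetical and the gc_alloc_fast()/gc_alloc() signatures are simplified for illustration:

Managed_Object_Handle alloc_with_fallback(unsigned size, Allocation_Handle ah)
{
  Managed_Object_Handle p_obj = gc_alloc_fast(size, ah);  /* thread-local block only, never collects */
  if(p_obj == NULL)
    p_obj = gc_alloc(size, ah);                           /* full path: LOS/NOS dispatch, may trigger a GC */
  return p_obj;
}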
Index: vm/gc_gen/src/trace_forward/fspace.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace.cpp (revision 636625)
+++ vm/gc_gen/src/trace_forward/fspace.cpp (working copy)
@@ -106,11 +106,10 @@
unsigned int first_idx = fspace->first_block_idx;
unsigned int marked_start_idx = 0; //was for oi markbit reset, now useless
unsigned int marked_last_idx = 0;
- Boolean is_major_collection = gc_match_kind(fspace->gc, MAJOR_COLLECTION);
- Boolean gen_mode = gc_is_gen_mode();
+ Boolean is_major_collection = collect_is_major();
if( is_major_collection ||
- NOS_PARTIAL_FORWARD == FALSE || !gen_mode)
+ NOS_PARTIAL_FORWARD == FALSE || !gc_is_gen_mode())
{
fspace->free_block_idx = first_idx;
fspace->ceiling_block_idx = first_idx + fspace->num_managed_blocks - 1;
@@ -189,45 +188,24 @@
fspace->num_collections++;
GC* gc = fspace->gc;
-
- if(gc_is_gen_mode()){
- fspace->collect_algorithm = MINOR_GEN_FORWARD_POOL;
- }else{
- fspace->collect_algorithm = MINOR_NONGEN_FORWARD_POOL;
- }
/* we should not destruct rootset structure in case we need fall back */
pool_iterator_init(gc->metadata->gc_rootset_pool);
- switch(fspace->collect_algorithm){
-
+ if( !gc_is_gen_mode() ){
#ifdef MARK_BIT_FLIPPING
- case MINOR_NONGEN_FORWARD_POOL:
- TRACE2("gc.process", "GC: nongen_forward_pool algo start ... \n");
- collector_execute_task(gc, (TaskType)nongen_forward_pool, (Space*)fspace);
- TRACE2("gc.process", "\nGC: end of nongen forward algo ... \n");
-#ifdef GC_GEN_STATS
- gc_gen_stats_set_nos_algo((GC_Gen*)gc, MINOR_NONGEN_FORWARD_POOL);
-#endif
- break;
-
-#endif /*#ifdef MARK_BIT_FLIPPING */
-
- case MINOR_GEN_FORWARD_POOL:
- TRACE2("gc.process", "gen_forward_pool algo start ... \n");
- collector_execute_task(gc, (TaskType)gen_forward_pool, (Space*)fspace);
- TRACE2("gc.process", "\nGC: end of gen forward algo ... \n");
-#ifdef GC_GEN_STATS
- gc_gen_stats_set_nos_algo((GC_Gen*)gc, MINOR_NONGEN_FORWARD_POOL);
-#endif
- break;
-
- default:
- DIE2("gc.collection","Specified minor collection algorithm doesn't exist!");
- exit(0);
- break;
- }
+ TRACE2("gc.process", "GC: nongenerational forward algo start ... \n");
+ collector_execute_task(gc, (TaskType)nongen_forward_pool, (Space*)fspace);
+ TRACE2("gc.process", "\nGC: end of nongen forward algo ... \n");
+#else
+ assert(0);
+#endif /*#ifdef MARK_BIT_FLIPPING #else */
+ }else{
+ TRACE2("gc.process", "generational forward algo start ... \n");
+ collector_execute_task(gc, (TaskType)gen_forward_pool, (Space*)fspace);
+ TRACE2("gc.process", "\nGC: end of gen forward algo ... \n");
+ }
return;
}
Index: vm/gc_gen/src/trace_forward/fspace.h
===================================================================
--- vm/gc_gen/src/trace_forward/fspace.h (revision 636625)
+++ vm/gc_gen/src/trace_forward/fspace.h (working copy)
@@ -29,9 +29,6 @@
* In our Gen GC, not all live objects are copied to tspace space, the newer baby will
* still be preserved in fspace, that means to give them time to die.
*/
-#ifdef PREFETCH_SUPPORTED
-extern Boolean mark_prefetch;
-#endif
extern Boolean forward_first_half;
/* boundary splitting fspace into forwarding part and remaining part */
Index: vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp
===================================================================
--- vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp (revision 636625)
+++ vm/gc_gen/src/trace_forward/fspace_nongen_forward_pool.cpp (working copy)
@@ -40,21 +40,10 @@
return;
}
-/* forward declaration */
-static void forward_jlc_instance(Collector* collector, Partial_Reveal_Object *p_obj);
-
static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
{
assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
-
- Partial_Reveal_VTable *vtable = decode_vt(obj_get_vt(p_obj));
- if(TRACE_JLC_VIA_VTABLE){
- if(vtable->vtmark == VT_UNMARKED) {
- vtable->vtmark = VT_MARKED;
- forward_jlc_instance(collector, vtable->jlC);
- }
- }
-
+
if (!object_has_ref_field_before_scan(p_obj)) return;
REF *p_ref;
@@ -147,34 +136,6 @@
return;
}
-/*
- Forward the vtable->jlc and trace the forwarded object.
- But do not update the vtable->jlc but leave them for weakroots updating
- We probably do not need this function if we do not perform class unloading in minor collections.
- That means all the weakroots to jlc instances are treated as strong roots.
-*/
-static void forward_jlc_instance(Collector* collector, Partial_Reveal_Object *p_obj)
-{
- if(!obj_belongs_to_nos(p_obj)){
- if(obj_mark_in_oi(p_obj))
- scan_object(collector, p_obj);
- return;
- }
-
- /* following is the logic for forwarding */
- Partial_Reveal_Object* p_target_obj = collector_forward_object(collector, p_obj);
- if( p_target_obj == NULL ){
- if(collector->result == FALSE ){
- vector_stack_clear(collector->trace_stack);
- return; /* FIXME: the failure result is not propagated back to GC */
- }
- assert(obj_get_fw_in_oi(p_obj));
- return;
- }
- scan_object(collector, p_target_obj);
- return;
-}
-
static void trace_object(Collector *collector, REF *p_ref)
{
forward_object(collector, p_ref);
Index: vm/gc_gen/src/utils/uneven_map.h
===================================================================
--- vm/gc_gen/src/utils/uneven_map.h (revision 0)
+++ vm/gc_gen/src/utils/uneven_map.h (revision 0)
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _UNEVEN_MAP_H_
+#define _UNEVEN_MAP_H_
+
+#include "../common/gc_metadata.h"
+
+#define MAP_ARRAY_SIZE 512
+#define MAP_VECTOR_ENTRY_SHIFT 8
+#define MAP_VECTOR_ENTRY_NUM (1<<MAP_VECTOR_ENTRY_SHIFT)
+
+typedef Vector_Block* Uneven_Map[MAP_ARRAY_SIZE];
+
+FORCE_INLINE void uneven_map_set(Uneven_Map* map, unsigned int to_block_index, unsigned int from_block_index)
+{
+ unsigned int vector_block_index = to_block_index >> MAP_VECTOR_ENTRY_SHIFT;
+ unsigned int vector_block_entry = to_block_index & (MAP_VECTOR_ENTRY_NUM - 1);
+ Vector_Block *vector_block = (*map)[vector_block_index];
+ if(vector_block == NULL){
+ Vector_Block *new_vector_block = free_set_pool_get_entry(&gc_metadata);
+ assert(new_vector_block);
+ vector_block_set_zero(new_vector_block);
+ (*map)[vector_block_index] = new_vector_block;
+ vector_block = new_vector_block;
+ }
+ assert(vector_block_entry < MAP_VECTOR_ENTRY_NUM);
+ vector_block_set_at_index(vector_block, vector_block_entry, (POINTER_SIZE_INT)from_block_index);
+}
+
+FORCE_INLINE unsigned int uneven_map_get(Uneven_Map* map, unsigned int to_block_index)
+{
+ unsigned int vector_block_index = to_block_index >> MAP_VECTOR_ENTRY_SHIFT;
+ unsigned int vector_block_entry = to_block_index & (MAP_VECTOR_ENTRY_NUM - 1);
+ Vector_Block *vector_block = (*map)[vector_block_index];
+ assert(vector_block_entry < MAP_VECTOR_ENTRY_NUM);
+
+ if(vector_block)
+ return (unsigned int)vector_block_get_at_index(vector_block, vector_block_entry);
+
+ return 0;
+}
+
+FORCE_INLINE void uneven_map_free(Uneven_Map* map)
+{
+ Vector_Block *block;
+ for(unsigned int i = 0; i<MAP_ARRAY_SIZE; i++){
+ block = (*map)[i];
+ if(block){
+ vector_block_clear(block);
+ pool_put_entry(gc_metadata.free_set_pool, block);
+ (*map)[i] = NULL;
+ }
+ }
+}
+
+#endif /* _UNEVEN_MAP_H_ */
Index: vm/gc_gen/src/utils/vector_block.h
===================================================================
--- vm/gc_gen/src/utils/vector_block.h (revision 636625)
+++ vm/gc_gen/src/utils/vector_block.h (working copy)
@@ -169,14 +169,14 @@
-inline void vector_block_set_entry_from_index(Vector_Block* block, unsigned int index, POINTER_SIZE_INT value)
+inline void vector_block_set_at_index(Vector_Block* block, unsigned int index, POINTER_SIZE_INT value)
{
#ifdef _DEBUG
assert(index < VECTOR_BLOCK_ENTRY_NUM);
#endif
block->entries[index] = value;
}
-inline POINTER_SIZE_INT vector_block_get_entry_from_index(Vector_Block* block, unsigned int index)
+inline POINTER_SIZE_INT vector_block_get_at_index(Vector_Block* block, unsigned int index)
{
#ifdef _DEBUG
assert(index < VECTOR_BLOCK_ENTRY_NUM);
@@ -187,6 +187,13 @@
#endif
}
+inline void vector_block_set_zero(Vector_Block* block)
+{
+ block->head = (POINTER_SIZE_INT*)block->entries;
+ block->tail = (POINTER_SIZE_INT*)block->entries;
+ memset(block->entries, 0, (POINTER_SIZE_INT)block->heap_end - (POINTER_SIZE_INT)block->entries);
+}
+
/* Below is for sequential local access */
inline POINTER_SIZE_INT* vector_block_iterator_init(Vector_Block* block)
{ return block->head; }
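The new uneven map is a sparse table from a to-block index to a from-block index, backed by lazily allocated vector blocks of MAP_VECTOR_ENTRY_NUM slots each. A usage sketch under the assumptions above (an Uneven_Map is a zero-initialized array of MAP_ARRAY_SIZE Vector_Block pointers); the indices are illustrative:

static void uneven_map_example()
{
  Uneven_Map map;
  memset(&map, 0, sizeof(map));                     /* all second-level blocks start out NULL */
  uneven_map_set(&map, 1026, 37);                   /* allocates the covering Vector_Block on demand */
  unsigned int from = uneven_map_get(&map, 1026);   /* == 37 */
  unsigned int none = uneven_map_get(&map, 9);      /* unmapped entries read back as 0 */
  uneven_map_free(&map);                            /* returns the blocks to the free set pool */
}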
Index: vm/gc_gen/src/verify/verifier_common.cpp
===================================================================
--- vm/gc_gen/src/verify/verifier_common.cpp (revision 636625)
+++ vm/gc_gen/src/verify/verifier_common.cpp (working copy)
@@ -85,14 +85,14 @@
{
Partial_Reveal_Object* p_obj = read_slot(p_ref);
assert(address_belongs_to_gc_heap(p_obj,heap_verifier->gc));
-#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
GC_Gen* gc = (GC_Gen*)heap_verifier->gc;
Space* mspace = gc_get_mos(gc);
Space* lspace = gc_get_los(gc);
Space* nos = gc_get_nos(gc);
if(p_obj == NULL){
- if(gc_match_kind((GC*)gc, MAJOR_COLLECTION) ||(!heap_verifier->gc_is_gen_mode && !NOS_PARTIAL_FORWARD)){
+ if(collect_is_major() ||(!heap_verifier->gc_is_gen_mode && !NOS_PARTIAL_FORWARD)){
assert(0);
return FALSE;
}else{
@@ -116,7 +116,7 @@
assert(0);
return FALSE;
}
-#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
}else{
if(heap_verifier->gc_verifier->is_before_fallback_collection){
if(!address_belongs_to_gc_heap(p_obj, heap_verifier->gc)){
@@ -128,8 +128,8 @@
}
if(!address_belongs_to_space(p_obj, mspace) && !address_belongs_to_space(p_obj, lspace) && !NOS_PARTIAL_FORWARD){
- if( gc_match_kind((GC*)gc, MINOR_COLLECTION)){
- if( nos->collect_algorithm == MINOR_NONGEN_SEMISPACE_POOL || nos->collect_algorithm == MINOR_GEN_SEMISPACE_POOL){
+ if( collect_is_minor()){
+ if( minor_is_semispace()){
if( obj_belongs_to_survivor_area((Sspace*)nos, p_obj))
return TRUE;
}
@@ -218,17 +218,15 @@
{
GC* gc = heap_verifier->gc;
char* gc_kind;
- if(gc_match_kind(gc, MINOR_COLLECTION)){
+ if(collect_is_minor()){
gc_kind = " minor collection.";
- }else if(gc_match_kind(gc, FALLBACK_COLLECTION)){
+ }else if(collect_is_fallback()){
gc_kind = " fallback collection.";
- }else if(gc_match_kind(gc, EXTEND_COLLECTION)){
- gc_kind = " extend collection.";
- }else if(gc_match_kind(gc, NORMAL_MAJOR_COLLECTION)){
+ }else if(collect_is_major_normal()){
if(gc->tuner->kind == TRANS_NOTHING) gc_kind = "major collection (normal)";
else if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS) gc_kind = "major collection (LOS shrink)";
else if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS) gc_kind = "major collection (LOS extend)";
- }else if(gc_match_kind(gc, MARK_SWEEP_GC)){
+ }else if(major_is_marksweep()){
gc_kind = " mark sweep collection.";
}
printf(" GC_kind: %s\n", gc_kind);
@@ -270,3 +268,4 @@
}
+
Index: vm/gc_gen/src/verify/verifier_common.h
===================================================================
--- vm/gc_gen/src/verify/verifier_common.h (revision 636625)
+++ vm/gc_gen/src/verify/verifier_common.h (working copy)
@@ -28,7 +28,7 @@
#ifdef USE_32BITS_HASHCODE
#include "../common/hashcode.h"
#endif
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
#include "../mark_sweep/wspace_mark_sweep.h"
#endif
#include "../common/gc_concurrent.h"
@@ -120,7 +120,7 @@
assert(decode_vt(obj_get_vt(p_obj)));
assert(!address_belongs_to_gc_heap(decode_vt(obj_get_vt(p_obj)), (GC*)heap_verifier->gc));
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
GC_MS* gc = (GC_MS*)heap_verifier->gc;
if(!heap_verifier->is_before_gc){
/*in GC_MS mark sweep algorithm, all live objects should be set their mark bit*/
Index: vm/gc_gen/src/verify/verifier_scanner.cpp
===================================================================
--- vm/gc_gen/src/verify/verifier_scanner.cpp (revision 636625)
+++ vm/gc_gen/src/verify/verifier_scanner.cpp (working copy)
@@ -37,7 +37,7 @@
{
GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
-#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
if(gc_verifier->is_before_fallback_collection) {
if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
@@ -49,8 +49,7 @@
if(!obj_mark_in_vt(p_obj)) return;
- extern unsigned int MAJOR_ALGO;
- if( MAJOR_ALGO != MAJOR_MARK_SWEEP && p_obj >= los_boundary ){
+ if( !major_is_marksweep() && p_obj >= los_boundary ){
Block_Header* block = GC_BLOCK_HEADER(p_obj);
if( heap_verifier->is_before_gc) block->num_live_objs++;
/* we can't set block->num_live_objs = 0 if !is_before_gc, because the some blocks may be freed hence not
@@ -87,7 +86,7 @@
#ifndef BUILD_IN_REFERENT
WeakReferenceType type = special_reference_type(p_obj);
- if(type == SOFT_REFERENCE && verifier_get_gc_collect_kind(heap_verifier->gc_verifier) == MINOR_COLLECTION){
+ if(type == SOFT_REFERENCE && verifier_collect_is_minor(gc_verifier)){
p_ref = obj_get_referent_field(p_obj);
scan_slot(heap_verifier, p_ref);
}
@@ -300,7 +299,7 @@
WeakReferenceType type = special_reference_type(p_obj);
if(type == NOT_REFERENCE) return;
- //if(type != SOFT_REFERENCE && verifier_get_gc_collect_kind(heap_verifier) == MINOR_COLLECTION){
+ //if(type != SOFT_REFERENCE && verifier_collect_is_minor(heap_verifier->gc_verifier)){
{
p_ref = obj_get_referent_field(p_obj);
verify_write_barrier(p_ref, heap_verifier);
@@ -374,7 +373,7 @@
void verifier_scan_all_objects(Heap_Verifier* heap_verifier)
{
-#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
GC_Gen* gc = (GC_Gen*)heap_verifier->gc;
Space* fspace = gc_get_nos(gc);
Space* mspace = gc_get_mos(gc);
@@ -422,7 +421,7 @@
void verifier_scan_unreachable_objects(Heap_Verifier* heap_verifier)
{
-#if !defined(USE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
+#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
if(heap_verifier->is_before_gc) return;
GC_Gen* gc = (GC_Gen*)heap_verifier->gc;
Space* mspace = gc_get_mos(gc);
@@ -443,3 +442,4 @@
}
+
Index: vm/gc_gen/src/verify/verify_gc_effect.cpp
===================================================================
--- vm/gc_gen/src/verify/verify_gc_effect.cpp (revision 636625)
+++ vm/gc_gen/src/verify/verify_gc_effect.cpp (working copy)
@@ -17,7 +17,7 @@
#include "verifier_common.h"
#include "verify_gc_effect.h"
-#ifdef USE_MARK_SWEEP_GC
+#ifdef USE_UNIQUE_MARK_SWEEP_GC
#include "../mark_sweep/wspace_mark_sweep.h"
#endif
@@ -103,6 +103,7 @@
assert(p_obj_information);
p_obj_information->vt_raw = obj_get_vt_raw(p_obj);
p_obj_information->address = p_obj;
+ p_obj_information->obj_info = get_obj_info_raw(p_obj) & OBJ_INFO_MASK;
return (void*) p_obj_information;
}else{
REF *p_ref;
@@ -113,7 +114,8 @@
p_obj_information->vt_raw = obj_get_vt_raw(p_obj);
p_obj_information->address = p_obj;
-
+ p_obj_information->obj_info = get_obj_info_raw(p_obj) & OBJ_INFO_MASK;
+
p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
unsigned int i = 0;
@@ -128,6 +130,7 @@
p_obj_information->vt_raw = obj_get_vt_raw(p_obj);
p_obj_information->address = p_obj;
+ p_obj_information->obj_info = get_obj_info_raw(p_obj) & OBJ_INFO_MASK;
int* ref_iterator = object_ref_iterator_init(p_obj);
@@ -147,7 +150,7 @@
GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
assert(obj_belongs_to_space(p_obj, (Space*)fspace));
unsigned int forwarded_first_part;
- if(!(gc_verifier->gc_collect_kind == MINOR_COLLECTION) || !NOS_PARTIAL_FORWARD || heap_verifier->gc_is_gen_mode)
+ if(!verifier_collect_is_minor(gc_verifier) || !NOS_PARTIAL_FORWARD || heap_verifier->gc_is_gen_mode)
forwarded_first_part = true;
else
forwarded_first_part = forward_first_half^1;
@@ -296,13 +299,13 @@
Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
-#ifndef USE_MARK_SWEEP_GC
+#ifndef USE_UNIQUE_MARK_SWEEP_GC
GC_Gen* gc = (GC_Gen*)heap_verifier->gc;
Space* mspace = gc_get_mos(gc);
Space* nspace = gc_get_nos(gc);
Space* lspace = gc_get_los(gc);
- if(!gc_verifier->is_before_fallback_collection && gc_verifier->gc_collect_kind == MINOR_COLLECTION){
+ if(!gc_verifier->is_before_fallback_collection && verifier_collect_is_minor(gc_verifier)){
if(!heap_verifier->is_before_gc){
assert(!obj_belongs_to_space(p_obj, nspace) || !fspace_object_was_forwarded(p_obj, (Fspace*)nspace, heap_verifier) || obj_belongs_to_survivor_area((Sspace*)nspace, p_obj));
if(obj_belongs_to_space(p_obj, nspace) && fspace_object_was_forwarded(p_obj, (Fspace*)nspace, heap_verifier) && !obj_belongs_to_survivor_area((Sspace*)nspace, p_obj) ){
@@ -376,12 +379,24 @@
{
Live_Object_Inform* obj_inform_1 = (Live_Object_Inform*)*obj_container1;
Live_Object_Inform* obj_inform_2 = (Live_Object_Inform*)*obj_container2;
+ Boolean ret = TRUE;
+
if(((POINTER_SIZE_INT)obj_inform_1->vt_raw) == ((POINTER_SIZE_INT)obj_inform_2->vt_raw)){
+ if(obj_inform_1->obj_info != obj_inform_2->obj_info) {
+ assert(0);
+ ret = FALSE;
+ goto free_ref;
+ }
/*FIXME: erase live object information in compare_function. */
if( object_has_ref_field((Partial_Reveal_Object*)obj_inform_1) ){
Live_Object_Ref_Slot_Inform* obj_ref_inform_1 = (Live_Object_Ref_Slot_Inform*)obj_inform_1;
Live_Object_Ref_Slot_Inform* obj_ref_inform_2 = (Live_Object_Ref_Slot_Inform*)obj_inform_2;
-
+
+ if(obj_ref_inform_1->obj_info != obj_ref_inform_2->obj_info) {
+ assert(0);
+ ret = FALSE;
+ goto free_ref;
+ }
if (object_is_array((Partial_Reveal_Object*)obj_ref_inform_1)){
Partial_Reveal_Array* array = (Partial_Reveal_Array*)obj_ref_inform_2->address;
unsigned int array_length = array->array_len;
@@ -389,10 +404,9 @@
unsigned int i = 0;
for(; i<array_length; i++){
if((POINTER_SIZE_INT)obj_ref_inform_1->ref_slot[i] != (POINTER_SIZE_INT)obj_ref_inform_2->ref_slot[i]){
- assert(0);
- STD_FREE(obj_ref_inform_1);
- STD_FREE(obj_ref_inform_1);
- return FALSE;
+ assert(0);
+ ret = FALSE;
+ goto free_ref;
}
}
}else{
@@ -403,27 +417,21 @@
for(; i<num_refs; i++){
if((POINTER_SIZE_INT)obj_ref_inform_1->ref_slot[i] != (POINTER_SIZE_INT)obj_ref_inform_2->ref_slot[i]){
assert(0);
- STD_FREE(obj_ref_inform_1);
- STD_FREE(obj_ref_inform_1);
- return FALSE;
+ ret = FALSE;
+ goto free_ref;
}
}
}
-
- STD_FREE(obj_ref_inform_1);
- STD_FREE(obj_ref_inform_2);
- }else{
- STD_FREE(obj_inform_1);
- STD_FREE(obj_inform_2);
- }
- return TRUE;
+ }
}else{
- assert(0);
- STD_FREE(obj_inform_1);
- STD_FREE(obj_inform_2);
- return FALSE;
+ assert(0);
+ ret = FALSE;
}
+free_ref:
+ STD_FREE(obj_inform_1);
+ STD_FREE(obj_inform_2);
+ return ret;
}
Boolean compare_obj_hash_inform(POINTER_SIZE_INT* container1,POINTER_SIZE_INT* container2)
@@ -553,3 +561,4 @@
+
Index: vm/gc_gen/src/verify/verify_gc_effect.h
===================================================================
--- vm/gc_gen/src/verify/verify_gc_effect.h (revision 636625)
+++ vm/gc_gen/src/verify/verify_gc_effect.h (working copy)
@@ -27,7 +27,7 @@
Vector_Block* hashcode_set;
Boolean is_tracing_resurrect_obj;
- unsigned int gc_collect_kind;
+ unsigned int collect_kind;
Boolean is_before_fallback_collection;
POINTER_SIZE_INT num_live_objects_before_gc;
@@ -54,14 +54,18 @@
Boolean is_verification_passed;
}GC_Verifier;
+#define OBJ_INFO_MASK (~0x3ff)
+
typedef struct Live_Object_Inform_struct{
VT vt_raw;
Partial_Reveal_Object* address;
+ Obj_Info_Type obj_info;
} Live_Object_Inform;
typedef struct Live_Object_Ref_Slot_Inform_Struct{
VT vt_raw;
Partial_Reveal_Object* address;
+ Obj_Info_Type obj_info;
VT ref_slot[1];
} Live_Object_Ref_Slot_Inform;
@@ -88,14 +92,18 @@
void verifier_reset_hash_distance();
-inline unsigned int verifier_get_gc_collect_kind(GC_Verifier* gc_verifier)
-{ return gc_verifier->gc_collect_kind; }
inline void verifier_set_gc_collect_kind(GC_Verifier* gc_verifier, unsigned int collect_kind)
-{ gc_verifier->gc_collect_kind = collect_kind; }
+{ gc_verifier->collect_kind = collect_kind; }
+inline Boolean verifier_collect_is_minor(GC_Verifier* gc_verifier)
+{
+ return (gc_verifier->collect_kind & ALGO_MAJOR) == 0;
+}
+
inline void verifier_set_fallback_collection(GC_Verifier* gc_verifier, Boolean is_before_fallback)
{ gc_verifier->is_before_fallback_collection = is_before_fallback; }
#endif
+
Index: vm/gc_gen/src/verify/verify_live_heap.cpp
===================================================================
--- vm/gc_gen/src/verify/verify_live_heap.cpp (revision 636625)
+++ vm/gc_gen/src/verify/verify_live_heap.cpp (working copy)
@@ -69,7 +69,7 @@
void verify_heap_before_gc(GC* gc)
{
- verifier_set_gc_collect_kind(heap_verifier->gc_verifier, gc->collect_kind);
+ verifier_set_gc_collect_kind(heap_verifier->gc_verifier, GC_PROP);
verifier_set_gen_mode(heap_verifier);
verifier_reset_mutator_verification(heap_verifier);
verifier_reset_gc_verification(heap_verifier);
@@ -89,11 +89,10 @@
}
void verifier_cleanup_block_info(GC* gc);
-extern unsigned int MAJOR_ALGO;
void verify_heap_after_gc(GC* gc)
{
- if(MAJOR_ALGO != MAJOR_MARK_SWEEP)
+ if(!major_is_marksweep())
verifier_cleanup_block_info(gc);
if(need_scan_live_objs(heap_verifier))
@@ -121,7 +120,7 @@
void event_gc_collect_kind_changed(GC* gc)
{
/*GC collection kind were changed from normal MINOR or MAJOR to FALLBACK MAJOR*/
- assert(gc_match_kind(gc, FALLBACK_COLLECTION));
+ assert(collect_is_fallback());
if(!heap_verifier->need_verify_gc) return;
/*finish the fallbacked gc verify*/
@@ -136,7 +135,7 @@
/*start fallback major gc verify */
heap_verifier->is_before_gc = TRUE;
verifier_set_fallback_collection(heap_verifier->gc_verifier, TRUE);
- verifier_set_gc_collect_kind(heap_verifier->gc_verifier, gc->collect_kind);
+ verifier_set_gc_collect_kind(heap_verifier->gc_verifier, GC_PROP);
verifier_set_gen_mode(heap_verifier);
verifier_reset_gc_verification(heap_verifier);
@@ -152,3 +151,4 @@
Heap_Verifier* get_heap_verifier()
{ return heap_verifier; }
+
Index: vm/gc_gen/src/verify/verify_mutator_effect.cpp
===================================================================
--- vm/gc_gen/src/verify/verify_mutator_effect.cpp (revision 636625)
+++ vm/gc_gen/src/verify/verify_mutator_effect.cpp (working copy)
@@ -330,7 +330,7 @@
void verifier_mark_wb_slots(Heap_Verifier* heap_verifier)
{
GC_Gen* gc = (GC_Gen*)(heap_verifier->gc);
- if(gc_match_kind((GC*)gc, MAJOR_COLLECTION) ||!gc_is_gen_mode()) return;
+ if(collect_is_major() ||!gc_is_gen_mode()) return;
GC_Metadata*gc_metadata = gc->metadata;
Space* nspace = gc_get_nos(gc);
@@ -363,7 +363,7 @@
void verify_write_barrier(REF* p_ref, Heap_Verifier* heap_verifier)
{
GC_Gen* gc = (GC_Gen*)heap_verifier->gc;
- if(gc_match_kind((GC*)gc, MAJOR_COLLECTION) ||!gc_is_gen_mode()) return;
+ if(collect_is_major() ||!gc_is_gen_mode()) return;
Space* nspace = gc_get_nos(gc);
assert(address_belongs_to_gc_heap((void*)p_ref, (GC *) gc));
@@ -435,3 +435,4 @@
+