Index: src/common/gc_common.cpp =================================================================== --- src/common/gc_common.cpp (revision 521779) +++ src/common/gc_common.cpp (working copy) @@ -31,7 +31,7 @@ unsigned int SPACE_ALLOC_UNIT; -extern Boolean GC_VERIFY; +extern char* GC_VERIFY; extern POINTER_SIZE_INT NOS_SIZE; extern POINTER_SIZE_INT MIN_NOS_SIZE; @@ -223,7 +223,9 @@ } if (is_property_set("gc.verify", VM_PROPERTIES) == 1) { - GC_VERIFY = get_boolean_property("gc.verify"); + char* value = get_property("gc.verify", VM_PROPERTIES); + GC_VERIFY = strdup(value); + destroy_property_value(value); } if (is_property_set("gc.gen_nongen_switch", VM_PROPERTIES) == 1){ Index: src/common/gc_common.h =================================================================== --- src/common/gc_common.h (revision 521779) +++ src/common/gc_common.h (working copy) @@ -351,8 +351,8 @@ typedef struct GC{ void* heap_start; void* heap_end; - unsigned int reserved_heap_size; - unsigned int committed_heap_size; + POINTER_SIZE_INT reserved_heap_size; + POINTER_SIZE_INT committed_heap_size; unsigned int num_collections; int64 time_collections; float survive_ratio; Index: src/common/gc_for_vm.cpp =================================================================== --- src/common/gc_for_vm.cpp (revision 521779) +++ src/common/gc_for_vm.cpp (working copy) @@ -161,6 +161,11 @@ return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc)); } +int64 gc_max_memory() +{ + return (int64)((POINTER_SIZE_INT)gc_gen_total_memory_size((GC_Gen*)p_global_gc)); +} + void gc_vm_initialized() { return; } @@ -179,8 +184,27 @@ unsigned int gc_time_since_last_gc() { assert(0); return 0; } +#define GCGEN_HASH_MASK 0x7c int32 gc_get_hashcode(Managed_Object_Handle p_object) -{ return 23; } +{ + Partial_Reveal_Object *obj = (Partial_Reveal_Object *)p_object; + if(!obj) return 0; + assert(address_belongs_to_gc_heap(obj, p_global_gc)); + Obj_Info_Type info = get_obj_info_raw(obj); + int hash = 
info & GCGEN_HASH_MASK; + if (!hash) { + hash = (((unsigned int)obj) >> 3) & GCGEN_HASH_MASK; + if(!hash) hash = (23 & GCGEN_HASH_MASK); + unsigned int new_info = (unsigned int)(info | hash); + while (true) { + unsigned int temp = atomic_cas32(&obj->obj_info, new_info, info); + if (temp == info) break; + info = get_obj_info_raw(obj); + new_info = (unsigned int)(info | hash); + } + } + return hash; +} void gc_finalize_on_exit() { Index: src/common/gc_platform.h =================================================================== --- src/common/gc_platform.h (revision 521779) +++ src/common/gc_platform.h (working copy) @@ -22,6 +22,7 @@ #define _GC_PLATFORM_H_ #include "port_vmem.h" +#include "port_atomic.h" #include @@ -97,8 +98,18 @@ (hythread_entrypoint_t)func, data); } -inline void *atomic_casptr(volatile void **mem, void *with, const void *cmp) { - return apr_atomic_casptr(mem, with, cmp); +inline void *atomic_casptr(volatile void **mem, void *with, const void *cmp) +{ return apr_atomic_casptr(mem, with, cmp); } + +inline POINTER_SIZE_INT atomic_casptrsz(volatile POINTER_SIZE_INT* mem, + POINTER_SIZE_INT swap, + POINTER_SIZE_INT cmp) +{ +#ifdef POINTER64 + return port_atomic_cas64(mem, swap, cmp); +#else + return apr_atomic_cas32(mem, swap, cmp); +#endif } inline uint32 atomic_cas32(volatile apr_uint32_t *mem, Index: src/common/interior_pointer.cpp =================================================================== --- src/common/interior_pointer.cpp (revision 521779) +++ src/common/interior_pointer.cpp (working copy) @@ -41,7 +41,13 @@ //check size; if( interior_pointer_set.size() == interior_pointer_num_count ) { - unsigned int size = interior_pointer_num_count == 0 ? 
initial_vector_size : interior_pointer_set.size()*2; + int size ; + if(interior_pointer_num_count == 0){ + size = initial_vector_size ; + + }else{ + size = (unsigned int)interior_pointer_set.size()*2; + } interior_pointer_set.resize(size); } Index: src/common/space_tuner.cpp =================================================================== --- src/common/space_tuner.cpp (revision 521779) +++ src/common/space_tuner.cpp (working copy) @@ -29,7 +29,7 @@ Space* gc_get_nos(GC_Gen* gc); Space* gc_get_los(GC_Gen* gc); POINTER_SIZE_INT mspace_get_expected_threshold(Mspace* mspace); -unsigned int lspace_get_failure_size(Lspace* lspace); +POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace); /*Now just prepare the alloc_size field of mspace, used to compute new los size.*/ void gc_space_tune_prepare(GC* gc, unsigned int cause) @@ -162,7 +162,7 @@ /*For non gen virable sized NOS*/ else { - unsigned int los_fail_sz = lspace_get_failure_size((Lspace*)lspace); + POINTER_SIZE_INT los_fail_sz = lspace_get_failure_size((Lspace*)lspace); if(los_fail_sz > GC_LOS_MIN_VARY_SIZE){ /*Fixme: we should set the least_tuning_size after finding out the biggest free area in LOS, this number could be zero*/ @@ -200,6 +200,46 @@ return; } +#include "../thread/collector.h" +#include "../mark_sweep/lspace.h" +Boolean retune_los_size(GC *gc) +{ + POINTER_SIZE_INT non_los_live_obj_size = 0; + unsigned int collector_num = gc->num_active_collectors; + + for(unsigned int i = collector_num; i--;){ + Collector *collector = gc->collectors[i]; + non_los_live_obj_size += collector->non_los_live_obj_size; + } + POINTER_SIZE_INT non_los_live_block_num = (non_los_live_obj_size + GC_BLOCK_SIZE_BYTES) >> GC_BLOCK_SHIFT_COUNT; + non_los_live_block_num += collector_num << 2; + + Lspace *los = (Lspace*)gc_get_los((GC_Gen*)gc); + Space_Tuner *tuner = gc->tuner; + POINTER_SIZE_INT failure_size = los->failure_size; + POINTER_SIZE_INT min_tuning_block_num = round_up_to_size(failure_size, SPACE_ALLOC_UNIT) >> 
GC_BLOCK_SHIFT_COUNT; + POINTER_SIZE_INT tuning_block_num = tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT; + POINTER_SIZE_INT heap_block_num = gc->committed_heap_size >> GC_BLOCK_SHIFT_COUNT; + POINTER_SIZE_INT los_block_num = los->committed_heap_size >> GC_BLOCK_SHIFT_COUNT; + + POINTER_SIZE_INT live_block_num = los_block_num + non_los_live_block_num; + while(live_block_num + tuning_block_num > heap_block_num){ + if(tuning_block_num == min_tuning_block_num){ //has not enough space to extend los + tuner->tuning_size = 0; + tuner->kind = TRANS_NOTHING; + return FALSE; + } + tuning_block_num -= (SPACE_ALLOC_UNIT >> GC_BLOCK_SHIFT_COUNT) << 2; + if(tuning_block_num < min_tuning_block_num) + tuning_block_num = min_tuning_block_num; + } + + POINTER_SIZE_INT tuning_size = tuning_block_num << GC_BLOCK_SHIFT_COUNT; + if(tuner->tuning_size != tuning_size) // retune los extension size + tuner->tuning_size = tuning_size; + return TRUE; +} + void gc_space_tuner_reset(GC* gc) { if( !gc_match_kind(gc, MINOR_COLLECTION) && (gc->tuner->kind != TRANS_NOTHING)){ Index: src/common/space_tuner.h =================================================================== --- src/common/space_tuner.h (revision 521779) +++ src/common/space_tuner.h (working copy) @@ -70,6 +70,7 @@ void gc_space_tune_prepare(GC* gc, unsigned int cause); void gc_space_tune_before_gc(GC* gc, unsigned int cause); void gc_space_tune_before_gc_fixed_size(GC* gc, unsigned int cause); +Boolean retune_los_size(GC *gc); void gc_space_tuner_reset(GC* gc); void gc_space_tuner_initialize(GC* gc); Index: src/gen/gen.cpp =================================================================== --- src/gen/gen.cpp (revision 521779) +++ src/gen/gen.cpp (working copy) @@ -40,7 +40,7 @@ Boolean GEN_NONGEN_SWITCH = FALSE; -Boolean JVMTI_HEAP_ITERATION = false; +Boolean JVMTI_HEAP_ITERATION = true; #ifndef STATIC_NOS_MAPPING void* nos_boundary; @@ -268,7 +268,7 @@ /* this is for debugging. 
*/ gc->last_collect_kind = gc->collect_kind; - if(gc->force_major_collect || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT) + if(gc->force_major_collect || cause== GC_CAUSE_LOS_IS_FULL || FORCE_FULL_COMPACT || (gc->nos->num_managed_blocks == 0)) gc->collect_kind = MAJOR_COLLECTION; else gc->collect_kind = MINOR_COLLECTION; @@ -365,6 +365,9 @@ gc_reset_collect_result((GC*)gc); gc->collect_kind = FALLBACK_COLLECTION; + if(verify_live_heap) + event_gc_collect_kind_changed((GC*)gc); + mspace_collection(gc->mos); /* fspace collection is included */ lspace_collection(gc->los); Index: src/mark_compact/los_extention_mark_scan.cpp =================================================================== --- src/mark_compact/los_extention_mark_scan.cpp (revision 0) +++ src/mark_compact/los_extention_mark_scan.cpp (revision 0) @@ -0,0 +1,192 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "../common/gc_metadata.h" +#include "../thread/collector.h" +#include "../gen/gen.h" +#include "../finalizer_weakref/finalizer_weakref.h" + +static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref) +{ + REF ref = *p_ref; + if(ref == COMPRESSED_NULL) return; + + Partial_Reveal_Object *p_obj = uncompress_ref(ref); + if(obj_mark_in_vt(p_obj)){ + collector_tracestack_push(collector, p_obj); + if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)collector->gc))) + collector->non_los_live_obj_size += vm_object_size(p_obj); + } + + return; +} + +static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) +{ + if( !object_has_ref_field(p_obj) ) return; + + REF *p_ref; + + if (object_is_array(p_obj)) { /* scan array object */ + + Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj; + unsigned int array_length = array->array_len; + + p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); + + for (unsigned int i = 0; i < array_length; i++) { + scan_slot(collector, p_ref+i); + } + + }else{ /* scan non-array object */ + + unsigned int num_refs = object_ref_field_num(p_obj); + + int* ref_iterator = object_ref_iterator_init(p_obj); + + for(unsigned int i=0; itrace_stack; + while( !vector_stack_is_empty(trace_stack)){ + p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack); + scan_object(collector, p_obj); + trace_stack = collector->trace_stack; + } + + return; +} + +/* for marking phase termination detection */ +static volatile unsigned int num_finished_collectors = 0; + +/* NOTE:: Only marking in object header is idempotent. + Originally, we have to mark the object before put it into markstack, to + guarantee there is only one occurrance of an object in markstack. This is to + guarantee there is only one occurrance of a repointed ref slot in repset (they + are put to the set when the object is scanned). 
If the same object is put to + markstack twice, they will be scanned twice and their ref slots will be recorded twice. + Problem occurs when the ref slot is updated first time with new position, + the second time the value in the ref slot is not the old position as expected. + It needs to read the original obj header for forwarding pointer. With the new value, + it will read something nonsense since the obj is not moved yet. + This can be worked around if we want. + To do this we have to use atomic instruction for marking, which is undesirable. + So we abondoned this design. We no longer use the repset to remember repointed slots +*/ + +void los_extention_mark_scan_heap(Collector *collector) +{ + GC* gc = collector->gc; + GC_Metadata* metadata = gc->metadata; + + /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */ + unsigned int num_active_collectors = gc->num_active_collectors; + atomic_cas32( &num_finished_collectors, 0, num_active_collectors); + + collector->trace_stack = free_task_pool_get_entry(metadata); + + Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool); + + /* first step: copy all root objects to mark tasks. + FIXME:: can be done sequentially before coming here to eliminate atomic ops */ + while(root_set){ + POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + REF *p_ref = (REF *)*iter; + iter = vector_block_iterator_advance(root_set,iter); + + Partial_Reveal_Object *p_obj = read_slot(p_ref); + /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */ + assert(p_obj!=NULL); + /* we have to mark the object before put it into marktask, because + it is possible to have two slots containing a same object. They will + be scanned twice and their ref slots will be recorded twice. 
Problem + occurs after the ref slot is updated first time with new position + and the second time the value is the ref slot is the old position as expected. + This can be worked around if we want. + */ + if(obj_mark_in_vt(p_obj)){ + collector_tracestack_push(collector, p_obj); + if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))) + collector->non_los_live_obj_size += vm_object_size(p_obj); + } + + } + root_set = pool_iterator_next(metadata->gc_rootset_pool); + } + /* put back the last trace_stack task */ + pool_put_entry(metadata->mark_task_pool, collector->trace_stack); + + /* second step: iterate over the mark tasks and scan objects */ + /* get a task buf for the mark stack */ + collector->trace_stack = free_task_pool_get_entry(metadata); + +retry: + Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool); + + while(mark_task){ + POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task); + while(!vector_block_iterator_end(mark_task,iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter; + iter = vector_block_iterator_advance(mark_task,iter); + + /* FIXME:: we should not let mark_task empty during working, , other may want to steal it. + degenerate my stack into mark_task, and grab another mark_task */ + trace_object(collector, p_obj); + } + /* run out one task, put back to the pool and grab another task */ + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + mark_task = pool_get_entry(metadata->mark_task_pool); + } + + /* termination detection. This is also a barrier. + NOTE:: We can simply spin waiting for num_finished_collectors, because each + generated new task would surely be processed by its generating collector eventually. + So code below is only for load balance optimization. 
*/ + atomic_inc32(&num_finished_collectors); + while(num_finished_collectors != num_active_collectors){ + if( !pool_is_empty(metadata->mark_task_pool)){ + atomic_dec32(&num_finished_collectors); + goto retry; + } + } + + /* put back the last mark stack to the free pool */ + mark_task = (Vector_Block*)collector->trace_stack; + vector_stack_clear(mark_task); + pool_put_entry(metadata->free_task_pool, mark_task); + collector->trace_stack = NULL; + + return; +} + Index: src/mark_compact/mspace.cpp =================================================================== --- src/mark_compact/mspace.cpp (revision 521779) +++ src/mark_compact/mspace.cpp (working copy) @@ -38,7 +38,7 @@ memset(mspace, 0, sizeof(Mspace)); mspace->reserved_heap_size = mspace_size; - mspace->num_total_blocks = mspace_size >> GC_BLOCK_SHIFT_COUNT; + mspace->num_total_blocks = (unsigned int)(mspace_size >> GC_BLOCK_SHIFT_COUNT); void* reserved_base = start; /* commit mspace mem */ @@ -55,7 +55,7 @@ mspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + commit_size); #endif - mspace->num_managed_blocks = commit_size >> GC_BLOCK_SHIFT_COUNT; + mspace->num_managed_blocks = (unsigned int)(commit_size >> GC_BLOCK_SHIFT_COUNT); mspace->first_block_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, reserved_base); mspace->ceiling_block_idx = mspace->first_block_idx + mspace->num_managed_blocks - 1; Index: src/mark_compact/mspace_collect_compact.h =================================================================== --- src/mark_compact/mspace_collect_compact.h (revision 521779) +++ src/mark_compact/mspace_collect_compact.h (working copy) @@ -40,6 +40,7 @@ void move_compact_mspace(Collector* collector); void fallback_mark_scan_heap(Collector* collector); +void los_extention_mark_scan_heap(Collector *collector); void mspace_extend_compact(Collector *collector); Index: src/mark_compact/mspace_extend_compact.cpp =================================================================== --- 
src/mark_compact/mspace_extend_compact.cpp (revision 521779) +++ src/mark_compact/mspace_extend_compact.cpp (working copy) @@ -25,7 +25,6 @@ #include "../gen/gen.h" #include "../common/fix_repointed_refs.h" #include "../common/interior_pointer.h" -#include "../verify/verify_live_heap.h" static volatile Block *mos_first_new_block = NULL; static volatile Block *nos_first_free_block = NULL; @@ -279,21 +278,22 @@ if( ++old_num == num_active_collectors ){ Block *old_nos_boundary = fspace->blocks; nos_boundary = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx]; - assert(nos_boundary > old_nos_boundary); + if(fspace->num_managed_blocks != 0) + assert(nos_boundary > old_nos_boundary); POINTER_SIZE_INT mem_change_size = ((Block *)nos_boundary - old_nos_boundary) << GC_BLOCK_SHIFT_COUNT; fspace->heap_start = nos_boundary; fspace->blocks = (Block *)nos_boundary; fspace->committed_heap_size -= mem_change_size; fspace->num_managed_blocks = (unsigned int)(fspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT); fspace->num_total_blocks = fspace->num_managed_blocks; - fspace->first_block_idx = ((Block_Header *)nos_boundary)->block_idx; + fspace->first_block_idx = mspace->free_block_idx; fspace->free_block_idx = fspace->first_block_idx; mspace->heap_end = nos_boundary; mspace->committed_heap_size += mem_change_size; mspace->num_managed_blocks = (unsigned int)(mspace->committed_heap_size >> GC_BLOCK_SHIFT_COUNT); mspace->num_total_blocks = mspace->num_managed_blocks; - mspace->ceiling_block_idx = ((Block_Header *)nos_boundary)->block_idx - 1; + mspace->ceiling_block_idx = mspace->free_block_idx - 1; num_space_changing_collectors ++; } Index: src/mark_compact/mspace_move_compact.cpp =================================================================== --- src/mark_compact/mspace_move_compact.cpp (revision 521779) +++ src/mark_compact/mspace_move_compact.cpp (working copy) @@ -28,8 +28,6 @@ Space* gc_get_mos(GC_Gen* gc); Space* gc_get_los(GC_Gen* gc); -#include 
"../verify/verify_live_heap.h" - static void mspace_move_objects(Collector* collector, Mspace* mspace) { Block_Header* curr_block = collector->cur_compact_block; @@ -82,20 +80,6 @@ POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr; curr_block->table[curr_sector] = sector_distance; - if (verify_live_heap) { - Partial_Reveal_Object *rescan_obj = (Partial_Reveal_Object *)src_sector_addr; - void *rescan_pos = (Partial_Reveal_Object *)((POINTER_SIZE_INT)rescan_obj + vm_object_size(rescan_obj)); - while ((POINTER_SIZE_INT)rescan_obj < (POINTER_SIZE_INT)src_sector_addr + curr_sector_size) { - Partial_Reveal_Object* targ_obj = (Partial_Reveal_Object *)((POINTER_SIZE_INT)rescan_obj- sector_distance); - if(is_fallback) - event_collector_doublemove_obj(rescan_obj, targ_obj, collector); - else - event_collector_move_obj(rescan_obj, targ_obj, collector); - rescan_obj = block_get_next_marked_object(curr_block, &rescan_pos); - if(rescan_obj == NULL) break; - } - } - memmove(dest_sector_addr, src_sector_addr, curr_sector_size); dest_sector_addr = (void*)((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size); @@ -209,7 +193,7 @@ while(num_fixing_collectors != num_active_collectors + 1); /* Dealing with out of memory in mspace */ - if(mspace->free_block_idx > fspace->first_block_idx){ + if((mspace->free_block_idx > fspace->first_block_idx) || ((fspace->num_managed_blocks == 0) && (mspace->free_block_idx < fspace->first_block_idx))){ atomic_cas32( &num_extending_collectors, 0, num_active_collectors); mspace_extend_compact(collector); atomic_inc32(&num_extending_collectors); Index: src/mark_compact/mspace_slide_compact.cpp =================================================================== --- src/mark_compact/mspace_slide_compact.cpp (revision 521779) +++ src/mark_compact/mspace_slide_compact.cpp (working copy) @@ -153,6 +153,7 @@ cur_dest_block = (Block_Header*)next_block_for_dest; while(cur_dest_block->status == BLOCK_DEST){ 
cur_dest_block = cur_dest_block->next; + if(!cur_dest_block) break; } next_block_for_dest = cur_dest_block; } else { @@ -162,6 +163,7 @@ unsigned int total_dest_counter = 0; Block_Header *last_dest_block = (Block_Header *)last_block_for_dest; for(; cur_dest_block <= last_dest_block; cur_dest_block = cur_dest_block->next){ + if(!cur_dest_block) return NULL; if(cur_dest_block->status == BLOCK_DEST){ continue; } @@ -262,8 +264,6 @@ return; } - -#include "../verify/verify_live_heap.h" extern unsigned int mspace_free_block_idx; static void mspace_sliding_compact(Collector* collector, Mspace* mspace) @@ -291,14 +291,6 @@ unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj); if(p_obj != p_target_obj){ memmove(p_target_obj, p_obj, obj_size); - - if(verify_live_heap){ - /* we forwarded it, we need remember it for verification */ - if(is_fallback) - event_collector_doublemove_obj(p_obj, p_target_obj, collector); - else - event_collector_move_obj(p_obj, p_target_obj, collector); - } } set_obj_info(p_target_obj, 0); @@ -346,14 +338,18 @@ have references that are going to be repointed */ unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); - if(!gc_match_kind(gc, FALLBACK_COLLECTION)) - mark_scan_heap(collector); - else + if(gc_match_kind(gc, FALLBACK_COLLECTION)) fallback_mark_scan_heap(collector); + else if(gc->cause == GC_CAUSE_LOS_IS_FULL) + los_extention_mark_scan_heap(collector); + else + mark_scan_heap(collector); old_num = atomic_inc32(&num_marking_collectors); if( ++old_num == num_active_collectors ){ /* last collector's world here */ + if(gc->cause == GC_CAUSE_LOS_IS_FULL) + retune_los_size(gc); /* prepare for next phase */ gc_init_block_for_collectors(gc, mspace); @@ -434,7 +430,7 @@ while(num_restoring_collectors != num_active_collectors + 1); /* Dealing with out of memory in mspace */ - if(mspace->free_block_idx > fspace->first_block_idx){ + if((mspace->free_block_idx > 
fspace->first_block_idx) || ((fspace->num_managed_blocks == 0) && (mspace->free_block_idx < fspace->first_block_idx))){ atomic_cas32( &num_extending_collectors, 0, num_active_collectors); mspace_extend_compact(collector); Index: src/mark_sweep/free_area_pool.cpp =================================================================== --- src/mark_sweep/free_area_pool.cpp (revision 521779) +++ src/mark_sweep/free_area_pool.cpp (working copy) @@ -38,7 +38,7 @@ free_area_pool_init(pool); } -Free_Area* free_pool_find_size_area(Free_Area_Pool* pool, unsigned int size) +Free_Area* free_pool_find_size_area(Free_Area_Pool* pool, POINTER_SIZE_INT size) { assert(size >= GC_OBJ_SIZE_THRESHOLD); Index: src/mark_sweep/free_area_pool.h =================================================================== --- src/mark_sweep/free_area_pool.h (revision 521779) +++ src/mark_sweep/free_area_pool.h (working copy) @@ -46,11 +46,11 @@ Bidir_List* next; Bidir_List* prev; /* END of Bidir_List --> */ - unsigned int size; + POINTER_SIZE_INT size; }Free_Area; /* this is the only interface for new area creation */ -inline Free_Area* free_area_new(void* start, unsigned int size) +inline Free_Area* free_area_new(void* start, POINTER_SIZE_INT size) { assert(ADDRESS_IS_KB_ALIGNED(start)); assert(ADDRESS_IS_KB_ALIGNED(size)); @@ -90,12 +90,12 @@ return words_get_next_set_lsb(pool->list_bit_flag, NUM_FLAG_WORDS, start_idx); } -inline unsigned int pool_list_index_with_size(unsigned int size) +inline unsigned int pool_list_index_with_size(POINTER_SIZE_INT size) { assert(size >= GC_OBJ_SIZE_THRESHOLD); unsigned int index; - index = size >> BIT_SHIFT_TO_KILO; + index = (unsigned int) (size >> BIT_SHIFT_TO_KILO); if(index > MAX_LIST_INDEX) index = MAX_LIST_INDEX; return index; } Index: src/mark_sweep/lspace.cpp =================================================================== --- src/mark_sweep/lspace.cpp (revision 521779) +++ src/mark_sweep/lspace.cpp (working copy) @@ -39,7 +39,7 @@ /* commit mspace mem */ 
void* reserved_base = start; - unsigned int committed_size = lspace_size; + POINTER_SIZE_INT committed_size = lspace_size; if(!large_page_hint) vm_commit_mem(reserved_base, lspace_size); memset(reserved_base, 0, lspace_size); @@ -111,7 +111,7 @@ return; } -unsigned int lspace_get_failure_size(Lspace* lspace) +POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace) { return lspace->failure_size; } Index: src/mark_sweep/lspace.h =================================================================== --- src/mark_sweep/lspace.h (revision 521779) +++ src/mark_sweep/lspace.h (working copy) @@ -45,12 +45,12 @@ Free_Area_Pool* free_pool; /*Size of allocation which caused lspace alloc failure.*/ - unsigned int failure_size; + POINTER_SIZE_INT failure_size; }Lspace; void lspace_initialize(GC* gc, void* reserved_base, POINTER_SIZE_INT lspace_size); void lspace_destruct(Lspace* lspace); -Managed_Object_Handle lspace_alloc(unsigned int size, Allocator* allocator); +Managed_Object_Handle lspace_alloc(POINTER_SIZE_INT size, Allocator* allocator); void lspace_sweep(Lspace* lspace); void lspace_reset_after_collection(Lspace* lspace); void lspace_collection(Lspace* lspace); @@ -71,12 +71,12 @@ if(next_area_start < (POINTER_SIZE_INT)lspace->heap_end){ //If there is a living object at this addr, return it, and update iterate_index if(obj_is_marked_in_vt((Partial_Reveal_Object*)next_area_start)){ - unsigned int obj_size = (unsigned int)ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)); + POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)); *iterate_index = (unsigned int)((next_area_start + obj_size - (POINTER_SIZE_INT)lspace->heap_start) >> BIT_SHIFT_TO_KILO); return (Partial_Reveal_Object*)next_area_start; //If this is a dead object, go on to find a living one. 
}else{ - unsigned int obj_size = (unsigned int)ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)); + POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)); next_area_start += obj_size; } }else{ @@ -96,6 +96,6 @@ void lspace_fix_repointed_refs(Collector* collector, Lspace* lspace); -unsigned int lspace_get_failure_size(Lspace* lspace); +POINTER_SIZE_INT lspace_get_failure_size(Lspace* lspace); #endif /*_LSPACE_H_ */ Index: src/mark_sweep/lspace_alloc_collect.cpp =================================================================== --- src/mark_sweep/lspace_alloc_collect.cpp (revision 521779) +++ src/mark_sweep/lspace_alloc_collect.cpp (working copy) @@ -39,11 +39,11 @@ Bidir_List* head = (Bidir_List*)(&pool->sized_area_list[list_index]); return (head->next == head); } -static void* free_pool_former_lists_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int list_hint, unsigned int size) +static void* free_pool_former_lists_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int list_hint, POINTER_SIZE_INT size) { Free_Area* free_area; void* p_result; - int remain_size; + POINTER_SIZE_SINT remain_size; POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size); unsigned int new_list_nr = 0; Lockable_Bidir_List* head = &pool->sized_area_list[list_hint]; @@ -93,11 +93,11 @@ return NULL; } -static void* free_pool_last_list_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int size) +static void* free_pool_last_list_atomic_take_area_piece(Free_Area_Pool* pool, POINTER_SIZE_INT size) { void* p_result; - int remain_size = 0; - unsigned int alloc_size = ALIGN_UP_TO_KILO(size); + POINTER_SIZE_SINT remain_size = 0; + POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size); Free_Area* free_area = NULL; Free_Area* new_area = NULL; unsigned int new_list_nr = 0; @@ -148,12 +148,12 @@ return NULL; } -void* lspace_alloc(unsigned int size, Allocator *allocator) +void* lspace_alloc(POINTER_SIZE_INT size, Allocator 
*allocator) { unsigned int try_count = 0; void* p_result = NULL; unsigned int list_hint = 0; - unsigned int alloc_size = ALIGN_UP_TO_KILO(size); + POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size); Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc); Free_Area_Pool* pool = lspace->free_pool; @@ -166,9 +166,9 @@ p_result = free_pool_former_lists_atomic_take_area_piece(pool, list_hint, alloc_size); if(p_result){ memset(p_result, 0, size); - unsigned int vold = lspace->alloced_size; - unsigned int vnew = vold + alloc_size; - while( vold != atomic_cas32((volatile unsigned int*)&lspace->alloced_size, vnew, vold) ){ + POINTER_SIZE_INT vold = lspace->alloced_size; + POINTER_SIZE_INT vnew = vold + alloc_size; + while( vold != atomic_casptrsz(&lspace->alloced_size, vnew, vold) ){ vold = lspace->alloced_size; vnew = vold + alloc_size; } @@ -185,9 +185,9 @@ p_result = free_pool_last_list_atomic_take_area_piece(pool, alloc_size); if(p_result){ memset(p_result, 0, size); - unsigned int vold = lspace->alloced_size; - unsigned int vnew = vold + alloc_size; - while( vold != atomic_cas32((volatile unsigned int*)&lspace->alloced_size, vnew, vold) ){ + POINTER_SIZE_INT vold = lspace->alloced_size; + POINTER_SIZE_INT vnew = vold + alloc_size; + while( vold != atomic_casptrsz(&lspace->alloced_size, vnew, vold) ){ vold = lspace->alloced_size; vnew = vold + alloc_size; } @@ -214,7 +214,7 @@ { GC* gc = lspace->gc; Space_Tuner* tuner = gc->tuner; - unsigned int trans_size = tuner->tuning_size; + POINTER_SIZE_INT trans_size = tuner->tuning_size; assert(!(trans_size%GC_BLOCK_SIZE_BYTES)); //For_LOS_extend if(tuner->kind == TRANS_FROM_MOS_TO_LOS){ @@ -242,7 +242,8 @@ /* reset the pool first because its info is useless now. 
*/ free_area_pool_reset(lspace->free_pool); - unsigned int mark_bit_idx = 0, cur_size = 0; + unsigned int mark_bit_idx = 0; + POINTER_SIZE_INT cur_size = 0; void *cur_area_start, *cur_area_end; Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start; Index: src/thread/collector.cpp =================================================================== --- src/thread/collector.cpp (revision 521779) +++ src/thread/collector.cpp (working copy) @@ -76,6 +76,9 @@ collector_reset_weakref_sets(collector); #endif + if(collector->gc->cause == GC_CAUSE_LOS_IS_FULL) + collector->non_los_live_obj_size = 0; + collector->result = TRUE; return; } Index: src/thread/collector.h =================================================================== --- src/thread/collector.h (revision 521779) +++ src/thread/collector.h (working copy) @@ -56,6 +56,7 @@ void(*task_func)(void*) ; /* current task */ + POINTER_SIZE_INT non_los_live_obj_size; unsigned int result; }Collector; Index: src/thread/gc_thread.cpp =================================================================== --- src/thread/gc_thread.cpp (revision 521779) +++ src/thread/gc_thread.cpp (working copy) @@ -21,7 +21,7 @@ #include "gc_thread.h" static hythread_tls_key_t tls_gc_key; -unsigned int tls_gc_offset; +POINTER_SIZE_INT tls_gc_offset; void gc_tls_init() { Index: src/thread/gc_thread.h =================================================================== --- src/thread/gc_thread.h (revision 521779) +++ src/thread/gc_thread.h (working copy) @@ -27,7 +27,7 @@ #define ALLOC_ZEROING #define ZEROING_SIZE 2*KB -extern unsigned int tls_gc_offset; +extern POINTER_SIZE_INT tls_gc_offset; inline void* gc_get_tls() { Index: src/thread/mutator_alloc.cpp =================================================================== --- src/thread/mutator_alloc.cpp (revision 521779) +++ src/thread/mutator_alloc.cpp (working copy) @@ -61,6 +61,8 @@ assert((size % GC_OBJECT_ALIGNMENT) == 0); assert(ah); + size = (size & 
NEXT_TO_HIGH_BIT_CLEAR_MASK); + Allocator* allocator = (Allocator*)gc_get_tls(); Boolean type_has_fin = type_has_finalizer((Partial_Reveal_VTable*)uncompress_vt((VT)ah)); Index: src/trace_forward/fspace.cpp =================================================================== --- src/trace_forward/fspace.cpp (revision 521779) +++ src/trace_forward/fspace.cpp (working copy) @@ -41,7 +41,7 @@ memset(fspace, 0, sizeof(Fspace)); fspace->reserved_heap_size = fspace_size; - fspace->num_total_blocks = fspace_size >> GC_BLOCK_SHIFT_COUNT; + fspace->num_total_blocks = (unsigned int)(fspace_size >> GC_BLOCK_SHIFT_COUNT); void* reserved_base = start; /* commit fspace mem */ @@ -58,7 +58,7 @@ fspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + fspace->committed_heap_size); #endif - fspace->num_managed_blocks = commit_size >> GC_BLOCK_SHIFT_COUNT; + fspace->num_managed_blocks = (unsigned int)(commit_size >> GC_BLOCK_SHIFT_COUNT); fspace->first_block_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, reserved_base); fspace->ceiling_block_idx = fspace->first_block_idx + fspace->num_managed_blocks - 1; @@ -130,9 +130,9 @@ Block* blocks = fspace->blocks; unsigned int num_freed = 0; - unsigned int new_start_idx = fspace->free_block_idx - first_idx; - unsigned int new_last_idx = fspace->ceiling_block_idx - first_idx; - for(unsigned int i = new_start_idx; i <= new_last_idx; i++){ + int new_start_idx = (int)(fspace->free_block_idx) - (int)first_idx; + int new_last_idx = (int)fspace->ceiling_block_idx - (int)first_idx; + for(int i = new_start_idx; i <= new_last_idx; i++){ Block_Header* block = (Block_Header*)&(blocks[i]); block->src = NULL; block->next_src = NULL; Index: src/trace_forward/fspace_alloc.cpp =================================================================== --- src/trace_forward/fspace_alloc.cpp (revision 521779) +++ src/trace_forward/fspace_alloc.cpp (working copy) @@ -77,11 +77,18 @@ /* ran out local block, grab a new one*/ Fspace* fspace = 
(Fspace*)allocator->alloc_space; + int attempts = 0; while( !fspace_alloc_block(fspace, allocator)){ vm_gc_lock_enum(); /* after holding lock, try if other thread collected already */ if ( !space_has_free_block((Blocked_Space*)fspace) ) { - gc_reclaim_heap(allocator->gc, GC_CAUSE_NOS_IS_FULL); + if(attempts == 0) { + gc_reclaim_heap(allocator->gc, GC_CAUSE_NOS_IS_FULL); + attempts = 1; + }else{ + vm_gc_unlock_enum(); + return NULL; + } } vm_gc_unlock_enum(); } Index: src/trace_forward/fspace_gen_forward_pool.cpp =================================================================== --- src/trace_forward/fspace_gen_forward_pool.cpp (revision 521779) +++ src/trace_forward/fspace_gen_forward_pool.cpp (working copy) @@ -90,8 +90,6 @@ The same object can be traced by the thread itself, or by other thread. */ -#include "../verify/verify_live_heap.h" - static FORCE_INLINE void forward_object(Collector *collector, REF *p_ref) { Space* space = collector->collect_space; Index: src/trace_forward/fspace_nongen_forward_pool.cpp =================================================================== --- src/trace_forward/fspace_nongen_forward_pool.cpp (revision 521779) +++ src/trace_forward/fspace_nongen_forward_pool.cpp (working copy) @@ -80,7 +80,6 @@ The same object can be traced by the thread itself, or by other thread. */ -#include "../verify/verify_live_heap.h" static FORCE_INLINE void forward_object(Collector* collector, REF *p_ref) { GC* gc = collector->gc; @@ -116,11 +115,6 @@ /* otherwise, we successfully forwarded */ write_slot(p_ref, p_target_obj); - /* we forwarded it, we need remember it for verification. 
*/ - if(verify_live_heap) { - event_collector_move_obj(p_obj, p_target_obj, collector); - } - scan_object(collector, p_target_obj); return; } Index: src/verify/verifier_common.cpp =================================================================== --- src/verify/verifier_common.cpp (revision 0) +++ src/verify/verifier_common.cpp (revision 0) @@ -0,0 +1,193 @@ +#include "verifier_common.h" +#include "verify_gc_effect.h" +#include "verify_mutator_effect.h" +Boolean verifier_compare_objs_pools(Pool* objs_pool_before_gc, Pool* objs_pool_after_gc, Pool* free_pool ,Object_Comparator object_comparator) +{ + Vector_Block* objs_set_before_gc = pool_get_entry(objs_pool_before_gc); + Vector_Block* objs_set_after_gc = pool_get_entry(objs_pool_after_gc); + while(objs_set_before_gc && objs_set_after_gc){ + POINTER_SIZE_INT* iter_1 = vector_block_iterator_init(objs_set_before_gc); + POINTER_SIZE_INT* iter_2 = vector_block_iterator_init(objs_set_after_gc); + while(!vector_block_iterator_end(objs_set_before_gc, iter_1) + && !vector_block_iterator_end(objs_set_after_gc, iter_2) ){ + if(!(*object_comparator)(iter_1, iter_2)){ + assert(0); + printf("ERROR\n"); + return FALSE; + } + iter_1 = vector_block_iterator_advance(objs_set_before_gc, iter_1); + iter_2 = vector_block_iterator_advance(objs_set_after_gc, iter_2); + } + if(!vector_block_iterator_end(objs_set_before_gc, iter_1) + || !vector_block_iterator_end(objs_set_after_gc, iter_2) ) + return FALSE; + + vector_block_clear(objs_set_before_gc); + vector_block_clear(objs_set_after_gc); + pool_put_entry(free_pool, objs_set_before_gc); + pool_put_entry(free_pool, objs_set_after_gc); + objs_set_before_gc = pool_get_entry(objs_pool_before_gc); + objs_set_after_gc = pool_get_entry(objs_pool_after_gc); + } + if(pool_is_empty(objs_pool_before_gc)&&pool_is_empty(objs_pool_after_gc)) + return TRUE; + else + return FALSE; +} + +Boolean verifier_copy_rootsets(GC* gc, Heap_Verifier* heap_verifier) +{ + Heap_Verifier_Metadata* verifier_metadata
= heap_verifier->heap_verifier_metadata; + GC_Verifier* gc_verifier = heap_verifier->gc_verifier; + gc_verifier->root_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool); + + GC_Metadata* gc_metadata = gc->metadata; + pool_iterator_init(gc_metadata->gc_rootset_pool); + Vector_Block* root_set = pool_iterator_next(gc_metadata->gc_rootset_pool); + + while(root_set){ + POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + REF* p_ref = (REF* )*iter; + iter = vector_block_iterator_advance(root_set,iter); + if(*p_ref == COMPRESSED_NULL) continue; + verifier_rootset_push(p_ref,gc_verifier->root_set); + } + root_set = pool_iterator_next(gc_metadata->gc_rootset_pool); + } + pool_put_entry(verifier_metadata->root_set_pool, gc_verifier->root_set); + + gc_verifier->root_set = NULL; + return TRUE; +} + +Boolean verify_rootset_slot(REF* p_ref, Heap_Verifier* heap_verifier) +{ + GC_Gen* gc = (GC_Gen*)heap_verifier->gc; + Space* mspace = gc_get_mos(gc); + Space* lspace = gc_get_los(gc); + + Partial_Reveal_Object* p_obj = read_slot(p_ref); + if(p_obj == NULL){ + if(gc->collect_kind !=MINOR_COLLECTION ||(!heap_verifier->gc_is_gen_mode && !NOS_PARTIAL_FORWARD)){ + assert(0); + return FALSE; + }else{ + return TRUE; + } + } + if(!heap_verifier->gc_is_gen_mode){ + assert(!address_belongs_to_gc_heap(p_ref, heap_verifier->gc)); + if(address_belongs_to_gc_heap(p_ref, heap_verifier->gc)){ + printf("ERROR\n"); + return FALSE; + } + } + assert(address_belongs_to_gc_heap(p_obj,heap_verifier->gc)); + if(heap_verifier->is_before_gc){ + //if(!address_belongs_to_gc_heap(p_ref) && address_belongs_to_gc_heap(p_obj)){ + if(!address_belongs_to_gc_heap(p_obj, heap_verifier->gc)){ + printf("error!\n"); + return FALSE; + } + }else{ + if(heap_verifier->gc_verifier->is_before_fallback_collection){ + if(!address_belongs_to_gc_heap(p_obj, heap_verifier->gc)){ + printf("error!\n"); + assert(0); + return FALSE; + } + return 
TRUE; + } + assert(address_belongs_to_space(p_obj, mspace) || address_belongs_to_space(p_obj, lspace)); + if(!address_belongs_to_space(p_obj, mspace) && !address_belongs_to_space(p_obj, lspace)){ + printf("Error\n"); + return FALSE; + } + } + return TRUE; +} + + +Boolean verifier_parse_options(Heap_Verifier* heap_verifier, char* options) +{ + char* verifier_options = options; + char* option = NULL; + for (option = strtok(verifier_options,","); option; option = strtok(NULL,",")) { + string_to_upper(option); + if(!strcmp(option, "ROOTSET")) heap_verifier->need_verify_rootset = TRUE; + else if (!strcmp(option, "WRITEBARRIER")) heap_verifier->need_verify_writebarrier = TRUE; + else if (!strcmp(option, "ALLOCATION")) heap_verifier->need_verify_allocation= TRUE; + else if (!strcmp(option, "GC")) heap_verifier->need_verify_gc= TRUE; + else if(!strcmp(option, "DEFAULT")){ + heap_verifier->need_verify_rootset = TRUE; + heap_verifier->need_verify_writebarrier = TRUE; + heap_verifier->need_verify_gc= TRUE; + }else if(!strcmp(option, "ALL")){ + heap_verifier->need_verify_rootset = TRUE; + heap_verifier->need_verify_writebarrier = TRUE; + heap_verifier->need_verify_allocation= TRUE; + heap_verifier->need_verify_gc= TRUE; + }else{ + printf("Parse verify option error.\n"); + printf("Usage: -XDgc.verify=rootset,writebarrier,allocation,gc \n"); + printf("Usage: -XDgc.verify=default \n"); + printf("Usage: -XDgc.verify=all \n"); + return FALSE; + } + } + return TRUE; +} + + +void verifier_log_before_gc(Heap_Verifier* heap_verifier) +{ + Allocation_Verifier* alloc_verifier = heap_verifier->allocation_verifier; + WriteBarrier_Verifier* wb_verifier = heap_verifier->writebarrier_verifier; + RootSet_Verifier* rootset_verifier = heap_verifier->rootset_verifier; + + printf("before gc:\n"); + + if(heap_verifier->need_verify_allocation){ + printf(" Allocation Verify: %s , ", alloc_verifier->is_verification_passed?"passed":"failed"); + printf(" new nos: %d : %d , ",
alloc_verifier->num_nos_newobjs, alloc_verifier->num_nos_objs); + printf(" new los: %d : %d \n", alloc_verifier->num_los_newobjs, + alloc_verifier->num_los_objs-alloc_verifier->last_num_los_objs); + } + + if(heap_verifier->need_verify_rootset){ + printf(" RootSet Verify: %s , ", rootset_verifier->is_verification_passed?"passed":"failed"); + printf(" num: %d , ", rootset_verifier->num_slots_in_rootset); + printf(" error num: %d \n", rootset_verifier->num_error_slots); + + } + + if(heap_verifier->need_verify_writebarrier){ + printf(" WriteBarrier Verify: %s , ", wb_verifier->is_verification_passed?"passed":"failed"); + printf(" num cached: %d , ", wb_verifier->num_ref_wb_in_remset); + printf(" num real : %d \n", wb_verifier->num_ref_wb_after_scanning); + } + printf("===============================================\n"); + +} + +void verifier_log_start() +{ + printf("\n===============================================\n"); +} + +void verifier_log_after_gc(Heap_Verifier* heap_verifier) +{ + GC_Verifier* gc_verifier = heap_verifier->gc_verifier; + printf("after gc:\n"); + if(heap_verifier->need_verify_gc){ + printf(" GC Verify: %s \n", gc_verifier->is_verification_passed?"passed":"failed"); + printf(" live obj : NUM before %d , after %d \n", gc_verifier->num_live_objects_before_gc, gc_verifier->num_live_objects_after_gc); + printf(" live obj : SIZE before %d MB, after %d MB \n", gc_verifier->size_live_objects_before_gc>>20, gc_verifier->size_live_objects_after_gc>>20); + printf(" resurrect obj: NUM before %d , after %d \n", gc_verifier->num_resurrect_objects_before_gc, gc_verifier->num_resurrect_objects_after_gc); + printf(" resurrect obj : SIZE before %d MB, after %d MB\n", gc_verifier->size_resurrect_objects_before_gc>>20, gc_verifier->size_resurrect_objects_after_gc>>20); + } + printf("===============================================\n"); + +} + Index: src/verify/verifier_common.h =================================================================== --- 
src/verify/verifier_common.h (revision 0) +++ src/verify/verifier_common.h (revision 0) @@ -0,0 +1,156 @@ +#ifndef _VERIFIER_COMMON_H_ +#define _VERIFIER_COMMON_H_ +#include "verify_live_heap.h" +#include "verifier_metadata.h" +#include "../common/gc_common.h" +#include "../common/gc_space.h" +#include "../gen/gen.h" + +struct Heap_Verifier; +struct Allocation_Verifier; +struct GC_Verifier; +struct WriteBarrier_Verifier; +struct RootSet_Verifier; + +typedef void (*Object_Scanner)(struct Heap_Verifier*); + +typedef struct Heap_Verifier{ + GC* gc; + GC_Verifier* gc_verifier; + WriteBarrier_Verifier* writebarrier_verifier; + RootSet_Verifier* rootset_verifier; + Allocation_Verifier* allocation_verifier; + Heap_Verifier_Metadata* heap_verifier_metadata; + + Boolean is_before_gc; + Boolean gc_is_gen_mode; + Boolean need_verify_gc; + Boolean need_verify_allocation; + Boolean need_verify_rootset; + Boolean need_verify_writebarrier; + + Object_Scanner all_obj_scanner; + Object_Scanner live_obj_scanner; +} Heap_Verifier; + + + +typedef Boolean (*Object_Comparator)(POINTER_SIZE_INT*, POINTER_SIZE_INT*); + +extern Heap_Verifier* get_heap_verifier(); + +extern void verifier_metadata_initialize(Heap_Verifier* heap_verifier); +extern void verifier_init_object_scanner(Heap_Verifier* heap_verifier); + +extern void verifier_scan_los_objects(Space* space, Heap_Verifier* heap_verifier); + + +Boolean verifier_copy_rootsets(GC* gc, Heap_Verifier* heap_verifier); +Boolean verifier_compare_objs_pools(Pool* objs_pool_before_gc, Pool* objs_pool_after_gc, Pool* free_pool ,Object_Comparator object_comparator); +Boolean verifier_parse_options(Heap_Verifier* heap_verifier, char* options); +void verifier_log_before_gc(Heap_Verifier* heap_verifier); +void verifier_log_after_gc(Heap_Verifier* heap_verifier); +void verifier_log_start(); +Boolean verify_rootset_slot(REF* p_ref, Heap_Verifier* heap_verifier); + + + + + +inline void verifier_set_gen_mode(Heap_Verifier* heap_verifier) +{ 
heap_verifier->gc_is_gen_mode = gc_is_gen_mode(); } + +inline Boolean need_verify_gc_effect(Heap_Verifier* heap_verifier) +{ return heap_verifier->need_verify_gc && !heap_verifier->is_before_gc; } + +inline Boolean need_scan_live_objs(Heap_Verifier* heap_verifier) +{ + if(heap_verifier->need_verify_gc) return TRUE; + else if(heap_verifier->need_verify_writebarrier && !heap_verifier->is_before_gc) return TRUE; + else return FALSE; +} + +inline Boolean need_verify_mutator_effect(Heap_Verifier* heap_verifier) +{ + if(!heap_verifier->is_before_gc) return FALSE; + return heap_verifier->need_verify_allocation || heap_verifier->need_verify_rootset + || heap_verifier->need_verify_writebarrier; +} + +inline Boolean need_scan_all_objs(Heap_Verifier* heap_verifier) +{ + if(!heap_verifier->is_before_gc) return FALSE; + return heap_verifier->need_verify_allocation || heap_verifier->need_verify_writebarrier; + +} + +inline void verify_live_object_slot(REF* p_ref, Heap_Verifier* heap_verifier) +{ + assert(p_ref); + assert(address_belongs_to_gc_heap(read_slot(p_ref), (GC*)heap_verifier->gc)); + Partial_Reveal_Object* UNUSED p_obj = read_slot(p_ref); + assert(p_obj); + assert(obj_get_vt(p_obj)); + assert(!address_belongs_to_gc_heap(uncompress_vt(obj_get_vt(p_obj)), (GC*)heap_verifier->gc)); +} + +inline void verify_all_object_slot(REF* p_ref, Heap_Verifier* heap_verifier) +{ + assert(p_ref); + assert(address_belongs_to_gc_heap(read_slot(p_ref), (GC*)heap_verifier->gc)); +} + +inline void verify_object_header(Partial_Reveal_Object* p_obj, Heap_Verifier* heap_verifier) +{ + assert(p_obj); + assert(address_belongs_to_gc_heap(p_obj, (GC*)heap_verifier->gc)); + + assert(obj_get_vt(p_obj)); + assert(!address_belongs_to_gc_heap(uncompress_vt(obj_get_vt(p_obj)), (GC*)heap_verifier->gc)); +} + + +inline void verifier_clear_rootsets(Heap_Verifier* heap_verifier) +{ + Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata; + 
verifier_clear_pool(verifier_metadata->root_set_pool, verifier_metadata->free_set_pool, FALSE); +} + +#define VERIFY_WB_MARK_BIT 0x01 + +inline void wb_mark_in_slot(REF* p_ref){ + REF ref = *p_ref; + *p_ref = (REF)((POINTER_SIZE_INT)ref | VERIFY_WB_MARK_BIT); +} + +inline void wb_unmark_in_slot(REF* p_ref){ + REF ref = *p_ref; + *p_ref = (REF)((POINTER_SIZE_INT)ref & ~VERIFY_WB_MARK_BIT); +} + +inline Boolean wb_is_marked_in_slot(REF* p_ref){ + REF ref = *p_ref; + return (POINTER_SIZE_INT)ref & VERIFY_WB_MARK_BIT; +} + +inline REF verifier_get_object_slot(REF* p_ref) +{ + REF ref = *p_ref; + return (REF)((POINTER_SIZE_INT)ref | VERIFY_WB_MARK_BIT); +} + + +#define UNREACHABLE_OBJ_MARK_IN_VT 0x02 + +inline void tag_unreachable_obj(Partial_Reveal_Object* p_obj) +{ + Partial_Reveal_VTable* vt = uncompress_vt(obj_get_vt_raw(p_obj)); + obj_set_vt(p_obj, compress_vt((Partial_Reveal_VTable*)((POINTER_SIZE_INT)vt | UNREACHABLE_OBJ_MARK_IN_VT))); +} + +inline Boolean is_unreachable_obj(Partial_Reveal_Object* p_obj) +{ + return ((POINTER_SIZE_INT)obj_get_vt_raw(p_obj) & UNREACHABLE_OBJ_MARK_IN_VT); +} + +#endif Index: src/verify/verifier_metadata.cpp =================================================================== --- src/verify/verifier_metadata.cpp (revision 0) +++ src/verify/verifier_metadata.cpp (revision 0) @@ -0,0 +1,150 @@ +#include "verifier_metadata.h" +#include "verifier_common.h" + +#define GC_VERIFIER_METADATA_SIZE_BYTES (4*MB) +#define GC_VERIFIER_METADATA_EXTEND_SIZE_BYTES (4*MB) + +#define GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES VECTOR_BLOCK_DATA_SIZE_BYTES + +Heap_Verifier_Metadata* verifier_metadata; + +void verifier_metadata_initialize(Heap_Verifier* heap_verifier) +{ + Heap_Verifier_Metadata* heap_verifier_metadata = (Heap_Verifier_Metadata* )STD_MALLOC(sizeof(Heap_Verifier_Metadata)); + assert(heap_verifier_metadata); + memset(heap_verifier_metadata, 0, sizeof(Heap_Verifier_Metadata)); + + unsigned int seg_size = GC_VERIFIER_METADATA_SIZE_BYTES + 
GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES; + void* metadata = STD_MALLOC(seg_size); + assert(metadata); + memset(metadata, 0, seg_size); + heap_verifier_metadata->segments[0] = metadata; + metadata = (void*)round_up_to_size((POINTER_SIZE_INT)metadata, GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES); + heap_verifier_metadata->num_alloc_segs = 1; + + unsigned int i = 0; + unsigned int num_blocks = GC_VERIFIER_METADATA_SIZE_BYTES/GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES; + for(i=0; i>2; + heap_verifier_metadata->free_task_pool = sync_pool_create(); + for(i=0; ifree_task_pool, (void*)block); + } + heap_verifier_metadata->mark_task_pool = sync_pool_create(); + + unsigned num_sets = num_blocks>>2; + heap_verifier_metadata->free_set_pool = sync_pool_create(); + for(; i<(num_sets + num_tasks); i++){ + POINTER_SIZE_INT block = (POINTER_SIZE_INT)metadata + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES; + pool_put_entry(heap_verifier_metadata->free_set_pool, (void*)block); + } + heap_verifier_metadata->root_set_pool = sync_pool_create(); + + heap_verifier_metadata->free_objects_pool = sync_pool_create(); + for(; ifree_objects_pool, (void*)block); + } + heap_verifier_metadata->objects_pool_before_gc = sync_pool_create(); + heap_verifier_metadata->objects_pool_after_gc = sync_pool_create(); + heap_verifier_metadata->resurrect_objects_pool_before_gc = sync_pool_create(); + heap_verifier_metadata->resurrect_objects_pool_after_gc = sync_pool_create(); + heap_verifier_metadata->new_objects_pool = sync_pool_create(); + + verifier_metadata = heap_verifier_metadata; + heap_verifier->heap_verifier_metadata = heap_verifier_metadata; + return; +} + +void gc_verifier_metadata_destruct(Heap_Verifier* heap_verifier) +{ + Heap_Verifier_Metadata* metadata = heap_verifier->heap_verifier_metadata; + + sync_pool_destruct(metadata->free_task_pool); + sync_pool_destruct(metadata->mark_task_pool); + + sync_pool_destruct(metadata->free_set_pool); + sync_pool_destruct(metadata->root_set_pool); + + 
sync_pool_destruct(metadata->free_objects_pool); + sync_pool_destruct(metadata->objects_pool_before_gc); + sync_pool_destruct(metadata->objects_pool_after_gc); + sync_pool_destruct(metadata->resurrect_objects_pool_before_gc); + sync_pool_destruct(metadata->resurrect_objects_pool_after_gc); + sync_pool_destruct(metadata->new_objects_pool); + + for(unsigned int i=0; inum_alloc_segs; i++){ + assert(metadata->segments[i]); + STD_FREE(metadata->segments[i]); + } + STD_FREE( heap_verifier->heap_verifier_metadata); + heap_verifier->heap_verifier_metadata = NULL; +} + +Vector_Block* gc_verifier_metadata_extend(Pool* pool, Boolean is_set_pool) +{ + /*add a slot to pool point back to verifier_metadata, then we do not need the global var verifer_metadata*/ + lock(verifier_metadata->alloc_lock); + Vector_Block* block = pool_get_entry(pool); + if( block ){ + unlock(verifier_metadata->alloc_lock); + return block; + } + + unsigned int num_alloced = verifier_metadata->num_alloc_segs; + if(num_alloced == METADATA_SEGMENT_NUM){ + printf("Run out GC metadata, please give it more segments!\n"); + exit(0); + } + unsigned int seg_size = GC_VERIFIER_METADATA_EXTEND_SIZE_BYTES + GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES; + void *new_segment = STD_MALLOC(seg_size); + assert(new_segment); + memset(new_segment, 0, seg_size); + verifier_metadata->segments[num_alloced] = new_segment; + new_segment = (void*)round_up_to_size((POINTER_SIZE_INT)new_segment, GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES); + verifier_metadata->num_alloc_segs = num_alloced + 1; + + unsigned int num_blocks = GC_VERIFIER_METADATA_EXTEND_SIZE_BYTES/GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES; + + unsigned int i=0; + for(i=0; ialloc_lock); + printf("extend metadata\n"); + return block; +} + +void verifier_clear_pool(Pool* working_pool, Pool* free_pool, Boolean is_vector_stack) +{ + Vector_Block* working_block = pool_get_entry(working_pool); + while(working_block){ + if(is_vector_stack) vector_stack_clear(working_block); + else 
vector_block_clear(working_block); + pool_put_entry(free_pool, working_block); + working_block = pool_get_entry(working_pool); + } +} Index: src/verify/verifier_metadata.h =================================================================== --- src/verify/verifier_metadata.h (revision 0) +++ src/verify/verifier_metadata.h (revision 0) @@ -0,0 +1,102 @@ +#ifndef _VERIFY_METADATA_H_ +#define _VERIFY_METADATA_H_ + +#include "../common/gc_common.h" +#include "../utils/vector_block.h" +#include "../utils/sync_pool.h" + +#define METADATA_SEGMENT_NUM 128 +typedef volatile unsigned int SpinLock; +typedef Vector_Block* Vector_Block_Ptr; + +typedef struct Heap_Verifier_Metadata{ + void* segments[METADATA_SEGMENT_NUM]; + unsigned int num_alloc_segs; + SpinLock alloc_lock; + + Pool* free_set_pool; + Pool* free_task_pool; + + Pool* root_set_pool; + Pool* mark_task_pool; + + Pool* free_objects_pool; + + Pool* objects_pool_before_gc; + Pool* objects_pool_after_gc; + + Pool* resurrect_objects_pool_before_gc; + Pool* resurrect_objects_pool_after_gc; + + Pool* new_objects_pool; +} Heap_Verifier_Metadata; + +extern Heap_Verifier_Metadata* verifier_metadata; + +struct Heap_Verifier; +void gc_verifier_metadata_initialize(Heap_Verifier* heap_verifier); +void gc_verifier_metadata_destruct(Heap_Verifier* heap_verifier); +Vector_Block* gc_verifier_metadata_extend(Pool* pool, Boolean is_set_pool); + +void verifier_clear_pool(Pool* working_pool, Pool* free_pool, Boolean is_vector_stack); + +inline Vector_Block* verifier_free_set_pool_get_entry(Pool* free_pool) +{ + assert(free_pool); + Vector_Block* block = pool_get_entry(free_pool); + + while(!block) + block = gc_verifier_metadata_extend(free_pool, TRUE); + + assert(vector_block_is_empty(block)); + return block; +} + +inline Vector_Block* verifier_free_task_pool_get_entry(Pool* free_pool) +{ + assert(free_pool); + Vector_Block* block = pool_get_entry(free_pool); + + while(!block) + block = gc_verifier_metadata_extend(free_pool, FALSE); + + 
assert(vector_stack_is_empty(block)); + return block; +} + + + +inline void verifier_tracestack_push(void* p_task, Vector_Block_Ptr& trace_task) +{ + vector_stack_push(trace_task, (POINTER_SIZE_INT)p_task); + + if( !vector_stack_is_full(trace_task)) return; + + pool_put_entry(verifier_metadata->mark_task_pool, trace_task); + trace_task = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool); + assert(trace_task); +} + +inline void verifier_rootset_push(void* p_task, Vector_Block_Ptr& root_set) +{ + vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_task); + + if( !vector_block_is_full(root_set)) return; + + pool_put_entry(verifier_metadata->root_set_pool, root_set); + root_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool); + assert(root_set); +} + +inline void verifier_set_push(void* p_data, Vector_Block_Ptr& set_block, Pool* pool) +{ + vector_block_add_entry(set_block, (POINTER_SIZE_INT)p_data); + + if( !vector_block_is_full(set_block) ) return; + + pool_put_entry(pool, set_block); + set_block = verifier_free_set_pool_get_entry(verifier_metadata->free_objects_pool); + assert(set_block); +} + +#endif //_VERIFY_METADATA_H_ Index: src/verify/verifier_scanner.cpp =================================================================== --- src/verify/verifier_scanner.cpp (revision 0) +++ src/verify/verifier_scanner.cpp (revision 0) @@ -0,0 +1,384 @@ +#include "verify_live_heap.h" +#include "verifier_common.h" +#include "verify_gc_effect.h" +#include "verify_mutator_effect.h" +#include "../finalizer_weakref/finalizer_weakref.h" + + /*<--------live objects scanner begin-------->*/ +static FORCE_INLINE void scan_slot(Heap_Verifier* heap_verifier, REF*p_ref) +{ + GC_Verifier* gc_verifier = heap_verifier->gc_verifier; + Partial_Reveal_Object *p_obj = read_slot(p_ref); + if(p_obj == NULL) return; + + verify_live_object_slot(p_ref, heap_verifier); + verifier_tracestack_push(p_obj, gc_verifier->trace_stack); + return; +} + +static 
FORCE_INLINE void scan_object(Heap_Verifier* heap_verifier, Partial_Reveal_Object *p_obj) +{ + GC_Verifier* gc_verifier = heap_verifier->gc_verifier; + if(gc_verifier->is_before_fallback_collection) { + if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){ + assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj))); + p_obj = obj_get_fw_in_oi(p_obj); + assert(p_obj); + } + } + if(!obj_mark_in_vt(p_obj)) return; + + verify_object_header(p_obj, heap_verifier); + verifier_update_verify_info(p_obj, heap_verifier); + + /*FIXME: */ + if (!object_has_ref_field(p_obj)) return; + + REF* p_ref; + + if (object_is_array(p_obj)) { + + Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj; + unsigned int array_length = array->array_len; + p_ref = (REF*)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); + + for (unsigned int i = 0; i < array_length; i++) { + scan_slot(heap_verifier, p_ref+i); + } + + }else{ + + unsigned int num_refs = object_ref_field_num(p_obj); + int* ref_iterator = object_ref_iterator_init(p_obj); + + for(unsigned int i=0; igc_verifier) == MINOR_COLLECTION){ + p_ref = obj_get_referent_field(p_obj); + scan_slot(heap_verifier, p_ref); + } +#endif + } + return; +} + + +static void trace_object(Heap_Verifier* heap_verifier, Partial_Reveal_Object* p_obj) +{ + scan_object(heap_verifier, p_obj); + GC_Verifier* gc_verifier = heap_verifier->gc_verifier; + Vector_Block* trace_stack = (Vector_Block*)gc_verifier->trace_stack; + while( !vector_stack_is_empty(trace_stack)){ + p_obj = (Partial_Reveal_Object *)vector_stack_pop(trace_stack); + scan_object(heap_verifier, p_obj); + trace_stack = (Vector_Block*)gc_verifier->trace_stack; + } + return; +} + +void verifier_trace_rootsets(Heap_Verifier* heap_verifier, Pool* root_set_pool) +{ + Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata; + GC_Verifier* gc_verifier = heap_verifier->gc_verifier; + gc_verifier->objects_set = 
verifier_free_task_pool_get_entry(verifier_metadata->free_objects_pool); + gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool); + pool_iterator_init(root_set_pool); + Vector_Block* root_set = pool_iterator_next(root_set_pool); + + /* first step: copy all root objects to trace tasks. */ + while(root_set){ + POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); + while(!vector_block_iterator_end(root_set,iter)){ + REF* p_ref = (REF* )*iter; + iter = vector_block_iterator_advance(root_set,iter); + + if(!heap_verifier->need_verify_rootset || !heap_verifier->is_before_gc){ + if(!verify_rootset_slot(p_ref, heap_verifier)){ + gc_verifier->is_verification_passed = FALSE; + assert(0); + continue; + } + } + + Partial_Reveal_Object* p_obj = read_slot(p_ref); + assert(p_obj != NULL); + + verifier_tracestack_push(p_obj, gc_verifier->trace_stack); + } + root_set = pool_iterator_next(root_set_pool); + } + /* put back the last trace_stack task */ + pool_put_entry(verifier_metadata->mark_task_pool, gc_verifier->trace_stack); + + /* second step: iterate over the trace tasks and forward objects */ + gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool); + + Vector_Block* trace_task = pool_get_entry(verifier_metadata->mark_task_pool); + + while(trace_task){ + POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task); + while(!vector_block_iterator_end(trace_task,iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object* )*iter; + iter = vector_block_iterator_advance(trace_task,iter); + trace_object(heap_verifier, p_obj); + } + vector_stack_clear(trace_task); + pool_put_entry(verifier_metadata->free_task_pool, trace_task); + trace_task = pool_get_entry(verifier_metadata->mark_task_pool); + } + vector_stack_clear(gc_verifier->trace_stack); + pool_put_entry(verifier_metadata->free_task_pool, gc_verifier->trace_stack); + gc_verifier->trace_stack = NULL; + +} + + +void 
verifier_trace_objsets(Heap_Verifier* heap_verifier, Pool* obj_set_pool) +{ + Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata; + GC_Verifier* gc_verifier = heap_verifier->gc_verifier; + gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool); + pool_iterator_init(obj_set_pool); + Vector_Block* obj_set = pool_iterator_next(obj_set_pool); + + /* first step: copy all root objects to trace tasks. */ + while(obj_set){ + POINTER_SIZE_INT* iter = vector_block_iterator_init(obj_set); + while(!vector_block_iterator_end(obj_set,iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object* )*iter; + iter = vector_block_iterator_advance(obj_set,iter); + /*p_obj can be NULL , When GC happened, the obj in Finalize objs list will be clear.*/ + //assert(p_obj != NULL); + if(p_obj == NULL) continue; + verifier_tracestack_push(p_obj, gc_verifier->trace_stack); + } + obj_set = pool_iterator_next(obj_set_pool); + } + /* put back the last trace_stack task */ + pool_put_entry(verifier_metadata->mark_task_pool, gc_verifier->trace_stack); + + /* second step: iterate over the trace tasks and forward objects */ + gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool); + + Vector_Block* trace_task = pool_get_entry(verifier_metadata->mark_task_pool); + + while(trace_task){ + POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task); + while(!vector_block_iterator_end(trace_task,iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object* )*iter; + iter = vector_block_iterator_advance(trace_task,iter); + trace_object(heap_verifier, p_obj); + } + vector_stack_clear(trace_task); + pool_put_entry(verifier_metadata->free_task_pool, trace_task); + trace_task = pool_get_entry(verifier_metadata->mark_task_pool); + } + vector_stack_clear(gc_verifier->trace_stack); + pool_put_entry(verifier_metadata->free_task_pool, gc_verifier->trace_stack); + gc_verifier->trace_stack = NULL; 
+ +} + +void verifier_scan_resurrect_objects(Heap_Verifier* heap_verifier) +{ + GC_Gen* gc = (GC_Gen*)heap_verifier->gc; + verifier_update_info_before_resurrect(heap_verifier); +#ifndef BUILD_IN_REFERENT + heap_verifier->gc_verifier->is_tracing_resurrect_obj = TRUE; + if(heap_verifier->is_before_gc){ + verifier_trace_objsets(heap_verifier, gc->finref_metadata->obj_with_fin_pool); + }else{ + verify_live_finalizable_obj(heap_verifier, gc->finref_metadata->obj_with_fin_pool); + verifier_trace_objsets(heap_verifier, gc->finref_metadata->finalizable_obj_pool); + } + heap_verifier->gc_verifier->is_tracing_resurrect_obj = FALSE; + verifier_update_info_after_resurrect(heap_verifier); +#endif +} + +void verifier_scan_unreachable_objects(Heap_Verifier* heap_verifier); + +void verifier_scan_live_objects(Heap_Verifier* heap_verifier) +{ + Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata; + verifier_trace_rootsets(heap_verifier, verifier_metadata->root_set_pool); + verifier_scan_resurrect_objects(heap_verifier); + verifier_scan_unreachable_objects(heap_verifier); + verifier_clear_objs_mark_bit(heap_verifier); +} +/*<--------live objects scanner end--------->*/ + +/*<--------all (live and dead) objects scanner begin-------->*/ +static FORCE_INLINE void verifier_scan_object_slots(Partial_Reveal_Object *p_obj, Heap_Verifier* heap_verifier) +{ + verifier_allocation_update_info(p_obj, heap_verifier); + verify_object_header(p_obj, heap_verifier); + if (!object_has_ref_field(p_obj)) return; + REF* p_ref; + + if (object_is_array(p_obj)){ + Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj; + unsigned int array_length = array->array_len; + p_ref = (REF*)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); + + for (unsigned int i = 0; i < array_length; i++) { + if(!is_unreachable_obj(p_obj)){ + verify_write_barrier(p_ref+i, heap_verifier); + if(*(p_ref+i) != COMPRESSED_NULL) verify_live_object_slot(p_ref+i, heap_verifier); + }else{ 
+        if(*(p_ref+i) != COMPRESSED_NULL) verify_all_object_slot(p_ref+i, heap_verifier);
+      }
+    }
+  }else{
+    unsigned int num_refs = object_ref_field_num(p_obj);
+    int* ref_iterator = object_ref_iterator_init(p_obj);
+
+    for(unsigned int i=0; i<num_refs; i++){
+      /* NOTE(extraction): this loop body and the following function header were
+         reconstructed from the parallel array branch above and from
+         verifier_scan_mos_unreachable_objects; the original patch text was lost
+         around the '<' character -- confirm against the committed revision. */
+      p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);
+      if(!is_unreachable_obj(p_obj)){
+        verify_write_barrier(p_ref, heap_verifier);
+        if(*p_ref != COMPRESSED_NULL) verify_live_object_slot(p_ref, heap_verifier);
+      }else{
+        if(*p_ref != COMPRESSED_NULL) verify_all_object_slot(p_ref, heap_verifier);
+      }
+    }
+  }
+}
+
+void verifier_scan_nos_mos_objects(Space* space, Heap_Verifier* heap_verifier)
+{
+  Block* blocks = (Block*)space->heap_start;
+  unsigned int start_idx = ((Blocked_Space*)space)->first_block_idx;
+  unsigned int i;
+  unsigned int num_block = ((Blocked_Space*)space)->free_block_idx - ((Blocked_Space*)space)->first_block_idx;
+  for( i=0; i < num_block; i++ ){
+    Block_Header* block = (Block_Header*)&(blocks[i]);
+    Partial_Reveal_Object* cur_obj = (Partial_Reveal_Object*) block->base;
+    while( cur_obj < block->free ){
+      verify_object_header(cur_obj, heap_verifier);
+      verifier_scan_object_slots(cur_obj, heap_verifier);
+      cur_obj = obj_end(cur_obj);
+    }
+  }
+}
+
+inline Partial_Reveal_Object* lspace_get_next_object( Space* lspace, POINTER_SIZE_INT* & next_area_start){
+  POINTER_SIZE_INT* ret_obj = NULL;
+
+  while(!*next_area_start && (POINTER_SIZE_INT)next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
+    next_area_start =(POINTER_SIZE_INT*)((POINTER_SIZE_INT)next_area_start + ((Free_Area*)next_area_start)->size);
+  }
+  if((POINTER_SIZE_INT)next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
+    ret_obj = next_area_start;
+    unsigned int obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start));
+    assert(obj_size);
+    next_area_start = (POINTER_SIZE_INT*)((POINTER_SIZE_INT)next_area_start + obj_size);
+    return (Partial_Reveal_Object*)ret_obj;
+  }else{
+    return NULL;
+  }
+}
+
+inline Partial_Reveal_Object* lspace_get_first_object( Space* lspace, POINTER_SIZE_INT* & next_area_start){
+  return lspace_get_next_object(lspace, next_area_start);
+}
+
+void verifier_scan_los_objects(Space* lspace, Heap_Verifier* heap_verifier)
+{
+  POINTER_SIZE_INT* interator = (POINTER_SIZE_INT*)lspace->heap_start;
+  Partial_Reveal_Object* p_los_obj = lspace_get_first_object(lspace, interator);
+
+  while(p_los_obj){
+    
verify_object_header(p_los_obj, heap_verifier); + verifier_scan_object_slots(p_los_obj, heap_verifier); + p_los_obj = lspace_get_next_object(lspace, interator); + } +} + +void verifier_scan_all_objects(Heap_Verifier* heap_verifier) +{ + GC_Gen* gc = (GC_Gen*)heap_verifier->gc; + Space* fspace = gc_get_nos(gc); + Space* mspace = gc_get_mos(gc); + Space* lspace = gc_get_los(gc); + + verifier_scan_nos_mos_objects(fspace, heap_verifier); + verifier_scan_nos_mos_objects(mspace, heap_verifier); + verifier_scan_los_objects(lspace, heap_verifier); +} +/*<--------all objects scanner end--------->*/ + +/*<--------unreachable objects scanner begin------>*/ + +void verifier_scan_mos_unreachable_objects(Space* space, Heap_Verifier* heap_verifier) +{ + Block* blocks = (Block*)space->heap_start; + unsigned int start_idx = ((Blocked_Space*)space)->first_block_idx; + unsigned int i; + unsigned int num_block = ((Blocked_Space*)space)->free_block_idx - ((Blocked_Space*)space)->first_block_idx; + for( i=0; i < num_block; i++ ){ + Block_Header* block = (Block_Header*)&(blocks[i]); + Partial_Reveal_Object* cur_obj = (Partial_Reveal_Object*) block->base; + while( cur_obj < block->free ){ + verify_object_header(cur_obj, heap_verifier); + if(!obj_is_marked_in_vt(cur_obj)) tag_unreachable_obj(cur_obj); + cur_obj = obj_end(cur_obj); + } + } +} + +void verifier_scan_los_unreachable_objects(Space* lspace, Heap_Verifier* heap_verifier) +{ + POINTER_SIZE_INT* interator = (POINTER_SIZE_INT*)lspace->heap_start; + Partial_Reveal_Object* p_los_obj = lspace_get_first_object(lspace, interator); + + while(p_los_obj){ + verify_object_header(p_los_obj, heap_verifier); + if(!obj_is_marked_in_vt(p_los_obj)) tag_unreachable_obj(p_los_obj); + p_los_obj = lspace_get_next_object(lspace, interator); + } +} + +void verifier_scan_unreachable_objects(Heap_Verifier* heap_verifier) +{ + if(heap_verifier->is_before_gc) return; + GC_Gen* gc = (GC_Gen*)heap_verifier->gc; + Space* mspace = gc_get_mos(gc); + Space* 
lspace = gc_get_los(gc); + + verifier_scan_mos_unreachable_objects(mspace, heap_verifier); + verifier_scan_los_unreachable_objects(lspace, heap_verifier); + +} +/*<--------unreachable objects scanner end------>*/ + +void verifier_init_object_scanner(Heap_Verifier* heap_verifier) +{ + heap_verifier->live_obj_scanner = verifier_scan_live_objects; + heap_verifier->all_obj_scanner = verifier_scan_all_objects; +} + + Index: src/verify/verify_gc_effect.cpp =================================================================== --- src/verify/verify_gc_effect.cpp (revision 0) +++ src/verify/verify_gc_effect.cpp (revision 0) @@ -0,0 +1,311 @@ +#include "verifier_common.h" +#include "verify_gc_effect.h" + +void verifier_init_GC_verifier(Heap_Verifier* heap_verifier) +{ + GC_Verifier* gc_verifier = (GC_Verifier*)STD_MALLOC(sizeof(GC_Verifier)); + assert(gc_verifier); + memset(gc_verifier, 0, sizeof(GC_Verifier)); + + gc_verifier->trace_stack = gc_verifier->objects_set = gc_verifier->root_set = NULL; + gc_verifier->is_tracing_resurrect_obj = FALSE; + gc_verifier->num_live_objects_after_gc = gc_verifier->num_live_objects_before_gc = 0; + gc_verifier->size_live_objects_after_gc = gc_verifier->size_live_objects_before_gc = 0; + gc_verifier->num_resurrect_objects_after_gc = gc_verifier->num_resurrect_objects_before_gc = 0; + gc_verifier->size_resurrect_objects_after_gc = gc_verifier->size_resurrect_objects_before_gc = 0; + heap_verifier->gc_verifier = gc_verifier; +} +void verifier_destruct_GC_verifier(Heap_Verifier* heap_verifier) +{ + assert(!heap_verifier->gc_verifier ->trace_stack); + assert(!heap_verifier->gc_verifier ->objects_set ); + assert(!heap_verifier->gc_verifier ->root_set); + STD_FREE(heap_verifier->gc_verifier ); + heap_verifier->gc_verifier = NULL; +} + + +void verifier_clear_objsets(Heap_Verifier* heap_verifier) +{ + Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata; + verifier_clear_pool(verifier_metadata->objects_pool_before_gc, 
verifier_metadata->free_objects_pool, FALSE); + verifier_clear_pool(verifier_metadata->objects_pool_after_gc, verifier_metadata->free_objects_pool, FALSE); +#ifndef BUILD_IN_REFERENT + verifier_clear_pool(verifier_metadata->resurrect_objects_pool_before_gc, verifier_metadata->free_objects_pool, FALSE); + verifier_clear_pool(verifier_metadata->resurrect_objects_pool_after_gc, verifier_metadata->free_objects_pool, FALSE); +#endif +} + +void verify_gc_reset(Heap_Verifier* heap_verifier) +{ + GC_Verifier* gc_verifier = heap_verifier->gc_verifier; + + gc_verifier->trace_stack = gc_verifier->objects_set = gc_verifier->root_set = NULL; + gc_verifier->is_tracing_resurrect_obj = FALSE; + gc_verifier->num_live_objects_after_gc = gc_verifier->num_live_objects_before_gc = 0; + gc_verifier->size_live_objects_after_gc = gc_verifier->size_live_objects_before_gc = 0; +#ifndef BUILD_IN_REFERENT + gc_verifier->num_resurrect_objects_after_gc = gc_verifier->num_resurrect_objects_before_gc = 0; + gc_verifier->size_resurrect_objects_after_gc = gc_verifier->size_resurrect_objects_before_gc = 0; +#endif + + verifier_clear_rootsets(heap_verifier); + verifier_clear_objsets(heap_verifier); +} + +void verify_live_finalizable_obj(Heap_Verifier* heap_verifier, Pool* live_finalizable_objs_pool) +{ + pool_iterator_init(live_finalizable_objs_pool); + Vector_Block* live_fin_objs = pool_iterator_next(live_finalizable_objs_pool); + while(live_fin_objs){ + POINTER_SIZE_INT * iter = vector_block_iterator_init(live_fin_objs); + while(!vector_block_iterator_end(live_fin_objs, iter)){ + Partial_Reveal_Object* p_fin_obj = (Partial_Reveal_Object*) *iter; + iter = vector_block_iterator_advance(live_fin_objs, iter); + if(p_fin_obj==NULL) continue; + assert(obj_is_marked_in_vt(p_fin_obj)); + if(!obj_is_marked_in_vt(p_fin_obj)){ + printf("ERROR\n"); + } + } + live_fin_objs = pool_iterator_next(live_finalizable_objs_pool); + } +} + +void* verifier_copy_obj_information(Partial_Reveal_Object* p_obj) +{ + 
Live_Object_Inform* p_obj_information = (Live_Object_Inform* )STD_MALLOC(sizeof(Live_Object_Inform));
+  assert(p_obj_information);
+  p_obj_information->vt_raw = obj_get_vt_raw(p_obj);
+  p_obj_information->address = p_obj;
+  return (void*) p_obj_information;
+}
+
+static Boolean fspace_object_was_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace, Heap_Verifier* heap_verifier)
+{
+  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
+  assert(obj_belongs_to_space(p_obj, (Space*)fspace));
+  unsigned int forwarded_first_part;
+  if(gc_verifier->gc_collect_kind != MINOR_COLLECTION || !NOS_PARTIAL_FORWARD || heap_verifier->gc_is_gen_mode)
+    forwarded_first_part = true;
+  else
+    forwarded_first_part = forward_first_half^1;
+  /* forward_first_half is flipped after the collection, so the condition is reversed as well */
+  return forwarded_first_part? (p_obj < object_forwarding_boundary):(p_obj >= object_forwarding_boundary);
+}
+
+void verifier_update_info_before_resurrect(Heap_Verifier* heap_verifier)
+{
+  if(!heap_verifier->need_verify_gc) return;
+  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
+  Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
+
+  if(heap_verifier->is_before_gc){
+    pool_put_entry(verifier_metadata->objects_pool_before_gc, gc_verifier->objects_set);
+    gc_verifier->objects_set = verifier_free_set_pool_get_entry(verifier_metadata->free_objects_pool);
+    assert(gc_verifier->objects_set);
+    return;
+  }else{
+    pool_put_entry(verifier_metadata->objects_pool_after_gc, gc_verifier->objects_set);
+    gc_verifier->objects_set = verifier_free_set_pool_get_entry(verifier_metadata->free_objects_pool);
+    assert(gc_verifier->objects_set);
+    return;
+  }
+
+}
+
+void verifier_update_info_after_resurrect(Heap_Verifier* heap_verifier)
+{
+  if(!heap_verifier->need_verify_gc) return;
+  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
+  Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
+
+  
if(heap_verifier->is_before_gc){ + pool_put_entry(verifier_metadata->resurrect_objects_pool_before_gc, gc_verifier->objects_set); + gc_verifier->objects_set = NULL; + assert(!gc_verifier->objects_set); + return; + }else{ + pool_put_entry(verifier_metadata->resurrect_objects_pool_after_gc, gc_verifier->objects_set); + gc_verifier->objects_set = NULL; + assert(!gc_verifier->objects_set); + return; + } + +} + + +void verifier_update_verify_info(Partial_Reveal_Object* p_obj, Heap_Verifier* heap_verifier) +{ + if(!heap_verifier->need_verify_gc) return; + Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata; + GC_Verifier* gc_verifier = heap_verifier->gc_verifier; + + GC_Gen* gc = (GC_Gen*)heap_verifier->gc; + Space* mspace = gc_get_mos(gc); + Space* nspace = gc_get_nos(gc); + Space* lspace = gc_get_los(gc); + + if(!gc_verifier->is_before_fallback_collection && gc_verifier->gc_collect_kind == MINOR_COLLECTION){ + if(!heap_verifier->is_before_gc){ + assert(!obj_belongs_to_space(p_obj, nspace) ||!fspace_object_was_forwarded(p_obj, (Fspace*)nspace, heap_verifier)); + if(obj_belongs_to_space(p_obj, nspace) && fspace_object_was_forwarded(p_obj, (Fspace*)nspace, heap_verifier)){ + gc_verifier->is_verification_passed = FALSE; + } + } + }else if(!gc_verifier->is_before_fallback_collection){ + if(!heap_verifier->is_before_gc){ + assert(!obj_belongs_to_space(p_obj, nspace)); + if(obj_belongs_to_space(p_obj, nspace)){ + gc_verifier->is_verification_passed = FALSE; + } + } + } + /*store the object information*/ + void* p_obj_information = verifier_copy_obj_information(p_obj); + +#ifndef BUILD_IN_REFERENT + if(!gc_verifier->is_tracing_resurrect_obj){ +#endif + /*size and number*/ + if(heap_verifier->is_before_gc){ + verifier_set_push(p_obj_information, gc_verifier->objects_set, verifier_metadata->objects_pool_before_gc); + gc_verifier->num_live_objects_before_gc ++; + gc_verifier->size_live_objects_before_gc += vm_object_size(p_obj); + }else{ + 
verifier_set_push(p_obj_information, gc_verifier->objects_set, verifier_metadata->objects_pool_after_gc); + gc_verifier->num_live_objects_after_gc ++; + gc_verifier->size_live_objects_after_gc += vm_object_size(p_obj); + } + return; + +#ifndef BUILD_IN_REFERENT + }else{ + + if(heap_verifier->is_before_gc){ + verifier_set_push(p_obj_information, gc_verifier->objects_set, verifier_metadata->resurrect_objects_pool_before_gc); + gc_verifier->num_resurrect_objects_before_gc ++; + gc_verifier->size_resurrect_objects_before_gc += vm_object_size(p_obj); + }else{ + verifier_set_push(p_obj_information, gc_verifier->objects_set, verifier_metadata->resurrect_objects_pool_after_gc); + gc_verifier->num_resurrect_objects_after_gc ++; + gc_verifier->size_resurrect_objects_after_gc += vm_object_size(p_obj); + } + return; + + } +#endif + +} + +Boolean compare_live_obj_inform(POINTER_SIZE_INT* obj_container1,POINTER_SIZE_INT* obj_container2) +{ + Live_Object_Inform* obj_inform_1 = (Live_Object_Inform*)*obj_container1; + Live_Object_Inform* obj_inform_2 = (Live_Object_Inform*)*obj_container2; + if(((POINTER_SIZE_INT)obj_inform_1->vt_raw) == ((POINTER_SIZE_INT)obj_inform_2->vt_raw)){ + /*FIXME: erase live object information in compare_function. 
*/ + STD_FREE(obj_inform_1); + STD_FREE(obj_inform_2); + return TRUE; + }else{ + STD_FREE(obj_inform_1); + STD_FREE(obj_inform_2); + return FALSE; + } +} + + +void verify_gc_effect(Heap_Verifier* heap_verifier) +{ + GC_Verifier* gc_verifier = heap_verifier->gc_verifier; + + if(gc_verifier->num_live_objects_before_gc != gc_verifier->num_live_objects_after_gc){ + gc_verifier->is_verification_passed = FALSE; + printf("ERROR"); + } + + if(gc_verifier->size_live_objects_before_gc != gc_verifier->size_live_objects_after_gc){ + printf("ERROR"); + gc_verifier->is_verification_passed = FALSE; + } + +#ifndef BUILD_IN_REFERENT + if(gc_verifier->num_resurrect_objects_before_gc != gc_verifier->num_resurrect_objects_after_gc){ + printf("ERROR"); + gc_verifier->is_verification_passed = FALSE; + } + + if(gc_verifier->size_resurrect_objects_before_gc != gc_verifier->size_resurrect_objects_after_gc){ + printf("ERROR"); + gc_verifier->is_verification_passed = FALSE; + } +#endif + + Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata; + Pool* free_pool = verifier_metadata->free_objects_pool; + + Object_Comparator object_comparator = compare_live_obj_inform; + Boolean passed = verifier_compare_objs_pools(verifier_metadata->objects_pool_before_gc, + verifier_metadata->objects_pool_after_gc , free_pool, object_comparator); + if(!passed) gc_verifier->is_verification_passed = FALSE; +#ifndef BUILD_IN_REFERENT + passed = verifier_compare_objs_pools(verifier_metadata->resurrect_objects_pool_before_gc, + verifier_metadata->resurrect_objects_pool_after_gc , free_pool, object_comparator); + if(!passed) gc_verifier->is_verification_passed = FALSE; +#endif +} + + +void verifier_pool_clear_objs_mark_bit(Pool* marked_objs_pool) +{ + pool_iterator_init(marked_objs_pool); + Vector_Block* objs_set = pool_iterator_next(marked_objs_pool); + + while(objs_set){ + POINTER_SIZE_INT* iter = vector_block_iterator_init(objs_set); + while(!vector_block_iterator_end(objs_set,iter)){ 
+ Live_Object_Inform* p_verify_obj = (Live_Object_Inform* )*iter; + iter = vector_block_iterator_advance(objs_set,iter); + + Partial_Reveal_Object* p_obj = p_verify_obj->address; + assert(p_obj != NULL); + assert(obj_is_marked_in_vt(p_obj)); + obj_unmark_in_vt(p_obj); + } + objs_set = pool_iterator_next(marked_objs_pool); + } +} + +void verifier_clear_objs_mark_bit(Heap_Verifier* heap_verifier) +{ + Pool* marked_objs_pool = NULL; + + Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata; + if(heap_verifier->is_before_gc) { + verifier_pool_clear_objs_mark_bit(verifier_metadata->objects_pool_before_gc); + #ifndef BUILD_IN_REFERENT + verifier_pool_clear_objs_mark_bit(verifier_metadata->resurrect_objects_pool_before_gc); + #endif + }else{ + verifier_pool_clear_objs_mark_bit(verifier_metadata->objects_pool_after_gc); + #ifndef BUILD_IN_REFERENT + verifier_pool_clear_objs_mark_bit(verifier_metadata->resurrect_objects_pool_after_gc); + #endif + } + return; +} + + +void verifier_reset_gc_verification(Heap_Verifier* heap_verifier) +{ + if(!heap_verifier->need_verify_gc) return; + heap_verifier->gc_verifier->is_verification_passed = TRUE; + verifier_copy_rootsets(heap_verifier->gc, heap_verifier); +} +void verifier_clear_gc_verification(Heap_Verifier* heap_verifier) +{ + verify_gc_reset(heap_verifier); + verifier_set_fallback_collection(heap_verifier->gc_verifier, FALSE); +} + + Index: src/verify/verify_gc_effect.h =================================================================== --- src/verify/verify_gc_effect.h (revision 0) +++ src/verify/verify_gc_effect.h (revision 0) @@ -0,0 +1,59 @@ +#ifndef _VERIFY_GC_EFFECT_H_ +#define _VERIFY_GC_EFFECT_H_ + +#include "verifier_common.h" + +typedef struct GC_Verifier{ + Vector_Block* trace_stack; + Vector_Block* root_set; + Vector_Block* objects_set; + + Boolean is_tracing_resurrect_obj; + unsigned int gc_collect_kind; + Boolean is_before_fallback_collection; + + POINTER_SIZE_INT 
num_live_objects_before_gc; + POINTER_SIZE_INT num_live_objects_after_gc; + POINTER_SIZE_INT size_live_objects_before_gc; + POINTER_SIZE_INT size_live_objects_after_gc; + + POINTER_SIZE_INT num_resurrect_objects_before_gc; + POINTER_SIZE_INT num_resurrect_objects_after_gc; + POINTER_SIZE_INT size_resurrect_objects_before_gc; + POINTER_SIZE_INT size_resurrect_objects_after_gc; + + Boolean is_verification_passed; +}GC_Verifier; + +typedef struct Live_Object_Inform_struct{ + VT vt_raw; + Partial_Reveal_Object* address; +} Live_Object_Inform; + +void verifier_init_GC_verifier(Heap_Verifier* heap_verifier); +void verifier_destruct_GC_verifier(Heap_Verifier* heap_verifier); +void verifier_reset_gc_verification(Heap_Verifier* heap_verifier); +void verifier_clear_gc_verification(Heap_Verifier* heap_verifier); + +void verifier_update_verify_info(Partial_Reveal_Object* p_obj, Heap_Verifier* heap_verifier); +void verify_live_finalizable_obj(Heap_Verifier* heap_verifier, Pool* live_finalizable_objs_pool); +void verifier_clear_objs_mark_bit(Heap_Verifier* heap_verifier); + + +void verifier_update_info_before_resurrect(Heap_Verifier* heap_verifier); +void verifier_update_info_after_resurrect(Heap_Verifier* heap_verifier); + +void verify_gc_effect(Heap_Verifier* heap_verifier); + + + +inline unsigned int verifier_get_gc_collect_kind(GC_Verifier* gc_verifier) +{ return gc_verifier->gc_collect_kind; } +inline void verifier_set_gc_collect_kind(GC_Verifier* gc_verifier, unsigned int collect_kind) +{ gc_verifier->gc_collect_kind = collect_kind; } + +inline void verifier_set_fallback_collection(GC_Verifier* gc_verifier, Boolean is_before_fallback) +{ gc_verifier->is_before_fallback_collection = is_before_fallback; } + +#endif + Index: src/verify/verify_live_heap.cpp =================================================================== --- src/verify/verify_live_heap.cpp (revision 521779) +++ src/verify/verify_live_heap.cpp (working copy) @@ -1,40 +1,135 @@ -/* - * Copyright 2005-2006 The 
Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @author Xiao-Feng Li, 2006/10/05 - */ - #include "verify_live_heap.h" +#include "verifier_common.h" +#include "verify_gc_effect.h" +#include "verify_mutator_effect.h" -Boolean GC_VERIFY = FALSE; +char* GC_VERIFY = NULL; Boolean verify_live_heap; -void gc_verify_heap(GC* gc, Boolean is_before_gc) -{ return; } +Heap_Verifier* heap_verifier; + void gc_init_heap_verification(GC* gc) -{ return; } +{ + if(GC_VERIFY == NULL){ + verify_live_heap = FALSE; + return; + } + heap_verifier = (Heap_Verifier*) STD_MALLOC(sizeof(Heap_Verifier)); + assert(heap_verifier); + memset(heap_verifier, 0, sizeof(Heap_Verifier)); + heap_verifier->gc = gc; + + verifier_metadata_initialize(heap_verifier); + verifier_init_mutator_verifiers(heap_verifier); + verifier_init_GC_verifier(heap_verifier); + verifier_init_object_scanner(heap_verifier); + + heap_verifier->is_before_gc = heap_verifier->gc_is_gen_mode = FALSE; + heap_verifier->need_verify_gc = heap_verifier->need_verify_rootset + = heap_verifier->need_verify_allocation = heap_verifier->need_verify_writebarrier = FALSE; + + if(!verifier_parse_options(heap_verifier, GC_VERIFY)){ + printf("GC Verify options error, verifier will not start.\n"); + gc_terminate_heap_verification(gc); + return; + } + + verify_live_heap = TRUE; + return; +} + void gc_terminate_heap_verification(GC* gc) -{ return; } +{ + 
gc_verifier_metadata_destruct(heap_verifier); + verifier_destruct_mutator_verifiers(heap_verifier); + verifier_destruct_GC_verifier(heap_verifier); + STD_FREE(heap_verifier); + heap_verifier = NULL; + return; +} -void event_collector_move_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector) -{ return; } +void verify_heap_before_gc(GC* gc) +{ + verifier_log_start(); + verifier_set_gc_collect_kind(heap_verifier->gc_verifier, gc->collect_kind); + verifier_set_gen_mode(heap_verifier); + verifier_reset_mutator_verification(heap_verifier); + verifier_reset_gc_verification(heap_verifier); -void event_collector_doublemove_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector) -{ return; } + if(need_scan_all_objs(heap_verifier)) + (*heap_verifier->all_obj_scanner)(heap_verifier); + /*verify mutator side effect before gc after scanning all objs.*/ + if(need_verify_mutator_effect(heap_verifier)) + verify_mutator_effect(heap_verifier); + + if(need_scan_live_objs(heap_verifier)) + (*heap_verifier->live_obj_scanner)(heap_verifier); + + verifier_log_before_gc(heap_verifier); + +} + +void verify_heap_after_gc(GC* gc) +{ + verifier_log_start(); + if(need_scan_live_objs(heap_verifier)) + (*heap_verifier->live_obj_scanner)(heap_verifier); + if(need_verify_gc_effect(heap_verifier)) + verify_gc_effect(heap_verifier); + + verifier_log_after_gc(heap_verifier); + + verifier_clear_mutator_verification(heap_verifier); + verifier_clear_gc_verification(heap_verifier); +} + +void gc_verify_heap(GC* gc, Boolean is_before_gc) +{ + heap_verifier->is_before_gc = is_before_gc; + + if(is_before_gc){ + verify_heap_before_gc(gc); + }else{ + verify_heap_after_gc(gc); + } +} + +void event_gc_collect_kind_changed(GC* gc) +{ + /*GC collection kind were changed from normal MINOR or MAJOR to FALLBACK MAJOR*/ + assert(gc->collect_kind == FALLBACK_COLLECTION); + if(!heap_verifier->need_verify_gc) return; + + verifier_log_start(); + /*finish the 
fallbacked gc verify*/ + heap_verifier->is_before_gc = FALSE; + verifier_set_fallback_collection(heap_verifier->gc_verifier, TRUE); + (*heap_verifier->live_obj_scanner)(heap_verifier); + verify_gc_effect(heap_verifier); + printf("GC Fall Back, GC end.\n"); + verifier_log_after_gc(heap_verifier); + verifier_clear_gc_verification(heap_verifier); + + /*start fallback major gc verify */ + heap_verifier->is_before_gc = TRUE; + verifier_set_fallback_collection(heap_verifier->gc_verifier, TRUE); + verifier_set_gc_collect_kind(heap_verifier->gc_verifier, gc->collect_kind); + verifier_set_gen_mode(heap_verifier); + verifier_reset_gc_verification(heap_verifier); + + (*heap_verifier->live_obj_scanner)(heap_verifier); + +} + +void event_mutator_allocate_newobj(Partial_Reveal_Object* p_newobj, POINTER_SIZE_INT size, VT vt_raw) +{ + verifier_event_mutator_allocate_newobj(p_newobj, size, vt_raw); +} + +Heap_Verifier* get_heap_verifier() +{ return heap_verifier; } + + Index: src/verify/verify_live_heap.h =================================================================== --- src/verify/verify_live_heap.h (revision 521779) +++ src/verify/verify_live_heap.h (working copy) @@ -1,35 +1,15 @@ -/* - * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** - * @author Xiao-Feng Li, 2006/10/05 - */ - #ifndef _VERIFY_LIVE_HEAP_H_ #define _VERIFY_LIVE_HEAP_H_ #include "../common/gc_common.h" extern Boolean verify_live_heap; + void gc_init_heap_verification(GC* gc); void gc_terminate_heap_verification(GC* gc); -void event_collector_move_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector); void gc_verify_heap(GC* gc, Boolean is_before_gc); -/* functions used in fall back compaction and the out-of-space cases*/ -void event_collector_doublemove_obj(Partial_Reveal_Object *p_old, Partial_Reveal_Object *p_new, Collector* collector); +void event_mutator_allocate_newobj(Partial_Reveal_Object* p_newobj, POINTER_SIZE_INT size, VT vt_raw); +void event_gc_collect_kind_changed(GC* gc); #endif Index: src/verify/verify_mutator_effect.cpp =================================================================== --- src/verify/verify_mutator_effect.cpp (revision 0) +++ src/verify/verify_mutator_effect.cpp (revision 0) @@ -0,0 +1,397 @@ +#include "verifier_common.h" +#include "verify_mutator_effect.h" + +/*<-----------verify allocation----------------->*/ +void verifier_init_allocation_verifier( Heap_Verifier* heap_verifier) +{ + Allocation_Verifier* allocation_verifier = (Allocation_Verifier*)STD_MALLOC(sizeof(Allocation_Verifier)); + assert(allocation_verifier); + memset(allocation_verifier, 0, sizeof(Allocation_Verifier)); + allocation_verifier->size_nos_newobjs = allocation_verifier->num_nos_newobjs = 0; + allocation_verifier->size_los_newobjs = allocation_verifier->size_los_newobjs = 0; + allocation_verifier->size_nos_objs = allocation_verifier->num_nos_objs = 0; + allocation_verifier->size_los_objs = allocation_verifier->num_los_objs = 0; + allocation_verifier->last_size_los_objs = allocation_verifier->last_num_los_objs = 0; + allocation_verifier->new_objects_set = NULL; + allocation_verifier->new_objects_set + = 
verifier_free_set_pool_get_entry(heap_verifier->heap_verifier_metadata->free_objects_pool); + heap_verifier->allocation_verifier = allocation_verifier; +} + +void verifier_destruct_allocation_verifier(Heap_Verifier* heap_verifier) +{ + //assert(heap_verifier->allocation_verifier->new_objects_set == NULL); + STD_FREE(heap_verifier->allocation_verifier); + heap_verifier->allocation_verifier = NULL; +} + +void verify_allocation_reset(Heap_Verifier* heap_verifier) +{ + Allocation_Verifier* alloc_verifier = heap_verifier->allocation_verifier; + GC_Gen* gc = (GC_Gen*)heap_verifier->gc; + Space* lspace = gc_get_los(gc); + + alloc_verifier->size_los_objs = alloc_verifier->num_los_objs = 0; + + verifier_scan_los_objects(lspace, heap_verifier); + + alloc_verifier->last_size_los_objs = alloc_verifier->size_los_objs; + alloc_verifier->last_num_los_objs = alloc_verifier->num_los_objs; + + alloc_verifier->size_nos_newobjs = alloc_verifier->num_nos_newobjs = 0; + alloc_verifier->size_los_newobjs = alloc_verifier->num_los_newobjs = 0; + alloc_verifier->size_nos_objs = alloc_verifier->num_nos_objs = 0; + alloc_verifier->size_los_objs = alloc_verifier->num_los_objs = 0; + + assert(alloc_verifier->new_objects_set == NULL); + alloc_verifier->new_objects_set = verifier_free_set_pool_get_entry(verifier_metadata->free_objects_pool); + assert(alloc_verifier->new_objects_set); + +} + +Boolean verify_new_object(New_Object* new_obj, Heap_Verifier* heap_verifier) +{ + if(!heap_verifier->need_verify_allocation) return TRUE; + + GC_Gen* gc =(GC_Gen*) heap_verifier->gc; + Space* mspace = gc_get_mos(gc); + assert(mspace); + + if(obj_belongs_to_space(new_obj->address, mspace)){ + assert(0); + printf("GC Verify ==> Verify Allocation: new Objects in MOS...\n "); + return FALSE; + } + + Partial_Reveal_Object* p_newobj = new_obj->address; + if(obj_get_vt_raw(p_newobj) != new_obj->vt_raw){ + assert(0); + printf("GC Verify ==> Verify Allocation: new Objects Vtable Error...\n "); + return FALSE; + } + + 
if(vm_object_size(p_newobj) != new_obj->size){ + assert(0); + printf("GC Verify ==> Verify Allocation: new Objects size Error...\n "); + return FALSE; + } + + return TRUE; +} + +void verify_allocation(Heap_Verifier* heap_verifier) +{ + Allocation_Verifier* alloc_verifier = heap_verifier->allocation_verifier; + Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata; + pool_put_entry(verifier_metadata->new_objects_pool, alloc_verifier->new_objects_set); + alloc_verifier->new_objects_set = NULL; + + GC_Gen* gc = (GC_Gen*)heap_verifier->gc; + Space* nspace = gc_get_nos(gc); + Space* lspace = gc_get_los(gc); + assert(nspace); + assert(lspace); + + POINTER_SIZE_INT size_los_newobjs + = alloc_verifier->size_los_objs - alloc_verifier->last_size_los_objs; + POINTER_SIZE_INT num_los_newobjs + = alloc_verifier->num_los_objs - alloc_verifier->last_num_los_objs; + + assert(alloc_verifier->size_nos_objs == alloc_verifier->size_nos_newobjs); + assert(alloc_verifier->num_nos_objs == alloc_verifier->num_nos_newobjs); + if(alloc_verifier->size_nos_objs != alloc_verifier->size_nos_newobjs){ + printf("GC Verify ==> Verify Allocation: NOS new objects size error.\n"); + alloc_verifier->is_verification_passed = FALSE; + } + if(alloc_verifier->num_nos_objs != alloc_verifier->num_nos_newobjs){ + printf("GC Verify ==> Verify Allocation: NOS new objects number error.\n"); + alloc_verifier->is_verification_passed = FALSE; + } + + assert(size_los_newobjs == alloc_verifier->size_los_newobjs); + assert(num_los_newobjs == alloc_verifier->num_los_newobjs); + if(size_los_newobjs != alloc_verifier->size_los_newobjs){ + printf("GC Verify ==> Verify Allocation: LOS new objects size error.\n"); + alloc_verifier->is_verification_passed = FALSE; + } + if(num_los_newobjs != alloc_verifier->num_los_newobjs){ + printf("GC Verify ==> Verify Allocation: LOS new objects number error.\n"); + alloc_verifier->is_verification_passed = FALSE; + } + + + 
assert(alloc_verifier->new_objects_set == NULL); + Vector_Block* new_objects = pool_get_entry(verifier_metadata->new_objects_pool); + while(new_objects){ + POINTER_SIZE_INT* iter = vector_block_iterator_init(new_objects); + while(!vector_block_iterator_end(new_objects, iter)){ + New_Object* p_newobj = (New_Object*) *iter; + iter = vector_block_iterator_advance(new_objects, iter); + if(!verify_new_object(p_newobj, heap_verifier)){ + assert(0); + printf("GC Verify ==> Verify Allocation: new objects verify error.\n"); + alloc_verifier->is_verification_passed = FALSE; + } + STD_FREE(p_newobj); + } + vector_block_clear(new_objects); + pool_put_entry(verifier_metadata->free_objects_pool, new_objects); + new_objects = pool_get_entry(verifier_metadata->new_objects_pool); + } +} + + + +void verifier_allocation_update_info(Partial_Reveal_Object *p_obj, Heap_Verifier* heap_verifier) +{ + if(!heap_verifier->need_verify_allocation) return; + + Allocation_Verifier* alloc_verifier = heap_verifier->allocation_verifier; + GC_Gen* gc = (GC_Gen*)heap_verifier->gc; + Space* nspace = gc_get_nos(gc); + Space* mspace = gc_get_mos(gc); + Space* lspace = gc_get_los(gc); + assert(nspace); + assert(lspace); + + if(obj_belongs_to_space(p_obj, nspace)){ + alloc_verifier->size_nos_objs += vm_object_size(p_obj); + alloc_verifier->num_nos_objs ++; + return; + }else if(obj_belongs_to_space(p_obj, lspace)){ + alloc_verifier->size_los_objs += vm_object_size(p_obj); + alloc_verifier->num_los_objs ++; + return; + }else if(obj_belongs_to_space(p_obj, mspace)){ + return; + } + assert(0); +} + +void verifier_event_mutator_allocate_newobj(Partial_Reveal_Object* p_newobj, POINTER_SIZE_INT size, VT vt_raw) +{ + Heap_Verifier* heap_verifier = get_heap_verifier(); + if(!heap_verifier->need_verify_allocation) return; + + assert(p_newobj); + assert(obj_get_vt(p_newobj)); + assert(obj_get_vt(p_newobj) == vt_raw); + + New_Object* new_obj = (New_Object*) STD_MALLOC(sizeof(New_Object)); + assert(new_obj); + 
new_obj->address = p_newobj;
+  new_obj->size = size;
+  new_obj->vt_raw = (VT) vt_raw;
+
+  Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
+  Allocation_Verifier* alloc_verifier = heap_verifier->allocation_verifier;
+
+  /*FIXME: Replace lock */
+  lock(alloc_verifier->alloc_lock); /* serializes concurrent allocating mutators over the shared set and counters */
+
+  verifier_set_push((void*)new_obj, alloc_verifier->new_objects_set, verifier_metadata->new_objects_pool);
+
+  GC_Gen* gc =(GC_Gen*) heap_verifier->gc;
+  Space* nspace = gc_get_nos(gc);
+  Space* lspace = gc_get_los(gc);
+
+  //FIXME:
+  //assert(size == vm_object_size(p_newobj));
+  if(obj_belongs_to_space(p_newobj, nspace)){
+    alloc_verifier->size_nos_newobjs += size;
+    alloc_verifier->num_nos_newobjs ++;
+  }else if (obj_belongs_to_space(p_newobj, lspace)){
+    alloc_verifier->size_los_newobjs += size;
+    alloc_verifier->num_los_newobjs ++;
+  }else{
+    assert(0); /* new objects are only ever allocated in NOS or LOS */
+    alloc_verifier->is_verification_passed = FALSE;
+
+  }
+  unlock(alloc_verifier->alloc_lock);
+}
+
+/*<-----------verify root sets----------------->*/
+/*FIXME: where should root set verifier be placed*/
+/*The rootset set verifier is placed here, so the rootset verifier
+   verifys the rootset that vm enumerated before GC. The rootset
+   verifying processes before and after gc will be integrated in gc verifying pass,
+   the rootset verifying is considered as slot verifying while verifying gc.
+*/
+/* Allocates and zero-initializes the rootset verifier attached to heap_verifier. */
+void verifier_init_rootset_verifier( Heap_Verifier* heap_verifier)
+{
+  RootSet_Verifier* rs_verifier = (RootSet_Verifier*)STD_MALLOC(sizeof(RootSet_Verifier));
+  assert(rs_verifier);
+  memset(rs_verifier, 0, sizeof(RootSet_Verifier));
+  rs_verifier->num_slots_in_rootset = 0; /* redundant after memset, kept for explicitness */
+  rs_verifier->num_error_slots = 0;
+  rs_verifier->is_verification_passed = FALSE; /* set TRUE by verifier_reset_mutator_verification before each pass */
+  heap_verifier->rootset_verifier = rs_verifier;
+}
+
+
+void verifier_destruct_rootset_verifier( Heap_Verifier* heap_verifier)
+{
+  assert(heap_verifier != NULL);
+  assert(heap_verifier->rootset_verifier != NULL);
+  STD_FREE(heap_verifier->rootset_verifier);
+  heap_verifier->rootset_verifier = NULL;
+}
+
+
+/* Walks every slot in the VM-enumerated rootset pool and verifies it,
+   counting total and erroneous slots. */
+void verify_root_set(Heap_Verifier* heap_verifier)
+{
+  assert(heap_verifier);
+  GC_Gen* gc = (GC_Gen*)heap_verifier->gc;
+  GC_Metadata* gc_metadata = gc->metadata;
+
+  assert(gc);
+  assert(gc_metadata);
+  assert(gc_metadata->gc_rootset_pool);
+
+  RootSet_Verifier* rootset_verifier = heap_verifier->rootset_verifier;
+
+  pool_iterator_init(gc_metadata->gc_rootset_pool);
+  Vector_Block* root_set = pool_iterator_next(gc_metadata->gc_rootset_pool);
+
+  /* verify each root slot in turn (stale note said "copy all root objects
+     to trace tasks" -- the loop below only verifies, it copies nothing).
*/
+  while(root_set){
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
+    while(!vector_block_iterator_end(root_set,iter)){
+      REF* p_ref = (REF* )*iter;
+      iter = vector_block_iterator_advance(root_set,iter);
+      rootset_verifier->num_slots_in_rootset ++;
+      if(!verify_rootset_slot(p_ref, heap_verifier)){
+        rootset_verifier->is_verification_passed = FALSE;
+        rootset_verifier->num_error_slots ++;
+        assert(0);
+        continue;
+      }
+    }
+    root_set = pool_iterator_next(gc_metadata->gc_rootset_pool);
+  }
+}
+
+
+
+/*<-----------verify write barrier----------------->*/
+
+/* Allocates and zero-initializes the write-barrier verifier. */
+void verifier_init_wb_verifier( Heap_Verifier* heap_verifier)
+{
+  WriteBarrier_Verifier* wb_verifier = (WriteBarrier_Verifier*) STD_MALLOC(sizeof(WriteBarrier_Verifier));
+  assert(wb_verifier);
+  memset(wb_verifier, 0, sizeof(WriteBarrier_Verifier));
+  wb_verifier->num_ref_wb_after_scanning = 0; /* redundant after memset, kept for explicitness */
+  wb_verifier->num_ref_wb_in_remset = 0;
+  wb_verifier->num_slots_in_remset = 0;
+  wb_verifier->is_verification_passed = FALSE; /* set TRUE by verifier_reset_mutator_verification before each pass */
+  heap_verifier->writebarrier_verifier = wb_verifier;
+}
+
+
+void verifier_destruct_wb_verifier( Heap_Verifier* heap_verifier)
+{
+  assert(heap_verifier != NULL);
+  assert(heap_verifier->writebarrier_verifier != NULL);
+  STD_FREE(heap_verifier->writebarrier_verifier);
+  heap_verifier->writebarrier_verifier = NULL;
+}
+
+
+/* Before scanning, marks every remset slot that is an old->NOS reference so
+   verify_write_barrier can later match references found by scanning against
+   what the write barrier remembered. Gen-mode minor collections only. */
+void verifier_mark_wb_slots(Heap_Verifier* heap_verifier)
+{
+  GC_Gen* gc = (GC_Gen*)(heap_verifier->gc);
+  if(gc->collect_kind != MINOR_COLLECTION ||!gc_is_gen_mode()) return;
+
+  GC_Metadata*gc_metadata = gc->metadata;
+  Space* nspace = gc_get_nos(gc);
+
+  WriteBarrier_Verifier* wb_verifier = heap_verifier->writebarrier_verifier;
+  assert(wb_verifier);
+
+  pool_iterator_init(gc_metadata->gc_rootset_pool);
+  Vector_Block* rem_set = pool_iterator_next(gc_metadata->gc_rootset_pool);
+
+
+  while(rem_set){
+    if(rem_set == gc->root_set) break; /* NOTE(review): assumes remset blocks precede the root-set block in this pool -- confirm ordering */
+    POINTER_SIZE_INT* iter = vector_block_iterator_init(rem_set);
+    while(!vector_block_iterator_end(rem_set, iter)){
+      REF* p_ref = (REF* )*iter;
+      wb_verifier->num_slots_in_remset ++;
+      if(!address_belongs_to_space((void*)p_ref, nspace) && address_belongs_to_space((void*)*p_ref, nspace)){ /* slot outside NOS referencing into NOS: exactly what the write barrier must record */
+        if(!wb_is_marked_in_slot(p_ref)){
+          wb_mark_in_slot(p_ref);
+          wb_verifier->num_ref_wb_in_remset ++; /* count each distinct slot once even if it appears in several remsets */
+        }
+      }
+      iter = vector_block_iterator_advance(rem_set, iter);
+    }
+    rem_set = pool_iterator_next(gc_metadata->gc_rootset_pool);
+  }
+}
+
+/* During scanning: every old->NOS slot encountered must have been marked by
+   verifier_mark_wb_slots; an unmarked one means the write barrier missed it. */
+void verify_write_barrier(REF* p_ref, Heap_Verifier* heap_verifier)
+{
+  GC_Gen* gc = (GC_Gen*)heap_verifier->gc;
+  if(gc->collect_kind != MINOR_COLLECTION ||!gc_is_gen_mode()) return;
+
+  Space* nspace = gc_get_nos(gc);
+  assert(address_belongs_to_gc_heap((void*)p_ref, (GC *) gc));
+
+  WriteBarrier_Verifier* wb_verifier = heap_verifier->writebarrier_verifier;
+  assert(wb_verifier);
+
+  if(!address_belongs_to_space((void*)p_ref, nspace) && address_belongs_to_space((void*)*p_ref, nspace)){
+    if(!wb_is_marked_in_slot(p_ref)){
+      assert(0);
+      printf("GC Verify ==> Verify Write Barrier: error!!!\n");
+      wb_verifier->is_verification_passed = FALSE;
+    }else{
+      wb_unmark_in_slot(p_ref); /* clear the verifier's mark and count the matched record */
+      wb_verifier->num_ref_wb_after_scanning ++;
+    }
+  }
+}
+
+/*<------------verify mutator effect common--------------->*/
+
+/* Constructs all three mutator-effect verifiers on heap_verifier. */
+void verifier_init_mutator_verifiers( Heap_Verifier* heap_verifier)
+{
+  verifier_init_allocation_verifier(heap_verifier);
+  verifier_init_wb_verifier(heap_verifier);
+  verifier_init_rootset_verifier(heap_verifier);
+}
+
+
+void verifier_destruct_mutator_verifiers( Heap_Verifier* heap_verifier)
+{
+  verifier_destruct_allocation_verifier(heap_verifier);
+  verifier_destruct_wb_verifier(heap_verifier);
+  verifier_destruct_rootset_verifier(heap_verifier);
+}
+
+/* Resets pass/fail state to TRUE before a verification pass and, when
+   enabled, pre-marks write-barrier slots. */
+void verifier_reset_mutator_verification(Heap_Verifier* heap_verifier)
+{
+  heap_verifier->allocation_verifier->is_verification_passed = TRUE;
+  heap_verifier->writebarrier_verifier->is_verification_passed = TRUE;
+  heap_verifier->rootset_verifier->is_verification_passed = TRUE;
+
+
if(heap_verifier->need_verify_writebarrier && heap_verifier->gc_is_gen_mode) /* WB marking is only meaningful in gen mode */
+    verifier_mark_wb_slots(heap_verifier);
+
+}
+
+
+void verifier_clear_mutator_verification(Heap_Verifier* heap_verifier)
+{
+  if(heap_verifier->need_verify_allocation) verify_allocation_reset(heap_verifier);
+}
+
+
+/* Entry point: runs whichever mutator-effect verifications are enabled. */
+void verify_mutator_effect(Heap_Verifier* heap_verifier)
+{
+  if(heap_verifier->need_verify_rootset) verify_root_set(heap_verifier);
+  if(heap_verifier->need_verify_allocation) verify_allocation(heap_verifier);
+}
+
+
Index: src/verify/verify_mutator_effect.h
===================================================================
--- src/verify/verify_mutator_effect.h	(revision 0)
+++ src/verify/verify_mutator_effect.h	(revision 0)
@@ -0,0 +1,65 @@
+#ifndef _VERIFY_MUTATOR_EFFECT_H_
+#define _VERIFY_MUTATOR_EFFECT_H_
+
+#include "verifier_common.h"
+
+typedef struct Allocation_Verifier{
+  SpinLock alloc_lock; /* guards new_objects_set and the *_newobjs counters updated by allocating mutators */
+
+  /*calculated in GC allocation phase.*/
+  POINTER_SIZE_INT size_nos_newobjs;
+  POINTER_SIZE_INT num_nos_newobjs;
+  POINTER_SIZE_INT size_los_newobjs;
+  POINTER_SIZE_INT num_los_newobjs;
+
+  /*size and number of objects in LOS after last GC*/
+  POINTER_SIZE_INT last_size_los_objs;
+  POINTER_SIZE_INT last_num_los_objs;
+
+  /*calculated in whole heap scanning phase.*/
+  POINTER_SIZE_INT size_nos_objs;
+  POINTER_SIZE_INT num_nos_objs;
+  POINTER_SIZE_INT size_los_objs;
+  POINTER_SIZE_INT num_los_objs;
+
+  Vector_Block* new_objects_set; /* pending New_Object records; flushed to the new_objects_pool at verify time */
+
+  Boolean is_verification_passed;
+}Allocation_Verifier;
+
+typedef struct WriteBarrier_Verifier{
+  POINTER_SIZE_INT num_slots_in_remset;
+  POINTER_SIZE_INT num_ref_wb_in_remset; /* distinct old->NOS slots recorded by the write barrier */
+  POINTER_SIZE_INT num_ref_wb_after_scanning; /* of those, how many the heap scan re-discovered */
+  Boolean is_verification_passed;
+}WriteBarrier_Verifier;
+
+typedef struct RootSet_Verifier{
+  POINTER_SIZE_INT num_slots_in_rootset;
+  POINTER_SIZE_INT num_error_slots;
+  Boolean is_verification_passed;
+}RootSet_Verifier;
+
+
+typedef struct New_Object_struct{ /* per-allocation record kept until verify_allocation frees it */
+  Partial_Reveal_Object* address;
+  POINTER_SIZE_INT size;
+  VT vt_raw;
+} New_Object;
+
+void verify_write_barrier(REF* p_ref, Heap_Verifier* heap_verifier);
+void verify_allocation(Heap_Verifier* heap_verifier);
+void verify_root_set(Heap_Verifier* heap_verifier);
+
+void verifier_init_mutator_verifiers(Heap_Verifier* heap_verifier);
+void verifier_event_mutator_allocate_newobj(Partial_Reveal_Object* p_newobj, POINTER_SIZE_INT size, VT vt_raw);
+void verifier_destruct_mutator_verifiers(Heap_Verifier* heap_verifier);
+void verifier_allocation_update_info(Partial_Reveal_Object *p_obj, Heap_Verifier* heap_verifier);
+
+void verifier_reset_mutator_verification(Heap_Verifier* heap_verifier);
+void verifier_clear_mutator_verification(Heap_Verifier* heap_verifier);
+
+void verify_mutator_effect(Heap_Verifier* heap_verifier);
+
+
+#endif //_VERIFY_MUTATOR_EFFECT_H_