Index: src/common/compressed_ref.cpp =================================================================== --- src/common/compressed_ref.cpp (revision 0) +++ src/common/compressed_ref.cpp (revision 0) @@ -0,0 +1,72 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2007/01/24 + */ + +#include "gc_common.h" +#include "../utils/vector_block.h" +#include "../utils/sync_pool.h" +#include "../thread/collector.h" +#include "../thread/mutator.h" +#include "compressed_ref.h" + +typedef struct Uncompressed_Root{ + Partial_Reveal_Object **p_ref; /* pointing to the uncompressed address of the root object */ + REF ref; /* temporary compressed pointer pointing to the root object */ +}Uncompressed_Root; + +POINTER_SIZE_INT vtable_base = 0; +POINTER_SIZE_INT HEAP_NULL = 0; + +void gc_set_uncompressed_rootset(GC *gc) +{ + Pool *rootset_pool = gc->metadata->gc_uncompressed_rootset_pool; + + pool_put_entry(rootset_pool, gc->uncompressed_root_set); + gc->uncompressed_root_set = NULL; + + pool_iterator_init(rootset_pool); + while(Vector_Block *root_set = pool_iterator_next(rootset_pool)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set); + for(; !vector_block_iterator_end(root_set, iter); iter = vector_block_iterator_advance(root_set, iter)){ + iter = vector_block_iterator_advance(root_set, iter); + assert(!vector_block_iterator_end(root_set, iter)); + /* add the pointer to ref of Uncompressed_Root to rootset */ + gc_compressed_rootset_add_entry(gc, (REF *)iter); + } + } +} + +void gc_fix_uncompressed_rootset(GC *gc) +{ + Pool *rootset_pool = gc->metadata->gc_uncompressed_rootset_pool; + + pool_iterator_init(rootset_pool); + while(Vector_Block *root_set = pool_iterator_next(rootset_pool)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set); + for(; !vector_block_iterator_end(root_set, iter); iter = vector_block_iterator_advance(root_set, iter)){ + Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)*iter; + iter = vector_block_iterator_advance(root_set, iter); + assert(!vector_block_iterator_end(root_set, iter)); + REF ref = (REF)*iter; + Partial_Reveal_Object *p_obj = uncompress_ref(ref); + *p_ref = p_obj; + } + } +} + Property changes on: src\common\compressed_ref.cpp ___________________________________________________________________ Name: svn:eol-style + native Index: src/common/compressed_ref.h =================================================================== --- src/common/compressed_ref.h (revision 0) +++ src/common/compressed_ref.h (revision 0) @@ -0,0 +1,51 @@ +/* + * Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @author Xiao-Feng Li, 2007/01/24 + */ +#ifndef _COMPRESSED_REF_H_ +#define _COMPRESSED_REF_H_ + +#include "gc_common.h" +#include "gc_metadata.h" +#include "../utils/vector_block.h" +#include "../utils/sync_pool.h" +#include "../thread/collector.h" +#include "../thread/mutator.h" + +void gc_set_uncompressed_rootset(GC *gc); +void gc_fix_uncompressed_rootset(GC *gc); + + +FORCE_INLINE void gc_compressed_rootset_add_entry(GC *gc, REF *p_ref) +{ + assert( p_ref < gc_heap_base(gc) || p_ref >= gc_heap_ceiling(gc)); + + GC_Metadata *metadata = gc->metadata; + Vector_Block *root_set = gc->root_set; + assert(root_set); + + vector_block_add_entry(root_set, (POINTER_SIZE_INT)p_ref); + + if(!vector_block_is_full(root_set)) return; + + pool_put_entry(metadata->gc_rootset_pool, root_set); + gc->root_set = free_set_pool_get_entry(metadata); + assert(gc->root_set); +} + +#endif /* #ifndef _COMPRESSED_REF_H_ */ Property changes on: src\common\compressed_ref.h ___________________________________________________________________ Name: svn:eol-style + native Index: src/common/fix_repointed_refs.h =================================================================== --- src/common/fix_repointed_refs.h (revision 519535) +++ src/common/fix_repointed_refs.h (working copy) @@ -22,11 +22,12 @@ #define _FIX_REPOINTED_REFS_H_ #include "gc_common.h" +#include "compressed_ref.h" extern Boolean IS_MOVE_COMPACT; -inline void slot_fix(Partial_Reveal_Object** p_ref) +inline void slot_fix(REF* p_ref) { - Partial_Reveal_Object* p_obj = *p_ref; + Partial_Reveal_Object* p_obj = read_slot(p_ref); if(!p_obj) return; if(IS_MOVE_COMPACT){ @@ -40,7 +41,7 @@ * since those which can be scanned in MOS & NOS must have been set fw bit in oi. 
*/ assert((POINTER_SIZE_INT)obj_get_fw_in_oi(p_obj) > DUAL_MARKBITS); - *p_ref = obj_get_fw_in_oi(p_obj); + write_slot(p_ref, obj_get_fw_in_oi(p_obj)); } } @@ -57,7 +58,7 @@ assert(!obj_is_primitive_array(p_obj)); int32 array_length = array->array_len; - Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); + REF* p_refs = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); for (int i = 0; i < array_length; i++) { slot_fix(p_refs + i); } @@ -67,7 +68,7 @@ /* scan non-array object */ int *offset_scanner = init_object_scanner(p_obj); while (true) { - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj); + REF* p_ref = (REF*)offset_get_ref(offset_scanner, p_obj); if (p_ref == NULL) break; /* terminating ref slot */ slot_fix(p_ref); @@ -77,6 +78,7 @@ return; } + inline void block_fix_ref_after_copying(Block_Header* curr_block) { POINTER_SIZE_INT cur_obj = (POINTER_SIZE_INT)curr_block->base; @@ -106,7 +108,7 @@ { void* start_pos; Partial_Reveal_Object* p_obj = block_get_first_marked_obj_after_prefetch(curr_block, &start_pos); - + while( p_obj ){ assert( obj_is_marked_in_vt(p_obj)); object_fix_ref_slots(p_obj); @@ -115,5 +117,4 @@ return; } - #endif /* #ifndef _FIX_REPOINTED_REFS_H_ */ Index: src/common/gc_block.h =================================================================== --- src/common/gc_block.h (revision 519535) +++ src/common/gc_block.h (working copy) @@ -102,6 +102,24 @@ return (Partial_Reveal_Object *)((POINTER_SIZE_INT)obj + vm_object_size(obj)); } +inline void obj_set_prefetched_next_pointer(Partial_Reveal_Object* obj, Partial_Reveal_Object* raw_prefetched_next){ + /*Fixme: em64t: This may be not necessary!*/ + if(raw_prefetched_next == 0){ + *((REF*)obj + 1) = 0; + return; + } + REF ref = compress_ref(raw_prefetched_next); + *((REF*)obj + 1) = ref; +} + +inline Partial_Reveal_Object* obj_get_prefetched_next_pointer(Partial_Reveal_Object* obj){ + /*Fixme: em64t: This may be not necessary!*/ + assert(obj); + + REF ref = *( (REF*)obj + 1); + return uncompress_ref(ref); +} + inline Partial_Reveal_Object *next_marked_obj_in_block(Partial_Reveal_Object *cur_obj, Partial_Reveal_Object *block_end) { while(cur_obj < block_end){ @@ -160,11 +178,10 @@ if(next_marked_obj){ if(next_marked_obj != next_obj) - set_obj_info(next_obj, (Obj_Info_Type)next_marked_obj); + obj_set_prefetched_next_pointer(next_obj, next_marked_obj); } else { - set_obj_info(next_obj, 0); + obj_set_prefetched_next_pointer(next_obj, 0); } - return first_marked_obj; } @@ -186,8 +203,8 @@ if(obj_is_marked_in_vt(cur_obj)) cur_marked_obj = cur_obj; else - cur_marked_obj = (Partial_Reveal_Object *)get_obj_info_raw(cur_obj); - + cur_marked_obj = (Partial_Reveal_Object *)obj_get_prefetched_next_pointer(cur_obj); + if(!cur_marked_obj) return NULL; @@ -201,9 +218,9 @@ if(next_marked_obj){ if(next_marked_obj != next_obj) - set_obj_info(next_obj, (Obj_Info_Type)next_marked_obj); + obj_set_prefetched_next_pointer(next_obj, next_marked_obj); } else { - set_obj_info(next_obj, 0); + obj_set_prefetched_next_pointer(next_obj, 0); } return cur_marked_obj; @@ -211,7 +228,7 @@ inline Partial_Reveal_Object *block_get_next_marked_obj_after_prefetch(Block_Header *block, void **start_pos) { - Partial_Reveal_Object *cur_obj = *(Partial_Reveal_Object **)start_pos; + Partial_Reveal_Object *cur_obj = (Partial_Reveal_Object *)(*start_pos); Partial_Reveal_Object *block_end = (Partial_Reveal_Object *)block->free; 
if(cur_obj >= block_end) @@ -222,7 +239,7 @@ if(obj_is_marked_in_vt(cur_obj) || obj_is_fw_in_oi(cur_obj)) cur_marked_obj = cur_obj; else - cur_marked_obj = (Partial_Reveal_Object *)get_obj_info_raw(cur_obj); + cur_marked_obj = obj_get_prefetched_next_pointer(cur_obj); if(!cur_marked_obj) return NULL; @@ -233,12 +250,14 @@ return cur_marked_obj; } -inline Partial_Reveal_Object * obj_get_fw_in_table(Partial_Reveal_Object *p_obj) +inline REF obj_get_fw_in_table(Partial_Reveal_Object *p_obj) { /* only for inter-sector compaction */ unsigned int index = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj); Block_Header *curr_block = GC_BLOCK_HEADER(p_obj); - return (Partial_Reveal_Object *)(((POINTER_SIZE_INT)p_obj) - curr_block->table[index]); + Partial_Reveal_Object* new_addr = (Partial_Reveal_Object *)(((POINTER_SIZE_INT)p_obj) - curr_block->table[index]); + REF new_ref = compress_ref(new_addr); + return new_ref; } inline void block_clear_table(Block_Header* block) @@ -248,5 +267,5 @@ return; } +#endif //#ifndef _BLOCK_H_ -#endif //#ifndef _BLOCK_H_ Index: src/common/gc_common.cpp =================================================================== --- src/common/gc_common.cpp (revision 519535) +++ src/common/gc_common.cpp (working copy) @@ -51,6 +51,8 @@ extern Boolean JVMTI_HEAP_ITERATION ; +extern Boolean IS_MOVE_COMPACT; + static int get_int_property(const char *property_name) { assert(property_name); @@ -234,7 +236,7 @@ } if (is_property_set("gc.use_large_page", VM_PROPERTIES) == 1){ - char* value = get_property("gc.use_large_page", VM_PROPERTIES); + char* value = get_property("gc.large_page", VM_PROPERTIES); large_page_hint = strdup(value); destroy_property_value(value); } @@ -257,14 +259,14 @@ //For_LOS_extend! #ifdef GC_FIXED_SIZE_TUNER - gc_space_tune_before_gc_simplified(gc, gc_cause); + gc_space_tune_before_gc_fixed_size(gc, gc_cause); #else gc_space_tune_prepare(gc, gc_cause); gc_space_tune_before_gc(gc, gc_cause); #endif #ifdef MARK_BIT_FLIPPING - if(gc->collect_kind == MINOR_COLLECTION) + if(gc_match_kind(gc, MINOR_COLLECTION)) mark_bit_flip(); #endif @@ -315,3 +317,4 @@ return; } + Index: src/common/gc_common.h =================================================================== --- src/common/gc_common.h (revision 519535) +++ src/common/gc_common.h (working copy) @@ -41,17 +41,28 @@ #define KB (1<<10) #define MB (1<<20) -#define BYTES_PER_WORD 4 #define BITS_PER_BYTE 8 -#define BITS_PER_WORD 32 +#define BYTES_PER_WORD (sizeof(POINTER_SIZE_INT)) +#define BITS_PER_WORD (BITS_PER_BYTE * BYTES_PER_WORD) + #define MASK_OF_BYTES_PER_WORD (BYTES_PER_WORD-1) /* 0x11 */ -#define BIT_SHIFT_TO_BYTES_PER_WORD 2 /* 2 */ #define BIT_SHIFT_TO_BITS_PER_BYTE 3 -#define BIT_SHIFT_TO_BITS_PER_WORD 5 + +#ifdef POINTER64 + #define BIT_SHIFT_TO_BYTES_PER_WORD 3 /* 3 */ +#else + #define BIT_SHIFT_TO_BYTES_PER_WORD 2 /* 2 */ +#endif + +#ifdef POINTER64 + #define BIT_SHIFT_TO_BITS_PER_WORD 6 +#else + #define BIT_SHIFT_TO_BITS_PER_WORD 5 +#endif + #define BIT_SHIFT_TO_KILO 10 - #define BIT_MASK_TO_BITS_PER_WORD ((1<gc_ref_offset_array; } -inline Partial_Reveal_Object** object_ref_iterator_get(int* iterator, Partial_Reveal_Object* obj) +inline REF* object_ref_iterator_get(int* iterator, Partial_Reveal_Object *obj) { - return (Partial_Reveal_Object**)((POINTER_SIZE_INT)obj + *iterator); + return (REF*)((POINTER_SIZE_INT)obj + *iterator); } inline int* object_ref_iterator_next(int* iterator) @@ -132,20 +214,20 @@ /****************************************/ inline Boolean obj_is_marked_in_vt(Partial_Reveal_Object *obj) -{ 
return ((POINTER_SIZE_INT)obj_get_vt_raw(obj) & CONST_MARK_BIT); } +{ return (Boolean)((POINTER_SIZE_INT)obj_get_vt_raw(obj) & CONST_MARK_BIT); } inline Boolean obj_mark_in_vt(Partial_Reveal_Object *obj) { - Partial_Reveal_VTable* vt = obj_get_vt_raw(obj); + VT vt = obj_get_vt_raw(obj); if((POINTER_SIZE_INT)vt & CONST_MARK_BIT) return FALSE; - obj_set_vt(obj, (POINTER_SIZE_INT)vt | CONST_MARK_BIT); + obj_set_vt(obj, (VT)( (POINTER_SIZE_INT)vt | CONST_MARK_BIT ) ); return TRUE; } inline void obj_unmark_in_vt(Partial_Reveal_Object *obj) { - Partial_Reveal_VTable* vt = obj_get_vt_raw(obj); - obj_set_vt(obj, (POINTER_SIZE_INT)vt & ~CONST_MARK_BIT); + VT vt = obj_get_vt_raw(obj); + obj_set_vt(obj, (VT)((POINTER_SIZE_INT)vt & ~CONST_MARK_BIT)); } inline Boolean obj_is_marked_or_fw_in_oi(Partial_Reveal_Object *obj) @@ -164,7 +246,7 @@ inline Partial_Reveal_Object *obj_get_fw_in_oi(Partial_Reveal_Object *obj) { assert(get_obj_info_raw(obj) & CONST_FORWARD_BIT); - return (Partial_Reveal_Object*) (get_obj_info_raw(obj) & ~CONST_FORWARD_BIT); + return (Partial_Reveal_Object*)(uncompress_ref((REF)(get_obj_info_raw(obj) & ~CONST_FORWARD_BIT))); } inline Boolean obj_is_fw_in_oi(Partial_Reveal_Object *obj) @@ -173,6 +255,7 @@ inline void obj_set_fw_in_oi(Partial_Reveal_Object *obj,void *dest) { assert(!(get_obj_info_raw(obj) & CONST_FORWARD_BIT)); - set_obj_info(obj,(Obj_Info_Type)dest | CONST_FORWARD_BIT); + Obj_Info_Type dst = (Obj_Info_Type)compress_ref((Partial_Reveal_Object *) dest); + set_obj_info(obj, dst | CONST_FORWARD_BIT); } @@ -209,7 +292,7 @@ inline Partial_Reveal_Object *obj_get_fw_in_oi(Partial_Reveal_Object *obj) { assert(get_obj_info_raw(obj) & FLIP_FORWARD_BIT); - return (Partial_Reveal_Object*) get_obj_info(obj); + return (Partial_Reveal_Object*) ( uncompress_ref( (REF)get_obj_info(obj) ) ); } inline Boolean obj_is_fw_in_oi(Partial_Reveal_Object *obj) @@ -225,7 +308,8 @@ /* It's important to clear the FLIP_FORWARD_BIT before collection ends, since it is the same as next minor cycle's FLIP_MARK_BIT. And if next cycle is major, it is also confusing as FLIP_FORWARD_BIT. (The bits are flipped only in minor collection). */ - set_obj_info(obj,(Obj_Info_Type)dest | FLIP_FORWARD_BIT); + Obj_Info_Type dst = (Obj_Info_Type)compress_ref((Partial_Reveal_Object *) dest); + set_obj_info(obj, dst | FLIP_FORWARD_BIT); } inline Boolean obj_mark_in_oi(Partial_Reveal_Object* p_obj) @@ -296,6 +380,7 @@ /* FIXME:: this is wrong! 
root_set belongs to mutator */ Vector_Block* root_set; + Vector_Block* uncompressed_root_set; //For_LOS_extend Space_Tuner* tuner; @@ -316,6 +401,12 @@ return (addr >= gc_heap_base(gc) && addr < gc_heap_ceiling(gc)); } +inline Boolean gc_match_kind(GC *gc, unsigned int kind) +{ + assert(gc->collect_kind && kind); + return gc->collect_kind & kind; +} + void gc_parse_options(GC* gc); void gc_reclaim_heap(GC* gc, unsigned int gc_cause); @@ -330,7 +421,7 @@ //#define NOS_BOUNDARY ((void*)0x2ea20000) //this is for 512M #define NOS_BOUNDARY ((void*)0x40000000) //this is for 256M - #define nos_boundary NOS_BOUNDARY + #define nos_boundary NOS_BOUNDARY #else /* STATIC_NOS_MAPPING */ Index: src/common/gc_for_class.cpp =================================================================== --- src/common/gc_for_class.cpp (revision 519535) +++ src/common/gc_for_class.cpp (working copy) @@ -70,7 +70,7 @@ unsigned idx; for(idx = 0; idx < num_fields; idx++) { Field_Handle fh = class_get_instance_field_recursive(ch, idx); - if(field_is_enumerable_reference(fh)){ + if(field_is_reference(fh)) { num_ref_fields++; } } @@ -102,7 +102,7 @@ int *result = new_ref_array; for(unsigned int idx = 0; idx < num_fields; idx++) { Field_Handle fh = class_get_instance_field_recursive(ch, idx); - if(field_is_enumerable_reference(fh)) { + if(field_is_reference(fh)) { int offset = field_get_offset(fh); #ifndef BUILD_IN_REFERENT if(is_reference && offset == gc_referent_offset) continue; @@ -197,3 +197,4 @@ } /* gc_class_prepared */ + Index: src/common/gc_for_class.h =================================================================== --- src/common/gc_for_class.h (revision 519535) +++ src/common/gc_for_class.h (working copy) @@ -41,6 +41,7 @@ */ #define CONST_MARK_BIT 0x1 +#define CLEAR_VT_MARK 0x03 #define DUAL_MARKBITS 0x3 #define DUAL_MARKBITS_MASK (~DUAL_MARKBITS) @@ -63,6 +64,19 @@ #endif /* else MARK_BIT_FLIPPING */ +/*emt64 related!*/ +#define COMPRESS_VTABLE + +#ifdef POINTER64 + #ifdef COMPRESS_VTABLE + #define VT uint32 + #else + #define VT Partial_Reveal_VTable* + #endif +#else/*ifdef POINTER64*/ + #define VT Partial_Reveal_VTable* +#endif + typedef void *Thread_Handle; #define GC_CLASS_FLAG_FINALIZER 1 @@ -75,7 +89,7 @@ #define GCVT_ALIGNMENT 8 #define GCVT_ALIGN_MASK (GCVT_ALIGNMENT-1) -typedef POINTER_SIZE_INT Obj_Info_Type; +typedef uint32 Obj_Info_Type; typedef struct GC_VTable_Info { @@ -103,20 +117,48 @@ } GC_VTable_Info; typedef struct Partial_Reveal_VTable { + //--Fixme: emt64 GC_VTable_Info *gcvt; } Partial_Reveal_VTable; typedef struct Partial_Reveal_Object { - Partial_Reveal_VTable *vt_raw; + VT vt_raw; Obj_Info_Type obj_info; } Partial_Reveal_Object; typedef struct Partial_Reveal_Array { - Partial_Reveal_VTable *vt_raw; + VT vt_raw; Obj_Info_Type obj_info; unsigned int array_len; } Partial_Reveal_Array; +////////////////////////////////////////// +//Compress vtable related!/////////////////// +////////////////////////////////////////// +extern POINTER_SIZE_INT vtable_base; + +#ifdef COMPRESS_VTABLE +FORCE_INLINE VT compress_vt(Partial_Reveal_VTable* vt) +{ + assert(vt); + return (VT)((POINTER_SIZE_INT)vt - vtable_base); +} + +FORCE_INLINE Partial_Reveal_VTable* uncompress_vt(VT vt) +{ + assert(vt); + return (Partial_Reveal_VTable*)((POINTER_SIZE_INT)vt + vtable_base); +} +#else/*ifdef COMPRESS_VTABLE*/ + +FORCE_INLINE VT compress_vt(Partial_Reveal_VTable* vt) +{ return (VT)vt; } + +FORCE_INLINE Partial_Reveal_VTable* uncompress_vt(VT vt) +{ return (Partial_Reveal_VTable*) vt; } +#endif + + inline 
Obj_Info_Type get_obj_info_raw(Partial_Reveal_Object *obj) { assert(obj); return obj->obj_info; } @@ -138,50 +180,56 @@ inline Obj_Info_Type *get_obj_info_addr(Partial_Reveal_Object *obj) { assert(obj); return &obj->obj_info; } -inline Partial_Reveal_VTable *obj_get_vt_raw(Partial_Reveal_Object *obj) +inline VT obj_get_vt_raw(Partial_Reveal_Object *obj) { assert(obj && obj->vt_raw); return obj->vt_raw; } -inline Partial_Reveal_VTable **obj_get_vt_addr(Partial_Reveal_Object *obj) +inline VT *obj_get_vt_addr(Partial_Reveal_Object *obj) { assert(obj && obj->vt_raw); return &obj->vt_raw; } -inline Partial_Reveal_VTable *obj_get_vt(Partial_Reveal_Object *obj) -{ assert(obj && obj->vt_raw); return (Partial_Reveal_VTable *)((POINTER_SIZE_INT)obj->vt_raw & ~CONST_MARK_BIT); } +/*Fixme: emt64*/ +inline VT obj_get_vt(Partial_Reveal_Object *obj) +{ assert(obj && obj->vt_raw); return (VT)((POINTER_SIZE_INT)obj->vt_raw & ~CLEAR_VT_MARK); } -inline void obj_set_vt(Partial_Reveal_Object *obj, Allocation_Handle ah) -{ assert(obj && ah); obj->vt_raw = (Partial_Reveal_VTable *)ah; } +inline void obj_set_vt(Partial_Reveal_Object *obj, VT ah) +{ assert(obj && ah); obj->vt_raw = ah; } -inline GC_VTable_Info *vtable_get_gcvt_raw(Partial_Reveal_VTable *vt) +/*Fixme: emt64, we should check whether gcvt is compressed first!*/ +inline GC_VTable_Info *vtable_get_gcvt_raw(Partial_Reveal_VTable* vt) { assert(vt && vt->gcvt); return vt->gcvt; } -inline GC_VTable_Info *vtable_get_gcvt(Partial_Reveal_VTable *vt) -{ assert(vt && vt->gcvt); return (GC_VTable_Info*)((POINTER_SIZE_INT)vt->gcvt & GC_CLASS_FLAGS_MASK); } +inline GC_VTable_Info *vtable_get_gcvt(Partial_Reveal_VTable* vt) +{ + assert(vt && vt->gcvt); + return (GC_VTable_Info*)((POINTER_SIZE_INT)vt->gcvt & GC_CLASS_FLAGS_MASK); +} inline void vtable_set_gcvt(Partial_Reveal_VTable *vt, GC_VTable_Info *new_gcvt) +/*Fixme: emt64*/ { assert(vt && new_gcvt); vt->gcvt = new_gcvt; } inline GC_VTable_Info *obj_get_gcvt_raw(Partial_Reveal_Object *obj) { - Partial_Reveal_VTable *vt = obj_get_vt(obj); - return vtable_get_gcvt_raw(vt); + Partial_Reveal_VTable *vtable = uncompress_vt(obj_get_vt(obj)); + return vtable_get_gcvt_raw(vtable); } inline GC_VTable_Info *obj_get_gcvt(Partial_Reveal_Object *obj) { - Partial_Reveal_VTable *vt = obj_get_vt(obj); - return vtable_get_gcvt(vt); + Partial_Reveal_VTable* vtable = uncompress_vt(obj_get_vt(obj) ); + return vtable_get_gcvt(vtable); } inline Boolean object_has_ref_field(Partial_Reveal_Object *obj) { GC_VTable_Info *gcvt = obj_get_gcvt_raw(obj); - return (POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_REFS; + return (Boolean)((POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_REFS); } inline Boolean object_has_ref_field_before_scan(Partial_Reveal_Object *obj) { - Partial_Reveal_VTable *vt = obj_get_vt_raw(obj); + Partial_Reveal_VTable *vt = uncompress_vt(obj_get_vt_raw(obj)); GC_VTable_Info *gcvt = vtable_get_gcvt_raw(vt); - return (POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_REFS; + return (Boolean)((POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_REFS); } inline unsigned int object_ref_field_num(Partial_Reveal_Object *obj) @@ -193,7 +241,7 @@ inline Boolean object_is_array(Partial_Reveal_Object *obj) { GC_VTable_Info *gcvt = obj_get_gcvt_raw(obj); - return ((POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_ARRAY); + return (Boolean)((POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_ARRAY); } inline Boolean obj_is_primitive_array(Partial_Reveal_Object *obj) @@ -244,8 +292,10 @@ inline Boolean type_has_finalizer(Partial_Reveal_VTable *vt) { GC_VTable_Info *gcvt = vtable_get_gcvt_raw(vt); 
- return (POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_FINALIZER; + return (Boolean)((POINTER_SIZE_INT)gcvt & GC_CLASS_FLAG_FINALIZER); } #endif //#ifndef _GC_TYPES_H_ + + Index: src/common/gc_for_vm.cpp =================================================================== --- src/common/gc_for_vm.cpp (revision 519535) +++ src/common/gc_for_vm.cpp (working copy) @@ -20,6 +20,7 @@ #include #include "vm_threads.h" +#include "compressed_ref.h" #include "../gen/gen.h" #include "interior_pointer.h" @@ -77,15 +78,27 @@ p_global_gc = NULL; } +#ifdef COMPRESS_REFERENCE +Boolean gc_supports_compressed_references() +{ + vtable_base = vm_get_vtable_base(); + return TRUE; +} +#endif + /* this interface need reconsidering. is_pinned is unused. */ void gc_add_root_set_entry(Managed_Object_Handle *ref, Boolean is_pinned) -{ + Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)ref; Partial_Reveal_Object* p_obj = *p_ref; /* we don't enumerate NULL reference and nos_boundary FIXME:: nos_boundary is a static field in GCHelper.java for fast write barrier, not a real object reference this should be fixed that magic Address field should not be enumerated. */ +#ifdef COMPRESS_REFERENCE + if (p_obj == (Partial_Reveal_Object*)HEAP_NULL || p_obj == NULL || p_obj == nos_boundary ) return; +#else if (p_obj == NULL || p_obj == nos_boundary ) return; +#endif assert( !obj_is_marked_in_vt(p_obj)); /* for Minor_collection, it's possible for p_obj be forwarded in non-gen mark-forward GC. The forward bit is actually last cycle's mark bit. @@ -101,6 +114,16 @@ add_root_set_entry_interior_pointer(slot, offset, is_pinned); } +void gc_add_compressed_root_set_entry(REF* ref, Boolean is_pinned) +{ + REF *p_ref = (REF *)ref; + if(*p_ref == COMPRESSED_NULL) return; + Partial_Reveal_Object* p_obj = read_slot(p_ref); + assert(!obj_is_marked_in_vt(p_obj)); + assert( address_belongs_to_gc_heap(p_obj, p_global_gc)); + gc_compressed_rootset_add_entry(p_global_gc, p_ref); +} + /* VM to force GC */ void gc_force_gc() { @@ -182,3 +205,4 @@ + Index: src/common/gc_metadata.cpp =================================================================== --- src/common/gc_metadata.cpp (revision 519535) +++ src/common/gc_metadata.cpp (working copy) @@ -22,6 +22,8 @@ #include "interior_pointer.h" #include "../finalizer_weakref/finalizer_weakref.h" #include "gc_block.h" +#include "compressed_ref.h" +#include "../utils/sync_stack.h" #define GC_METADATA_SIZE_BYTES (1*MB) #define GC_METADATA_EXTEND_SIZE_BYTES (1*MB) @@ -64,11 +66,12 @@ gc_metadata.free_set_pool = sync_pool_create(); /* initialize free rootset pool so that mutators can use them */ for(; imark_task_pool); sync_pool_destruct(metadata->free_set_pool); - sync_pool_destruct(metadata->gc_rootset_pool); - sync_pool_destruct(metadata->mutator_remset_pool); + sync_pool_destruct(metadata->gc_rootset_pool); + sync_pool_destruct(metadata->gc_uncompressed_rootset_pool); + sync_pool_destruct(metadata->mutator_remset_pool); sync_pool_destruct(metadata->collector_remset_pool); sync_pool_destruct(metadata->collector_repset_pool); @@ -106,12 +110,13 @@ unlock(metadata->alloc_lock); return block; } - + unsigned int num_alloced = metadata->num_alloc_segs; if(num_alloced == GC_METADATA_SEGMENT_NUM){ printf("Run out of GC metadata, please give it more segments!\n"); exit(0); } + unsigned int seg_size = GC_METADATA_EXTEND_SIZE_BYTES + METADATA_BLOCK_SIZE_BYTES; void *new_segment = STD_MALLOC(seg_size); memset(new_segment, 0, seg_size); @@ -145,6 +150,7 @@ block = pool_get_entry(pool); unlock(metadata->alloc_lock); + return 
block; } @@ -161,10 +167,10 @@ while(root_set){ POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + REF* p_ref = (REF* )*iter; iter = vector_block_iterator_advance(root_set,iter); - Partial_Reveal_Object* p_obj = *p_ref; + Partial_Reveal_Object* p_obj = read_slot(p_ref); if(IS_MOVE_COMPACT){ if(obj_is_moved(p_obj)) *p_ref = obj_get_fw_in_table(p_obj); @@ -177,7 +183,7 @@ * since those which can be scanned in MOS & NOS must have been set fw bit in oi. */ assert(address_belongs_to_gc_heap(obj_get_fw_in_oi(p_obj), gc)); - *p_ref = obj_get_fw_in_oi(p_obj); + write_slot(p_ref , obj_get_fw_in_oi(p_obj)); } } } @@ -192,20 +198,25 @@ GC* gc = collector->gc; GC_Metadata* metadata = gc->metadata; - /* generational MINOR_COLLECTION doesn't need rootset update, but need reset */ - if( gc->collect_kind != MINOR_COLLECTION ) /* MINOR but not forwarding */ + /* MINOR_COLLECTION doesn't need rootset update, but need reset */ + if( !gc_match_kind(gc, MINOR_COLLECTION)){ gc_update_repointed_sets(gc, metadata->gc_rootset_pool); - else - gc_set_pool_clear(metadata->gc_rootset_pool); - #ifndef BUILD_IN_REFERENT - gc_update_finref_repointed_refs(gc); + gc_update_finref_repointed_refs(gc); #endif + } else { + gc_set_pool_clear(metadata->gc_rootset_pool); + } +#ifdef COMPRESS_REFERENCE + gc_fix_uncompressed_rootset(gc); +#endif + + update_rootset_interior_pointer(); /* it was pointing to the last root_set entry in gc_rootset_pool (before rem_sets). */ gc->root_set = NULL; - + return; } @@ -218,7 +229,9 @@ Pool* free_set_pool = metadata->free_set_pool; Vector_Block* root_set = NULL; - +#ifdef COMPRESS_REFERENCE + gc_set_uncompressed_rootset(gc); +#endif /* put back last rootset block */ pool_put_entry(gc_rootset_pool, gc->root_set); @@ -250,7 +263,7 @@ collector->rem_set = NULL; } - if( gc->collect_kind != MINOR_COLLECTION ){ + if( !gc_match_kind(gc, MINOR_COLLECTION )){ /* all the remsets are useless now */ /* clean and put back mutator remsets */ root_set = pool_get_entry( mutator_remset_pool ); @@ -293,11 +306,18 @@ { assert(pool_is_empty(gc_metadata.gc_rootset_pool)); assert(gc->root_set == NULL); - gc->root_set = free_set_pool_get_entry(&gc_metadata); - - assert(vector_block_is_empty(gc->root_set)); + gc->root_set = free_set_pool_get_entry(&gc_metadata); + assert(vector_block_is_empty(gc->root_set)); + +#ifdef COMPRESS_REFERENCE + assert(pool_is_empty(gc_metadata.gc_uncompressed_rootset_pool)); + assert(gc->uncompressed_root_set == NULL); + gc->uncompressed_root_set = free_set_pool_get_entry(&gc_metadata); + assert(vector_block_is_empty(gc->uncompressed_root_set)); +#endif + return; -} +} void gc_clear_remset(GC* gc) { @@ -341,3 +361,4 @@ return; } + Index: src/common/gc_metadata.h =================================================================== --- src/common/gc_metadata.h (revision 519535) +++ src/common/gc_metadata.h (working copy) @@ -39,6 +39,7 @@ /* FIXME:: the mutator remset pool can be merged with the rootset pool*/ Pool* free_set_pool; /* list of free buffers for rootsets remsets */ Pool* gc_rootset_pool; /* list of root sets for enumeration */ + Pool* gc_uncompressed_rootset_pool; /* list of uncompressed root sets for enumeration */ Pool* mutator_remset_pool; /* list of remsets generated by app during execution */ Pool* collector_remset_pool; /* list of remsets generated by gc during collection */ Pool* collector_repset_pool; /* list of repointed ref slot sets */ @@ 
-71,7 +72,7 @@ { Vector_Block* set = pool_get_entry(set_pool); while(set){ - vector_block_clear(set); + vector_block_clear(set); pool_put_entry(gc_metadata.free_set_pool, set); set = pool_get_entry(set_pool); } @@ -158,8 +159,35 @@ assert(collector->trace_stack); } + +#ifdef COMPRESS_REFERENCE + inline void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref) { + assert( p_ref < gc_heap_base(gc) || p_ref >= gc_heap_ceiling(gc)); + + GC_Metadata *metadata = gc->metadata; + Vector_Block *uncompressed_root_set = gc->uncompressed_root_set; + assert(uncompressed_root_set); + + Partial_Reveal_Object *p_obj = *p_ref; + REF ref = compress_ref(p_obj); + + /* construct an Uncompressed_Root */ + vector_block_add_entry(uncompressed_root_set, (POINTER_SIZE_INT)p_ref); + assert(!vector_block_is_full(uncompressed_root_set)); + vector_block_add_entry(uncompressed_root_set, (POINTER_SIZE_INT)ref); + + if(!vector_block_is_full(uncompressed_root_set)) return; + + pool_put_entry(metadata->gc_uncompressed_rootset_pool, uncompressed_root_set); + gc->uncompressed_root_set = free_set_pool_get_entry(metadata); + assert(gc->uncompressed_root_set); +} + +#else /* COMPRESS_REFERENCE */ +inline void gc_rootset_add_entry(GC* gc, Partial_Reveal_Object** p_ref) +{ assert( p_ref < gc_heap_base_address() || p_ref >= gc_heap_ceiling_address()); Vector_Block* root_set = gc->root_set; @@ -171,5 +199,6 @@ gc->root_set = free_set_pool_get_entry(&gc_metadata); assert(gc->root_set); } +#endif #endif /* #ifndef _GC_METADATA_H_ */ Index: src/common/interior_pointer.cpp =================================================================== --- src/common/interior_pointer.cpp (revision 519535) +++ src/common/interior_pointer.cpp (working copy) @@ -58,8 +58,8 @@ unsigned int i; for( i = 0; ibase)), FALSE); + slot_offset_entry* entry_traverser = (slot_offset_entry*)&interior_pointer_set[i]; + gc_add_root_set_entry((Managed_Object_Handle*)(&(entry_traverser->base)), FALSE); } } @@ -68,14 +68,14 @@ unsigned int i; for( i = 0; islot; - Partial_Reveal_Object* root_base = (Partial_Reveal_Object*)entry_traverser->base; - unsigned int root_offset = entry_traverser->offset; - void *new_slot_contents = (void *)((Byte*)root_base + root_offset); - *root_slot = new_slot_contents; + slot_offset_entry* entry_traverser = (slot_offset_entry*)&interior_pointer_set[i]; + void** root_slot = entry_traverser->slot; + Partial_Reveal_Object* root_base = (Partial_Reveal_Object*)entry_traverser->base; + unsigned int root_offset = entry_traverser->offset; + void *new_slot_contents = (void *)((Byte*)root_base + root_offset); + *root_slot = new_slot_contents; } - //can not reset the table here, for the rootset may be updated multi times + //can not reset the table here, for the rootset may be updated multi times } void gc_reset_interior_pointer_table() @@ -84,3 +84,4 @@ //this function is for the case of out of memory which need to call update_rootset_interior_pointer multi-times } + Index: src/common/large_pages.cpp =================================================================== --- src/common/large_pages.cpp (revision 519535) +++ src/common/large_pages.cpp (working copy) @@ -134,6 +134,7 @@ } else { printf("GC large_page: Not enough reserved large pages.\n"); } + printf("GC large_page: Large pages can be only allocated.\n"); } } @@ -148,9 +149,9 @@ int fd = open(buf, O_CREAT | O_RDWR, 0700); if (fd == -1){ - printf("GC large_page: Can't open Mount hugetlbfs with: mount none /mnt/huge -t hugetlbfs.\n"); + printf("GC large_page: Can't open Mount hugetlbfs 
with: mount none /mnt/huge -t hugetlbfs.\n"); printf("GC large_page: Check you have appropriate permissions to /mnt/huge.\n"); - printf("GC large_page: Use command line switch -Dgc.use_large_page=/mnt/huge.\n"); + printf("GC large_page: Use command line switch -Dgc.lp=/mnt/huge.\n"); free(buf); return NULL; } @@ -171,7 +172,7 @@ void* alloc_large_pages(size_t size, const char* hint){ parse_proc_meminfo(size); void* alloc_addr = mmap_large_pages(size, hint); - if(alloc_addr == NULL || ((POINTER_SIZE_INT)alloc_addr%proc_huge_page_size!=0)){ + if(alloc_addr == NULL){ printf("GC large_page: Large pages allocation failed.\n"); return NULL; } Index: src/common/mark_scan_pool.cpp =================================================================== --- src/common/mark_scan_pool.cpp (revision 519535) +++ src/common/mark_scan_pool.cpp (working copy) @@ -23,30 +23,30 @@ #include "../gen/gen.h" #include "../finalizer_weakref/finalizer_weakref.h" -static FORCE_INLINE void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref) +static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref) { - Partial_Reveal_Object* p_obj = *p_ref; - if(p_obj==NULL) return; + REF ref = *p_ref; + if(ref == COMPRESSED_NULL) return; + Partial_Reveal_Object *p_obj = uncompress_ref(ref); if(obj_mark_in_vt(p_obj)) collector_tracestack_push(collector, p_obj); return; } - static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj) { if( !object_has_ref_field(p_obj) ) return; - Partial_Reveal_Object **p_ref; + REF *p_ref; if (object_is_array(p_obj)) { /* scan array object */ Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj; unsigned int array_length = array->array_len; - p_ref = (Partial_Reveal_Object**)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); + p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); for (unsigned int i = 0; i < array_length; i++) { scan_slot(collector, p_ref+i); @@ -122,10 +122,10 @@ while(root_set){ POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + REF *p_ref = (REF *)*iter; iter = vector_block_iterator_advance(root_set,iter); - Partial_Reveal_Object* p_obj = *p_ref; + Partial_Reveal_Object *p_obj = read_slot(p_ref); /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */ assert(p_obj!=NULL); /* we have to mark the object before put it into marktask, because Index: src/common/space_tuner.cpp =================================================================== --- src/common/space_tuner.cpp (revision 519535) +++ src/common/space_tuner.cpp (working copy) @@ -34,8 +34,8 @@ /*Now just prepare the alloc_size field of mspace, used to compute new los size.*/ void gc_space_tune_prepare(GC* gc, unsigned int cause) { - if(gc->collect_kind == MINOR_COLLECTION) - return; + if(gc_match_kind(gc, MINOR_COLLECTION)) + return; Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc); Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); @@ -76,7 +76,7 @@ void gc_space_tune_before_gc(GC* gc, unsigned int cause) { - if(gc->collect_kind == MINOR_COLLECTION) return; + if(gc_match_kind(gc, MINOR_COLLECTION)) return; Space_Tuner* tuner = gc->tuner; @@ -123,21 +123,18 @@ tuner->tuning_size = tuner->least_tuning_size; - if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING; - - if(tuner->tuning_size < none_los_size) return; - - 
printf("Out of Memory!\n"); - assert(0); - exit(0); + if((tuner->tuning_size + gc->num_active_collectors * GC_BLOCK_SIZE_BYTES) >= none_los_size){ + tuner->tuning_size = 0; + } + if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING; } } -void gc_space_tune_before_gc_simplified(GC* gc, unsigned int cause) +void gc_space_tune_before_gc_fixed_size(GC* gc, unsigned int cause) { - if((gc->collect_kind == MINOR_COLLECTION) || (cause != GC_CAUSE_LOS_IS_FULL) ) - return; + if(gc_match_kind(gc, MINOR_COLLECTION) || (cause != GC_CAUSE_LOS_IS_FULL) ) + return; Space_Tuner* tuner = gc->tuner; tuner->kind = TRANS_FROM_MOS_TO_LOS; @@ -190,16 +187,13 @@ if(tuner->tuning_size > none_los_size){ tuner->tuning_size = tuner->least_tuning_size; } - if(tuner->tuning_size > none_los_size){ - printf("Out of Memory!\n"); - assert(0); - exit(0); + if((tuner->tuning_size + gc->num_active_collectors * GC_BLOCK_SIZE_BYTES) >= none_los_size){ + tuner->tuning_size = 0; } - } /*Fixme: Should MOS heap_start must be 64k aligned?*/ - tuner->tuning_size = round_down_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT); + tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT); if(tuner->tuning_size == 0) tuner->kind = TRANS_NOTHING; return; @@ -207,7 +201,7 @@ void gc_space_tuner_reset(GC* gc) { - if((gc->collect_kind != MINOR_COLLECTION) && (gc->tuner->kind != TRANS_NOTHING)){ + if( !gc_match_kind(gc, MINOR_COLLECTION) && (gc->tuner->kind != TRANS_NOTHING)){ Space_Tuner* tuner = gc->tuner; memset(tuner, 0, sizeof(Space_Tuner)); } Index: src/common/space_tuner.h =================================================================== --- src/common/space_tuner.h (revision 519535) +++ src/common/space_tuner.h (working copy) @@ -69,7 +69,7 @@ void gc_space_tune_prepare(GC* gc, unsigned int cause); void gc_space_tune_before_gc(GC* gc, unsigned int cause); -void gc_space_tune_before_gc_simplified(GC* gc, unsigned int cause); +void gc_space_tune_before_gc_fixed_size(GC* gc, unsigned int cause); void gc_space_tuner_reset(GC* gc); void gc_space_tuner_initialize(GC* gc); Index: src/finalizer_weakref/finalizer_weakref.cpp =================================================================== --- src/finalizer_weakref/finalizer_weakref.cpp (revision 519535) +++ src/finalizer_weakref/finalizer_weakref.cpp (working copy) @@ -52,11 +52,9 @@ // clear the two least significant bits of p_obj first static inline Boolean gc_obj_is_dead(GC *gc, Partial_Reveal_Object *p_obj) { - unsigned int collect_kind = gc->collect_kind; - assert(p_obj); - if(collect_kind == MINOR_COLLECTION){ - if( gc_is_gen_mode()) + if(gc_match_kind(gc, MINOR_COLLECTION)){ + if(gc_is_gen_mode()) return obj_is_dead_in_gen_minor_gc(p_obj); else return obj_is_dead_in_nongen_minor_gc(p_obj); @@ -74,7 +72,7 @@ { assert(!gc_obj_is_dead(gc, p_obj)); - if(gc_is_gen_mode() && gc->collect_kind == MINOR_COLLECTION) + if(gc_is_gen_mode() && gc_match_kind(gc, MINOR_COLLECTION)) return fspace_obj_to_be_forwarded(p_obj); Space *space = space_of_addr(gc, p_obj); @@ -88,26 +86,27 @@ while(Vector_Block *block = pool_iterator_next(pool)){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; - if(*p_ref && obj_need_move(gc, *p_ref)) + REF* p_ref = (REF*)iter; + Partial_Reveal_Object* p_obj = read_slot(p_ref); + if(*p_ref && obj_need_move(gc, p_obj)) finref_repset_add_entry(gc, p_ref); } } finref_put_repset(gc); } 
-static inline void fallback_update_fw_ref(Partial_Reveal_Object **p_ref) +static inline void fallback_update_fw_ref(REF* p_ref) { if(!IS_FALLBACK_COMPACTION) return; - Partial_Reveal_Object *p_obj = *p_ref; + Partial_Reveal_Object *p_obj = read_slot(p_ref); if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){ assert(!obj_is_marked_in_vt(p_obj)); assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj))); p_obj = obj_get_fw_in_oi(p_obj); assert(p_obj); - *p_ref = p_obj; + write_slot(p_ref, p_obj); } } @@ -123,15 +122,20 @@ unsigned int block_has_ref = 0; POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; - fallback_update_fw_ref(p_ref); - Partial_Reveal_Object *p_obj = *p_ref; + REF* p_ref = (REF *)iter; + if(IS_FALLBACK_COMPACTION) + fallback_update_fw_ref(p_ref); // in case that this collection is FALLBACK_COLLECTION + Partial_Reveal_Object *p_obj = read_slot(p_ref); if(!p_obj) continue; if(gc_obj_is_dead(gc, p_obj)){ gc_add_finalizable_obj(gc, p_obj); - *p_ref = NULL; + *p_ref = (REF)NULL; } else { + if(gc_match_kind(gc, MINOR_COLLECTION) && obj_need_move(gc, p_obj)){ + assert(obj_is_fw_in_oi(p_obj)); + write_slot(p_ref, obj_get_fw_in_oi(p_obj)); + } ++block_has_ref; } } @@ -139,6 +143,9 @@ vector_block_clear(block); } gc_put_finalizable_objects(gc); + + if(!gc_match_kind(gc, MINOR_COLLECTION)) + finref_add_repset_from_pool(gc, obj_with_fin_pool); } extern void trace_obj_in_gen_fw(Collector *collector, void *p_ref); @@ -149,29 +156,28 @@ typedef void (* Trace_Object_Func)(Collector *collector, void *p_ref_or_obj); // clear the two least significant bits of p_obj first // add p_ref to repset -static inline void resurrect_obj_tree(Collector *collector, Partial_Reveal_Object **p_ref) +static inline void resurrect_obj_tree(Collector *collector, REF* p_ref) { GC *gc = collector->gc; GC_Metadata* metadata = gc->metadata; - unsigned int collect_kind = gc->collect_kind; - Partial_Reveal_Object *p_obj = *p_ref; + Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj && gc_obj_is_dead(gc, p_obj)); void *p_ref_or_obj = p_ref; Trace_Object_Func trace_object; /* set trace_object() function */ - if(collect_kind == MINOR_COLLECTION){ + if(gc_match_kind(gc, MINOR_COLLECTION)){ if(gc_is_gen_mode()) trace_object = trace_obj_in_gen_fw; else trace_object = trace_obj_in_nongen_fw; - } else if(collect_kind == MAJOR_COLLECTION){ + } else if(gc_match_kind(gc, MAJOR_COLLECTION)){ p_ref_or_obj = p_obj; trace_object = trace_obj_in_marking; obj_mark_in_vt(p_obj); } else { - assert(collect_kind == FALLBACK_COLLECTION); + assert(gc_match_kind(gc, FALLBACK_COLLECTION)); trace_object = trace_obj_in_fallback_marking; } @@ -185,13 +191,21 @@ POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block); while(!vector_block_iterator_end(task_block, iter)){ void* p_ref_or_obj = (void *)*iter; - assert((collect_kind!=MAJOR_COLLECTION && *(Partial_Reveal_Object **)p_ref_or_obj) - || (collect_kind==MAJOR_COLLECTION && p_ref_or_obj)); + assert((gc_match_kind(gc, MINOR_COLLECTION | FALLBACK_COLLECTION) && *(Partial_Reveal_Object **)p_ref_or_obj) + || (gc_match_kind(gc, MAJOR_COLLECTION) && p_ref_or_obj)); trace_object(collector, p_ref_or_obj); + if(collector->result == FALSE) break; /* force return */ + iter = vector_block_iterator_advance(task_block, iter); } vector_stack_clear(task_block); pool_put_entry(metadata->free_task_pool, 
task_block); + + if(collector->result == FALSE){ + gc_task_pool_clear(metadata->mark_task_pool); + break; /* force return */ + } + task_block = pool_get_entry(metadata->mark_task_pool); } @@ -205,83 +219,94 @@ { GC *gc = collector->gc; Finref_Metadata *metadata = gc->finref_metadata; - Pool *obj_with_fin_pool = metadata->obj_with_fin_pool; Pool *finalizable_obj_pool = metadata->finalizable_obj_pool; - unsigned int collect_kind = gc->collect_kind; - if(!finalizable_obj_pool_is_empty(gc)){ + if(finalizable_obj_pool_is_empty(gc)) + return; + + if(!gc_match_kind(gc, MINOR_COLLECTION)) finref_reset_repset(gc); - pool_iterator_init(finalizable_obj_pool); - while(Vector_Block *block = pool_iterator_next(finalizable_obj_pool)){ - POINTER_SIZE_INT *iter = vector_block_iterator_init(block); - for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; - Partial_Reveal_Object *p_obj = *p_ref; - assert(p_obj); - - /* - * In major & fallback collection we need record p_ref of the root dead obj to update it later. - * Because it is outside heap, we can't update in ref fixing. - * In minor collection p_ref of the root dead obj is automatically updated while tracing. - */ - if(collect_kind != MINOR_COLLECTION) - finref_repset_add_entry(gc, p_ref); - - /* Perhaps obj has been resurrected by previous resurrections */ - if(!gc_obj_is_dead(gc, p_obj)){ - if(gc->collect_kind == MINOR_COLLECTION && obj_need_move(gc, p_obj)) - *p_ref = obj_get_fw_in_oi(p_obj); - continue; - } - - resurrect_obj_tree(collector, p_ref); + pool_iterator_init(finalizable_obj_pool); + while(Vector_Block *block = pool_iterator_next(finalizable_obj_pool)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); + for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ + REF* p_ref = (REF *)iter; + Partial_Reveal_Object *p_obj = read_slot(p_ref); + assert(p_obj); + + /* In major & fallback collection we need record p_ref of the root dead obj to update it later. + * Because it is outside heap, we can't update in ref fixing. + * In minor collection p_ref of the root dead obj is automatically updated while tracing. 
+ */ + if(!gc_match_kind(gc, MINOR_COLLECTION)) + finref_repset_add_entry(gc, p_ref); + + /* Perhaps obj has been resurrected by previous resurrections */ + if(!gc_obj_is_dead(gc, p_obj)){ + if(gc_match_kind(gc, MINOR_COLLECTION) && obj_need_move(gc, p_obj)) + write_slot(p_ref, obj_get_fw_in_oi(p_obj)); + continue; } + + resurrect_obj_tree(collector, p_ref); + if(collector->result == FALSE){ + /* Resurrection fallback happens */ + assert(gc_match_kind(gc, MINOR_COLLECTION)); + return; /* force return */ + } } - metadata->pending_finalizers = TRUE; + } + if(!gc_match_kind(gc, MINOR_COLLECTION)) finref_put_repset(gc); - } + metadata->pending_finalizers = TRUE; - finref_add_repset_from_pool(gc, obj_with_fin_pool); - /* finalizable objects have been added to collector repset pool */ - //finref_add_repset_from_pool(collector, finalizable_obj_pool); + /* finalizable objs have been added to finref repset pool or updated by tracing */ } static void identify_dead_refs(GC *gc, Pool *pool) { - finref_reset_repset(gc); + if(!gc_match_kind(gc, MINOR_COLLECTION)) + finref_reset_repset(gc); pool_iterator_init(pool); while(Vector_Block *block = pool_iterator_next(pool)){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; - Partial_Reveal_Object *p_obj = *p_ref; + REF* p_ref = (REF*)iter; + Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj); - Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); + REF* p_referent_field = obj_get_referent_field(p_obj); + if(IS_FALLBACK_COMPACTION) fallback_update_fw_ref(p_referent_field); - Partial_Reveal_Object *p_referent = *p_referent_field; + Partial_Reveal_Object *p_referent = read_slot(p_referent_field); if(!p_referent){ // referent field has been cleared - *p_ref = NULL; + *p_ref = (REF)NULL; continue; } if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive if(obj_need_move(gc, p_referent)) - finref_repset_add_entry(gc, p_referent_field); - *p_ref = NULL; + if(gc_match_kind(gc, MINOR_COLLECTION)){ + assert(obj_is_fw_in_oi(p_referent)); + write_slot(p_referent_field, (obj_get_fw_in_oi(p_referent))); + } else { + finref_repset_add_entry(gc, p_referent_field); + } + *p_ref = (REF)NULL; continue; } - *p_referent_field = NULL; /* referent is weakly reachable: clear the referent field */ + *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */ } } - finref_put_repset(gc); - - finref_add_repset_from_pool(gc, pool); + if(!gc_match_kind(gc, MINOR_COLLECTION)){ + finref_put_repset(gc); + finref_add_repset_from_pool(gc, pool); + } } static void identify_dead_softrefs(Collector *collector) { GC *gc = collector->gc; - if(gc->collect_kind == MINOR_COLLECTION){ + if(gc_match_kind(gc, MINOR_COLLECTION)){ assert(softref_pool_is_empty(gc)); return; } @@ -308,18 +333,20 @@ Finref_Metadata *metadata = gc->finref_metadata; Pool *phanref_pool = metadata->phanref_pool; - finref_reset_repset(gc); + if(!gc_match_kind(gc, MINOR_COLLECTION)) + finref_reset_repset(gc); // collector_reset_repset(collector); pool_iterator_init(phanref_pool); while(Vector_Block *block = pool_iterator_next(phanref_pool)){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; - Partial_Reveal_Object *p_obj 
= *p_ref; + Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref); assert(p_obj); - Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); + REF* p_referent_field = obj_get_referent_field(p_obj); + if(IS_FALLBACK_COMPACTION) fallback_update_fw_ref(p_referent_field); - Partial_Reveal_Object *p_referent = *p_referent_field; + Partial_Reveal_Object *p_referent = read_slot(p_referent_field); if(!p_referent){ // referent field has been cleared *p_ref = NULL; @@ -327,14 +354,19 @@ } if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive if(obj_need_move(gc, p_referent)) - finref_repset_add_entry(gc, p_referent_field); - *p_ref = NULL; + if(gc_match_kind(gc, MINOR_COLLECTION)){ + assert(obj_is_fw_in_oi(p_referent)); + write_slot(p_referent_field, (obj_get_fw_in_oi(p_referent))); + } else { + finref_repset_add_entry(gc, p_referent_field); + } + *p_ref = (REF)NULL; continue; } - *p_referent_field = NULL; + *p_referent_field = (REF)NULL; /* Phantom status: for future use * if((unsigned int)p_referent & PHANTOM_REF_ENQUEUE_STATUS_MASK){ - * // enqueued but not explicitly cleared OR pending for enqueuing + * // enqueued but not explicitly cleared OR pending for enqueueing * *iter = NULL; * } * resurrect_obj_tree(collector, p_referent_field); @@ -342,9 +374,10 @@ } } // collector_put_repset(collector); - finref_put_repset(gc); - - finref_add_repset_from_pool(gc, phanref_pool); + if(!gc_match_kind(gc, MINOR_COLLECTION)){ + finref_put_repset(gc); + finref_add_repset_from_pool(gc, phanref_pool); + } } static void put_finalizable_obj_to_vm(GC *gc) @@ -356,7 +389,7 @@ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ assert(*iter); - Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter; + Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter); vm_finalize_object(p_obj); iter = vector_block_iterator_advance(block, iter); } @@ -365,14 +398,14 @@ } } -static inline void put_dead_weak_refs_to_vm(GC *gc, Pool *reference_pool) +static inline void put_dead_weak_refs_to_vm(GC *gc, Pool *ref_pool) { Pool *free_pool = gc->finref_metadata->free_pool; - while(Vector_Block *block = pool_get_entry(reference_pool)){ + while(Vector_Block *block = pool_get_entry(ref_pool)){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ - Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter; + Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter); if(p_obj) vm_enqueue_reference(p_obj); iter = vector_block_iterator_advance(block, iter); @@ -384,25 +417,115 @@ static void put_dead_refs_to_vm(GC *gc) { + Finref_Metadata *metadata = gc->finref_metadata; + if(softref_pool_is_empty(gc) && weakref_pool_is_empty(gc) - && phanref_pool_is_empty(gc)){ + && phanref_pool_is_empty(gc) + && pool_is_empty(metadata->fallback_ref_pool)){ gc_clear_weakref_pools(gc); return; } - gc->finref_metadata->pending_weakrefs = TRUE; + put_dead_weak_refs_to_vm(gc, metadata->softref_pool); + put_dead_weak_refs_to_vm(gc, metadata->weakref_pool); + put_dead_weak_refs_to_vm(gc, metadata->phanref_pool); - Pool *softref_pool = gc->finref_metadata->softref_pool; - Pool *weakref_pool = gc->finref_metadata->weakref_pool; - Pool *phanref_pool = gc->finref_metadata->phanref_pool; - Pool *free_pool = gc->finref_metadata->free_pool; + if(/*IS_FALLBACK_COMPACTION && */!pool_is_empty(metadata->fallback_ref_pool)) + put_dead_weak_refs_to_vm(gc, metadata->fallback_ref_pool); + 
metadata->pending_weakrefs = TRUE; +} + +/* Finalizable objs fall back to objs with fin when resurrection fallback happens */ +static void finalizable_objs_fallback(GC *gc) +{ + Finref_Metadata *metadata = gc->finref_metadata; + Pool *finalizable_obj_pool = metadata->finalizable_obj_pool; + Pool *obj_with_fin_pool = metadata->obj_with_fin_pool; + Vector_Block *obj_with_fin_block = pool_get_entry(obj_with_fin_pool); + assert(obj_with_fin_block); - put_dead_weak_refs_to_vm(gc, softref_pool); - put_dead_weak_refs_to_vm(gc, weakref_pool); - put_dead_weak_refs_to_vm(gc, phanref_pool); + Boolean pending_finalizers = FALSE; + + while(Vector_Block *block = pool_get_entry(finalizable_obj_pool)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); + for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ + REF* p_ref = (REF*)iter; + Partial_Reveal_Object *p_obj = read_slot(p_ref); + assert(p_obj); + /* Perhaps obj has been resurrected by previous resurrections */ + if(!gc_obj_is_dead(gc, p_obj) && obj_belongs_to_nos(p_obj)){ + if(!gc_is_gen_mode() || fspace_obj_to_be_forwarded(p_obj)){ + write_slot(p_ref , obj_get_fw_in_oi(p_obj)); + p_obj = read_slot(p_ref); + } + } + gc_add_finalizer(gc, obj_with_fin_block, p_obj); // Perhaps p_obj has been forwarded, so we use *p_ref rather than p_obj + } + } + + pool_put_entry(obj_with_fin_pool, obj_with_fin_block); + metadata->pending_finalizers = pending_finalizers; } +static void dead_weak_refs_fallback(GC *gc, Pool *ref_pool) +{ + Finref_Metadata *metadata = gc->finref_metadata; + Pool *free_pool = metadata->free_pool; + Pool *fallback_ref_pool = metadata->fallback_ref_pool; + + Vector_Block *fallback_ref_block = finref_get_free_block(gc); + while(Vector_Block *block = pool_get_entry(ref_pool)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(block); + while(!vector_block_iterator_end(block, iter)){ + Partial_Reveal_Object *p_obj = read_slot((REF*)iter); + if(p_obj) + finref_add_fallback_ref(gc, fallback_ref_block, p_obj); + iter = vector_block_iterator_advance(block, iter); + } + vector_block_clear(block); + pool_put_entry(free_pool, block); + } + + pool_put_entry(fallback_ref_pool, fallback_ref_block); +} + +/* Record softrefs and weakrefs whose referents are dead. + * In fallback collection these refs will not be considered for enqueueing again, + * since their referent fields have been cleared by identify_dead_refs(). + */ +static void dead_refs_fallback(GC *gc) +{ + Finref_Metadata *metadata = gc->finref_metadata; + + if(!softref_pool_is_empty(gc) || !weakref_pool_is_empty(gc)) + metadata->pending_weakrefs = TRUE; + + dead_weak_refs_fallback(gc, metadata->softref_pool); + dead_weak_refs_fallback(gc, metadata->weakref_pool); + + gc_clear_weakref_pools(gc); +} + +static void resurrection_fallback_handler(GC *gc) +{ + Finref_Metadata *metadata = gc->finref_metadata; + + /* Repset pool should be empty, because we don't add anything to this pool in Minor Collection. 
*/ + assert(pool_is_empty(metadata->repset_pool)); + + finalizable_objs_fallback(gc); + dead_refs_fallback(gc); + + assert(pool_is_empty(metadata->finalizable_obj_pool)); + assert(pool_is_empty(metadata->softref_pool)); + assert(pool_is_empty(metadata->weakref_pool)); + assert(pool_is_empty(metadata->phanref_pool)); + + assert(metadata->finalizable_obj_set == NULL); + assert(metadata->repset == NULL); +} + void collector_identify_finref(Collector *collector) { GC *gc = collector->gc; @@ -412,9 +535,20 @@ identify_dead_weakrefs(collector); identify_finalizable_objects(collector); resurrect_finalizable_objects(collector); + gc->collect_result = gc_collection_result(gc); + if(!gc->collect_result){ + assert(gc_match_kind(gc, MINOR_COLLECTION)); + resurrection_fallback_handler(gc); + return; + } identify_dead_phanrefs(collector); } +void fallback_finref_cleanup(GC *gc) +{ + gc_set_weakref_sets(gc); + gc_clear_weakref_pools(gc); +} void gc_put_finref_to_vm(GC *gc) { put_dead_refs_to_vm(gc); @@ -438,7 +572,7 @@ while(Vector_Block *block = pool_get_entry(obj_with_fin_pool)){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); while(!vector_block_iterator_end(block, iter)){ - Managed_Object_Handle p_obj = (Managed_Object_Handle)*iter; + Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter); if(p_obj) vm_finalize_object(p_obj); iter = vector_block_iterator_advance(block, iter); @@ -454,24 +588,30 @@ while(Vector_Block *block = pool_get_entry(pool)){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ - Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; - Partial_Reveal_Object *p_obj = *p_ref; + REF* p_ref = (REF*)iter; + Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj); - Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); + REF* p_referent_field = obj_get_referent_field(p_obj); + if(IS_FALLBACK_COMPACTION) fallback_update_fw_ref(p_referent_field); - Partial_Reveal_Object *p_referent = *p_referent_field; + Partial_Reveal_Object* p_referent = read_slot(p_referent_field); if(!p_referent){ // referent field has been cleared - *p_ref = NULL; + *p_ref = (REF)NULL; continue; } if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive if(obj_need_move(gc, p_referent)) - finref_repset_add_entry(gc, p_referent_field); - *p_ref = NULL; + if(gc_match_kind(gc, MINOR_COLLECTION)){ + assert(obj_is_fw_in_oi(p_referent)); + write_slot(p_referent_field , obj_get_fw_in_oi(p_referent)); + } else { + finref_repset_add_entry(gc, p_referent_field); + } + *p_ref = (REF)NULL; continue; } - *p_referent_field = NULL; /* referent is weakly reachable: clear the referent field */ + *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */ } } } @@ -480,64 +620,105 @@ { Finref_Metadata *metadata = gc->finref_metadata; - finref_reset_repset(gc); + if(!gc_match_kind(gc, MINOR_COLLECTION)) + finref_reset_repset(gc); update_referent_field_ignore_finref(gc, metadata->softref_pool); update_referent_field_ignore_finref(gc, metadata->weakref_pool); update_referent_field_ignore_finref(gc, metadata->phanref_pool); - finref_put_repset(gc); + if(!gc_match_kind(gc, MINOR_COLLECTION)) + finref_put_repset(gc); } -static void move_compaction_update_referent_field(GC *gc, Partial_Reveal_Object **p_referent_field) +static void move_compaction_update_ref(GC *gc, REF* p_ref) { - if(!address_belongs_to_gc_heap((void 
*)p_referent_field, gc)){
-    *p_referent_field = obj_get_fw_in_table(*p_referent_field);
-    return;
-  }
-
-  Space *ref_space = space_of_addr(gc, p_referent_field);
-  if(ref_space->move_object){
+  /* If p_ref belongs to heap, it must be a referent field pointer */
+  if(address_belongs_to_gc_heap((void *)p_ref, gc) && (space_of_addr(gc, p_ref))->move_object){
     unsigned int offset = get_gc_referent_offset();
-    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object *)((POINTER_SIZE_INT)p_referent_field - offset);
-    Partial_Reveal_Object *p_new_ref = obj_get_fw_in_table(p_old_ref);
-    p_referent_field = (Partial_Reveal_Object **)((POINTER_SIZE_INT)p_new_ref + offset);
+    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object *)((POINTER_SIZE_INT)p_ref - offset);
+    Partial_Reveal_Object *p_new_ref = uncompress_ref(obj_get_fw_in_table(p_old_ref));
+    p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
   }
-  assert(space_of_addr(gc, *p_referent_field)->move_object);
-  *p_referent_field = obj_get_fw_in_table(*p_referent_field);
+  Partial_Reveal_Object* p_obj = read_slot(p_ref);
+  assert(space_of_addr(gc, (void*)p_obj)->move_object);
+  *p_ref = obj_get_fw_in_table(p_obj);
 }

 extern Boolean IS_MOVE_COMPACT;

-void gc_update_finref_repointed_refs(GC *gc)
+/* pointer_addr_in_pool indicates whether the pool entries are slot addresses (p_ref) or object pointers (p_obj) */
+static void destructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool)
 {
-  unsigned int collect_kind = gc->collect_kind;
-  Finref_Metadata* metadata = gc->finref_metadata;
-  Pool *repset_pool = metadata->repset_pool;
+  Finref_Metadata *metadata = gc->finref_metadata;
+  REF* p_ref;
+  Partial_Reveal_Object *p_obj;

   /* NOTE:: this is destructive to the root sets. */
-  Vector_Block* repset = pool_get_entry(repset_pool);
-
+  Vector_Block *repset = pool_get_entry(pool);
   while(repset){
     POINTER_SIZE_INT *iter = vector_block_iterator_init(repset);
     for(; !vector_block_iterator_end(repset,iter); iter = vector_block_iterator_advance(repset,iter)){
-      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object** )*iter;
-      Partial_Reveal_Object *p_obj = *p_ref;
+      if(pointer_addr_in_pool)
+        p_ref = (REF*)*iter;
+      else
+        p_ref = (REF*)iter;
+      p_obj = read_slot(p_ref);

       if(!IS_MOVE_COMPACT){
+        assert(obj_is_marked_in_vt(p_obj));
         assert(obj_is_fw_in_oi(p_obj));
-        assert(collect_kind == MINOR_COLLECTION || obj_is_marked_in_vt(p_obj));
-        *p_ref = obj_get_fw_in_oi(p_obj);
+        write_slot(p_ref , obj_get_fw_in_oi(p_obj));
       } else {
-        move_compaction_update_referent_field(gc, p_ref);
+        move_compaction_update_ref(gc, p_ref);
       }
     }
     vector_block_clear(repset);
     pool_put_entry(metadata->free_pool, repset);
-    repset = pool_get_entry(repset_pool);
-  }
+    repset = pool_get_entry(pool);
+  }
+}
+
+/* pointer_addr_in_pool indicates whether the pool entries are slot addresses (p_ref) or object pointers (p_obj) */
+static void nondestructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool)
+{
+  Finref_Metadata *metadata = gc->finref_metadata;
+  REF* p_ref;
+  Partial_Reveal_Object *p_obj;

-  return;
+  /* NOTE:: this is nondestructive to the root sets.
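+   * It walks the pool with pool_iterator_next() instead of draining it with pool_get_entry(), so the blocks remain in the pool afterwards.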
*/ + pool_iterator_init(pool); + while(Vector_Block *repset = pool_iterator_next(pool)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(repset); + for(; !vector_block_iterator_end(repset,iter); iter = vector_block_iterator_advance(repset,iter)){ + if(pointer_addr_in_pool) + p_ref = (REF*)*iter; + else + p_ref = (REF*)iter; + p_obj = read_slot(p_ref); + + if(!IS_MOVE_COMPACT){ + assert(obj_is_marked_in_vt(p_obj)); + assert(obj_is_fw_in_oi(p_obj)); + write_slot(p_ref , obj_get_fw_in_oi(p_obj)); + } else { + move_compaction_update_ref(gc, p_ref); + } + } + } } +void gc_update_finref_repointed_refs(GC *gc) +{ + assert(!gc_match_kind(gc, MINOR_COLLECTION)); + + Finref_Metadata* metadata = gc->finref_metadata; + Pool *repset_pool = metadata->repset_pool; + Pool *fallback_ref_pool = metadata->fallback_ref_pool; + + destructively_fix_finref_pool(gc, repset_pool, TRUE); + if(IS_FALLBACK_COMPACTION && !pool_is_empty(fallback_ref_pool)) + nondestructively_fix_finref_pool(gc, fallback_ref_pool, FALSE); +} + void gc_activate_finref_threads(GC *gc) { Finref_Metadata* metadata = gc->finref_metadata; Index: src/finalizer_weakref/finalizer_weakref.h =================================================================== --- src/finalizer_weakref/finalizer_weakref.h (revision 519535) +++ src/finalizer_weakref/finalizer_weakref.h (working copy) @@ -47,25 +47,24 @@ * } */ -inline Partial_Reveal_Object **obj_get_referent_field(Partial_Reveal_Object *p_obj) +inline REF* obj_get_referent_field(Partial_Reveal_Object *p_obj) { assert(p_obj); - return (Partial_Reveal_Object **)(( Byte*)p_obj+get_gc_referent_offset()); + return (REF*)(( Byte*)p_obj+get_gc_referent_offset()); } -typedef void (* Scan_Slot_Func)(Collector *collector, Partial_Reveal_Object **p_ref); +typedef void (* Scan_Slot_Func)(Collector *collector, REF* p_ref); inline void scan_weak_reference(Collector *collector, Partial_Reveal_Object *p_obj, Scan_Slot_Func scan_slot) { WeakReferenceType type = special_reference_type(p_obj); if(type == NOT_REFERENCE) return; - unsigned int collect_kind = collector->gc->collect_kind; - Partial_Reveal_Object **p_referent_field = obj_get_referent_field(p_obj); - Partial_Reveal_Object *p_referent = *p_referent_field; + REF* p_referent_field = obj_get_referent_field(p_obj); + REF p_referent = *p_referent_field; if (!p_referent) return; switch(type){ case SOFT_REFERENCE : - if(collect_kind==MINOR_COLLECTION) + if(gc_match_kind(collector->gc, MINOR_COLLECTION)) scan_slot(collector, p_referent_field); else collector_add_softref(collector, p_obj); @@ -84,6 +83,7 @@ extern void gc_update_weakref_ignore_finref(GC *gc); extern void collector_identify_finref(Collector *collector); +extern void fallback_finref_cleanup(GC *gc); extern void gc_put_finref_to_vm(GC *gc); extern void put_all_fin_on_exit(GC *gc); Index: src/finalizer_weakref/finalizer_weakref_metadata.cpp =================================================================== --- src/finalizer_weakref/finalizer_weakref_metadata.cpp (revision 519535) +++ src/finalizer_weakref/finalizer_weakref_metadata.cpp (working copy) @@ -22,12 +22,11 @@ #include "../thread/mutator.h" #include "../thread/collector.h" -#define FINREF_METADATA_SEG_SIZE_BIT_SHIFT 20 -#define FINREF_METADATA_SEG_SIZE_BYTES (1 << FINREF_METADATA_SEG_SIZE_BIT_SHIFT) +#define FINREF_METADATA_SEG_SIZE_BIT_SHIFT 20 +#define FINREF_METADATA_SEG_SIZE_BYTES (1 << FINREF_METADATA_SEG_SIZE_BIT_SHIFT) -//#define FINREF_METADATA_BLOCK_SIZE_BYTES must be equal to VECTOR_BLOCK_DATA_SIZE_BYTES -#define 
FINREF_METADATA_BLOCK_SIZE_BIT_SHIFT 11 -#define FINREF_METADATA_BLOCK_SIZE_BYTES (1 << FINREF_METADATA_BLOCK_SIZE_BIT_SHIFT) +#define FINREF_METADATA_BLOCK_SIZE_BIT_SHIFT 10 +#define FINREF_METADATA_BLOCK_SIZE_BYTES (1 << FINREF_METADATA_BLOCK_SIZE_BIT_SHIFT) static Finref_Metadata finref_metadata; @@ -59,6 +58,7 @@ finref_metadata.weakref_pool = sync_pool_create(); finref_metadata.phanref_pool = sync_pool_create(); finref_metadata.repset_pool = sync_pool_create(); + finref_metadata.fallback_ref_pool = sync_pool_create(); finref_metadata.finalizable_obj_set= NULL; finref_metadata.repset = NULL; @@ -194,6 +194,9 @@ { GC *gc = collector->gc; + assert(collector->softref_set == NULL); + assert(collector->weakref_set == NULL); + assert(collector->phanref_set == NULL); collector->softref_set = finref_get_free_block(gc); collector->weakref_set = finref_get_free_block(gc); collector->phanref_set= finref_get_free_block(gc); @@ -249,13 +252,13 @@ } -static inline void finref_metadata_add_entry(GC *gc, Vector_Block* &vector_block_in_use, Pool *pool, Partial_Reveal_Object *ref) +static inline void finref_metadata_add_entry(GC *gc, Vector_Block* &vector_block_in_use, Pool *pool, POINTER_SIZE_INT value) { assert(vector_block_in_use); - assert(ref); + assert(value); Vector_Block* block = vector_block_in_use; - vector_block_add_entry(block, (POINTER_SIZE_INT)ref); + vector_block_add_entry(block, value); if(!vector_block_is_full(block)) return; @@ -263,37 +266,63 @@ vector_block_in_use = finref_get_free_block(gc); } -void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref) +void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *p_obj) { - finref_metadata_add_entry(mutator->gc, mutator->obj_with_fin, finref_metadata.obj_with_fin_pool, ref); + GC *gc = mutator->gc; + Finref_Metadata *metadata = gc->finref_metadata; + finref_metadata_add_entry(gc, mutator->obj_with_fin, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)compress_ref(p_obj)); } -void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref) +/* This function is only used by resurrection fallback */ +void gc_add_finalizer(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *p_obj) { - finref_metadata_add_entry(gc, finref_metadata.finalizable_obj_set, finref_metadata.finalizable_obj_pool, ref); + Finref_Metadata *metadata = gc->finref_metadata; + finref_metadata_add_entry(gc, vector_block_in_use, metadata->obj_with_fin_pool, (POINTER_SIZE_INT)compress_ref(p_obj)); } +void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *p_obj) +{ + Finref_Metadata *metadata = gc->finref_metadata; + finref_metadata_add_entry(gc, metadata->finalizable_obj_set, metadata->finalizable_obj_pool, (POINTER_SIZE_INT)compress_ref(p_obj)); +} + void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref) { - finref_metadata_add_entry(collector->gc, collector->softref_set, finref_metadata.softref_pool, ref); + GC *gc = collector->gc; + Finref_Metadata *metadata = gc->finref_metadata; + finref_metadata_add_entry(gc, collector->softref_set, metadata->softref_pool, (POINTER_SIZE_INT)compress_ref(ref)); } void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref) { - finref_metadata_add_entry(collector->gc, collector->weakref_set, finref_metadata.weakref_pool, ref); + GC *gc = collector->gc; + Finref_Metadata *metadata = gc->finref_metadata; + finref_metadata_add_entry(gc, collector->weakref_set, metadata->weakref_pool, (POINTER_SIZE_INT)compress_ref(ref)); } void collector_add_phanref(Collector 
*collector, Partial_Reveal_Object *ref) { - finref_metadata_add_entry(collector->gc, collector->phanref_set, finref_metadata.phanref_pool, ref); + GC *gc = collector->gc; + Finref_Metadata *metadata = gc->finref_metadata; + finref_metadata_add_entry(gc, collector->phanref_set, metadata->phanref_pool, (POINTER_SIZE_INT)compress_ref(ref)); } -void finref_repset_add_entry(GC *gc, Partial_Reveal_Object **p_ref) +void finref_repset_add_entry(GC *gc, REF* p_ref) { assert(*p_ref); - finref_metadata_add_entry(gc, finref_metadata.repset, finref_metadata.repset_pool, (Partial_Reveal_Object *)p_ref); + assert(*(unsigned int*)*p_ref); + Finref_Metadata *metadata = gc->finref_metadata; + finref_metadata_add_entry(gc, metadata->repset, metadata->repset_pool, (POINTER_SIZE_INT)p_ref); } +/* This function is only used by resurrection fallback */ +void finref_add_fallback_ref(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *p_obj) +{ + assert(p_obj); + Finref_Metadata *metadata = gc->finref_metadata; + finref_metadata_add_entry(gc, vector_block_in_use, metadata->fallback_ref_pool, (POINTER_SIZE_INT)compress_ref(p_obj)); +} + static inline Boolean pool_has_no_ref(Pool *pool) { if(pool_is_empty(pool)) @@ -355,3 +384,8 @@ finref_metadata_clear_pool(gc->finref_metadata->weakref_pool); finref_metadata_clear_pool(gc->finref_metadata->phanref_pool); } + +void gc_clear_finref_repset_pool(GC *gc) +{ + finref_metadata_clear_pool(gc->finref_metadata->repset_pool); +} Index: src/finalizer_weakref/finalizer_weakref_metadata.h =================================================================== --- src/finalizer_weakref/finalizer_weakref_metadata.h (revision 519535) +++ src/finalizer_weakref/finalizer_weakref_metadata.h (working copy) @@ -44,6 +44,8 @@ Pool *repset_pool; // repointed reference slot sets + Pool *fallback_ref_pool; // temporary buffer for weakref needing to be put to vm when resurrection fallback happens + Vector_Block *finalizable_obj_set; // buffer for finalizable_objects_pool Vector_Block *repset; // buffer for repset_pool @@ -66,11 +68,13 @@ extern void gc_reset_finref_metadata(GC *gc); extern void mutator_add_finalizer(Mutator *mutator, Partial_Reveal_Object *ref); +extern void gc_add_finalizer(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *ref); extern void gc_add_finalizable_obj(GC *gc, Partial_Reveal_Object *ref); extern void collector_add_softref(Collector *collector, Partial_Reveal_Object *ref); extern void collector_add_weakref(Collector *collector, Partial_Reveal_Object *ref); extern void collector_add_phanref(Collector *collector, Partial_Reveal_Object *ref); -extern void finref_repset_add_entry(GC *gc, Partial_Reveal_Object **ref); +extern void finref_repset_add_entry(GC *gc, REF* ref); +extern void finref_add_fallback_ref(GC *gc, Vector_Block* &vector_block_in_use, Partial_Reveal_Object *p_ref); extern Boolean obj_with_fin_pool_is_empty(GC *gc); extern Boolean finalizable_obj_pool_is_empty(GC *gc); Index: src/gen/gen.cpp =================================================================== --- src/gen/gen.cpp (revision 519535) +++ src/gen/gen.cpp (working copy) @@ -24,6 +24,7 @@ #include "../finalizer_weakref/finalizer_weakref.h" #include "../verify/verify_live_heap.h" #include "../common/space_tuner.h" +#include "../common/compressed_ref.h" /* fspace size limit is not interesting. 
only for manual tuning purpose */ unsigned int min_nos_size_bytes = 16 * MB; @@ -150,6 +151,8 @@ free(large_page_hint); large_page_hint = NULL; printf("GC use small pages.\n"); + }else{ + printf("GC use large pages.\n"); } } @@ -173,6 +176,8 @@ #endif /* STATIC_NOS_MAPPING else */ + HEAP_NULL = (POINTER_SIZE_INT)reserved_base; + gc_gen->reserved_heap_size = los_size + nos_reserve_size + mos_reserve_size; gc_gen->heap_start = reserved_base; gc_gen->heap_end = reserved_end; @@ -329,7 +334,7 @@ gc->collect_result = TRUE; - if(gc->collect_kind == MINOR_COLLECTION){ + if(gc_match_kind((GC*)gc, MINOR_COLLECTION)){ /* FIXME:: move_object is only useful for nongen_slide_copy */ gc->mos->move_object = FALSE; @@ -346,7 +351,7 @@ } - if(gc->collect_result == FALSE && gc->collect_kind == MINOR_COLLECTION){ + if(gc->collect_result == FALSE && gc_match_kind((GC*)gc, MINOR_COLLECTION)){ if(gc_is_gen_mode()) gc_clear_remset((GC*)gc); @@ -375,6 +380,11 @@ if(verify_live_heap) gc_verify_heap((GC*)gc, FALSE); + /*Fixme: clear root set here to support verify.*/ +#ifdef COMPRESS_REFERENCE + gc_set_pool_clear(gc->metadata->gc_uncompressed_rootset_pool); +#endif + return; } Index: src/gen/gen.h =================================================================== --- src/gen/gen.h (revision 519535) +++ src/gen/gen.h (working copy) @@ -82,6 +82,7 @@ /* FIXME:: this is wrong! root_set belongs to mutator */ Vector_Block* root_set; + Vector_Block* uncompressed_root_set; //For_LOS_extend Space_Tuner* tuner; @@ -172,3 +173,4 @@ #endif /* ifndef _GC_GEN_H_ */ + Index: src/gen/gen_adapt.cpp =================================================================== --- src/gen/gen_adapt.cpp (revision 519535) +++ src/gen/gen_adapt.cpp (working copy) @@ -106,7 +106,7 @@ POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace); POINTER_SIZE_INT total_free_size = mos_free_size + nos_free_size; - if(gc->collect_kind != MINOR_COLLECTION) { + if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)) { assert(!gc_is_gen_mode()); if(gen_mode_adaptor->major_survive_ratio_threshold != 0 && mspace->survive_ratio > gen_mode_adaptor->major_survive_ratio_threshold){ @@ -199,9 +199,9 @@ POINTER_SIZE_INT mos_free_size = space_free_memory_size(mspace); POINTER_SIZE_INT nos_free_size = space_free_memory_size(fspace); POINTER_SIZE_INT total_free_size = mos_free_size + nos_free_size; - if(gc->collect_kind != MINOR_COLLECTION) gc->force_gen_mode = FALSE; + if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)) gc->force_gen_mode = FALSE; if(!gc->force_gen_mode){ - if(gc->collect_kind != MINOR_COLLECTION){ + if(!gc_match_kind((GC*)gc, MINOR_COLLECTION)){ mspace->time_collections += pause_time; Tslow = (float)pause_time; @@ -210,7 +210,7 @@ POINTER_SIZE_INT major_survive_size = space_committed_size((Space*)mspace) - mos_free_size; /*If major is caused by LOS, or collection kind is EXTEND_COLLECTION, all survive ratio is not updated.*/ - if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (gc->collect_kind != EXTEND_COLLECTION)){ + if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (!gc_match_kind((GC*)gc, EXTEND_COLLECTION))){ survive_ratio = (float)major_survive_size/(float)space_committed_size((Space*)mspace); mspace->survive_ratio = survive_ratio; } @@ -289,7 +289,7 @@ /* if(curr_nos_size <= min_nos_size_bytes){ //after major, should not allow this size - assert(gc->collect_kind == MINOR_COLLECTION); + assert(gc_match_kind((GC*)gc, MINOR_COLLECTION)); return FALSE; } */ @@ -318,12 +318,6 @@ return TRUE;; } -// this function is added to disambiguate on windows/em64t calls to asm() 
below -POINTER_SIZE_SINT POINTER_SIZE_abs(POINTER_SIZE_SINT x) -{ - return x<0?-x:x; -} - #ifndef STATIC_NOS_MAPPING void gc_gen_adapt(GC_Gen* gc, int64 pause_time) @@ -344,7 +338,7 @@ POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace); - if( POINTER_SIZE_abs((POINTER_SIZE_SINT)new_nos_size - (POINTER_SIZE_SINT)curr_nos_size) < NOS_COPY_RESERVE_DELTA ) + if( abs((POINTER_SIZE_SINT)new_nos_size - (POINTER_SIZE_SINT)curr_nos_size) < NOS_COPY_RESERVE_DELTA ) return; /* below are ajustment */ @@ -396,7 +390,7 @@ POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)fspace); - if( POINTER_SIZE_abs((POINTER_SIZE_SINT)new_nos_size - (POINTER_SIZE_SINT)curr_nos_size) < NOS_COPY_RESERVE_DELTA ) + if( abs((POINTER_SIZE_SINT)new_nos_size - (POINTER_SIZE_SINT)curr_nos_size) < NOS_COPY_RESERVE_DELTA ) return; POINTER_SIZE_INT used_mos_size = space_used_memory_size((Blocked_Space*)mspace); Index: src/mark_compact/fallback_mark_scan.cpp =================================================================== --- src/mark_compact/fallback_mark_scan.cpp (revision 519535) +++ src/mark_compact/fallback_mark_scan.cpp (working copy) @@ -23,26 +23,25 @@ #include "../gen/gen.h" #include "../finalizer_weakref/finalizer_weakref.h" -static void scan_slot(Collector* collector, Partial_Reveal_Object** p_ref) +static void scan_slot(Collector* collector, REF *p_ref) { - Partial_Reveal_Object* p_obj = *p_ref; - if(p_obj==NULL) return; + REF ref = *p_ref; + if(ref == COMPRESSED_NULL) return; collector_tracestack_push(collector, p_ref); - return; } -static void scan_object(Collector* collector, Partial_Reveal_Object **p_ref) +static void scan_object(Collector* collector, REF *p_ref) { - Partial_Reveal_Object *p_obj = *p_ref; + Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj); if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){ assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj))); p_obj = obj_get_fw_in_oi(p_obj); assert(p_obj); - *p_ref = p_obj; + write_slot(p_ref, p_obj); } if(!obj_mark_in_vt(p_obj)) @@ -57,7 +56,7 @@ int32 array_length = vector_get_length((Vector_Handle) array); for (int i = 0; i < array_length; i++) { - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)vector_get_element_address_ref((Vector_Handle) array, i); + REF *p_ref = (REF*)vector_get_element_address_ref((Vector_Handle) array, i); scan_slot(collector, p_ref); } return; @@ -66,7 +65,7 @@ /* scan non-array object */ int *offset_scanner = init_object_scanner(p_obj); while (true) { - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj); + REF *p_ref = (REF*)offset_get_ref(offset_scanner, p_obj); if (p_ref == NULL) break; /* terminating ref slot */ scan_slot(collector, p_ref); @@ -81,13 +80,13 @@ } -static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref) +static void trace_object(Collector* collector, REF *p_ref) { scan_object(collector, p_ref); Vector_Block* trace_stack = collector->trace_stack; while( !vector_stack_is_empty(trace_stack)){ - p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack); + p_ref = (REF *)vector_stack_pop(trace_stack); scan_object(collector, p_ref); trace_stack = collector->trace_stack; } @@ -103,7 +102,7 @@ GC* gc = collector->gc; GC_Metadata* metadata = gc->metadata; - assert(gc->collect_kind == FALLBACK_COLLECTION); + assert(gc_match_kind(gc, FALLBACK_COLLECTION)); /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. 
*/ unsigned int num_active_collectors = gc->num_active_collectors; @@ -118,12 +117,11 @@ while(root_set){ POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set); while(!vector_block_iterator_end(root_set,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter; + REF *p_ref = (REF *)*iter; iter = vector_block_iterator_advance(root_set,iter); - Partial_Reveal_Object* p_obj = *p_ref; /* root ref can't be NULL, (remset may have NULL ref entry, but this function is only for MAJOR_COLLECTION */ - assert(p_obj != NULL); + assert(*p_ref); collector_tracestack_push(collector, p_ref); @@ -143,7 +141,7 @@ while(mark_task){ POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task); while(!vector_block_iterator_end(mark_task,iter)){ - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object **)*iter; + REF* p_ref = (REF *)*iter; iter = vector_block_iterator_advance(mark_task,iter); /* FIXME:: we should not let mark_task empty during working, , other may want to steal it. @@ -179,5 +177,5 @@ void trace_obj_in_fallback_marking(Collector *collector, void *p_ref) { - trace_object(collector, (Partial_Reveal_Object **)p_ref); + trace_object(collector, (REF *)p_ref); } Index: src/mark_compact/mspace_collect_compact.cpp =================================================================== --- src/mark_compact/mspace_collect_compact.cpp (revision 519535) +++ src/mark_compact/mspace_collect_compact.cpp (working copy) @@ -20,6 +20,7 @@ #include "mspace_collect_compact.h" + Boolean IS_MOVE_COMPACT; struct GC_Gen; @@ -269,8 +270,7 @@ //For_LOS_extend if(gc->tuner->kind != TRANS_NOTHING){ collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); - - }else if (gc->collect_kind == FALLBACK_COLLECTION){ + }else if (gc_match_kind(gc, FALLBACK_COLLECTION)){ collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace); //IS_MOVE_COMPACT = TRUE; //collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace); @@ -295,6 +295,9 @@ } } + return; } + + Index: src/mark_compact/mspace_extend_compact.cpp =================================================================== --- src/mark_compact/mspace_extend_compact.cpp (revision 519535) +++ src/mark_compact/mspace_extend_compact.cpp (working copy) @@ -149,13 +149,13 @@ assert(!obj_is_primitive_array(p_obj)); int32 array_length = array->array_len; - Partial_Reveal_Object** p_refs = (Partial_Reveal_Object**)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); + REF* p_refs = (REF*)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array)); for (int i = 0; i < array_length; i++) { - Partial_Reveal_Object** p_ref = p_refs + i; - Partial_Reveal_Object* p_element = *p_ref; + REF* p_ref = p_refs + i; + Partial_Reveal_Object* p_element = read_slot(p_ref); if((p_element > start_address) && (p_element < end_address)) - *p_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_element - addr_diff); + write_slot(p_ref, (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_element - addr_diff)); } return; } @@ -163,12 +163,12 @@ /* scan non-array object */ int *offset_scanner = init_object_scanner(p_obj); while (true) { - Partial_Reveal_Object** p_ref = (Partial_Reveal_Object**)offset_get_ref(offset_scanner, p_obj); + REF* p_ref = (REF*)offset_get_ref(offset_scanner, p_obj); if (p_ref == NULL) break; /* terminating ref slot */ - Partial_Reveal_Object* p_element = *p_ref; + Partial_Reveal_Object* p_element = read_slot(p_ref); if((p_element > start_address) && (p_element < end_address)) - *p_ref = 
(Partial_Reveal_Object*)((POINTER_SIZE_INT)p_element - addr_diff); + write_slot(p_ref, (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_element - addr_diff)); offset_scanner = offset_next_ref(offset_scanner); } @@ -204,7 +204,7 @@ static void gc_reupdate_repointed_sets(GC* gc, Pool* pool, void *start_address, void *end_address, unsigned int addr_diff) { GC_Metadata *metadata = gc->metadata; - assert(gc->collect_kind != MINOR_COLLECTION); + assert(gc_match_kind(gc, EXTEND_COLLECTION)); pool_iterator_init(pool); @@ -216,7 +216,7 @@ Partial_Reveal_Object *p_obj = *p_ref; if((p_obj > start_address) && (p_obj < end_address)) - *p_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj - addr_diff); + *p_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj - addr_diff); } } } @@ -227,9 +227,13 @@ GC_Metadata *metadata = gc->metadata; /* only for MAJOR_COLLECTION and FALLBACK_COLLECTION */ - assert(gc->collect_kind != MINOR_COLLECTION); + assert(gc_match_kind(gc, EXTEND_COLLECTION)); gc_reupdate_repointed_sets(gc, metadata->gc_rootset_pool, start_address, end_address, addr_diff); + +#ifdef COMPRESS_REFERENCE + gc_fix_uncompressed_rootset(gc); +#endif #ifndef BUILD_IN_REFERENT gc_update_finref_repointed_refs(gc); @@ -252,11 +256,6 @@ Block_Header *dest_block = GC_BLOCK_HEADER((void *)((POINTER_SIZE_INT)src_base - addr_diff)); memmove(dest_block->base, src_base, size); dest_block->new_free = (void *)((POINTER_SIZE_INT)block_end - addr_diff); - if(verify_live_heap) - while (p_obj < block_end) { - event_collector_doublemove_obj(p_obj, (Partial_Reveal_Object *)((POINTER_SIZE_INT)p_obj - addr_diff), collector); - p_obj = obj_end(p_obj); - } } } @@ -271,7 +270,7 @@ Lspace *lspace = gc_gen->los; /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/ - gc_gen->collect_kind = EXTEND_COLLECTION; + gc_gen->collect_kind |= EXTEND_COLLECTION; unsigned int num_active_collectors = gc_gen->num_active_collectors; unsigned int old_num; @@ -315,7 +314,7 @@ Lspace *lspace = gc_gen->los; /*For_LOS adaptive: when doing EXTEND_COLLECTION, mspace->survive_ratio should not be updated in gc_decide_next_collect( )*/ - gc_gen->collect_kind = EXTEND_COLLECTION; + gc_gen->collect_kind |= EXTEND_COLLECTION; unsigned int num_active_collectors = gc_gen->num_active_collectors; unsigned int old_num; Index: src/mark_compact/mspace_move_compact.cpp =================================================================== --- src/mark_compact/mspace_move_compact.cpp (revision 519535) +++ src/mark_compact/mspace_move_compact.cpp (working copy) @@ -36,7 +36,7 @@ Block_Header* dest_block = collector->cur_target_block; void* dest_sector_addr = dest_block->base; - Boolean is_fallback = (collector->gc->collect_kind == FALLBACK_COLLECTION); + Boolean is_fallback = gc_match_kind(collector->gc, FALLBACK_COLLECTION); while( curr_block ){ @@ -91,11 +91,11 @@ event_collector_doublemove_obj(rescan_obj, targ_obj, collector); else event_collector_move_obj(rescan_obj, targ_obj, collector); - rescan_obj = block_get_next_marked_object(curr_block, &rescan_pos); + rescan_obj = block_get_next_marked_object(curr_block, &rescan_pos); if(rescan_obj == NULL) break; } } - + memmove(dest_sector_addr, src_sector_addr, curr_sector_size); dest_sector_addr = (void*)((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size); @@ -144,10 +144,10 @@ have references that are going to be repointed */ unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); - if(gc->collect_kind != 
FALLBACK_COLLECTION) + if(!gc_match_kind(gc, FALLBACK_COLLECTION)) mark_scan_heap(collector); else - fallback_mark_scan_heap(collector); + fallback_mark_scan_heap(collector); old_num = atomic_inc32(&num_marking_collectors); if( ++old_num == num_active_collectors ){ @@ -163,6 +163,7 @@ gc_update_weakref_ignore_finref(gc); } #endif + /* let other collectors go */ num_marking_collectors++; @@ -222,7 +223,8 @@ mspace_reset_after_compaction(mspace); fspace_reset_for_allocation(fspace); - gc_set_pool_clear(gc->metadata->gc_rootset_pool); + + gc_set_pool_clear(gc->metadata->gc_rootset_pool); return; } Index: src/mark_compact/mspace_slide_compact.cpp =================================================================== --- src/mark_compact/mspace_slide_compact.cpp (revision 519535) +++ src/mark_compact/mspace_slide_compact.cpp (working copy) @@ -110,7 +110,7 @@ static void mspace_fix_repointed_refs(Collector* collector, Mspace* mspace) { Block_Header* curr_block = mspace_block_iterator_next(mspace); - + /* for MAJOR_COLLECTION, we must iterate over all compact blocks */ while( curr_block){ block_fix_ref_after_repointing(curr_block); @@ -242,7 +242,7 @@ } Partial_Reveal_Object *next_src_obj = GC_BLOCK_HEADER(first_src_obj)->next_src; - if(next_src_obj && GC_BLOCK_HEADER(get_obj_info_raw(next_src_obj)) != next_dest_block){ + if(next_src_obj && GC_BLOCK_HEADER(uncompress_ref((REF)get_obj_info_raw(next_src_obj))) != next_dest_block){ next_src_obj = NULL; } next_dest_block->src = next_src_obj; @@ -270,7 +270,7 @@ { void *start_pos; Block_Header *nos_fw_start_block = (Block_Header *)&mspace->blocks[mspace_free_block_idx - mspace->first_block_idx]; - Boolean is_fallback = (collector->gc->collect_kind == FALLBACK_COLLECTION); + Boolean is_fallback = gc_match_kind(collector->gc, FALLBACK_COLLECTION); while(Partial_Reveal_Object *p_obj = get_next_first_src_obj(mspace)){ Block_Header *src_block = GC_BLOCK_HEADER(p_obj); @@ -340,13 +340,13 @@ Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc); unsigned int num_active_collectors = gc->num_active_collectors; - + /* Pass 1: ************************************************** mark all live objects in heap, and save all the slots that have references that are going to be repointed */ unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); - if(gc->collect_kind != FALLBACK_COLLECTION) + if(!gc_match_kind(gc, FALLBACK_COLLECTION)) mark_scan_heap(collector); else fallback_mark_scan_heap(collector); @@ -372,7 +372,7 @@ num_marking_collectors++; } while(num_marking_collectors != num_active_collectors + 1); - + /* Pass 2: ************************************************** assign target addresses for all to-be-moved objects */ atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1); @@ -394,21 +394,17 @@ } while(num_repointing_collectors != num_active_collectors + 1); if(!gc->collect_result) return; - + /* Pass 3: ************************************************** update all references whose objects are to be moved */ old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1); - mspace_fix_repointed_refs(collector, mspace); - old_num = atomic_inc32(&num_fixing_collectors); if( ++old_num == num_active_collectors ){ /* last collector's world here */ lspace_fix_repointed_refs(collector, lspace); gc_fix_rootset(collector); - gc_init_block_for_sliding_compact(gc, mspace); - num_fixing_collectors++; } while(num_fixing_collectors != num_active_collectors + 1); @@ -421,7 +417,7 @@ atomic_inc32(&num_moving_collectors); 
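+  /* Pass-4 barrier: each collector increments the counter, then spins until all collectors have finished moving objects */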
while(num_moving_collectors != num_active_collectors); - + /* Pass 5: ************************************************** restore obj_info */ atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1); @@ -430,13 +426,13 @@ old_num = atomic_inc32(&num_restoring_collectors); if( ++old_num == num_active_collectors ){ - + update_mspace_info_for_los_extension(mspace); num_restoring_collectors++; } while(num_restoring_collectors != num_active_collectors + 1); - + /* Dealing with out of memory in mspace */ if(mspace->free_block_idx > fspace->first_block_idx){ atomic_cas32( &num_extending_collectors, 0, num_active_collectors); @@ -446,7 +442,6 @@ atomic_inc32(&num_extending_collectors); while(num_extending_collectors != num_active_collectors); } - if( collector->thread_handle != 0 ) return; @@ -458,7 +453,7 @@ //For_LOS_extend mspace_restore_block_chain(mspace); - + gc_set_pool_clear(gc->metadata->gc_rootset_pool); return; Index: src/mark_sweep/free_area_pool.h =================================================================== --- src/mark_sweep/free_area_pool.h (revision 519535) +++ src/mark_sweep/free_area_pool.h (working copy) @@ -33,16 +33,16 @@ typedef struct Lockable_Bidir_List{ /* <-- First couple of fields overloadded as Bidir_List */ - unsigned int zero; + POINTER_SIZE_INT zero; Bidir_List* next; Bidir_List* prev; /* END of Bidir_List --> */ - SpinLock lock; + SpinLock lock; }Lockable_Bidir_List; typedef struct Free_Area{ /* <-- First couple of fields overloadded as Bidir_List */ - unsigned int zero; + POINTER_SIZE_INT zero; Bidir_List* next; Bidir_List* prev; /* END of Bidir_List --> */ @@ -70,7 +70,7 @@ typedef struct Free_Area_Pool{ Lockable_Bidir_List sized_area_list[NUM_FREE_LIST]; /* each list corresponds to one bit in below vector */ - unsigned int list_bit_flag[NUM_FLAG_WORDS]; + POINTER_SIZE_INT list_bit_flag[NUM_FLAG_WORDS]; }Free_Area_Pool; #define MAX_LIST_INDEX (NUM_FREE_LIST - 1) @@ -120,7 +120,7 @@ /* set bit flag of the list */ Bidir_List* list = (Bidir_List*)&(pool->sized_area_list[index]); if(list->next == list){ - pool_list_clear_flag(pool, index); + pool_list_clear_flag(pool, index); } } Index: src/mark_sweep/lspace.cpp =================================================================== --- src/mark_sweep/lspace.cpp (revision 519535) +++ src/mark_sweep/lspace.cpp (working copy) @@ -24,6 +24,13 @@ struct GC_Gen; void gc_set_los(GC_Gen* gc, Space* lspace); +/*Fixme: This macro is for handling HEAP_NULL issues caused by JIT OPT*/ +#ifdef COMPRESS_REFERENCE + #define LOS_HEAD_RESERVE_FOR_HEAP_NULL (4*KB) +#else + #define LOS_HEAD_RESERVE_FOR_HEAP_NULL (0*KB) +#endif + void lspace_initialize(GC* gc, void* start, unsigned int lspace_size) { Lspace* lspace = (Lspace*)STD_MALLOC( sizeof(Lspace)); @@ -37,9 +44,9 @@ vm_commit_mem(reserved_base, lspace_size); memset(reserved_base, 0, lspace_size); - lspace->committed_heap_size = committed_size; - lspace->reserved_heap_size = committed_size; - lspace->heap_start = reserved_base; + lspace->committed_heap_size = committed_size - LOS_HEAD_RESERVE_FOR_HEAP_NULL; + lspace->reserved_heap_size = committed_size - LOS_HEAD_RESERVE_FOR_HEAP_NULL; + lspace->heap_start = (void*)((POINTER_SIZE_INT)reserved_base + LOS_HEAD_RESERVE_FOR_HEAP_NULL); lspace->heap_end = (void *)((POINTER_SIZE_INT)reserved_base + committed_size); lspace->move_object = FALSE; Index: src/thread/collector.cpp =================================================================== --- src/thread/collector.cpp (revision 519535) +++ src/thread/collector.cpp (working 
copy) @@ -61,12 +61,12 @@ /* TO_REMOVE assert(collector->rep_set==NULL); - if( !gc_is_gen_mode() || collector->gc->collect_kind != MINOR_COLLECTION){ + if( !gc_is_gen_mode() || !gc_match_kind(collector->gc, MINOR_COLLECTION)){ collector->rep_set = free_set_pool_get_entry(metadata); } */ - if(gc_is_gen_mode() && collector->gc->collect_kind==MINOR_COLLECTION && NOS_PARTIAL_FORWARD){ + if(gc_is_gen_mode() && gc_match_kind(collector->gc, MINOR_COLLECTION) && NOS_PARTIAL_FORWARD){ assert(collector->rem_set==NULL); collector->rem_set = free_set_pool_get_entry(metadata); } @@ -102,9 +102,9 @@ static void assign_collector_with_task(GC* gc, TaskType task_func, Space* space) { /* FIXME:: to adaptively identify the num_collectors_to_activate */ - if( MINOR_COLLECTORS && gc->collect_kind == MINOR_COLLECTION){ + if( MINOR_COLLECTORS && gc_match_kind(gc, MINOR_COLLECTION)){ gc->num_active_collectors = MINOR_COLLECTORS; - }else if ( MAJOR_COLLECTORS && gc->collect_kind != MINOR_COLLECTION){ + }else if ( MAJOR_COLLECTORS && !gc_match_kind(gc, MINOR_COLLECTION)){ gc->num_active_collectors = MAJOR_COLLECTORS; }else{ gc->num_active_collectors = gc->num_collectors; Index: src/thread/collector_alloc.h =================================================================== --- src/thread/collector_alloc.h (revision 519535) +++ src/thread/collector_alloc.h (working copy) @@ -31,7 +31,7 @@ Obj_Info_Type oi = get_obj_info_raw(p_obj); /* forwarded by somebody else */ - if ((POINTER_SIZE_INT)oi & FORWARD_BIT){ + if (oi & FORWARD_BIT){ return NULL; } @@ -50,7 +50,8 @@ /* else, take the obj by setting the forwarding flag atomically we don't put a simple bit in vt because we need compute obj size later. */ - if ((void*)oi != atomic_casptr((volatile void**)get_obj_info_addr(p_obj), (void*)((POINTER_SIZE_INT)p_targ_obj|FORWARD_BIT), (void*)oi)) { + REF target = compress_ref(p_targ_obj); + if (oi != (Obj_Info_Type)atomic_cas32(get_obj_info_addr(p_obj), ( ( (POINTER_SIZE_INT)target |FORWARD_BIT)), oi)) { /* forwarded by other, we need unalloc the allocated obj. We may waste some space if the allocation switched block. The remaining part of the switched block cannot be revivied for next allocation of object that has smaller size than this one. 
*/
Index: src/thread/mutator_alloc.cpp
===================================================================
--- src/thread/mutator_alloc.cpp	(revision 519535)
+++ src/thread/mutator_alloc.cpp	(working copy)
@@ -73,9 +73,9 @@
   if( p_obj == NULL ) return NULL;

-  obj_set_vt((Partial_Reveal_Object*)p_obj, ah);
+  obj_set_vt((Partial_Reveal_Object*)p_obj, (VT)ah);

-  if(!IGNORE_FINREF && type_has_finalizer((Partial_Reveal_VTable *)ah))
+  if(!IGNORE_FINREF && type_has_finalizer((Partial_Reveal_VTable *) uncompress_vt((VT)ah) ))
     mutator_add_finalizer((Mutator*)allocator, (Partial_Reveal_Object*)p_obj);

   return (Managed_Object_Handle)p_obj;
@@ -88,14 +88,14 @@
   assert((size % GC_OBJECT_ALIGNMENT) == 0);
   assert(ah);

-  if(type_has_finalizer((Partial_Reveal_VTable *)ah))
+  if(type_has_finalizer((Partial_Reveal_VTable *) uncompress_vt((VT)ah)))
     return NULL;

#ifdef GC_OBJ_SIZE_STATISTIC
   gc_alloc_statistic_obj_distrubution(size);
#endif

   /* object should be handled specially */
   if ( size > GC_OBJ_SIZE_THRESHOLD ) return NULL;

   Allocator* allocator = (Allocator*)gc_get_tls();
@@ -105,7 +105,7 @@
   p_obj = (Managed_Object_Handle)thread_local_alloc(size, allocator);
   if(p_obj == NULL) return NULL;

-  obj_set_vt((Partial_Reveal_Object*)p_obj, ah);
+  obj_set_vt((Partial_Reveal_Object*)p_obj, (VT)ah);

   return p_obj;
}
Index: src/trace_forward/fspace.cpp
===================================================================
--- src/trace_forward/fspace.cpp	(revision 519535)
+++ src/trace_forward/fspace.cpp	(working copy)
@@ -101,7 +101,7 @@
   unsigned int first_idx = fspace->first_block_idx;
   unsigned int marked_start_idx = 0; //was for oi markbit reset, now useless
   unsigned int marked_last_idx = 0;
-  Boolean is_major_collection = (fspace->gc->collect_kind != MINOR_COLLECTION);
+  Boolean is_major_collection = !gc_match_kind(fspace->gc, MINOR_COLLECTION);
   Boolean gen_mode = gc_is_gen_mode();

   if( is_major_collection ||
Index: src/trace_forward/fspace_gen_forward_pool.cpp
===================================================================
--- src/trace_forward/fspace_gen_forward_pool.cpp	(revision 519535)
+++ src/trace_forward/fspace_gen_forward_pool.cpp	(working copy)
@@ -23,6 +23,7 @@
 #include "../thread/collector.h"
 #include "../common/gc_metadata.h"
 #include "../finalizer_weakref/finalizer_weakref.h"
+#include "../common/compressed_ref.h"

 static FORCE_INLINE Boolean fspace_object_to_be_forwarded(Partial_Reveal_Object *p_obj, Fspace *fspace)
 {
@@ -30,13 +31,14 @@
   return forward_first_half? (p_obj < object_forwarding_boundary):(p_obj>=object_forwarding_boundary);
 }

-static FORCE_INLINE void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref)
+static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
 {
-  Partial_Reveal_Object *p_obj = *p_ref;
-  if (p_obj == NULL) return;
+  REF ref = *p_ref;
+  if(ref == COMPRESSED_NULL) return;

   /* the slot can be in tspace or fspace, we don't care.
we care only if the reference in the slot is pointing to fspace */ + Partial_Reveal_Object *p_obj = uncompress_ref(ref); if (obj_belongs_to_nos(p_obj)) collector_tracestack_push(collector, p_ref); @@ -47,7 +49,7 @@ { if (!object_has_ref_field(p_obj)) return; - void *slot; + REF *p_ref; /* scan array object */ if (object_is_array(p_obj)) { @@ -56,8 +58,8 @@ int32 array_length = vector_get_length((Vector_Handle) array); for (int i = 0; i < array_length; i++) { - slot = vector_get_element_address_ref((Vector_Handle) array, i); - scan_slot(collector, (Partial_Reveal_Object **)slot); + p_ref= (REF *)vector_get_element_address_ref((Vector_Handle) array, i); + scan_slot(collector, p_ref); } return; } @@ -65,10 +67,10 @@ /* scan non-array object */ int *offset_scanner = init_object_scanner(p_obj); while (true) { - slot = offset_get_ref(offset_scanner, p_obj); - if (slot == NULL) break; + p_ref = (REF *)offset_get_ref(offset_scanner, p_obj); + if (p_ref == NULL) break; - scan_slot(collector, (Partial_Reveal_Object **)slot); + scan_slot(collector, p_ref); offset_scanner = offset_next_ref(offset_scanner); } @@ -90,17 +92,18 @@ #include "../verify/verify_live_heap.h" -static FORCE_INLINE void forward_object(Collector* collector, Partial_Reveal_Object **p_ref) +static FORCE_INLINE void forward_object(Collector *collector, REF *p_ref) { Space* space = collector->collect_space; GC* gc = collector->gc; - Partial_Reveal_Object *p_obj = *p_ref; + Partial_Reveal_Object *p_obj = read_slot(p_ref); if(!obj_belongs_to_nos(p_obj)) return; /* Fastpath: object has already been forwarded, update the ref slot */ if(obj_is_fw_in_oi(p_obj)) { - *p_ref = obj_get_fw_in_oi(p_obj); + Partial_Reveal_Object* p_target_obj = obj_get_fw_in_oi(p_obj); + write_slot(p_ref, p_target_obj); return; } @@ -110,7 +113,7 @@ /* this obj remains in fspace, remember its ref slot for next GC if p_ref is not root. we don't need remember root ref. Actually it's wrong to rem root ref since they change in next GC */ if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc)) - collector_remset_add_entry(collector, p_ref); + collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); if(obj_mark_in_oi(p_obj)) scan_object(collector, p_obj); @@ -136,28 +139,24 @@ Partial_Reveal_Object *p_new_obj = obj_get_fw_in_oi(p_obj); assert(p_new_obj); - *p_ref = p_new_obj; + write_slot(p_ref, p_new_obj); return; } /* otherwise, we successfully forwarded */ - *p_ref = p_target_obj; + write_slot(p_ref, p_target_obj); - /* we forwarded it, we need remember it for verification. 
*/
-  if(verify_live_heap) {
-    event_collector_move_obj(p_obj, p_target_obj, collector);
-  }

   scan_object(collector, p_target_obj);
   return;
 }

-static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref)
+static void trace_object(Collector *collector, REF *p_ref)
 {
   forward_object(collector, p_ref);

   Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack;
   while( !vector_stack_is_empty(trace_stack)){
-    p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack);
+    p_ref = (REF *)vector_stack_pop(trace_stack);
     forward_object(collector, p_ref);
     trace_stack = (Vector_Block*)collector->trace_stack;
   }
@@ -186,10 +185,12 @@
   while(root_set){
     POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
     while(!vector_block_iterator_end(root_set,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      REF *p_ref = (REF *)*iter;
       iter = vector_block_iterator_advance(root_set,iter);

-      if(*p_ref == NULL) continue; /* root ref cann't be NULL, but remset can be */
-      if(obj_belongs_to_nos(*p_ref)){
+
+      if(!*p_ref) continue; /* root ref can't be NULL, but remset can be */
+      Partial_Reveal_Object *p_obj = read_slot(p_ref);
+      if(obj_belongs_to_nos(p_obj)){
         collector_tracestack_push(collector, p_ref);
       }
     }
@@ -207,7 +208,7 @@
   while(trace_task){
     POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
     while(!vector_block_iterator_end(trace_task,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
+      REF *p_ref = (REF *)*iter;
       iter = vector_block_iterator_advance(trace_task,iter);
       assert(*p_ref); /* a task can't be NULL, it was checked before put into the task stack */
       /* in sequential version, we only trace same object once, but we were using a local hashset for that,
@@ -259,10 +260,17 @@
   if( collector->thread_handle != 0 ) return;

   gc->collect_result = gc_collection_result(gc);
-  if(!gc->collect_result) return;
+  if(!gc->collect_result){
+#ifndef BUILD_IN_REFERENT
+    fallback_finref_cleanup(gc);
+#endif
+    return;
+  }

-  if(!IGNORE_FINREF )
+  if(!IGNORE_FINREF ){
     collector_identify_finref(collector);
+    if(!gc->collect_result) return;
+  }
#ifndef BUILD_IN_REFERENT
   else {
     gc_set_weakref_sets(gc);
@@ -280,5 +288,5 @@

 void trace_obj_in_gen_fw(Collector *collector, void *p_ref)
 {
-  trace_object(collector, (Partial_Reveal_Object **)p_ref);
+  trace_object(collector, (REF *)p_ref);
 }
Index: src/trace_forward/fspace_nongen_forward_pool.cpp
===================================================================
--- src/trace_forward/fspace_nongen_forward_pool.cpp	(revision 519535)
+++ src/trace_forward/fspace_nongen_forward_pool.cpp	(working copy)
@@ -27,11 +27,11 @@

#ifdef MARK_BIT_FLIPPING

-static FORCE_INLINE void scan_slot(Collector* collector, Partial_Reveal_Object **p_ref)
+static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref)
 {
-  Partial_Reveal_Object *p_obj = *p_ref;
-  if(p_obj == NULL) return;
-
+  REF ref = *p_ref;
+  if(ref == COMPRESSED_NULL) return;
+
   collector_tracestack_push(collector, p_ref);
   return;
 }
@@ -40,13 +40,13 @@
 {
   if (!object_has_ref_field_before_scan(p_obj)) return;

-  Partial_Reveal_Object **p_ref;
+  REF *p_ref;

   if (object_is_array(p_obj)) {
     /* scan array object */
     Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
     unsigned int array_length = array->array_len;
-    p_ref = (Partial_Reveal_Object**)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));
+    p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));

     for (unsigned int i = 0; i < array_length; i++) {
scan_slot(collector, p_ref+i);
@@ -81,10 +81,10 @@
 */
#include "../verify/verify_live_heap.h"

-static FORCE_INLINE void forward_object(Collector* collector, Partial_Reveal_Object **p_ref)
+static FORCE_INLINE void forward_object(Collector* collector, REF *p_ref)
 {
   GC* gc = collector->gc;
-  Partial_Reveal_Object *p_obj = *p_ref;
+  Partial_Reveal_Object *p_obj = read_slot(p_ref);

   if(!obj_belongs_to_nos(p_obj)){
     if(obj_mark_in_oi(p_obj))
@@ -110,11 +110,11 @@
       Partial_Reveal_Object *p_new_obj = obj_get_fw_in_oi(p_obj);
       assert(p_new_obj);
-      *p_ref = p_new_obj;
+      write_slot(p_ref, p_new_obj);
       return;
     }
     /* otherwise, we successfully forwarded */
-    *p_ref = p_target_obj;
+    write_slot(p_ref, p_target_obj);

     /* we forwarded it, we need remember it for verification. */
     if(verify_live_heap) {
@@ -125,13 +125,13 @@
   return;
 }

-static void trace_object(Collector* collector, Partial_Reveal_Object **p_ref)
+static void trace_object(Collector *collector, REF *p_ref)
 {
   forward_object(collector, p_ref);

   Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack;
   while( !vector_stack_is_empty(trace_stack)){
-    p_ref = (Partial_Reveal_Object **)vector_stack_pop(trace_stack);
+    p_ref = (REF *)vector_stack_pop(trace_stack);
     forward_object(collector, p_ref);
     trace_stack = (Vector_Block*)collector->trace_stack;
   }
@@ -159,11 +159,10 @@
   while(root_set){
     POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
     while(!vector_block_iterator_end(root_set,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
-      iter = vector_block_iterator_advance(root_set,iter);
+      REF *p_ref = (REF *)*iter;
+      iter = vector_block_iterator_advance(root_set, iter);

-      Partial_Reveal_Object* p_obj = *p_ref;
-      assert(p_obj != NULL); /* root ref cann't be NULL, but remset can be */
+      assert(*p_ref); /* root ref can't be NULL, but remset can be */

       collector_tracestack_push(collector, p_ref);
     }
@@ -181,8 +180,8 @@
   while(trace_task){
     POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
     while(!vector_block_iterator_end(trace_task,iter)){
-      Partial_Reveal_Object** p_ref = (Partial_Reveal_Object** )*iter;
-      iter = vector_block_iterator_advance(trace_task,iter);
+      REF *p_ref = (REF *)*iter;
+      iter = vector_block_iterator_advance(trace_task, iter);

       trace_object(collector, p_ref);
       if(collector->result == FALSE) break; /* force return */
@@ -230,10 +229,17 @@
   if( collector->thread_handle != 0 ) return;

   gc->collect_result = gc_collection_result(gc);
-  if(!gc->collect_result) return;
+  if(!gc->collect_result){
+#ifndef BUILD_IN_REFERENT
+    fallback_finref_cleanup(gc);
+#endif
+    return;
+  }

-  if(!IGNORE_FINREF )
+  if(!IGNORE_FINREF ){
     collector_identify_finref(collector);
+    if(!gc->collect_result) return;
+  }
#ifndef BUILD_IN_REFERENT
   else {
     gc_set_weakref_sets(gc);
@@ -251,7 +257,7 @@

 void trace_obj_in_nongen_fw(Collector *collector, void *p_ref)
 {
-  trace_object(collector, (Partial_Reveal_Object **)p_ref);
+  trace_object(collector, (REF*)p_ref);
 }

#endif /* MARK_BIT_FLIPPING */
Index: src/utils/bit_ops.h
===================================================================
--- src/utils/bit_ops.h	(revision 519535)
+++ src/utils/bit_ops.h	(working copy)
@@ -23,32 +23,29 @@

#include "../common/gc_common.h"

-inline unsigned int word_get_first_set_lsb(unsigned int target_word)
+inline unsigned int word_get_first_set_lsb(POINTER_SIZE_INT target_word)
{
  assert(target_word != 0);
-  unsigned int bit_offset;
+  POINTER_SIZE_INT bit_offset;

-#ifdef _EM64T_
-
-#else /* ifdef _EM64T_*/
#ifdef PLATFORM_POSIX /* POSIX Platform*/
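+  /* bsf (bit scan forward) stores the index of the least-significant set bit of target_word into bit_offset */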
- __asm__ __volatile__( - "bsf %1,%0\n" - :"=r"(bit_offset) - :"m"(target_word) - ); + __asm__ __volatile__( + "bsf %1,%0\n" + :"=r"(bit_offset) + :"m"(target_word) + ); #else /*WIN32 Platform*/ - __asm{ - bsf eax, target_word - mov bit_offset, eax - } + __asm{ + bsf eax, target_word + mov bit_offset, eax + } #endif /* ifdef PLATFORM_POSIX else*/ -#endif /* ifdef _EM64T_ else */ - return bit_offset; + return (unsigned int)bit_offset; + } -inline unsigned int words_get_next_set_lsb(unsigned int* words, unsigned int count, unsigned int start_idx) +inline unsigned int words_get_next_set_lsb(POINTER_SIZE_INT* words, unsigned int count, unsigned int start_idx) { unsigned int bit_offset; @@ -59,10 +56,10 @@ bit_offset = start_idx - start_bit_offset; for(unsigned int i = start_word_index; i < count; i ++ ){ - unsigned int cur_word = *(words + i); + POINTER_SIZE_INT cur_word = *(words + i); if(start_word_index == i){ - unsigned int mask = ~((1 << start_bit_offset) - 1); + POINTER_SIZE_INT mask = ~(((POINTER_SIZE_INT)1 << start_bit_offset) - 1); cur_word = cur_word & mask; } @@ -71,32 +68,32 @@ return bit_offset; } - bit_offset += 32; + bit_offset += BITS_PER_WORD; } return bit_offset; } -inline void words_set_bit(unsigned int* words, unsigned int count, unsigned int start_idx) +inline void words_set_bit(POINTER_SIZE_INT* words, unsigned int count, unsigned int start_idx) { assert(start_idx < 128); - unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; + unsigned int word_index = start_idx >> BIT_SHIFT_TO_BITS_PER_WORD; unsigned int bit_offset = start_idx & BIT_MASK_TO_BITS_PER_WORD; if(word_index >= count) return; - unsigned int* p_word = words + word_index; - unsigned int old_value = *p_word; - unsigned int mask = 1 << bit_offset; - unsigned int new_value = old_value|mask; + POINTER_SIZE_INT* p_word = words + word_index; + POINTER_SIZE_INT old_value = *p_word; + POINTER_SIZE_INT mask = (POINTER_SIZE_INT)1 << bit_offset; + POINTER_SIZE_INT new_value = old_value|mask; *p_word = new_value; return; } -inline void words_clear_bit(unsigned int* words, unsigned int count, unsigned int start_idx) +inline void words_clear_bit(POINTER_SIZE_INT* words, unsigned int count, unsigned int start_idx) { assert(start_idx < 128); @@ -105,10 +102,10 @@ if(word_index >= count) return; - unsigned int* p_word = words + word_index; - unsigned int old_value = *p_word; - unsigned int mask = ~(1 << bit_offset); - unsigned int new_value = old_value & mask; + POINTER_SIZE_INT* p_word = words + word_index; + POINTER_SIZE_INT old_value = *p_word; + POINTER_SIZE_INT mask = ~((POINTER_SIZE_INT)1 << bit_offset); + POINTER_SIZE_INT new_value = old_value & mask; *p_word = new_value; Index: src/utils/sync_queue.h =================================================================== --- src/utils/sync_queue.h (revision 519535) +++ src/utils/sync_queue.h (working copy) @@ -26,20 +26,20 @@ struct Queue_Node; typedef struct Queue_Link{ - struct Queue_Node* ptr; - unsigned int count; + struct Queue_Node* ptr; + unsigned int count; }Queue_Link; typedef struct Queue_Node{ - __declspec(align(8)) - Queue_Link next; /* must be aligned to 8Byte*/ - unsigned int* value; + __declspec(align(8)) + Queue_Link next; /* must be aligned to 8Byte*/ + unsigned int* value; }Queue_Node; typedef struct Sync_Queue{ - __declspec(align(8)) - Queue_Link head; /* must be aligned to 8Byte*/ - Queue_Link tail; + __declspec(align(8)) + Queue_Link head; /* must be aligned to 8Byte*/ + Queue_Link tail; }Sync_Queue; inline Queue_Node * new_queue_node() @@ 
-56,9 +56,9 @@
 {
   Queue_Node *node = new_queue_node();
   node->next.ptr = NULL;
-  node->next.count = 0;
+  node->next.count = 0;
   queue->head.ptr = queue->tail.ptr = node;
-  queue->head.count = queue->tail.count = 0;
+  queue->head.count = queue->tail.count = 0;
   return;
 }
@@ -67,64 +67,63 @@

 inline void sync_queue_push(Sync_Queue* queue, unsigned int* value)
 {
-  Queue_Link tail, next, tmp1, tmp2;
-  Queue_Node* node = new_queue_node();
-  node->value = value;
-  node->next.ptr = NULL;
-  while(TRUE){
-    QLINK_VAL(tail) = QLINK_VAL(queue->tail);
-    QLINK_VAL(next) = QLINK_VAL(tail.ptr->next);
-    if( QLINK_VAL(tail) == QLINK_VAL(queue->tail)){
-      if( next.ptr==NULL ){
-        tmp1.ptr = node;
-        tmp1.count = next.count + 1;
-        node->next.count = tmp1.count;
-        QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(tail.ptr->next), QLINK_VAL(next), QLINK_VAL(tmp1))
-        if( QLINK_VAL(tmp1) == QLINK_VAL(tmp2))
+  Queue_Link tail, next, tmp1, tmp2;
+  Queue_Node* node = new_queue_node();
+  node->value = value;
+  node->next.ptr = NULL;
+  while(TRUE){
+    QLINK_VAL(tail) = QLINK_VAL(queue->tail);
+    QLINK_VAL(next) = QLINK_VAL(tail.ptr->next);
+    if( QLINK_VAL(tail) == QLINK_VAL(queue->tail)){
+      if( next.ptr==NULL ){
+        tmp1.ptr = node;
+        tmp1.count = next.count + 1;
+        node->next.count = tmp1.count;
+        QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(tail.ptr->next), QLINK_VAL(next), QLINK_VAL(tmp1));
+        if( QLINK_VAL(tmp1) == QLINK_VAL(tmp2))
           break;
-      }else{
-        tmp1.ptr = next.ptr;
-        tmp1.count = tail.count + 1;
-        atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
+      }else{
+        tmp1.ptr = next.ptr;
+        tmp1.count = tail.count + 1;
+        atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
       }
     }
   }
-  tmp1.ptr = node;
-  tmp1.count = tail.count + 1;
-  atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
-  return;
+  tmp1.ptr = node;
+  tmp1.count = tail.count + 1;
+  atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
+  return;
 }

 Boolean sync_queue_pull(Sync_Queue* queue, unsigned int * pvalue)
 {
-  Queue_Link head, tail, next, tmp1, tmp2;
-  while(TRUE){
-    QLINK_VAL(head) = QLINK_VAL(queue->head);
-    QLINK_VAL(tail) = QLINK_VAL(queue->tail);
-    QLINK_VAL(next) = QLINK_VAL(head.ptr->next);
-
-    if( QLINK_VAL(head) == QLINK_VAL(queue->head)){
-      if( head.ptr== tail.ptr )
+  Queue_Link head, tail, next, tmp1, tmp2;
+  while(TRUE){
+    QLINK_VAL(head) = QLINK_VAL(queue->head);
+    QLINK_VAL(tail) = QLINK_VAL(queue->tail);
+    QLINK_VAL(next) = QLINK_VAL(head.ptr->next);
+
+    if( QLINK_VAL(head) == QLINK_VAL(queue->head)){
+      if( head.ptr== tail.ptr )
         if( next.ptr == NULL )
-          return FALSE;
+          return FALSE;
         else{
-          tmp1.ptr = next.ptr;
-          tmp1.count = tail.count+1;
-          atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
-        }
-      else{
+          tmp1.ptr = next.ptr;
+          tmp1.count = tail.count+1;
+          atomic_cas64(QLINK_PTR(queue->tail), QLINK_VAL(tail), QLINK_VAL(tmp1));
+        }
+      else{
         *pvalue = next.ptr->value;
-        tmp1.ptr = next.ptr;
-        tmp1.count = head.count+1;
-        QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(queue->head), QLINK_VAL(head), QLINK_VAL(tmp1));
-        if( QLINK_VAL(tmp2) == QLINK_VAL(tmp1))
-          break;
-        }
+        tmp1.ptr = next.ptr;
+        tmp1.count = head.count+1;
+        QLINK_VAL(tmp2) = atomic_cas64(QLINK_PTR(queue->head), QLINK_VAL(head), QLINK_VAL(tmp1));
+        if( QLINK_VAL(tmp2) == QLINK_VAL(tmp1))
+          break;
+      }
     }
   }
-  free( head.ptr );
-  return TRUE;
+  free( head.ptr );
+  return TRUE;
 }
-
+
#endif /* _SYNC_QUEUE_H_ */
Index: src/utils/sync_stack.h
===================================================================
---
src/utils/sync_stack.h (revision 519535) +++ src/utils/sync_stack.h (working copy) @@ -106,9 +106,8 @@ while( top_entry != NULL ){ POINTER_SIZE_INT temp = stack_top_contruct(top_entry->next, version); - Stack_Top new_top = *(Stack_Top*)&temp; - temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top); - if(temp == *(POINTER_SIZE_INT*)&cur_top){ /* got it */ + temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)&stack->top, (void*)temp, (void*)cur_top); + if(temp == *(POINTER_SIZE_INT*)&cur_top){ // got it top_entry->next = NULL; return top_entry; } @@ -125,20 +124,18 @@ node->next = stack_top_get_entry(cur_top); POINTER_SIZE_INT new_version = stack_top_get_next_version(cur_top); POINTER_SIZE_INT temp = stack_top_contruct(node, new_version); - Stack_Top new_top = *(Stack_Top*)&temp; - + while( TRUE ){ - temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)&stack->top, *(void**)&new_top, *(void**)&cur_top); - if(temp == *(POINTER_SIZE_INT*)&cur_top){ /* got it */ + temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)&stack->top, (void*)temp, (void*)cur_top); + if(temp == *(POINTER_SIZE_INT*)&cur_top){ // got it return TRUE; } cur_top = stack->top; node->next = stack_top_get_entry(cur_top); new_version = stack_top_get_next_version(cur_top); temp = stack_top_contruct(node, new_version); - new_top = *(Stack_Top*)&temp; } - /* never comes here */ + // never comes here return FALSE; } Index: src/utils/vector_block.h =================================================================== --- src/utils/vector_block.h (revision 519535) +++ src/utils/vector_block.h (working copy) @@ -48,7 +48,6 @@ inline unsigned int vector_block_entry_count(Vector_Block* block) { return (unsigned int)(block->tail - block->head); } - inline Boolean vector_block_is_full(Vector_Block* block) { return block->tail == block->heap_end; } @@ -77,7 +76,7 @@ block->head = (POINTER_SIZE_INT*)block->entries; block->tail = (POINTER_SIZE_INT*)block->entries; #ifdef _DEBUG - memset(block->entries, 0, (POINTER_SIZE_INT)block->heap_end - (POINTER_SIZE_INT)block->entries); + memset(block->entries, 0, (POINTER_SIZE_INT)block->heap_end - (POINTER_SIZE_INT)block->entries); #endif }
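
Note for reviewers: the hunks above call compress_ref()/uncompress_ref() and the read_slot()/write_slot() wrappers, whose definitions live in compressed_ref.h and are not shown in this patch. Below is a minimal sketch of the semantics the call sites appear to assume: a 32-bit offset from HEAP_NULL (set to the reserved heap base in gen.cpp above), with offset 0 acting as COMPRESSED_NULL. This is an illustration, not the actual header; details such as the exact null encoding and the vtable compression (uncompress_vt) may differ.

/* Sketch only, for review. Assumes: REF is a 32-bit offset from HEAP_NULL,
 * and offset 0 (COMPRESSED_NULL) never names a live object because the head
 * of the heap is reserved (see LOS_HEAD_RESERVE_FOR_HEAP_NULL in lspace.cpp). */
typedef unsigned int REF;
#define COMPRESSED_NULL ((REF)0)

FORCE_INLINE REF compress_ref(Partial_Reveal_Object *p_obj)
{
  if(!p_obj) return COMPRESSED_NULL;  /* NULL maps to offset 0 */
  return (REF)((POINTER_SIZE_INT)p_obj - HEAP_NULL);
}

FORCE_INLINE Partial_Reveal_Object *uncompress_ref(REF ref)
{
  if(ref == COMPRESSED_NULL) return NULL;
  return (Partial_Reveal_Object *)(HEAP_NULL + (POINTER_SIZE_INT)ref);
}

/* read_slot/write_slot hide the (de)compression at each reference slot,
 * which is why the patch replaces raw loads and stores of *p_ref with them */
FORCE_INLINE Partial_Reveal_Object *read_slot(REF *p_ref)
{
  return uncompress_ref(*p_ref);
}

FORCE_INLINE void write_slot(REF *p_ref, Partial_Reveal_Object *p_obj)
{
  *p_ref = compress_ref(p_obj);
}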