Index: src/common/gc_block.h =================================================================== --- src/common/gc_block.h (revision 538396) +++ src/common/gc_block.h (working copy) @@ -22,6 +22,9 @@ #define _BLOCK_H_ #include "gc_common.h" +#ifdef USE_32BITS_HASHCODE +#include "hashcode.h" +#endif #define GC_BLOCK_SHIFT_COUNT 15 #define GC_BLOCK_SIZE_BYTES (1 << GC_BLOCK_SHIFT_COUNT) @@ -45,6 +48,9 @@ unsigned int block_idx; volatile unsigned int status; volatile unsigned int dest_counter; +#ifdef USE_32BITS_HASHCODE + Hashcode_Buf* hashcode_buf; /*hash code entry list*/ +#endif Partial_Reveal_Object* src; Partial_Reveal_Object* next_src; Block_Header* next; @@ -98,13 +104,40 @@ block->dest_counter = 0; block->src = NULL; block->next_src = NULL; +#ifdef USE_32BITS_HASHCODE + block->hashcode_buf = hashcode_buf_create(); +#endif } +inline void block_destruct(Block_Header* block) +{ +#ifdef USE_32BITS_HASHCODE + hashcode_buf_destory(block->hashcode_buf); +#endif +} + inline Partial_Reveal_Object *obj_end(Partial_Reveal_Object *obj) { +#ifdef USE_32BITS_HASHCODE + assert(vm_object_size(obj) != 0); + unsigned int hash_extend_size + = (hashcode_is_attached(obj))?GC_OBJECT_ALIGNMENT:0; + return (Partial_Reveal_Object *)((POINTER_SIZE_INT)obj + vm_object_size(obj) + hash_extend_size); +#else return (Partial_Reveal_Object *)((POINTER_SIZE_INT)obj + vm_object_size(obj)); +#endif } +#ifdef USE_32BITS_HASHCODE +inline Partial_Reveal_Object *obj_end_extend(Partial_Reveal_Object *obj) +{ + assert(vm_object_size(obj) != 0); + unsigned int hash_extend_size + = (obj_is_sethash_in_vt(obj))?GC_OBJECT_ALIGNMENT:0; + return (Partial_Reveal_Object *)((POINTER_SIZE_INT)obj + vm_object_size(obj) + hash_extend_size); +} +#endif + inline void obj_set_prefetched_next_pointer(Partial_Reveal_Object* obj, Partial_Reveal_Object* raw_prefetched_next){ /*Fixme: em64t: This may be not necessary!*/ if(raw_prefetched_next == 0){ @@ -132,6 +165,22 @@ return NULL; } +#ifdef USE_32BITS_HASHCODE +inline 
Partial_Reveal_Object* block_get_first_marked_object_extend(Block_Header* block, void** start_pos) +{ + Partial_Reveal_Object* cur_obj = (Partial_Reveal_Object*)block->base; + Partial_Reveal_Object* block_end = (Partial_Reveal_Object*)block->free; + + Partial_Reveal_Object* first_marked_obj = next_marked_obj_in_block(cur_obj, block_end); + if(!first_marked_obj) + return NULL; + + *start_pos = obj_end_extend(first_marked_obj); + + return first_marked_obj; +} +#endif + inline Partial_Reveal_Object* block_get_first_marked_object(Block_Header* block, void** start_pos) { Partial_Reveal_Object* cur_obj = (Partial_Reveal_Object*)block->base; @@ -188,7 +237,11 @@ inline Partial_Reveal_Object *block_get_first_marked_obj_after_prefetch(Block_Header *block, void **start_pos) { +#ifdef USE_32BITS_HASHCODE + return block_get_first_marked_object_extend(block, start_pos); +#else return block_get_first_marked_object(block, start_pos); +#endif } inline Partial_Reveal_Object *block_get_next_marked_obj_prefetch_next(Block_Header *block, void **start_pos) @@ -245,7 +298,12 @@ if(!cur_marked_obj) return NULL; +#ifdef USE_32BITS_HASHCODE + Partial_Reveal_Object *next_obj = obj_end_extend(cur_marked_obj); +#else Partial_Reveal_Object *next_obj = obj_end(cur_marked_obj); +#endif + *start_pos = next_obj; return cur_marked_obj; @@ -268,6 +326,32 @@ return; } +#ifdef USE_32BITS_HASHCODE +inline Hashcode_Buf* block_set_hashcode_buf(Block_Header *block, Hashcode_Buf* new_hashcode_buf) +{ + Hashcode_Buf* old_hashcode_buf = block->hashcode_buf; + block->hashcode_buf = new_hashcode_buf; + return old_hashcode_buf; +} + +inline void block_swap_hashcode_buf(Block_Header *block, Hashcode_Buf** new_ptr, Hashcode_Buf** old_ptr) +{ + Hashcode_Buf* temp = block_set_hashcode_buf(block, *new_ptr); + *old_ptr = *new_ptr; + *new_ptr = temp; + hashcode_buf_init(*new_ptr); +} + +inline Hashcode_Buf* block_get_hashcode_buf(Block_Header *block) +{ return block->hashcode_buf; } + +inline int 
obj_lookup_hashcode_in_buf(Partial_Reveal_Object *p_obj) +{ + Hashcode_Buf* hashcode_buf = block_get_hashcode_buf(GC_BLOCK_HEADER(p_obj)); + return hashcode_buf_lookup(p_obj,hashcode_buf); +} +#endif + #endif //#ifndef _BLOCK_H_ Index: src/common/gc_common.h =================================================================== --- src/common/gc_common.h (revision 538396) +++ src/common/gc_common.h (working copy) @@ -70,6 +70,8 @@ #define GC_OBJ_SIZE_THRESHOLD (4*KB) +#define USE_32BITS_HASHCODE + typedef void (*TaskType)(void*); enum Collection_Algorithm{ @@ -229,6 +231,11 @@ obj_set_vt(obj, (VT)((POINTER_SIZE_INT)vt & ~CONST_MARK_BIT)); } +inline void obj_clear_dual_bits_in_vt(Partial_Reveal_Object* p_obj){ + VT vt = obj_get_vt_raw(p_obj); + obj_set_vt(p_obj,(VT)((POINTER_SIZE_INT)vt & DUAL_MARKBITS_MASK)); +} + inline Boolean obj_is_marked_or_fw_in_oi(Partial_Reveal_Object *obj) { return get_obj_info_raw(obj) & DUAL_MARKBITS; } Index: src/common/gc_for_vm.cpp =================================================================== --- src/common/gc_for_vm.cpp (revision 538396) +++ src/common/gc_for_vm.cpp (working copy) @@ -27,6 +27,9 @@ #include "../thread/collector.h" #include "../verify/verify_live_heap.h" #include "../finalizer_weakref/finalizer_weakref.h" +#ifdef USE_32BITS_HASHCODE +#include "hashcode.h" +#endif static GC* p_global_gc = NULL; Boolean mutator_need_block; @@ -47,10 +50,11 @@ gc_parse_options(gc); gc_tls_init(); + + gc_metadata_initialize(gc); /* root set and mark stack */ gc_gen_initialize((GC_Gen*)gc, min_heap_size_bytes, max_heap_size_bytes); - gc_metadata_initialize(gc); /* root set and mark stack */ #ifndef BUILD_IN_REFERENT gc_finref_metadata_initialize(gc); #endif @@ -204,6 +208,7 @@ unsigned int gc_time_since_last_gc() { assert(0); return 0; } +#ifndef USE_32BITS_HASHCODE #define GCGEN_HASH_MASK 0x1fc int32 gc_get_hashcode(Managed_Object_Handle p_object) { @@ -225,6 +230,35 @@ } return hash; } +#else //USE_32BITS_HASHCODE +int32 
gc_get_hashcode(Managed_Object_Handle p_object) +{ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)p_object; + if(!p_obj) return 0; + assert(address_belongs_to_gc_heap(p_obj, p_global_gc)); + Obj_Info_Type info = get_obj_info_raw(p_obj); + int hash; + + switch(info & HASHCODE_MASK){ + case HASHCODE_SET_UNALLOCATED: + hash = hashcode_gen((void*)p_obj); + break; + case HASHCODE_SET_ATTACHED: + hash = hashcode_lookup(p_obj,info); + break; + case HASHCODE_SET_BUFFERED: + hash = hashcode_lookup(p_obj,info); + break; + case HASHCODE_UNSET: + set_obj_info(p_obj, info | HASHCODE_SET_BIT); + hash = hashcode_gen((void*)p_obj); + break; + default: + assert(0); + } + return hash; +} +#endif //USE_32BITS_HASHCODE void gc_finalize_on_exit() { Index: src/common/gc_metadata.cpp =================================================================== --- src/common/gc_metadata.cpp (revision 538396) +++ src/common/gc_metadata.cpp (working copy) @@ -75,6 +75,9 @@ gc_metadata.mutator_remset_pool = sync_pool_create(); gc_metadata.collector_remset_pool = sync_pool_create(); gc_metadata.collector_repset_pool = sync_pool_create(); +#ifdef USE_32BITS_HASHCODE + gc_metadata.collector_hashcode_pool = sync_pool_create(); +#endif gc->metadata = &gc_metadata; return; @@ -92,6 +95,9 @@ sync_pool_destruct(metadata->mutator_remset_pool); sync_pool_destruct(metadata->collector_remset_pool); sync_pool_destruct(metadata->collector_repset_pool); +#ifdef USE_32BITS_HASHCODE + sync_pool_destruct(metadata->collector_hashcode_pool); +#endif for(unsigned int i=0; inum_alloc_segs; i++){ assert(metadata->segments[i]); Index: src/common/gc_metadata.h =================================================================== --- src/common/gc_metadata.h (revision 538396) +++ src/common/gc_metadata.h (working copy) @@ -43,6 +43,9 @@ Pool* mutator_remset_pool; /* list of remsets generated by app during execution */ Pool* collector_remset_pool; /* list of remsets generated by gc during collection */ Pool* 
collector_repset_pool; /* list of repointed ref slot sets */ +#ifdef USE_32BITS_HASHCODE + Pool* collector_hashcode_pool; +#endif }GC_Metadata; @@ -92,6 +95,11 @@ return block; } +#ifdef USE_32BITS_HASHCODE +inline void free_set_pool_put_entry(Vector_Block* block, GC_Metadata *metadata) +{ pool_put_entry(metadata->free_set_pool, block); } +#endif + inline Vector_Block *free_task_pool_get_entry(GC_Metadata *metadata) { Vector_Block *block = pool_get_entry(metadata->free_task_pool); @@ -145,6 +153,20 @@ assert(collector->rem_set); } +#ifdef USE_32BITS_HASHCODE +inline void collector_hashcodeset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref) +{ + Vector_Block* hashcode_set = collector->hashcode_set; + vector_block_add_entry(hashcode_set, (POINTER_SIZE_INT) p_ref); + + if(!vector_block_is_full(hashcode_set)) return; + + pool_put_entry(gc_metadata.collector_hashcode_pool, hashcode_set); + collector->hashcode_set = free_set_pool_get_entry(&gc_metadata); + assert(collector->hashcode_set); +} +#endif + inline void collector_tracestack_push(Collector* collector, void* p_task) { /* we don't have assert as others because p_task is a p_obj for marking, Index: src/common/gc_space.h =================================================================== --- src/common/gc_space.h (revision 538396) +++ src/common/gc_space.h (working copy) @@ -112,6 +112,15 @@ return; } +inline void space_desturct_blocks(Blocked_Space* space) +{ + Block* blocks = (Block*)space->heap_start; + unsigned int i=0; + for(; i < space->num_managed_blocks; i++){ + Block_Header* block = (Block_Header*)&(blocks[i]); + block_destruct(block); + } +} inline void blocked_space_shrink(Blocked_Space* space, unsigned int changed_size) { Index: src/common/hashcode.h =================================================================== --- src/common/hashcode.h (revision 0) +++ src/common/hashcode.h (revision 0) @@ -0,0 +1,368 @@ +#ifndef _HASHCODE_H_ +#define _HASHCODE_H_ + +#include "gc_common.h" +#include 
"../utils/vector_block.h" +#include "../utils/seq_list.h" + +#define HASHCODE_MASK 0x1C + +#define HASHCODE_SET_BIT 0x04 +#define HASHCODE_ATTACHED_BIT 0x08 +#define HASHCODE_BUFFERED_BIT 0x10 + +#define HASHCODE_EXTENDED_VT_BIT 0x02 + +enum Hashcode_Kind{ + HASHCODE_UNSET = 0x0, + HASHCODE_SET_UNALLOCATED = HASHCODE_SET_BIT, + HASHCODE_SET_ATTACHED = HASHCODE_SET_BIT | HASHCODE_ATTACHED_BIT, + HASHCODE_SET_BUFFERED = HASHCODE_SET_BIT | HASHCODE_BUFFERED_BIT +}; + +inline Boolean obj_is_sethash_in_vt(Partial_Reveal_Object* p_obj){ + return (Boolean)((POINTER_SIZE_INT)obj_get_vt_raw(p_obj) & HASHCODE_EXTENDED_VT_BIT); +} + +inline void obj_sethash_in_vt(Partial_Reveal_Object* p_obj){ + VT vt = obj_get_vt_raw(p_obj); + obj_set_vt(p_obj,(VT)((POINTER_SIZE_INT)vt | HASHCODE_EXTENDED_VT_BIT)); +} + +inline Boolean hashcode_is_set(Partial_Reveal_Object* p_obj) +{ + Obj_Info_Type obj_info = get_obj_info_raw(p_obj); + return obj_info & HASHCODE_SET_BIT; +} + +inline Boolean hashcode_is_attached(Partial_Reveal_Object* p_obj) +{ + Obj_Info_Type obj_info = get_obj_info_raw(p_obj); + return obj_info & HASHCODE_ATTACHED_BIT; +} + +inline Boolean hashcode_is_buffered(Partial_Reveal_Object* p_obj) +{ + Obj_Info_Type obj_info = get_obj_info_raw(p_obj); + return obj_info & HASHCODE_BUFFERED_BIT; +} + +inline int hashcode_gen(void* addr) +{ return (int)(POINTER_SIZE_INT)addr; } + +typedef struct Hashcode_Buf{ + Seq_List* list; + POINTER_SIZE_INT* checkpoint; +}Hashcode_Buf; + +extern GC_Metadata gc_metadata; +Vector_Block* free_set_pool_get_entry(GC_Metadata *metadata); +void free_set_pool_put_entry(Vector_Block* block, GC_Metadata *metadata); + +inline void hashcode_buf_set_checkpoint(Hashcode_Buf* hashcode_buf) +{ hashcode_buf->checkpoint = vector_block_get_last_entry((Vector_Block*)hashcode_buf->list->end); } + +inline Hashcode_Buf* hashcode_buf_create() +{ + Hashcode_Buf* hashcode_buf = (Hashcode_Buf*) STD_MALLOC(sizeof(Hashcode_Buf)); + memset(hashcode_buf, 0, 
sizeof(Hashcode_Buf)); + hashcode_buf->list = seq_list_create(); + return hashcode_buf; +} + +inline void hashcode_buf_remove(Hashcode_Buf* hashcode_buf, Vector_Block* block) +{ + Seq_List* list = hashcode_buf->list; + seq_list_remove(list, (List_Node*) block); + vector_block_clear(block); + free_set_pool_put_entry(block, &gc_metadata); +} + +inline void hashcode_buf_clear(Hashcode_Buf* hashcode_buf) +{ + //push vector block back to free list + Seq_List* list = hashcode_buf->list; + seq_list_iterate_init(list); + + while(seq_list_has_next(list)){ + Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);; + vector_block_clear(curr_block); + free_set_pool_put_entry(curr_block, &gc_metadata); + } + seq_list_clear(list); + return; +} + +inline void hashcode_buf_destory(Hashcode_Buf* hashcode_buf) +{ + Seq_List* list = hashcode_buf->list; + hashcode_buf_clear(hashcode_buf); + seq_list_destruct(list); + STD_FREE((void*)hashcode_buf); +} + +inline void hashcode_buf_init(Hashcode_Buf* hashcode_buf) +{ + Seq_List* list = hashcode_buf->list; +#ifdef _DEBUG + seq_list_iterate_init(list); + assert(!seq_list_has_next(list)); +#endif + Vector_Block* free_block = free_set_pool_get_entry(&gc_metadata); + seq_list_add(list, (List_Node*)free_block); + hashcode_buf_set_checkpoint(hashcode_buf); + return; +} + +inline int hashcode_buf_lookup(Partial_Reveal_Object* p_obj,Hashcode_Buf* hashcode_buf) +{ + POINTER_SIZE_INT obj_addr = (POINTER_SIZE_INT)p_obj; + Seq_List* list = hashcode_buf->list; + seq_list_iterate_init(list); + while(seq_list_has_next(list)){ + Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list); + POINTER_SIZE_INT *iter = vector_block_iterator_init(curr_block); + + while(!vector_block_iterator_end(curr_block, iter)){ + POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter; + if(obj_addr != addr){ + iter = vector_block_iterator_advance(curr_block, iter); + iter = vector_block_iterator_advance(curr_block, iter); + }else{ + iter = 
vector_block_iterator_advance(curr_block, iter); + POINTER_SIZE_INT hashcode = (POINTER_SIZE_INT)*iter; + iter = vector_block_iterator_advance(curr_block, iter); + return *(int*)&hashcode; + } + } + } + assert(0); + return 0; +} + +inline void hashcode_buf_add(Partial_Reveal_Object* p_obj, int32 hashcode, Hashcode_Buf* hashcode_buf) +{ + Seq_List* list = hashcode_buf->list; + Vector_Block* tail_block = (Vector_Block*)seq_list_end_node(list); + vector_block_add_entry(tail_block, (POINTER_SIZE_INT) p_obj); + POINTER_SIZE_INT hashcode_var = 0; + *(int*) &hashcode_var = hashcode; + vector_block_add_entry(tail_block, hashcode_var); + + if(!vector_block_is_full(tail_block)) return; + + tail_block = free_set_pool_get_entry(&gc_metadata); + seq_list_add(list, (List_Node*)tail_block); + return; +} + +inline void hashcode_buf_refresh_all(Hashcode_Buf* hashcode_buf, POINTER_SIZE_INT dist) +{ + Seq_List* list = hashcode_buf->list; + seq_list_iterate_init(list); + while(seq_list_has_next(list)){ + Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);; + POINTER_SIZE_INT *iter = vector_block_iterator_init(curr_block); + while(!vector_block_iterator_end(curr_block, iter)){ + POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter; + *iter = addr - dist; + iter =vector_block_iterator_advance(curr_block, iter); + iter =vector_block_iterator_advance(curr_block, iter); + } + } + return; +} + +inline void hashcode_buf_rollback_new_entry(Hashcode_Buf* hashcode_buf) +{ + Vector_Block* first_block = VECTOR_BLOCK_HEADER(hashcode_buf->checkpoint); + POINTER_SIZE_INT* iter = hashcode_buf->checkpoint; + while(!vector_block_iterator_end(first_block, iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter; + Obj_Info_Type oi = get_obj_info_raw(p_obj); + set_obj_info(p_obj, oi & ~HASHCODE_BUFFERED_BIT); + iter =vector_block_iterator_advance(first_block, iter); + iter =vector_block_iterator_advance(first_block, iter); + } + first_block->tail = hashcode_buf->checkpoint; + 
+ Seq_List* list = hashcode_buf->list; + seq_list_iterate_init_after_node(list, (List_Node*)first_block); + while(seq_list_has_next(list)){ + Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);; + iter = vector_block_iterator_init(curr_block); + while(!vector_block_iterator_end(curr_block, iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter; + Obj_Info_Type oi = get_obj_info_raw(p_obj); + set_obj_info(p_obj, oi & ~HASHCODE_BUFFERED_BIT); + iter =vector_block_iterator_advance(curr_block, iter); + iter =vector_block_iterator_advance(curr_block, iter); + } + hashcode_buf_remove(hashcode_buf, curr_block); + } + return; +} + +inline void hashcode_buf_transfer_new_entry(Hashcode_Buf* old_buf, Hashcode_Buf* new_buf) +{ + hashcode_buf_set_checkpoint(new_buf); + + Vector_Block* first_block = VECTOR_BLOCK_HEADER(old_buf->checkpoint); + POINTER_SIZE_INT* iter = old_buf->checkpoint; + while(!vector_block_iterator_end(first_block, iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter; + + iter =vector_block_iterator_advance(first_block, iter); + POINTER_SIZE_INT hashcode = (POINTER_SIZE_INT)*iter; + iter =vector_block_iterator_advance(first_block, iter); + hashcode_buf_add(p_obj, *(int*) &hashcode, new_buf); + } + first_block->tail = old_buf->checkpoint; + + Seq_List* list = old_buf->list; + seq_list_iterate_init_after_node(list, (List_Node*)first_block); + while(seq_list_has_next(list)){ + Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);; + iter = vector_block_iterator_init(curr_block); + while(!vector_block_iterator_end(curr_block, iter)){ + Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter; + iter =vector_block_iterator_advance(curr_block, iter); + POINTER_SIZE_INT hashcode = (POINTER_SIZE_INT)*iter; + iter =vector_block_iterator_advance(curr_block, iter); + + hashcode_buf_add(p_obj, *(int*) &hashcode, new_buf); + } + hashcode_buf_remove(old_buf, curr_block); + } + return; +} + +inline 
void hashcode_buf_refresh_new_entry(Hashcode_Buf* hashcode_buf, POINTER_SIZE_INT dist) +{ + Vector_Block* first_block = VECTOR_BLOCK_HEADER(hashcode_buf->checkpoint); + POINTER_SIZE_INT* iter = hashcode_buf->checkpoint; + while(!vector_block_iterator_end(first_block, iter)){ + POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter; + *iter = addr - dist; + + iter =vector_block_iterator_advance(first_block, iter); + iter =vector_block_iterator_advance(first_block, iter); + } + + Seq_List* list = hashcode_buf->list; + seq_list_iterate_init_after_node(list, (List_Node*)first_block); + while(seq_list_has_next(list)){ + Vector_Block* curr_block = (Vector_Block*)seq_list_iterate_next(list);; + iter = vector_block_iterator_init(curr_block); + while(!vector_block_iterator_end(curr_block, iter)){ + POINTER_SIZE_INT addr = (POINTER_SIZE_INT)*iter; + *iter = addr - dist; + + iter =vector_block_iterator_advance(curr_block, iter); + iter =vector_block_iterator_advance(curr_block, iter); + } + } + hashcode_buf_set_checkpoint(hashcode_buf); + return; +} + +void collector_hashcodeset_add_entry(Collector* collector, Partial_Reveal_Object** p_ref); + +inline Obj_Info_Type slide_compact_process_hashcode(Partial_Reveal_Object* p_obj, void* dest_addr, + unsigned int* p_obj_size, Collector* collector, + Hashcode_Buf* old_buf, Hashcode_Buf* new_buf) +{ + Obj_Info_Type obj_info = get_obj_info(p_obj); + POINTER_SIZE_INT hashcode; + + switch(obj_info & HASHCODE_MASK){ + case HASHCODE_SET_UNALLOCATED: + if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj){ + *p_obj_size += GC_OBJECT_ALIGNMENT; + obj_info = obj_info | HASHCODE_ATTACHED_BIT; + *(int*) &hashcode = hashcode_gen(p_obj); + POINTER_SIZE_INT obj_end_pos = (POINTER_SIZE_INT)dest_addr + vm_object_size(p_obj); + collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)obj_end_pos); + collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)hashcode); + } + break; + + case HASHCODE_SET_ATTACHED: + 
obj_sethash_in_vt(p_obj); + break; + + case HASHCODE_SET_BUFFERED: + *(int*) &hashcode = hashcode_buf_lookup(p_obj, old_buf); + if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj){ + *p_obj_size += GC_OBJECT_ALIGNMENT; + obj_info = obj_info & ~HASHCODE_BUFFERED_BIT; + obj_info = obj_info | HASHCODE_ATTACHED_BIT; + POINTER_SIZE_INT obj_end_pos = (POINTER_SIZE_INT)dest_addr + vm_object_size(p_obj); + collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)obj_end_pos); + collector_hashcodeset_add_entry(collector, (Partial_Reveal_Object**)hashcode); + }else{ + hashcode_buf_add((Partial_Reveal_Object*)dest_addr, *(int*) &hashcode, new_buf); + } + break; + + case HASHCODE_UNSET: + break; + + default: + assert(0); + + } + return obj_info; +} + +inline void move_compact_process_hashcode(Partial_Reveal_Object* p_obj,Hashcode_Buf* old_buf, + Hashcode_Buf* new_buf) +{ + if(hashcode_is_set(p_obj) && !hashcode_is_attached(p_obj)){ + int hashcode; + if(hashcode_is_buffered(p_obj)){ + /*already buffered objects;*/ + hashcode = hashcode_buf_lookup(p_obj, old_buf); + hashcode_buf_add(p_obj, hashcode, new_buf); + }else{ + /*objects need buffering.*/ + hashcode = hashcode_gen(p_obj); + hashcode_buf_add(p_obj, hashcode, new_buf); + Obj_Info_Type oi = get_obj_info_raw(p_obj); + set_obj_info(p_obj, oi | HASHCODE_BUFFERED_BIT); + } + } +} + +inline Obj_Info_Type trace_forward_process_hashcode(Partial_Reveal_Object* p_obj, Partial_Reveal_Object* p_old_obj, + Obj_Info_Type oi, unsigned int p_obj_size) +{ + oi |= HASHCODE_ATTACHED_BIT; + *(int *)(((char*)p_obj) + p_obj_size - GC_OBJECT_ALIGNMENT) = hashcode_gen(p_old_obj); + assert(vm_object_size(p_obj) != 0); + return oi; +} + +inline void precompute_hashcode_extend_size(Partial_Reveal_Object* p_obj, void* dest_addr, + unsigned int * obj_size_precompute) +{ + if(hashcode_is_set(p_obj) && !hashcode_is_attached(p_obj)){ + if((POINTER_SIZE_INT)dest_addr != (POINTER_SIZE_INT)p_obj) + *obj_size_precompute += 
GC_OBJECT_ALIGNMENT; + } +} + +inline int obj_lookup_hashcode_in_buf(Partial_Reveal_Object *p_obj); +inline int hashcode_lookup(Partial_Reveal_Object* p_obj,Obj_Info_Type obj_info) +{ + int hash = 0; + if(hashcode_is_attached(p_obj)){ + int offset = vm_object_size(p_obj); + unsigned char* pos = (unsigned char *)p_obj; + hash = *(int*) (pos + offset); + }else if(hashcode_is_buffered(p_obj)){ + hash = obj_lookup_hashcode_in_buf(p_obj); + } + return hash; +} +#endif //_HASHCODE_H_ Index: src/gen/gen.cpp =================================================================== --- src/gen/gen.cpp (revision 538396) +++ src/gen/gen.cpp (working copy) @@ -25,6 +25,9 @@ #include "../verify/verify_live_heap.h" #include "../common/space_tuner.h" #include "../common/compressed_ref.h" +#ifdef USE_32BITS_HASHCODE +#include "../common/hashcode.h" +#endif /* fspace size limit is not interesting. only for manual tuning purpose */ POINTER_SIZE_INT min_nos_size_bytes = 16 * MB; @@ -229,10 +232,14 @@ Space* mos = (Space*)gc_gen->mos; Space* los = (Space*)gc_gen->los; - vm_unmap_mem(nos->heap_start, space_committed_size(nos)); - vm_unmap_mem(mos->heap_start, space_committed_size(mos)); - vm_unmap_mem(los->heap_start, space_committed_size(los)); + POINTER_SIZE_INT nos_size = space_committed_size(nos); + POINTER_SIZE_INT mos_size = space_committed_size(mos); + POINTER_SIZE_INT los_size = space_committed_size(los); + void* nos_start = nos->heap_start; + void* mos_start = mos->heap_start; + void* los_start = los->heap_start; + gc_nos_destruct(gc_gen); gc_gen->nos = NULL; @@ -241,6 +248,10 @@ gc_los_destruct(gc_gen); gc_gen->los = NULL; + + vm_unmap_mem(nos_start, nos_size); + vm_unmap_mem(mos_start, mos_size); + vm_unmap_mem(los_start, los_size); return; } @@ -433,10 +444,14 @@ while(curr_block < space_end) { POINTER_SIZE_INT p_obj = (POINTER_SIZE_INT)curr_block->base; POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)curr_block->free; + unsigned int hash_extend_size = 0; while(p_obj < block_end){ 
cont = vm_iterate_object((Managed_Object_Handle)p_obj); if (!cont) return; - p_obj = p_obj + vm_object_size((Partial_Reveal_Object *)p_obj); +#ifdef USE_32BITS_HASHCODE + hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)p_obj))?GC_OBJECT_ALIGNMENT:0; +#endif + p_obj = p_obj + vm_object_size((Partial_Reveal_Object *)p_obj) + hash_extend_size; } curr_block = curr_block->next; if(curr_block == NULL) break; @@ -460,13 +475,17 @@ Lspace* lspace = gc->los; POINTER_SIZE_INT lspace_obj = (POINTER_SIZE_INT)lspace->heap_start; POINTER_SIZE_INT lspace_end = (POINTER_SIZE_INT)lspace->heap_end; + unsigned int hash_extend_size = 0; while (lspace_obj < lspace_end) { if(!*((unsigned int *)lspace_obj)){ lspace_obj = lspace_obj + ((Free_Area*)lspace_obj)->size; }else{ cont = vm_iterate_object((Managed_Object_Handle)lspace_obj); if (!cont) return; - unsigned int obj_size = (unsigned int)ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object *)lspace_obj)); +#ifdef USE_32BITS_HASHCODE + hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object *)lspace_obj))?GC_OBJECT_ALIGNMENT:0; +#endif + unsigned int obj_size = (unsigned int)ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object *)lspace_obj)+hash_extend_size); lspace_obj = lspace_obj + obj_size; } } Index: src/mark_compact/fallback_mark_scan.cpp =================================================================== --- src/mark_compact/fallback_mark_scan.cpp (revision 538396) +++ src/mark_compact/fallback_mark_scan.cpp (working copy) @@ -178,3 +178,34 @@ { trace_object(collector, (REF *)p_ref); } + +#ifdef USE_32BITS_HASHCODE +void fallback_clear_fwd_obj_oi(Collector* collector) +{ + GC* gc = collector->gc; + assert(gc_match_kind(gc, FALLBACK_COLLECTION)); + + unsigned int num_active_collectors = gc->num_active_collectors; + atomic_cas32( &num_finished_collectors, 0, num_active_collectors); + + Block_Header* curr_block = fspace_get_next_block(); + while(curr_block){ + Partial_Reveal_Object* curr_obj = 
(Partial_Reveal_Object*) curr_block->base; + while(curr_obj < curr_block->free){ + if(obj_is_fw_in_oi(curr_obj)){ + set_obj_info(curr_obj, (Obj_Info_Type)0); + } + curr_obj = (Partial_Reveal_Object*)((POINTER_SIZE_INT)curr_obj + vm_object_size(curr_obj)); + } + curr_block = fspace_get_next_block(); + } + atomic_inc32(&num_finished_collectors); + while(num_finished_collectors < num_active_collectors) ; +} + +void fallback_clear_fwd_obj_oi_init(Collector* collector) +{ + fspace_block_iterate_init((Fspace*)((GC_Gen*)collector->gc)->nos); +} +#endif + Index: src/mark_compact/los_extention_mark_scan.cpp =================================================================== --- src/mark_compact/los_extention_mark_scan.cpp (revision 538396) +++ src/mark_compact/los_extention_mark_scan.cpp (working copy) @@ -26,10 +26,14 @@ if(obj_mark_in_vt(p_obj)){ collector_tracestack_push(collector, p_obj); + unsigned int obj_size = vm_object_size(p_obj); +#ifdef USE_32BITS_HASHCODE + obj_size += (hashcode_is_set(p_obj))?GC_OBJECT_ALIGNMENT:0; +#endif if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)collector->gc))) - collector->non_los_live_obj_size += vm_object_size(p_obj); + collector->non_los_live_obj_size += obj_size; else - collector->los_live_obj_size += round_up_to_size(vm_object_size(p_obj), KB); + collector->los_live_obj_size += round_up_to_size(obj_size, KB); } return; @@ -137,10 +141,14 @@ */ if(obj_mark_in_vt(p_obj)){ collector_tracestack_push(collector, p_obj); + unsigned int obj_size = vm_object_size(p_obj); +#ifdef USE_32BITS_HASHCODE + obj_size += (hashcode_is_set(p_obj))?GC_OBJECT_ALIGNMENT:0; +#endif if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))) - collector->non_los_live_obj_size += vm_object_size(p_obj); + collector->non_los_live_obj_size += obj_size; else - collector->los_live_obj_size += round_up_to_size(vm_object_size(p_obj), KB); + collector->los_live_obj_size += round_up_to_size(obj_size, KB); } } Index: src/mark_compact/mspace.cpp 
=================================================================== --- src/mark_compact/mspace.cpp (revision 538396) +++ src/mark_compact/mspace.cpp (working copy) @@ -24,6 +24,9 @@ static void mspace_destruct_blocks(Mspace* mspace) { +#ifdef USE_32BITS_HASHCODE + space_desturct_blocks((Blocked_Space*)mspace); +#endif return; } Index: src/mark_compact/mspace_collect_compact.cpp =================================================================== --- src/mark_compact/mspace_collect_compact.cpp (revision 538396) +++ src/mark_compact/mspace_collect_compact.cpp (working copy) @@ -38,6 +38,14 @@ POINTER_SIZE_INT tune_size = tuner->tuning_size; unsigned int tune_blocks = (unsigned int)(tune_size >> GC_BLOCK_SHIFT_COUNT); +#ifdef USE_32BITS_HASHCODE + unsigned int index = 0; + for(; index < tune_blocks; index++){ + Block* curr_block = &mspace->blocks[index]; + hashcode_buf_destory(((Block_Header*)curr_block)->hashcode_buf); + } +#endif + mspace->blocks = &mspace->blocks[tune_blocks]; mspace->heap_start = mspace->blocks; mspace->committed_heap_size -= tune_size; @@ -210,6 +218,9 @@ curr_block->new_free = curr_block->free; curr_block->ceiling = (void*)((POINTER_SIZE_INT)curr_block->base + GC_BLOCK_BODY_SIZE_BYTES); curr_block->status = BLOCK_COMPACTED; +#ifdef USE_32BITS_HASHCODE + curr_block->hashcode_buf = hashcode_buf_create(); +#endif last_block->next = curr_block; last_block = curr_block; } @@ -218,7 +229,11 @@ Collector* collector = gc->collectors[0]; collector->cur_target_block = los_trans_fake_blocks; collector->cur_target_block->status = BLOCK_TARGET; - collector->cur_compact_block = mos_first_block; + if(trans_blocks >= gc->num_active_collectors) + collector->cur_compact_block = mos_first_block; + else + collector->cur_compact_block = los_trans_fake_blocks; + collector->cur_compact_block->status = BLOCK_IN_COMPACT; for(i=1; i< gc->num_active_collectors; i++){ Index: src/mark_compact/mspace_collect_compact.h 
=================================================================== --- src/mark_compact/mspace_collect_compact.h (revision 538396) +++ src/mark_compact/mspace_collect_compact.h (working copy) @@ -46,6 +46,11 @@ void mspace_extend_compact(Collector *collector); +#ifdef USE_32BITS_HASHCODE +void fallback_clear_fwd_obj_oi(Collector* collector); +void fallback_clear_fwd_obj_oi_init(Collector* collector); +#endif + extern Boolean IS_MOVE_COMPACT; #endif /* _MSPACE_COLLECT_COMPACT_H_ */ Index: src/mark_compact/mspace_extend_compact.cpp =================================================================== --- src/mark_compact/mspace_extend_compact.cpp (revision 538396) +++ src/mark_compact/mspace_extend_compact.cpp (working copy) @@ -185,6 +185,12 @@ object_refix_ref_slots(p_obj, start_address, end_address, addr_diff); p_obj = obj_end(p_obj); } +#ifdef USE_32BITS_HASHCODE + /*repoint the p_obj in hashcode_table in the moved block.*/ + if(((void*)block) >= start_address && ((void*)block) <= end_address){ + hashcode_buf_refresh_all(block->hashcode_buf, (POINTER_SIZE_INT)addr_diff); + } +#endif } } Index: src/mark_compact/mspace_move_compact.cpp =================================================================== --- src/mark_compact/mspace_move_compact.cpp (revision 538396) +++ src/mark_compact/mspace_move_compact.cpp (working copy) @@ -22,6 +22,9 @@ #include "../trace_forward/fspace.h" #include "../mark_sweep/lspace.h" #include "../finalizer_weakref/finalizer_weakref.h" +#ifdef USE_32BITS_HASHCODE +#include "../common/hashcode.h" +#endif struct GC_Gen; Space* gc_get_nos(GC_Gen* gc); @@ -37,12 +40,20 @@ void* dest_sector_addr = dest_block->base; Boolean is_fallback = gc_match_kind(collector->gc, FALLBACK_COLLECTION); +#ifdef USE_32BITS_HASHCODE + Hashcode_Buf* old_hashcode_buf = NULL; + Hashcode_Buf* new_hashcode_buf = hashcode_buf_create(); + hashcode_buf_init(new_hashcode_buf); +#endif while( curr_block ){ void* start_pos; Partial_Reveal_Object* p_obj = 
block_get_first_marked_object(curr_block, &start_pos); if( !p_obj ){ + #ifdef USE_32BITS_HASHCODE + hashcode_buf_clear(curr_block->hashcode_buf); + #endif curr_block = mspace_get_next_compact_block(collector, mspace); continue; } @@ -55,17 +66,30 @@ /* we don't check if it's set, since only remaining objs from last NOS partial collection need it. */ obj_unmark_in_oi(p_obj); +#ifdef USE_32BITS_HASHCODE + move_compact_process_hashcode(p_obj, curr_block->hashcode_buf, new_hashcode_buf); +#endif + POINTER_SIZE_INT curr_sector_size = (POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)src_sector_addr; /* check if dest block is not enough to hold this sector. If yes, grab next one */ POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block); if( ((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) > block_end ){ dest_block->new_free = dest_sector_addr; +#ifdef USE_32BITS_HASHCODE + block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf); +#endif dest_block = mspace_get_next_target_block(collector, mspace); if(dest_block == NULL){ +#ifdef USE_32BITS_HASHCODE + hashcode_buf_rollback_new_entry(old_hashcode_buf); +#endif collector->result = FALSE; return; } +#ifdef USE_32BITS_HASHCODE + hashcode_buf_transfer_new_entry(old_hashcode_buf, new_hashcode_buf); +#endif if((!local_last_dest) || (dest_block->block_idx > local_last_dest->block_idx)) local_last_dest = dest_block; block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block); @@ -85,15 +109,26 @@ memmove(dest_sector_addr, src_sector_addr, curr_sector_size); +#ifdef USE_32BITS_HASHCODE + hashcode_buf_refresh_new_entry(new_hashcode_buf, sector_distance); +#endif + dest_sector_addr = (void*)((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size); src_sector_addr = p_obj; curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj); } +#ifdef USE_32BITS_HASHCODE + hashcode_buf_clear(curr_block->hashcode_buf); + #endif curr_block = mspace_get_next_compact_block(collector, mspace); } dest_block->new_free = 
dest_sector_addr; collector->cur_target_block = local_last_dest; +#ifdef USE_32BITS_HASHCODE + old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf); + hashcode_buf_destory(old_hashcode_buf); +#endif return; } Index: src/mark_compact/mspace_slide_compact.cpp =================================================================== --- src/mark_compact/mspace_slide_compact.cpp (revision 538396) +++ src/mark_compact/mspace_slide_compact.cpp (working copy) @@ -38,9 +38,18 @@ Block_Header *local_last_dest = dest_block; void *dest_addr = dest_block->base; Block_Header *last_src; + +#ifdef USE_32BITS_HASHCODE + Hashcode_Buf* old_hashcode_buf = NULL; + Hashcode_Buf* new_hashcode_buf = hashcode_buf_create(); + hashcode_buf_init(new_hashcode_buf); +#endif assert(!collector->rem_set); collector->rem_set = free_set_pool_get_entry(collector->gc->metadata); +#ifdef USE_32BITS_HASHCODE + collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata); +#endif while( curr_block ){ void* start_pos; @@ -60,7 +69,17 @@ unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj); - if( ((POINTER_SIZE_INT)dest_addr + obj_size) > (POINTER_SIZE_INT)GC_BLOCK_END(dest_block)){ + Obj_Info_Type obj_info = get_obj_info(p_obj); + + unsigned int obj_size_precompute = obj_size; + +#ifdef USE_32BITS_HASHCODE + precompute_hashcode_extend_size(p_obj, dest_addr, &obj_size_precompute); +#endif + if( ((POINTER_SIZE_INT)dest_addr + obj_size_precompute) > (POINTER_SIZE_INT)GC_BLOCK_END(dest_block)){ +#ifdef USE_32BITS_HASHCODE + block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf); +#endif dest_block->new_free = dest_addr; dest_block = mspace_get_next_target_block(collector, mspace); if(dest_block == NULL){ @@ -77,7 +96,9 @@ } assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)GC_BLOCK_END(dest_block)); - Obj_Info_Type obj_info = get_obj_info(p_obj); +#ifdef USE_32BITS_HASHCODE + obj_info = 
slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector,curr_block->hashcode_buf, new_hashcode_buf); +#endif if( obj_info != 0 ) { collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr); @@ -90,11 +111,17 @@ dest_addr = (void *)((POINTER_SIZE_INT) dest_addr + obj_size); p_obj = block_get_next_marked_obj_prefetch_next(curr_block, &start_pos); } - + #ifdef USE_32BITS_HASHCODE + hashcode_buf_clear(curr_block->hashcode_buf); + #endif curr_block = mspace_get_next_compact_block(collector, mspace); } +#ifdef USE_32BITS_HASHCODE + pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set); + collector->hashcode_set = NULL; +#endif pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set); collector->rem_set = NULL; dest_block->new_free = dest_addr; @@ -106,6 +133,10 @@ cur_last_dest = (Block_Header *)last_block_for_dest; } +#ifdef USE_32BITS_HASHCODE + old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf); + hashcode_buf_destory(old_hashcode_buf); +#endif return; } @@ -203,7 +234,7 @@ cur_dest_block = cur_dest_block->next; } } else { - cur_dest_block = set_next_block_for_dest(mspace); + cur_dest_block = mspace_block_iterator_get(mspace); } unsigned int total_dest_counter = 0; @@ -290,13 +321,21 @@ /* We don't set start_pos as p_obj in case that memmove of this obj may overlap itself. * In that case we can't get the correct vt and obj_info. 
*/ +#ifdef USE_32BITS_HASHCODE + start_pos = obj_end_extend(p_obj); +#else start_pos = obj_end(p_obj); +#endif do { assert(obj_is_marked_in_vt(p_obj)); +#ifdef USE_32BITS_HASHCODE + obj_clear_dual_bits_in_vt(p_obj); + #else obj_unmark_in_vt(p_obj); - - unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj); +#endif + + unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj); if(p_obj != p_target_obj){ memmove(p_target_obj, p_obj, obj_size); } @@ -380,7 +419,12 @@ gc_update_weakref_ignore_finref(gc); } #endif - + +#ifdef USE_32BITS_HASHCODE + if(gc_match_kind(gc, FALLBACK_COLLECTION)) + fallback_clear_fwd_obj_oi_init(collector); +#endif + last_block_for_dest = NULL; /* let other collectors go */ @@ -393,6 +437,11 @@ assign target addresses for all to-be-moved objects */ atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1); +#ifdef USE_32BITS_HASHCODE + if(gc_match_kind(gc, FALLBACK_COLLECTION)) + fallback_clear_fwd_obj_oi(collector); +#endif + mspace_compute_object_target(collector, mspace); old_num = atomic_inc32(&num_repointing_collectors); @@ -448,6 +497,9 @@ atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1); collector_restore_obj_info(collector); +#ifdef USE_32BITS_HASHCODE + collector_attach_hashcode(collector); +#endif old_num = atomic_inc32(&num_restoring_collectors); if( ++old_num == num_active_collectors ){ Index: src/mark_sweep/lspace.h =================================================================== --- src/mark_sweep/lspace.h (revision 538396) +++ src/mark_sweep/lspace.h (working copy) @@ -24,12 +24,15 @@ #include "../common/gc_common.h" #include "../thread/gc_thread.h" #include "free_area_pool.h" +#ifdef USE_32BITS_HASHCODE +#include "../common/hashcode.h" +#endif /*Fixme: This macro is for handling HEAP_NULL issues caused by JIT OPT*/ #ifdef COMPRESS_REFERENCE - #define LOS_HEAD_RESERVE_FOR_HEAP_NULL (GC_BLOCK_SIZE_BYTES ) + #define 
LOS_HEAD_RESERVE_FOR_HEAP_NULL ( SPACE_ALLOC_UNIT ) #else - #define LOS_HEAD_RESERVE_FOR_HEAP_NULL (0*KB) + #define LOS_HEAD_RESERVE_FOR_HEAP_NULL ( 0*KB ) #endif typedef struct Lspace{ @@ -80,6 +83,7 @@ { POINTER_SIZE_INT next_area_start = (POINTER_SIZE_INT)lspace->heap_start + (*iterate_index) * KB; BOOLEAN reach_heap_end = 0; + unsigned int hash_extend_size = 0; while(!reach_heap_end){ //FIXME: This while shoudl be if, try it! @@ -89,13 +93,18 @@ } if(next_area_start < (POINTER_SIZE_INT)lspace->heap_end){ //If there is a living object at this addr, return it, and update iterate_index + +#ifdef USE_32BITS_HASHCODE + hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)next_area_start))?GC_OBJECT_ALIGNMENT:0; +#endif + if(obj_is_marked_in_vt((Partial_Reveal_Object*)next_area_start)){ - POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)); + POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start) + hash_extend_size); *iterate_index = (unsigned int)((next_area_start + obj_size - (POINTER_SIZE_INT)lspace->heap_start) >> BIT_SHIFT_TO_KILO); return (Partial_Reveal_Object*)next_area_start; //If this is a dead object, go on to find a living one. 
}else{ - POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)); + POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)+ hash_extend_size); next_area_start += obj_size; } }else{ Index: src/mark_sweep/lspace_alloc_collect.cpp =================================================================== --- src/mark_sweep/lspace_alloc_collect.cpp (revision 538396) +++ src/mark_sweep/lspace_alloc_collect.cpp (working copy) @@ -238,15 +238,24 @@ void* dest_addr = lspace->heap_start; unsigned int iterate_index = 0; Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index); - + assert(!collector->rem_set); collector->rem_set = free_set_pool_get_entry(collector->gc->metadata); +#ifdef USE_32BITS_HASHCODE + collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata); +#endif while( p_obj ){ assert( obj_is_marked_in_vt(p_obj)); unsigned int obj_size = vm_object_size(p_obj); assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)lspace->heap_end); +#ifdef USE_32BITS_HASHCODE + obj_size += hashcode_is_attached(p_obj)? 
GC_OBJECT_ALIGNMENT : 0 ; + Obj_Info_Type obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, null, null); +#else Obj_Info_Type obj_info = get_obj_info_raw(p_obj); +#endif + if( obj_info != 0 ) { collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr); collector_remset_add_entry(collector, (Partial_Reveal_Object **)obj_info); @@ -259,6 +268,10 @@ pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set); collector->rem_set = NULL; +#ifdef USE_32BITS_HASHCODE + pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set); + collector->hashcode_set = NULL; +#endif lspace->scompact_fa_start = dest_addr; lspace->scompact_fa_end= lspace->heap_end; @@ -269,12 +282,20 @@ { unsigned int iterate_index = 0; Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index); + Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj); while( p_obj ){ assert( obj_is_marked_in_vt(p_obj)); +#ifdef USE_32BITS_HASHCODE + obj_clear_dual_bits_in_vt(p_obj); +#else obj_unmark_in_vt(p_obj); +#endif unsigned int obj_size = vm_object_size(p_obj); +#ifdef USE_32BITS_HASHCODE + obj_size += (obj_is_sethash_in_vt(p_obj))?GC_OBJECT_ALIGNMENT:0; +#endif Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj); POINTER_SIZE_INT target_obj_end = (POINTER_SIZE_INT)p_target_obj + obj_size; if( p_obj != p_target_obj){ @@ -373,13 +394,17 @@ the last time marked object is thought to be already marked and not scanned for this cycle. 
*/ obj_clear_dual_bits_in_oi(p_next_obj); /*For_statistic: sum up the size of suvived large objects, useful to deciede los extention.*/ - lspace->surviving_size += ALIGN_UP_TO_KILO(vm_object_size(p_next_obj)); +unsigned int obj_size = vm_object_size(p_next_obj); +#ifdef USE_32BITS_HASHCODE + obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0; +#endif + lspace->surviving_size += ALIGN_UP_TO_KILO(obj_size); } cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj); cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj); + unsigned int hash_extend_size = 0; - while(cur_area_end){ cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start; @@ -394,10 +419,17 @@ obj_unmark_in_vt(p_next_obj); obj_clear_dual_bits_in_oi(p_next_obj); /*For_statistic: sum up the size of suvived large objects, useful to deciede los extention.*/ - lspace->surviving_size += ALIGN_UP_TO_KILO(vm_object_size(p_next_obj)); + unsigned int obj_size = vm_object_size(p_next_obj); +#ifdef USE_32BITS_HASHCODE + obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0; +#endif + lspace->surviving_size += ALIGN_UP_TO_KILO(obj_size); } - cur_area_start = (void*)ALIGN_UP_TO_KILO((POINTER_SIZE_INT)p_prev_obj + vm_object_size(p_prev_obj)); +#ifdef USE_32BITS_HASHCODE + hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)p_prev_obj))?GC_OBJECT_ALIGNMENT:0; +#endif + cur_area_start = (void*)ALIGN_UP_TO_KILO((POINTER_SIZE_INT)p_prev_obj + vm_object_size(p_prev_obj) + hash_extend_size); cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj); } Index: src/thread/collector.cpp =================================================================== --- src/thread/collector.cpp (revision 538396) +++ src/thread/collector.cpp (working copy) @@ -49,6 +49,28 @@ } } +#ifdef USE_32BITS_HASHCODE +void collector_attach_hashcode(Collector *collector) +{ + Pool* hashcode_pool = collector->gc->metadata->collector_hashcode_pool; + Pool *free_pool = 
collector->gc->metadata->free_set_pool; + assert(!collector->hashcode_set); + + while(Vector_Block* hashcode_block = pool_get_entry(hashcode_pool)){ + POINTER_SIZE_INT *iter = vector_block_iterator_init(hashcode_block); + while(!vector_block_iterator_end(hashcode_block, iter)){ + POINTER_SIZE_INT* obj_end_pos = (POINTER_SIZE_INT*)*iter; + iter = vector_block_iterator_advance(hashcode_block, iter); + POINTER_SIZE_INT hashcode = *iter; + iter = vector_block_iterator_advance(hashcode_block, iter); + *obj_end_pos = hashcode; + } + vector_block_clear(hashcode_block); + pool_put_entry(free_pool, hashcode_block); + } +} +#endif + static void collector_reset_thread(Collector *collector) { collector->task_func = NULL; Index: src/thread/collector.h =================================================================== --- src/thread/collector.h (revision 538396) +++ src/thread/collector.h (working copy) @@ -43,6 +43,9 @@ Vector_Block* rep_set; /* repointed set */ Vector_Block* rem_set; +#ifdef USE_32BITS_HASHCODE + Vector_Block* hashcode_set; +#endif Vector_Block *softref_set; Vector_Block *weakref_set; @@ -69,6 +72,9 @@ void collector_execute_task(GC* gc, TaskType task_func, Space* space); void collector_restore_obj_info(Collector* collector); +#ifdef USE_32BITS_HASHCODE +void collector_attach_hashcode(Collector *collector); +#endif inline Boolean gc_collection_result(GC* gc) { Index: src/thread/collector_alloc.h =================================================================== --- src/thread/collector_alloc.h (revision 538396) +++ src/thread/collector_alloc.h (working copy) @@ -22,6 +22,9 @@ #define _COLLECTOR_ALLOC_H_ #include "gc_thread.h" +#ifdef USE_32BITS_HASHCODE +#include "../common/hashcode.h" +#endif void* mos_alloc(unsigned size, Allocator *allocator); @@ -37,6 +40,11 @@ /* otherwise, try to alloc it. 
mos should always has enough space to hold nos during collection */ unsigned int size = vm_object_size(p_obj); + +#ifdef USE_32BITS_HASHCODE + Boolean obj_is_set_hashcode = hashcode_is_set(p_obj); + if(obj_is_set_hashcode) size += GC_OBJECT_ALIGNMENT; +#endif Partial_Reveal_Object* p_targ_obj = thread_local_alloc(size, (Allocator*)collector); if(!p_targ_obj) @@ -60,8 +68,16 @@ return NULL; } - /* we forwarded the object */ +#ifdef USE_32BITS_HASHCODE + if(obj_is_set_hashcode){ + memcpy(p_targ_obj, p_obj, size-GC_OBJECT_ALIGNMENT); + oi = trace_forward_process_hashcode(p_targ_obj, p_obj ,oi, size); + }else{ + memcpy(p_targ_obj, p_obj, size); + } +#else memcpy(p_targ_obj, p_obj, size); +#endif //USE_32BITS_HASHCODE /* we need clear the bit to give major collection a clean status. */ if(gc_is_gen_mode()) @@ -72,7 +88,12 @@ else set_obj_info(p_targ_obj, oi|FLIP_MARK_BIT); +#else +#ifdef USE_32BITS_HASHCODE + else if(obj_is_set_hashcode) + set_obj_info(p_targ_obj, oi); #endif +#endif return p_targ_obj; Index: src/trace_forward/fspace.cpp =================================================================== --- src/trace_forward/fspace.cpp (revision 538396) +++ src/trace_forward/fspace.cpp (working copy) @@ -27,6 +27,9 @@ static void fspace_destruct_blocks(Fspace* fspace) { +#ifdef USE_32BITS_HASHCODE + space_desturct_blocks((Blocked_Space*)fspace); +#endif return; } @@ -146,6 +149,31 @@ return; } +#ifdef USE_32BITS_HASHCODE +Block_Header* fspace_next_block; + +void fspace_block_iterate_init(Fspace* fspace) +{ + fspace_next_block = (Block_Header*) fspace->blocks; +} + +Block_Header* fspace_get_next_block() +{ + Block_Header* curr_block = (Block_Header*) fspace_next_block; + while(fspace_next_block != NULL){ + Block_Header* next_block = curr_block->next; + + Block_Header* temp = (Block_Header*)atomic_casptr((volatile void**)&fspace_next_block, next_block, curr_block); + if(temp != curr_block){ + curr_block = (Block_Header*) fspace_next_block; + continue; + } + return 
curr_block; + } + return NULL; +} +#endif + void collector_execute_task(GC* gc, TaskType task_func, Space* space); #include "../gen/gen.h" Index: src/trace_forward/fspace.h =================================================================== --- src/trace_forward/fspace.h (revision 538396) +++ src/trace_forward/fspace.h (working copy) @@ -58,4 +58,8 @@ void fspace_collection(Fspace* fspace); +#ifdef USE_32BITS_HASHCODE +void fspace_block_iterate_init(Fspace* fspace); +Block_Header* fspace_get_next_block(); +#endif #endif // _FROM_SPACE_H_ Index: src/utils/seq_list.h =================================================================== --- src/utils/seq_list.h (revision 0) +++ src/utils/seq_list.h (revision 0) @@ -0,0 +1,120 @@ +#ifndef _SEQ_LIST_H_ +#define _SEQ_LIST_H_ + +#include "vector_block.h" + +typedef struct List_Node{ + List_Node* next; +}List_Node; + +typedef struct Seq_List{ + List_Node* head; + List_Node* end; + List_Node* curr; +#ifdef _DEBUG + unsigned int node_num; +#endif +}Seq_List; + +inline Seq_List* seq_list_create() +{ + unsigned int size = sizeof(Seq_List); + Seq_List* seq_list = (Seq_List*)STD_MALLOC(size); + memset(seq_list, 0, size); + + //List Head + size = sizeof(List_Node); + List_Node* lnode = (List_Node*)STD_MALLOC(size); + seq_list->head = seq_list->end = lnode; + lnode->next = lnode; + + return seq_list; +} + +inline void seq_list_destruct(Seq_List* seq_list) +{ + STD_FREE(seq_list->head); + STD_FREE(seq_list); +} + +inline Boolean seq_list_add(Seq_List* seq_list, List_Node* node) +{ +#ifdef _DEBUG + seq_list->node_num ++; +#endif + seq_list->end ->next = node; + seq_list->end = node; + node->next = seq_list->head; + return TRUE; +} + +inline void seq_list_iterate_init(Seq_List* seq_list) +{ + seq_list->curr = seq_list->head->next; +} + +inline void seq_list_iterate_init_after_node(Seq_List* seq_list, List_Node* begin) +{ + seq_list->curr = begin->next; +} + +inline List_Node* seq_list_iterate_next(Seq_List* seq_list) +{ + 
if(seq_list->curr != seq_list->head){ + List_Node* ret_node = seq_list->curr; + seq_list->curr =seq_list->curr->next; + return ret_node; + } + return NULL; +} + +inline Boolean seq_list_has_next(Seq_List* seq_list) +{ + return seq_list->curr != seq_list->head; +} + +inline List_Node* seq_list_end_node(Seq_List* seq_list) +{ return seq_list->end; } + +inline List_Node* seq_list_lookup_prev_node(Seq_List* seq_list, List_Node* node) +{ + List_Node* prev_node = seq_list->head; + seq_list_iterate_init(seq_list); + while(seq_list_has_next(seq_list)){ + List_Node* curr_node = seq_list_iterate_next(seq_list); + if( node == curr_node ) return prev_node; + prev_node = curr_node; + } + return NULL; +} + +inline Boolean seq_list_remove(Seq_List* seq_list, List_Node* node) +{ + List_Node* prev_node = seq_list_lookup_prev_node(seq_list, node); + if(prev_node==NULL) return FALSE; //need assertion here. + prev_node->next = node->next; +#ifdef _DEBUG + seq_list->node_num --; +#endif + if(seq_list->end == node) seq_list->end = prev_node; + return TRUE; +} + +inline void seq_list_clear(Seq_List* seq_list) +{ + seq_list->end = seq_list->head; + seq_list->curr = seq_list->head; + List_Node* head = seq_list->head; + head->next = seq_list->head; +#ifdef _DEBUG + seq_list->node_num = 0; +#endif +} + +#ifdef _DEBUG +inline unsigned int seq_list_size(Seq_List* seq_list) +{ + return seq_list->node_num; +} +#endif +#endif //_SEQ_LIST_H_ Index: src/utils/vector_block.h =================================================================== --- src/utils/vector_block.h (revision 538396) +++ src/utils/vector_block.h (working copy) @@ -35,6 +35,9 @@ #define VECTOR_BLOCK_HEADER_SIZE_BYTES ((POINTER_SIZE_INT)((Vector_Block*)0)->entries) #define VECTOR_BLOCK_ENTRY_NUM ((VECTOR_BLOCK_DATA_SIZE_BYTES - VECTOR_BLOCK_HEADER_SIZE_BYTES) >> BIT_SHIFT_TO_BYTES_OF_POINTER_SIZE_INT ) +#define VECTOR_BLOCK_LOW_MASK ((POINTER_SIZE_INT)(VECTOR_BLOCK_DATA_SIZE_BYTES - 1)) +#define VECTOR_BLOCK_HIGH_MASK 
(~VECTOR_BLOCK_LOW_MASK) +#define VECTOR_BLOCK_HEADER(addr) ((Vector_Block *)((POINTER_SIZE_INT)(addr) & VECTOR_BLOCK_HIGH_MASK)) inline void vector_block_init(Vector_Block* block, unsigned int size) { @@ -90,6 +93,8 @@ inline Boolean vector_block_iterator_end(Vector_Block* block, POINTER_SIZE_INT* iter) { return iter == block->tail; } +inline POINTER_SIZE_INT* vector_block_get_last_entry(Vector_Block* block) +{ return block->tail; } /* Below is to use Vector_Block as stack (for trace-forwarding DFS order ) */ inline void vector_stack_init(Vector_Block* block) Index: src/verify/verifier_scanner.cpp =================================================================== --- src/verify/verifier_scanner.cpp (revision 538396) +++ src/verify/verifier_scanner.cpp (working copy) @@ -297,7 +297,11 @@ } if((POINTER_SIZE_INT)next_area_start < (POINTER_SIZE_INT)lspace->heap_end){ ret_obj = next_area_start; - POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start)); + unsigned int hash_extend_size = 0; +#ifdef USE_32BITS_HASHCODE + hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)next_area_start))?GC_OBJECT_ALIGNMENT:0; +#endif + POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start) + hash_extend_size); assert(obj_size); next_area_start = (POINTER_SIZE_INT*)((POINTER_SIZE_INT)next_area_start + obj_size); return (Partial_Reveal_Object*)ret_obj; Index: src/verify/verify_live_heap.cpp =================================================================== --- src/verify/verify_live_heap.cpp (revision 538396) +++ src/verify/verify_live_heap.cpp (working copy) @@ -121,7 +121,7 @@ verifier_reset_gc_verification(heap_verifier); (*heap_verifier->live_obj_scanner)(heap_verifier); - + verifier_set_fallback_collection(heap_verifier->gc_verifier, FALSE); } void event_mutator_allocate_newobj(Partial_Reveal_Object* p_newobj, POINTER_SIZE_INT size, VT vt_raw)