diff -BburN vm/gc_gen/src/common/gc_common.h vm.uncompressed/gc_gen/src/common/gc_common.h
--- vm/gc_gen/src/common/gc_common.h	2007-11-15 15:50:59.000000000 +0800
+++ vm.uncompressed/gc_gen/src/common/gc_common.h	2007-11-15 16:00:45.000000000 +0800
@@ -137,11 +137,6 @@
   GC_CAUSE_RUNTIME_FORCE_GC
 };
 
-/*Fixme: There is only compressed mode under em64t currently.*/
-#ifdef POINTER64
-  #define COMPRESS_REFERENCE
-#endif
-
 extern POINTER_SIZE_INT HEAP_NULL;
 
 #ifdef POINTER64
@@ -339,7 +334,7 @@
   /* It's important to clear the FLIP_FORWARD_BIT before collection ends, since it is the same as
      next minor cycle's FLIP_MARK_BIT. And if next cycle is major, it is also confusing
      as FLIP_FORWARD_BIT. (The bits are flipped only in minor collection). */
-  Obj_Info_Type dst = (Obj_Info_Type)obj_ptr_to_ref((Partial_Reveal_Object *) dest);
+  Obj_Info_Type dst = (Obj_Info_Type)(POINTER_SIZE_INT)obj_ptr_to_ref((Partial_Reveal_Object *) dest);
   set_obj_info(obj, dst | FLIP_FORWARD_BIT);
 }
 
diff -BburN vm/gc_gen/src/common/gc_for_class.h vm.uncompressed/gc_gen/src/common/gc_for_class.h
--- vm/gc_gen/src/common/gc_for_class.h	2007-11-15 15:50:59.000000000 +0800
+++ vm.uncompressed/gc_gen/src/common/gc_for_class.h	2007-11-15 16:00:45.000000000 +0800
@@ -96,7 +96,17 @@
 #define GCVT_ALIGNMENT 8
 #define GCVT_ALIGN_MASK (GCVT_ALIGNMENT-1)
 
+#ifdef POINTER64
+  #ifdef REFS_USE_COMPRESSED
+    #define COMPRESS_REFERENCE
+  #endif
+#endif
+
+#ifdef COMPRESS_REFERENCE
 typedef uint32 Obj_Info_Type;
+#else
+typedef POINTER_SIZE_INT Obj_Info_Type;
+#endif
 
 typedef struct GC_VTable_Info {
 
diff -BburN vm/gc_gen/src/thread/collector_alloc.h vm.uncompressed/gc_gen/src/thread/collector_alloc.h
--- vm/gc_gen/src/thread/collector_alloc.h	2007-11-15 15:50:59.000000000 +0800
+++ vm.uncompressed/gc_gen/src/thread/collector_alloc.h	2007-11-15 16:00:45.000000000 +0800
@@ -61,7 +61,11 @@
   /* else, take the obj by setting the forwarding flag atomically
      we don't put a simple bit in vt because we need compute obj size later. */
   REF target = obj_ptr_to_ref(p_targ_obj);
-  if (oi != (Obj_Info_Type)atomic_cas32((volatile unsigned int*)get_obj_info_addr(p_obj), ( ( (unsigned int)target |FORWARD_BIT)), oi)) {
+#ifdef COMPRESS_REFERENCE
+  if (oi != (Obj_Info_Type)atomic_cas32((volatile unsigned int*)get_obj_info_addr(p_obj), ((unsigned int)(POINTER_SIZE_INT)target |FORWARD_BIT), oi)) {
+#else
+  if (oi != (Obj_Info_Type)(POINTER_SIZE_INT)atomic_casptr((volatile void**)get_obj_info_addr(p_obj), (void *)(((POINTER_SIZE_INT)target) |FORWARD_BIT), (void *)(POINTER_SIZE_INT)oi)) {
+#endif
    /* forwarded by other, we need unalloc the allocated obj. We may waste some space if the allocation switched block.
       The remaining part of the switched block cannot be revivied for next allocation of object that has smaller size than this one.
    */
diff -BburN vm/vmcore/include/object_layout.h vm.uncompressed/vmcore/include/object_layout.h
--- vm/vmcore/include/object_layout.h	2007-11-15 15:49:46.000000000 +0800
+++ vm.uncompressed/vmcore/include/object_layout.h	2007-11-15 16:00:25.000000000 +0800
@@ -221,7 +221,11 @@
 typedef struct ManagedObject {
 #if defined USE_COMPRESSED_VTABLE_POINTERS
     uint32 vt_offset;
+#ifdef REFS_USE_COMPRESSED
     uint32 obj_info;
+#else
+    POINTER_SIZE_INT obj_info;
+#endif
     VTable *vt_unsafe() { return (VTable*)(vt_offset + vm_get_vtable_base()); }
     VTable *vt() { assert(vt_offset); return vt_unsafe(); }
     static VTable *allocation_handle_to_vtable(Allocation_Handle ah) {
@@ -231,7 +235,11 @@
     static bool are_vtable_pointers_compressed() { return true; }
 #else // USE_COMPRESSED_VTABLE_POINTERS
     VTable *vt_raw;
+#ifdef REFS_USE_COMPRESSED
+    uint32 obj_info;
+#else
     POINTER_SIZE_INT obj_info;
+#endif
     VTable *vt_unsafe() { return vt_raw; }
     VTable *vt() { assert(vt_raw); return vt_unsafe(); }
     static VTable *allocation_handle_to_vtable(Allocation_Handle ah) {
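
Note on the collector_alloc.h hunk (not part of the patch itself): once reference compression becomes optional, obj_info can be pointer-sized (uncompressed references on a 64-bit VM), and the forwarding reference no longer fits in a 32-bit CAS; that is why the patch selects atomic_cas32 or atomic_casptr to match the width of Obj_Info_Type. Below is a minimal, self-contained sketch of the same forwarding pattern, written against C11 atomics instead of the VM's port-layer CAS helpers. Only REFS_USE_COMPRESSED and FORWARD_BIT echo names from the patch; obj_info_t, sketch_object and sketch_try_forward are hypothetical names used purely for illustration.

#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>

#ifdef REFS_USE_COMPRESSED
typedef uint32_t  obj_info_t;   /* compressed refs: header word stays 32-bit        */
#else
typedef uintptr_t obj_info_t;   /* raw refs on 64-bit: header word is pointer-sized */
#endif

#define FORWARD_BIT ((obj_info_t)0x1)

typedef struct sketch_object {
    _Atomic obj_info_t obj_info;   /* header word that receives the forwarding ref */
} sketch_object;

/* Try to claim 'obj' by atomically installing the encoded forwarding reference.
 * Returns true if this thread won the race, false if another collector thread
 * forwarded the object first. */
bool sketch_try_forward(sketch_object *obj, obj_info_t target_ref)
{
    obj_info_t oi = atomic_load(&obj->obj_info);
    if (oi & FORWARD_BIT)
        return false;                           /* already forwarded */
    /* The CAS width follows obj_info_t automatically here, which is what the
       patch achieves by switching between atomic_cas32 and atomic_casptr. */
    return atomic_compare_exchange_strong(&obj->obj_info, &oi,
                                          target_ref | FORWARD_BIT);
}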