Index: src/mark_compact/mspace.h =================================================================== --- src/mark_compact/mspace.h (revision 491045) +++ src/mark_compact/mspace.h (working copy) @@ -24,9 +24,9 @@ #include "../common/gc_block.h" #include "../thread/gc_thread.h" -/* Mark-compaction space is orgnized into blocks*/ +/* Mark-compaction space is organized into blocks*/ typedef struct Mspace{ - /* <-- first couple of fields are overloadded as Space */ + /* <-- first couple of fields are overloaded as Space */ void* heap_start; void* heap_end; unsigned int reserved_heap_size; @@ -37,7 +37,7 @@ Boolean (*mark_object_func)(Mspace* space, Partial_Reveal_Object* p_obj); /* END of Space --> */ - Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ + Block* blocks; /* short-cut for mspace blockheader access, not mandatory */ /* FIXME:: the block indices should be replaced with block header addresses */ unsigned int first_block_idx; Index: src/trace_forward/fspace.h =================================================================== --- src/trace_forward/fspace.h (revision 491045) +++ src/trace_forward/fspace.h (working copy) @@ -29,11 +29,11 @@ */ extern Boolean forward_first_half; -/* boundary spliting fspace into forwarding part and remaining part */ +/* boundary splitting fspace into forwarding part and remaining part */ extern void* object_forwarding_boundary; typedef struct Fspace { - /* <-- first couple of fields are overloadded as Space */ + /* <-- first couple of fields are overloaded as Space */ void* heap_start; void* heap_end; unsigned int reserved_heap_size; @@ -44,7 +44,7 @@ Boolean (*mark_object_func)(Fspace* space, Partial_Reveal_Object* p_obj); /* END of Space --> */ - Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ + Block* blocks; /* short-cut for mspace blockheader access, not mandatory */ /* FIXME:: the block indices should be replaced with block header addresses */ unsigned int 
first_block_idx; Index: src/trace_forward/fspace_alloc.cpp =================================================================== --- src/trace_forward/fspace_alloc.cpp (revision 491045) +++ src/trace_forward/fspace_alloc.cpp (working copy) @@ -59,7 +59,7 @@ } -/* FIXME:: the collection should be seperated from the alloation */ +/* FIXME:: the collection should be separated from the allocation */ void* fspace_alloc(unsigned size, Allocator *allocator) { void* p_return = NULL; Index: src/thread/collector_alloc.cpp =================================================================== --- src/thread/collector_alloc.cpp (revision 491045) +++ src/thread/collector_alloc.cpp (working copy) @@ -42,7 +42,7 @@ we don't put a simple bit in vt because we need compute obj size later. */ if ((unsigned int)vt != atomic_cas32((unsigned int*)obj_get_vtraw_addr(p_obj), ((unsigned int)p_targ_obj|FORWARDING_BIT_MASK), (unsigned int)vt)) { /* forwarded by other, we need unalloc the allocated obj. We may waste some space if the allocation switched - block. The remaining part of the switched block cannot be revivied for next allocation of + block. The remaining part of the switched block cannot be revived for next allocation of object that has smaller size than this one. 
*/ assert( obj_is_forwarded_in_vt(p_obj) && !obj_is_marked_in_vt(p_obj)); thread_local_unalloc(size, (Allocator*)collector); @@ -51,7 +51,7 @@ /* we forwarded the object */ memcpy(p_targ_obj, p_obj, size); - /* because p_obj has forwarding pointer in its vt, we set it seperately here */ + /* because p_obj has forwarding pointer in its vt, we set it separately here */ obj_set_vt(p_targ_obj, (Allocation_Handle)vt); return p_targ_obj; Index: src/thread/mutator_alloc.cpp =================================================================== --- src/thread/mutator_alloc.cpp (revision 491045) +++ src/thread/mutator_alloc.cpp (working copy) @@ -24,7 +24,7 @@ #include "../finalizer_weakref/finalizer_weakref_metadata.h" -/* classloader sometimes sets the bit for finalizible objects (?) */ +/* classloader sometimes sets the bit for finalizable objects (?) */ inline unsigned int get_instance_data_size (unsigned int encoded_size) { return (encoded_size & NEXT_TO_HIGH_BIT_CLEAR_MASK); } @@ -66,7 +66,7 @@ if(type_has_finalizer((Partial_Reveal_VTable *)ah)) return NULL; - /* object shoud be handled specially */ + /* object should be handled specially */ if ( size > GC_OBJ_SIZE_THRESHOLD ) return NULL; Allocator* allocator = (Allocator*)gc_get_tls(); Index: src/finalizer_weakref/finalizer_weakref.cpp =================================================================== --- src/finalizer_weakref/finalizer_weakref.cpp (revision 491045) +++ src/finalizer_weakref/finalizer_weakref.cpp (working copy) @@ -48,7 +48,7 @@ return; } -/* reset weak references vetctor block of each collector */ +/* reset weak references vector block of each collector */ void collector_reset_weakref_sets(Collector *collector) { collector->softref_set = finalizer_weakref_get_free_block(); @@ -257,7 +257,7 @@ collector_put_repset(collector); finalizer_weakref_repset_add_entry_from_pool(collector, objects_with_finalizer_pool); - /* fianlizable objects have been added to collector repset pool */ + /* finalizable 
objects have been added to collector repset pool */ //finalizer_weakref_repset_add_entry_from_pool(collector, finalizable_objects_pool); } @@ -384,7 +384,7 @@ *p_referent_field = NULL; /* Phantom status: for future use * if((unsigned int)p_referent & PHANTOM_REF_ENQUEUE_STATUS_MASK){ - * // enqueued but not explicitly cleared OR pending for enqueueing + * // enqueued but not explicitly cleared OR pending for enqueuing * *iter = NULL; * } * resurrect_obj_tree(collector, p_referent_field); Index: src/common/mark_scan.cpp =================================================================== --- src/common/mark_scan.cpp (revision 491045) +++ src/common/mark_scan.cpp (working copy) @@ -135,7 +135,7 @@ pool_put_entry(metadata->mark_task_pool, collector->trace_stack); /* second step: iterate over the mark tasks and scan objects */ - /* get a task buf for the mark stack */ + /* get a task buffer for the mark stack */ collector->trace_stack = pool_get_entry(metadata->free_task_pool); retry: Index: src/common/gc_block.h =================================================================== --- src/common/gc_block.h (revision 491045) +++ src/common/gc_block.h (working copy) @@ -169,7 +169,7 @@ } typedef struct Blocked_Space { - /* <-- first couple of fields are overloadded as Space */ + /* <-- first couple of fields are overloaded as Space */ void* heap_start; void* heap_end; unsigned int reserved_heap_size; @@ -180,7 +180,7 @@ Boolean (*mark_object_func)(Space* space, Partial_Reveal_Object* p_obj); /* END of Space --> */ - Block* blocks; /* short-cut for mpsace blockheader access, not mandatory */ + Block* blocks; /* short-cut for mspace blockheader access, not mandatory */ /* FIXME:: the block indices should be replaced with block header addresses */ unsigned int first_block_idx; Index: src/gen/gen.h =================================================================== --- src/gen/gen.h (revision 491045) +++ src/gen/gen.h (working copy) @@ -46,7 +46,7 @@ extern unsigned int
max_nos_size_bytes; typedef struct GC_Gen { - /* <-- First couple of fields overloadded as GC */ + /* <-- First couple of fields overloaded as GC */ void* heap_start; void* heap_end; unsigned int reserved_heap_size; Index: src/mark_sweep/free_area_pool.h =================================================================== --- src/mark_sweep/free_area_pool.h (revision 491045) +++ src/mark_sweep/free_area_pool.h (working copy) @@ -32,7 +32,7 @@ #define NUM_FREE_LIST 128 typedef struct Free_Area{ - /* <-- First couple of fields overloadded as Bidir_List */ + /* <-- First couple of fields overloaded as Bidir_List */ Bidir_List* next; Bidir_List* prev; /* END of Bidir_List --> */ Index: src/mark_sweep/lspace.h =================================================================== --- src/mark_sweep/lspace.h (revision 491045) +++ src/mark_sweep/lspace.h (working copy) @@ -26,7 +26,7 @@ #include "free_area_pool.h" typedef struct Lspace{ - /* <-- first couple of fields are overloadded as Space */ + /* <-- first couple of fields are overloaded as Space */ void* heap_start; void* heap_end; unsigned int reserved_heap_size;