Index: src/init.cpp
===================================================================
--- src/init.cpp	(revision 491210)
+++ src/init.cpp	(working copy)
@@ -187,7 +187,7 @@
 #else
 static inline void *reserve_mem(size_t size) {
 #ifdef POINTER64
-    /* We have planty of address space, let's protect unaccessible part of heap
+    /* We have plenty of address space, let's protect unaccessible part of heap
      * to find some of bad pointers. */
     size_t four_gig = 4 * 1024 * (size_t) 1024 * 1024;
     size_t padding = 4 * 1024 * (size_t) 1024 * 1024;
@@ -406,5 +406,5 @@
         heap.pos_limit = heap.ceiling;
     }
     chunk_size = round_down(heap.size / (10 * num_threads),128);
-    INFO("heap shrinked to " << mb(heap.size) << " mb");
+    INFO("heap shrank to " << mb(heap.size) << " mb");
 }
Index: src/collect_slide_compact.cpp
===================================================================
--- src/collect_slide_compact.cpp	(revision 491210)
+++ src/collect_slide_compact.cpp	(working copy)
@@ -168,9 +168,9 @@
     }
 }
 
-// after moving some objects should be rescaned
+// after moving some objects should be rescanned
 // storing references to this object to the linked list
-// of reverers to the right of this one.
+// of reversers to the right of this one.
 // oldobj = original position of object:
 // if this object is pinned and referenced object is moved only original
 // position of this object contains valid (unchanged) information of left/right direction
Index: src/collect_copy.cpp
===================================================================
--- src/collect_copy.cpp	(revision 491210)
+++ src/collect_copy.cpp	(working copy)
@@ -152,7 +152,7 @@
     unsigned vt = obj->vt();
 
     if (info & phase) {
-        // object already marked, need to check if it is forwared still
+        // object already marked, need to check if it is forwarded still
        if (vt & FORWARDING_BIT) {
            Partial_Reveal_Object *newpos = fw_to_pointer(vt & ~FORWARDING_BIT);
 
@@ -208,7 +208,7 @@
                 + ((obj->obj_info() & HASHCODE_IS_ALLOCATED_BIT) ? GC_OBJECT_ALIGNMENT : 0));
         TRACE2("gc.pin", "add failed pinned area = " << pos << " " << pinned_areas_unsorted.back());
         TRACE2("gc.pin", "failed object = " << pos);
-        // arange transition to slide compaction
+        // arrange transition to slide compaction
         obj->obj_info() &= ~MARK_BITS;
         slots.push_back(slot);
         transition_copy_to_sliding_compaction(slots);
@@ -269,7 +269,7 @@
 }
 
 void gc_copy_add_root_set_entry(Slot root) {
-    // FIXME: check for zero here, how it reflect perfomance, should be better!
+    // FIXME: check for zero here, how it reflect performance, should be better!
     // and possibly remove check in gc_copy_process_reference
     // while added check in array handling
 
Index: src/selector.cpp
===================================================================
--- src/selector.cpp	(revision 491210)
+++ src/selector.cpp	(working copy)
@@ -165,7 +165,7 @@
 
 bool need_compaction_next_gc() {
     if (heap.working_set_size == 0 || !gc_adaptive) {
-        TRACE2("gc.adaptive", "static Smin analisis");
+        TRACE2("gc.adaptive", "static Smin analysis");
         return heap.allocation_region_end() - heap.pos < heap.size * 0.7f;
     } else {
         float smin = Smin(heap.roots_start - heap.base - RESERVED_FOR_HEAP_NULL - heap.working_set_size,
Index: src/collect.cpp
===================================================================
--- src/collect.cpp	(revision 491210)
+++ src/collect.cpp	(working copy)
@@ -252,7 +252,7 @@
 
             // object not marked
             referent.write(heap_null);
-            TRACE2("gc.ref", "process_special_references: reference enquequed");
+            TRACE2("gc.ref", "process_special_references: reference enqueued");
             vm_enqueue_reference((Managed_Object_Handle)ref);
         }
         array.clear();