Index: src/hash_table.h
===================================================================
--- src/hash_table.h	(revision 491217)
+++ src/hash_table.h	(working copy)
@@ -93,7 +93,7 @@
     int _save_pointer; // used to record last item served
     //
-    // The number of entries that this hash table can accomodate.
+    // The number of entries that this hash table can accommodate.
     //
     int _size_in_entries;
@@ -190,7 +190,7 @@
     int _save_pointer; // used to record last item served
     //
-    // The number of entries that this hash table can accomodate.
+    // The number of entries that this hash table can accommodate.
     //
     int _size_in_entries;
Index: src/gc_for_vm.cpp
===================================================================
--- src/gc_for_vm.cpp	(revision 491217)
+++ src/gc_for_vm.cpp	(working copy)
@@ -468,7 +468,7 @@
 // garbage collector data structures. The assumption is that the
 // VM is exiting but needs to give the GC time to run destructors
 // and free up memory it has gotten from the OS.
-// After this routine has been called the VM can not relie on any
+// After this routine has been called the VM can not rely on any
 // data structures created by the GC.
 //
 // Errors: If gc_enumerate_finalizable_objects has been called and
@@ -993,7 +993,7 @@
     ASSERT(0, "No need for alignment on IPF");
 #endif
-    // We hava a special object that needs
+    // We have a special object that needs
     //
     // In phase 1 of alignment, re-direct all objects
     // with special alignment needs to the LOS.
@@ -1259,7 +1259,7 @@
 Boolean gc_is_object_pinned (Managed_Object_Handle obj)
 {
     /**
-     * @note This API is not for interrior pointers.
+     * @note This API is not for interior pointers.
      */
     Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *) obj;
@@ -1282,7 +1282,7 @@
 /****
 *
-* Routines to support the the class loader and to allow the GC to seperate out and
+* Routines to support the class loader and to allow the GC to separate out and
 * control the format of the data structures that it uses.
 *
 *****/
Index: src/stash_block.h
===================================================================
--- src/stash_block.h	(revision 491217)
+++ src/stash_block.h	(working copy)
@@ -21,7 +21,7 @@
 //
-// When colocating objects using a sliding compaction algorithm one
+// When collocating objects using a sliding compaction algorithm one
 // needs to have a place to temporally store objects until their
 // final location is available.
 // During the slide phase of the collector we will place objects in a Stash_Block
@@ -37,7 +37,7 @@
 typedef struct stash {
     // Next 64K block to put objects in.
     stash *next;
-    // Pointer to end of allcated objects, where the next object is going to be placed.
+    // Pointer to end of allocated objects, where the next object is going to be placed.
     Partial_Reveal_Object *frontier;
     // The end of the block. No object can go beyond this point.
     Partial_Reveal_Object *ceiling;
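The stash structure above is small enough to sketch in full. Below is an illustrative, self-contained approximation of a Stash_Block and the bump-style copy it implies; the 64K size comes from the comment, but the helper names (stash_block_create, stash_object), the raw byte copy, and malloc as the allocator are assumptions of the sketch, not the collector's actual code.

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    struct Stash_Block {
        Stash_Block *next;      // next 64K block, if this one fills up
        char *frontier;         // end of stashed data; the next copy lands here
        char *ceiling;          // end of the usable area; nothing may go past it
        char data[64 * 1024];
    };

    Stash_Block *stash_block_create(Stash_Block *next) {
        Stash_Block *b = static_cast<Stash_Block *>(std::malloc(sizeof(Stash_Block)));
        b->next = next;
        b->frontier = b->data;
        b->ceiling = b->data + sizeof(b->data);
        return b;
    }

    // Copy 'size' bytes of an object into the stash. Returns the stashed copy,
    // or nullptr when this block is full and a new one must be chained via 'next'.
    void *stash_object(Stash_Block *b, const void *obj, std::size_t size) {
        if (b->frontier + size > b->ceiling) return nullptr;
        void *copy = b->frontier;
        std::memcpy(copy, obj, size);
        b->frontier += size;
        return copy;
    }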
Index: src/garbage_collector.h
===================================================================
--- src/garbage_collector.h	(revision 491217)
+++ src/garbage_collector.h	(working copy)
@@ -447,7 +447,7 @@
     // some hint to run finalizer threads
     bool hint_to_finalize;
-    // hint to calculete number of created finalizable objects
+    // hint to calculate number of created finalizable objects
     int finalizer_count;
     // list of short weak roots
Index: src/descendents.h
===================================================================
--- src/descendents.h	(revision 491217)
+++ src/descendents.h	(working copy)
@@ -95,7 +95,7 @@
 //
-// Initials interator for fusing objects.
+// Initializes the iterator for fusing objects.
 //
 inline int *
 init_fused_object_scanner (Partial_Reveal_Object *the_object)
 {
Index: src/block_store.cpp
===================================================================
--- src/block_store.cpp	(revision 491217)
+++ src/block_store.cpp	(working copy)
@@ -202,7 +202,7 @@
     // within 4GB. In the future, this limit could expand to 16GB or 32GB if
     // we exploit the fact that objects are aligned on a 4 or 8 byte boundary.
     if (gc_references_are_compressed && (final_heap_size - 1) > 0xFFFFffff) {
-        DIE("If compressed referencess are used,"
+        DIE("If compressed references are used,"
             << "the maximum heap size must be less than 4GB.");
     }
@@ -263,7 +263,7 @@
     _compaction_type_for_this_gc = gc_bogus_compaction;
     _compaction_blocks_low_index = 0;
-    // Compact an eigth of the heap each GC cycle
+    // Compact an eighth of the heap each GC cycle
    _compaction_blocks_high_index = get_compaction_increment_size();
 /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -297,7 +297,7 @@
 {
     //
     // Release the memory we obtained for the GC heap, including
-    // the ancilliary table(s).
+    // the ancillary table(s).
     //
     TRACE("Block_Store::~Block_Store()");
     port_vmem_release(allocated_memory);
@@ -436,7 +436,7 @@
     assert(block_address == (void *) ((POINTER_SIZE_INT) p_heap_start_address + heap_size_in_bytes));
     _num_free_blocks = _number_of_blocks_in_heap;
     TRACE2("gc.free", "_num_free_blocks = " << _num_free_blocks);
-    // Initlialize the hint to the start of block list
+    // Initialize the hint to the start of block list
     _free_block_search_hint = 0;
 }
Index: src/gc_utils.cpp
===================================================================
--- src/gc_utils.cpp	(revision 491217)
+++ src/gc_utils.cpp	(working copy)
@@ -131,7 +131,7 @@
     assert(((POINTER_SIZE_INT) p_word & alignment_mask) == 0);
     // This loop is where we spend all of out time. So lets do some optimization.
     // Originally we had the conditional (((uint8 *)((POINTER_SIZE_INT) p_word + sizeof(POINTER_SIZE_INT)) < p_ceil) && (*p_word == 0))
-    // Lets adjust p_ceil to subrtact sizeof(POINTER_SIZE_INT) and hoist it. This will speed up the loop by about 10%.
+    // Let's adjust p_ceil to subtract sizeof(POINTER_SIZE_INT) and hoist it. This will speed up the loop by about 10%.
     uint8 *p_ceil_last_to_check = p_ceil - sizeof(POINTER_SIZE_INT);
     while (((uint8 *)(POINTER_SIZE_INT) p_word < p_ceil_last_to_check) && (*p_word == 0)) {
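The hunk above documents a small but real optimization: the bound used to be recomputed from p_word plus the word size on every iteration, and the fix subtracts the word size from p_ceil once and hoists it out of the loop. A minimal standalone sketch of the before and after shapes, with POINTER_SIZE_INT replaced by std::uintptr_t and illustrative function names that are not the collector's:

    #include <cstddef>
    #include <cstdint>

    // Before: the bound is re-derived on every iteration.
    std::size_t count_zero_words_naive(const std::uintptr_t *p_word, const std::uint8_t *p_ceil) {
        std::size_t n = 0;
        while ((const std::uint8_t *)p_word + sizeof(std::uintptr_t) < p_ceil && *p_word == 0) {
            ++p_word;
            ++n;
        }
        return n;
    }

    // After: subtract the word size from the ceiling once and hoist it out of the loop.
    std::size_t count_zero_words_hoisted(const std::uintptr_t *p_word, const std::uint8_t *p_ceil) {
        const std::uint8_t *p_ceil_last_to_check = p_ceil - sizeof(std::uintptr_t);
        std::size_t n = 0;
        while ((const std::uint8_t *)p_word < p_ceil_last_to_check && *p_word == 0) {
            ++p_word;
            ++n;
        }
        return n;
    }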
@@ -166,7 +166,7 @@
 get_num_consecutive_similar_bits(uint8 *p_byte_start, unsigned int bit_index_to_search_from, unsigned int *num_consec_bits, uint8 *p_ceil)
 {
     if (p_ceil <= p_byte_start) {
-        DIE("Unexpected values of input prameters");
+        DIE("Unexpected values of input parameters");
     }
     bool is_zero_str = ((*p_byte_start) & (1 << bit_index_to_search_from)) ? false : true;
Index: src/verify_live_heap.cpp
===================================================================
--- src/verify_live_heap.cpp	(revision 491217)
+++ src/verify_live_heap.cpp	(working copy)
@@ -152,7 +152,7 @@
     // Verify integrity of qsort
     for (unsigned int j = 0; j < num_lives; j++) {
         if (j > 0) {
-            // stricly sorted and no duplicates...
+            // strictly sorted and no duplicates...
             assert((POINTER_SIZE_INT) all_lives_before_gc[j-1].p_obj < (POINTER_SIZE_INT) all_lives_before_gc[j].p_obj);
             assert(all_lives_before_gc[j].p_obj_copied->vt() == all_lives_before_gc[j].p_obj->vt());
             assert(all_lives_before_gc[j].p_obj_copied->get_obj_info() == all_lives_before_gc[j].p_obj->get_obj_info());
@@ -444,7 +444,7 @@
     // Free the hash table that we just constructed...
     delete all_lives_after_gc_ht;
-    // We need to free up the live heap that was malloced befored we leave...
+    // We need to free up the live heap that was malloced before we leave...
     for (unsigned int x = 0; x < num_lives_before_gc; x++) {
         // some object exists here....and it was copied properly before GC
         assert(all_lives_before_gc[x].p_obj);
Index: src/slot_offset_list.cpp
===================================================================
--- src/slot_offset_list.cpp	(revision 491217)
+++ src/slot_offset_list.cpp	(working copy)
@@ -110,7 +110,7 @@
 slot_offset_list::next()
 {
     _current_pointer++;
-    // to avoid infinite loops of nexts make sure it is called only
+    // to avoid infinite loops of next() make sure it is called only
     // once when the table is empty.
     assert (_current_pointer < (_resident_count + 1));
     return;
Index: src/mark_stack.h
===================================================================
--- src/mark_stack.h	(revision 491217)
+++ src/mark_stack.h	(working copy)
@@ -36,7 +36,7 @@
 // Buffers can be removed if there is more than one of them. They can be added when
 // a thread is through helping out the primary marker. Ultimately it is up to
-// the primary marker pop the elments off the stack.
+// the primary marker to pop the elements off the stack.
 #define FIELDS_IN_MARK_STACK 4
 #define MARK_STACK_ELEMENTS (1024 - FIELDS_IN_MARK_STACK)
Index: src/faststack.h
===================================================================
--- src/faststack.h	(revision 491217)
+++ src/faststack.h	(working copy)
@@ -20,7 +20,7 @@
  */
 // Fast LIFO stack.
-// To improve memory footprint 2 dimensional array is used: stack elemnts
+// To improve memory footprint a 2-dimensional array is used: stack elements
 // are stored in the sequence of consistent memory blocks.
 // When stack is created only one first block is allocated.
 // Memory for other blocks is allocated if it's necessary.
@@ -78,7 +78,7 @@
         block = 0;
     }
-    // get number of elemtnts in the stack
+    // get number of elements in the stack
     inline unsigned int size()
     {
         assert(bottom <= STACK_BLOCK_SIZE);
@@ -102,7 +102,7 @@
     {
        // if we reached the end of stack - push back new element
        // !!! better not to do this - performance drop!!!
-       // You shoud allocate more elements in STACK_NUMBER_BLOCKS parameter (do it in constructor)
+       // You should allocate more elements in STACK_NUMBER_BLOCKS parameter (do it in constructor)
        assert(block < stack.size());
        if ((block + 1) >= stack.size()) {
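The faststack.h comments above describe a two-level LIFO stack: elements live in fixed-size blocks, only the first block exists after construction, and extra blocks are allocated lazily (which is why the comment advises sizing STACK_NUMBER_BLOCKS generously up front). A rough sketch of that shape, using std::vector and a template block size instead of the collector's own types; the names and sizes here are illustrative:

    #include <cstddef>
    #include <memory>
    #include <vector>

    // Two-level stack: the outer vector holds pointers to fixed-size blocks,
    // so growing the stack never moves elements that are already pushed.
    template <typename T, std::size_t BlockSize = 1024>
    class FastStack {
        std::vector<std::unique_ptr<T[]>> blocks;
        std::size_t block = 0;   // index of the block holding the top
        std::size_t bottom = 0;  // next free slot within that block
    public:
        FastStack() { blocks.push_back(std::make_unique<T[]>(BlockSize)); }

        void push(const T &v) {
            if (bottom == BlockSize) {                  // current block is full
                if (block + 1 == blocks.size())         // allocate lazily, as faststack.h warns
                    blocks.push_back(std::make_unique<T[]>(BlockSize));
                ++block;
                bottom = 0;
            }
            blocks[block][bottom++] = v;
        }

        bool pop(T &out) {
            if (bottom == 0) {
                if (block == 0) return false;           // stack is empty
                --block;
                bottom = BlockSize;
            }
            out = blocks[block][--bottom];
            return true;
        }

        std::size_t size() const { return block * BlockSize + bottom; }
    };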
Index: src/hash_table.cpp
===================================================================
--- src/hash_table.cpp	(revision 491217)
+++ src/hash_table.cpp	(working copy)
@@ -281,7 +281,7 @@
     volatile void *address = _table[hash_code];
     //
     // Since we start scanning at the freshly deleted
-    // slot, we have to accomodate an initial zero.
+    // slot, we have to accommodate an initial zero.
     //
     if (address!=0) {
         //TRACE(" In rehash removing " << address);
Index: src/object_placement.cpp
===================================================================
--- src/object_placement.cpp	(revision 491217)
+++ src/object_placement.cpp	(working copy)
@@ -53,8 +53,8 @@
 #ifndef GC_LIVE_OBJECT_LISTS_PER_COMPACTION_BLOCK
 //
-// Run through all the objects in the blocks assigned to this heap colocating the interesting ones.
-// An object is interesting if there is some value to colocating it with another object and if
+// Run through all the objects in the blocks assigned to this heap collocating the interesting ones.
+// An object is interesting if there is some value to collocating it with another object and if
 // they are not already colocated.
 //
 // For this version to avoid race conditions with other threads both objects being colocated need
@@ -80,7 +80,7 @@
         // I need to keep track of the new after-slided address
         obj_info->p_obj = (Partial_Reveal_Object *) dest;
         gc_thread->insert_object_header_info_during_sliding_compaction(obj_info);
-        gc_trace (p_obj, "Object being compacted or colocated needs obj_info perserved.");
+        gc_trace (p_obj, "Object being compacted or colocated needs obj_info preserved.");
     }
     assert ((p_obj->get_obj_info() & FORWARDING_BIT_MASK) == FORWARDING_BIT_MASK); // Make sure we have the forwarding bit.
@@ -111,12 +111,12 @@
 //
 // Sliding compaction presents some problems when moving the objects. First the invariant that
 // if we slide objects to the left we will always have room is no longer obvious since colocated
-// objects are also interspersed. If I have dead A B C dead D E and I am colocating D and E after A
+// objects are also interspersed. If I have dead A B C dead D E and I am collocating D and E after A
 // Then I slide A D E
 // and I have overwritten B before I can move it into place.
-// One way to deal with this is to move D and E to a seperate area until the entire block has been slid
+// One way to deal with this is to move D and E to a separate area until the entire block has been slid
 // A x x B stash D E
-// But this overwwrites C with A. So we need to stash B also.
+// But this overwrites C with A. So we need to stash B also.
 // A x x x C stash D E B
 // Now we can move D E and B
 // A D E B C
@@ -126,7 +126,7 @@
 //
 // Each GC thread has a stash list associated with each GC thread including itself. All objects
 // that can not be immediately placed where they belong go onto this stash list. This introduces an
-// additional synchronization point where all sliding is done and no futher objects will be placed in
+// additional synchronization point where all sliding is done and no further objects will be placed in
 // the stash blocks. After this synchronization point all stash blocks can be emptied.
 // If we start with x A B C x x D E
 // and we want A D E B C x x x
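The A/B/C/D/E walkthrough above reduces to a single placement test: a colocated object can be copied straight to its destination only if the whole destination range lies in space that has already been vacated; otherwise it has to be parked in a stash block and replayed after the synchronization point. A minimal sketch of that decision, with illustrative names (moved_frontier, stash_copy) that do not come from the collector:

    #include <cstddef>
    #include <cstring>

    // 'moved_frontier' stands for the lowest address whose original contents
    // have not yet been copied out. Writing below it is safe; writing at or
    // above it would clobber a not-yet-moved object, like B in the example.
    inline bool can_place_now(char *dest, std::size_t size, const char *moved_frontier) {
        return dest + size <= moved_frontier;
    }

    inline void slide_or_stash(const char *src, char *dest, std::size_t size,
                               const char *moved_frontier,
                               void *(*stash_copy)(const void *, std::size_t)) {
        if (can_place_now(dest, size, moved_frontier)) {
            std::memmove(dest, src, size);   // ordinary slide to the final location
        } else {
            stash_copy(src, size);           // replay once every original has been copied out
        }
    }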
@@ -156,14 +156,14 @@
 //
 //
-// What if we stash the collocaing objects at pointer creating time and then ignore them during sliding time.
+// What if we stash the collocating objects at pointer creating time and then ignore them during sliding time.
 // Finally we move them into place after the sliding.
 //
 //
-// What if we smash the colocating if it means that we can't fit it into lower addresses than the next object
+// What if we smash the collocating if it means that we can't fit it into lower addresses than the next object
 // that is to be slid. This means that the sliding will never have to deal with not having a place to slide
-// an object. This is good. The invariant is that if an object must be slid to the right as a result of colocating
+// an object. This is good. The invariant is that if an object must be slid to the right as a result of collocating
 // then we do not colocate.
 //
@@ -219,7 +219,7 @@
     void *to_obj = *next_obj_start_arg;
     void * UNUSED debug_orig_to_obj = to_obj;
-    // Claim the Forwading bit if you can. If you loose the race you can't fuse since someone else is.
+    // Claim the Forwarding bit if you can. If you lose the race you can't fuse since someone else is.
     Obj_Info_Type old_base_value = p_obj->get_obj_info();
     Obj_Info_Type new_base_value = old_base_value;
     if ((old_base_value & FORWARDING_BIT_MASK) == FORWARDING_BIT_MASK) {
@@ -366,7 +366,7 @@
         Partial_Reveal_Object *p_fuse_obj = fuse_queue[i];
         unsigned int fused_obj_size = get_object_size_bytes(p_fuse_obj);
         gc_trace (p_fuse_obj, "Fusing this object with parent.");
-        // Finally deal with this colocations.
+        // Finally deal with these collocations.
         assert (p_fuse_obj != p_obj); // Nulls should have been filtered out up above.
         if (object_info_is_not_zero(p_fuse_obj)) {
Index: src/mark.h
===================================================================
--- src/mark.h	(revision 491217)
+++ src/mark.h	(working copy)
@@ -36,7 +36,7 @@
 // for vtable_get_class
 #include "open/vm_gc.h"
-// returns address correcsponding bit index in specified block
+// returns address corresponding to bit index in specified block
 inline void* block_address_from_mark_bit_index(block_info *block, unsigned int bit_index)
 {
     POINTER_SIZE_INT data = (POINTER_SIZE_INT)block + GC_BLOCK_INFO_SIZE_BYTES;
Index: src/gc_v4.cpp
===================================================================
--- src/gc_v4.cpp	(revision 491217)
+++ src/gc_v4.cpp	(working copy)
@@ -631,7 +631,7 @@
             new_chunk_start = block;
             block = next_block;
         } else {
-            gc_trace_block (block, "in return_free_blocks_to_block_store returing free block to block store.");
+            gc_trace_block (block, "in return_free_blocks_to_block_store returning free block to block store.");
             // Fully free block means.....NO live data...can go back to the block store
             // Return it to the block store
             block_info *next_block = block->next_free_block;
@@ -824,7 +824,7 @@
             TRACE("finalizable object " << p_obj << " is still alive");
         }
-        // The eumeration needs to provide a set which means no duplicates. Remove duplicates here.
+        // The enumeration needs to provide a set which means no duplicates. Remove duplicates here.
         Partial_Reveal_Object **slot = NULL;
         if (object_was_finalizable) {
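The "claim the Forwarding bit" comment fixed in the object_placement.cpp hunk above describes a compare-and-swap race: the thread that installs the bit first owns forwarding and fusing for that object, and a thread that loses the race backs off. A hedged sketch with std::atomic; the mask value and the layout of Obj_Info_Type are placeholders, not the collector's real header format:

    #include <atomic>
    #include <cstdint>

    constexpr std::uintptr_t FORWARDING_BIT_MASK = 0x1;   // placeholder bit position

    struct ObjInfo { std::atomic<std::uintptr_t> bits{0}; };

    // Returns true only for the single thread whose CAS installs the bit.
    bool try_claim_forwarding_bit(ObjInfo &info) {
        std::uintptr_t old_value = info.bits.load(std::memory_order_relaxed);
        do {
            if (old_value & FORWARDING_BIT_MASK)
                return false;                             // lost the race: someone else is fusing
        } while (!info.bits.compare_exchange_weak(old_value,
                                                  old_value | FORWARDING_BIT_MASK,
                                                  std::memory_order_acq_rel,
                                                  std::memory_order_relaxed));
        return true;                                      // won the race; safe to fuse or forward
    }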
@@ -954,7 +954,7 @@
 void Garbage_Collector::process_soft_references(bool compaction_this_gc)
 {
     TRACE2("gc.ref", "process_soft_references(" << compaction_this_gc << ")");
-    // FIXME salikh: implement some euristics to decide whether we need to
+    // FIXME salikh: implement some heuristics to decide whether we need to
     // reset soft references
     bool reset_soft_references = true;
     if (reset_soft_references) {
Index: src/gc_threads.cpp
===================================================================
--- src/gc_threads.cpp	(revision 491217)
+++ src/gc_threads.cpp	(working copy)
@@ -79,7 +79,7 @@
     _num_slots_collected = 0;
     _output_packet = NULL;
     _input_packet = NULL;
-    _id = 1; // FIXME ivan 20051026: concurent mark scan uses thread id = 1 for all mutator threads
+    _id = 1; // FIXME ivan 20051026: concurrent mark scan uses thread id = 1 for all mutator threads
 }
 GC_Mark_Activity::~GC_Mark_Activity() {
Index: src/sliding_compaction.cpp
===================================================================
--- src/sliding_compaction.cpp	(revision 491217)
+++ src/sliding_compaction.cpp	(working copy)
@@ -103,7 +103,7 @@
         } // while (one_slot)
-        // Delete the remebered set since it is not needed anymore...all slot updates for this list are done
+        // Delete the remembered set since it is not needed anymore...all slot updates for this list are done
         delete some_slots;
     } // while (true)
@@ -167,7 +167,7 @@
         p_compaction_block->curr_free = p_compaction_block->block_free_areas[0].area_base = GC_BLOCK_ALLOC_START(p_compaction_block);
         p_compaction_block->curr_ceiling = p_compaction_block->block_free_areas[0].area_ceiling = GC_BLOCK_CEILING(p_compaction_block);
-        p_compaction_block->block_free_areas[0].area_size = GC_BLOCK_ALLOC_SIZE; // This is how we declare that the entire block is avaliable. (contains fo object)
+        p_compaction_block->block_free_areas[0].area_size = GC_BLOCK_ALLOC_SIZE; // This is how we declare that the entire block is available. (contains no object)
         p_compaction_block->block_free_areas[0].has_been_zeroed = false;
         p_compaction_block->block_has_been_swept = true;
@@ -291,7 +291,7 @@
     assert ((POINTER_SIZE_INT)next_obj_start + get_object_size_bytes(p_obj) < (POINTER_SIZE_INT)(GC_BLOCK_CEILING(next_obj_start))); // Check overflow.
     p_obj->set_forwarding_pointer(next_obj_start);
-    success = true; // We always succeed if we are not colocating objects.
+    success = true; // We always succeed if we are not collocating objects.
     gc_trace (next_obj_start, " In allocate_forwarding_pointers_for_compaction_live_objects forwarding *to* this location. (vtable not yet legal) from " << p_obj);
Index: src/gc_resize.cpp
===================================================================
--- src/gc_resize.cpp	(revision 491217)
+++ src/gc_resize.cpp	(working copy)
@@ -92,7 +92,7 @@
     if (size_failed > GC_BLOCK_ALLOC_SIZE) {
         // how many blocks we need?
         int needed_blocks = GC_NUM_BLOCKS_PER_LARGE_OBJECT(size_failed);
-        // can we satisy the request now?
+        // can we satisfy the request now?
         if (!_p_block_store->block_store_can_satisfy_request(needed_blocks)) {
             if (needed_blocks + current_blocks > max_blocks) {
                 assert(current_blocks <= max_blocks);
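The gc_resize.cpp hunk just above works out how many heap blocks a failed large allocation needs and asks the block store whether it can hand them out before choosing between growing the heap and giving up. A sketch of that arithmetic and policy, assuming GC_NUM_BLOCKS_PER_LARGE_OBJECT is a ceiling division by the per-block allocation size; the constant and names below are placeholders, not the collector's API:

    #include <cstddef>

    constexpr std::size_t kBlockAllocSize = 64 * 1024;   // stand-in for GC_BLOCK_ALLOC_SIZE

    // Assumed reading of GC_NUM_BLOCKS_PER_LARGE_OBJECT: round the failed size
    // up to a whole number of blocks.
    inline std::size_t blocks_for_large_object(std::size_t size_failed) {
        return (size_failed + kBlockAllocSize - 1) / kBlockAllocSize;
    }

    enum class ResizeDecision { Satisfiable, Grow, Fail };

    // If the block store already has enough free blocks, no resize is needed;
    // if growing by 'needed' blocks would exceed the configured maximum, the
    // request fails; otherwise the heap is extended.
    inline ResizeDecision resize_decision(std::size_t needed, std::size_t free_blocks,
                                          std::size_t current_blocks, std::size_t max_blocks) {
        if (free_blocks >= needed) return ResizeDecision::Satisfiable;
        if (current_blocks + needed > max_blocks) return ResizeDecision::Fail;
        return ResizeDecision::Grow;
    }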
Index: src/block_store.h
===================================================================
--- src/block_store.h	(revision 491217)
+++ src/block_store.h	(working copy)
@@ -287,7 +287,7 @@
         return _blocks_in_block_store[i].number_of_blocks;
     }
     // Block iterator code,
-    // This can be used in a stop the world setting to iterate through all ther block sequentially.
+    // This can be used in a stop the world setting to iterate through all the blocks sequentially.
     void init_block_iterator() {
         current_block_index = 0;
     }
Index: src/characterize_heap_win.cpp
===================================================================
--- src/characterize_heap_win.cpp	(revision 491217)
+++ src/characterize_heap_win.cpp	(working copy)
@@ -895,8 +895,8 @@
 void heapTraceBegin(bool before_gc) {
     //
     // A - We need to characterize at the end of every GC to get a characterization of live objects.
-    // B - We need to characterize at the start of every GC to get a characterizatin of live + dead objects.
-    // You want a characterization of allocated objects just subract A from B
+    // B - We need to characterize at the start of every GC to get a characterization of live + dead objects.
+    // If you want a characterization of allocated objects, just subtract A from B.
     //
     // If we are before the GC then what we have in the characterization tables are the
     // types of objects live at the end of the last GC *plus* all the objects that have
@@ -1323,8 +1323,8 @@
 //
-// This code dumps out the type and thier frequencies.
-// Each line holds the counts comma deliminated and ready for Excel.
+// This code dumps out the types and their frequencies.
+// Each line holds the counts comma delimited and ready for Excel.
 // Since we do not know all the types that might exist when the first line is dumped out we
 // need to delay dumping out the type names until the final line.
 // The implementation therefore requires a call to end_type_counts to dump the names of the types.
Index: src/gc_thread.h
===================================================================
--- src/gc_thread.h	(revision 491217)
+++ src/gc_thread.h	(working copy)
@@ -220,9 +220,9 @@
     object_lock_save_info *_object_headers;
-    // This is the chunk that is to be used by the object placement code to colocate objects
+    // This is the chunk that is to be used by the object placement code to collocate objects
     block_info *placement_chunk;
-    // This is the block in the chunk that is used by object placement code to colocate objects
+    // This is the block in the chunk that is used by object placement code to collocate objects
     block_info *placement_block;
     Stash_Block *stash;
Index: src/allocation.cpp
===================================================================
--- src/allocation.cpp	(revision 491217)
+++ src/allocation.cpp	(working copy)
@@ -124,7 +124,7 @@
     assert (ah);
     // Get the gc tls structure. Review of what each piece of the structure is used for.
-    // chunk - This holds a list of blocks linked throuh the next_free_block fields
+    // chunk - This holds a list of blocks linked through the next_free_block fields
     // Each block is an alloc_block and it has various areas where allocation can happen.
     // The block might not have been swept which means that the allocation areas have
     // not be initialized.
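The allocation.cpp comment above lays out the thread-local allocation structures: a chunk is a list of blocks chained through next_free_block, each block is an alloc_block with several areas where allocation can happen, and a block that has not been swept has uninitialized areas. A simplified sketch of walking that structure to find an area big enough for a request; the field names follow the comment, but the types, the fixed array bound, and skipping (rather than sweeping) unswept blocks are assumptions of the sketch:

    #include <cstddef>

    struct Free_Area { char *base; char *ceiling; std::size_t size; };

    struct Alloc_Block {
        Alloc_Block *next_free_block;   // chunk = blocks chained through this field
        bool swept;                     // unswept blocks have uninitialized areas
        int current_alloc_area;         // -1 until an area has been chosen
        int num_areas;
        Free_Area areas[8];             // illustrative fixed bound
    };

    // Walk the chunk for a swept block with an area that can hold 'size' bytes.
    // The real collector would sweep an unswept block here instead of skipping it.
    Free_Area *find_fit(Alloc_Block *chunk, std::size_t size) {
        for (Alloc_Block *b = chunk; b != nullptr; b = b->next_free_block) {
            if (!b->swept) continue;
            for (int i = b->current_alloc_area + 1; i < b->num_areas; ++i) {
                if (b->areas[i].size >= size) {
                    b->current_alloc_area = i;   // "next current area"; 0 if it is the first
                    return &b->areas[i];
                }
            }
        }
        return nullptr;
    }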
@@ -209,7 +209,7 @@
     // We have a block that has been swept and has at least one allocation area big enough to fit this object so we will be
     // successful..
-    alloc_block->current_alloc_area++; // Get the next currenct area. If it is the first one it will be 0.
+    alloc_block->current_alloc_area++; // Get the next current area. If it is the first one it will be 0.
     //XXX assert part of GC_SLOW_ALLOC routines.
     assert (alloc_block->current_alloc_area != -1);