Index: src/common/gc_common.cpp
===================================================================
--- src/common/gc_common.cpp	(revision 536884)
+++ src/common/gc_common.cpp	(working copy)
@@ -251,6 +251,11 @@
   gc_gen_assign_free_area_to_mutators((GC_Gen*)gc);
 }
 
+void gc_adjust_heap_size(GC* gc)
+{
+  gc_gen_adjust_heap_size((GC_Gen*)gc);
+}
+
 void gc_copy_interior_pointer_table_to_rootset();
 
 void gc_reclaim_heap(GC* gc, unsigned int gc_cause)
@@ -263,8 +268,6 @@
   gc->cause = gc_cause;
   gc_decide_collection_kind((GC_Gen*)gc, gc_cause);
 
-
-  //For_LOS_extend!
 #ifdef GC_FIXED_SIZE_TUNER
   gc_space_tune_before_gc_fixed_size(gc, gc_cause);
 #else
@@ -273,8 +276,7 @@
 #endif
 
 #ifdef MARK_BIT_FLIPPING
-  if(gc_match_kind(gc, MINOR_COLLECTION))
-    mark_bit_flip();
+  if(gc_match_kind(gc, MINOR_COLLECTION)) mark_bit_flip();
 #endif
 
   gc_metadata_verify(gc, TRUE);
@@ -291,21 +293,21 @@
 
   /* this has to be done after all mutators are suspended */
   gc_reset_mutator_context(gc);
 
-  if(!IGNORE_FINREF )
-    gc_set_obj_with_fin(gc);
+  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
 
   gc_gen_reclaim_heap((GC_Gen*)gc);
 
   gc_reset_interior_pointer_table();
-  
+
   gc_metadata_verify(gc, FALSE);
 
+  gc_adjust_heap_size(gc);
+
   int64 pause_time = time_now() - start_time;
   gc->time_collections += pause_time;
 
   gc_gen_adapt((GC_Gen*)gc, pause_time);
 
-  if(gc_is_gen_mode())
-    gc_prepare_mutator_remset(gc);
+  if(gc_is_gen_mode()) gc_prepare_mutator_remset(gc);
 
   if(!IGNORE_FINREF ){
     gc_put_finref_to_vm(gc);
@@ -317,16 +319,11 @@
 #endif
   }
 
-  //For_LOS_extend!
   gc_space_tuner_reset(gc);
-  
+
   gc_assign_free_area_to_mutators(gc);
-  
+
   vm_resume_threads_after();
   return;
 }
-
-
-
-
Index: src/common/gc_space.h
===================================================================
--- src/common/gc_space.h	(revision 536884)
+++ src/common/gc_space.h	(working copy)
@@ -137,9 +137,9 @@
   new_last_block->next = NULL;
 }
 
-inline void blocked_space_extend(Blocked_Space* space, unsigned int changed_size)
+inline void blocked_space_extend(Blocked_Space* space, POINTER_SIZE_INT changed_size)
 {
-  unsigned int block_inc_count = changed_size >> GC_BLOCK_SHIFT_COUNT;
+  POINTER_SIZE_INT block_inc_count = changed_size >> GC_BLOCK_SHIFT_COUNT;
 
   void* old_base = (void*)&space->blocks[space->num_managed_blocks];
   void* commit_base = (void*)round_down_to_size((POINTER_SIZE_INT)old_base, SPACE_ALLOC_UNIT);
@@ -152,7 +152,8 @@
 
   void* new_end = (void*)((POINTER_SIZE_INT)commit_base + commit_size);
   space->committed_heap_size = (POINTER_SIZE_INT)new_end - (POINTER_SIZE_INT)space->heap_start;
-  
+  /*Fixme: For_Heap_Adjust; this still needs a fix if static mapping is used.*/
+  space->heap_end = new_end;
   /* init the grown blocks */
   Block_Header* block = (Block_Header*)commit_base;
   Block_Header* last_block = (Block_Header*)((Block*)block -1);
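Note (gc_space.h): widening changed_size from unsigned int to POINTER_SIZE_INT matters on 64-bit builds, where a grow request of 4GB or more would be truncated to zero blocks before the block count is computed. A minimal standalone sketch of the failure mode; the typedef and the 32KB block shift are hypothetical stand-ins for DRLVM's real definitions:

    #include <cassert>
    #include <cstdint>

    typedef uintptr_t POINTER_SIZE_INT;            /* stand-in for the port-layer typedef */
    const unsigned int GC_BLOCK_SHIFT_COUNT = 15;  /* hypothetical: 32KB blocks */

    int main() {
      POINTER_SIZE_INT changed_size = (POINTER_SIZE_INT)4096 * 1024 * 1024; /* 4GB grow request */
      unsigned int truncated = (unsigned int)changed_size;      /* what the old signature kept */
      assert((changed_size >> GC_BLOCK_SHIFT_COUNT) == 131072); /* blocks actually needed */
      assert((truncated >> GC_BLOCK_SHIFT_COUNT) == 0);         /* blocks the old code saw */
      return 0;
    }

The heap_end update in the same hunk keeps the Space bookkeeping consistent once a space can grow at runtime; as the Fixme notes, static NOS mapping still needs a separate fix.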
Index: src/common/space_tuner.cpp
===================================================================
--- src/common/space_tuner.cpp	(revision 536884)
+++ src/common/space_tuner.cpp	(working copy)
@@ -106,7 +106,7 @@
   }
 
 check_size:
-  tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
+  tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
   if(tuner->tuning_size == 0){
     tuner->kind = TRANS_NOTHING;
     lspace->move_object = 0;
@@ -137,9 +137,9 @@
   Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
   Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);
 
-  POINTER_SIZE_INT los_expect_survive_sz = (POINTER_SIZE_INT)((float)(lspace->surviving_size + lspace->alloced_size) * lspace->survive_ratio);
-  POINTER_SIZE_INT los_expect_free_sz = ((lspace->committed_heap_size > los_expect_survive_sz) ?
-                                         (lspace->committed_heap_size - los_expect_survive_sz) : 0);
+  POINTER_SIZE_INT los_expect_surviving_sz = (POINTER_SIZE_INT)((float)(lspace->surviving_size + lspace->alloced_size) * lspace->survive_ratio);
+  POINTER_SIZE_INT los_expect_free_sz = ((lspace->committed_heap_size > los_expect_surviving_sz) ?
+                                         (lspace->committed_heap_size - los_expect_surviving_sz) : 0);
   POINTER_SIZE_INT mos_expect_survive_sz = (POINTER_SIZE_INT)((float)(mspace->surviving_size + mspace->alloced_size) * mspace->survive_ratio);
   POINTER_SIZE_INT mos_expect_threshold = mspace_get_expected_threshold((Mspace*)mspace);
   POINTER_SIZE_INT mos_expect_free_sz = ((mos_expect_threshold > mos_expect_survive_sz)?
@@ -304,7 +304,7 @@
   }
   los_live_obj_size = round_up_to_size(los_live_obj_size, GC_BLOCK_SIZE_BYTES);
   los_live_obj_size += (collector_num << 2 << GC_BLOCK_SHIFT_COUNT);
-  
+
   Lspace *los = (Lspace*)gc_get_los((GC_Gen*)gc);
   Space_Tuner *tuner = gc->tuner;
   POINTER_SIZE_INT los_max_shrink_size = 0;
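Note (space_tuner.cpp): switching round_down_to_size to round_up_to_size keeps a sub-block tuning request alive; rounding down turned any request smaller than one block into tuning_size == 0, which degraded the tuner to TRANS_NOTHING. A self-contained sketch, with hypothetical helper bodies and a hypothetical 32KB block size (the real helpers live in the DRLVM headers):

    #include <cassert>

    typedef unsigned long POINTER_SIZE_INT;

    /* power-of-two rounding, as commonly defined */
    static POINTER_SIZE_INT round_up_to_size(POINTER_SIZE_INT size, POINTER_SIZE_INT unit)
    { return (size + unit - 1) & ~(unit - 1); }
    static POINTER_SIZE_INT round_down_to_size(POINTER_SIZE_INT size, POINTER_SIZE_INT unit)
    { return size & ~(unit - 1); }

    int main() {
      const POINTER_SIZE_INT GC_BLOCK_SIZE_BYTES = 32 * 1024;  /* hypothetical */
      POINTER_SIZE_INT tuning_size = 20 * 1024;  /* LOS asks for less than one block */
      assert(round_down_to_size(tuning_size, GC_BLOCK_SIZE_BYTES) == 0);   /* old: request dropped */
      assert(round_up_to_size(tuning_size, GC_BLOCK_SIZE_BYTES) == GC_BLOCK_SIZE_BYTES); /* new */
      return 0;
    }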
limitation.\n", max_heap_size); + }//else printf("Max size: %x, heap_start: %lx\n", max_heap_size, reserved_base); + reserved_base = (void*)round_up_to_size((POINTER_SIZE_INT)reserved_base, SPACE_ALLOC_UNIT); assert((POINTER_SIZE_INT)reserved_base%SPACE_ALLOC_UNIT == 0); - - while( !reserved_base ){ - printf("Non-static NOS mapping: Can't allocate memory at address %x for specified size %x", reserved_base, max_heap_size); - exit(0); - } } reserved_end = (void*)((POINTER_SIZE_INT)reserved_base + max_heap_size); @@ -205,9 +214,6 @@ nos->collect_algorithm = MINOR_ALGO; mos->collect_algorithm = MAJOR_ALGO; - /*Give GC a hint of space survive ratio.*/ -// nos->survive_ratio = gc_gen->survive_ratio; -// mos->survive_ratio = gc_gen->survive_ratio; gc_space_tuner_initialize((GC*)gc_gen); gc_gen_mode_adapt_init(gc_gen); @@ -216,7 +222,6 @@ space_committed_size((Space*)gc_gen->mos) + space_committed_size((Space*)gc_gen->los); - set_native_finalizer_thread_flag(!IGNORE_FINREF); set_native_ref_enqueue_thread_flag(!IGNORE_FINREF); @@ -260,12 +265,6 @@ unsigned int gc_get_processor_num(GC_Gen* gc){ return gc->_num_processors;} - -static Boolean major_collection_needed(GC_Gen* gc) -{ - return space_used_memory_size((Blocked_Space*)gc->nos)*gc->survive_ratio > (space_free_memory_size((Blocked_Space*)gc->mos)); -} - Boolean FORCE_FULL_COMPACT = FALSE; void gc_decide_collection_kind(GC_Gen* gc, unsigned int cause) @@ -345,6 +344,64 @@ return; } +void gc_gen_adjust_heap_size(GC_Gen* gc) +{ + if(gc_match_kind((GC*)gc, MINOR_COLLECTION)) return; + if(gc->committed_heap_size == max_heap_size_bytes - LOS_HEAD_RESERVE_FOR_HEAP_NULL) return; + + Mspace* mos = gc->mos; + Fspace* nos = gc->nos; + Lspace* los = gc->los; + /*We can not tolerate gc->survive_ratio be greater than threshold twice continuously. 
Index: src/gen/gen.h
===================================================================
--- src/gen/gen.h	(revision 536884)
+++ src/gen/gen.h	(working copy)
@@ -169,6 +169,8 @@
 void gc_gen_reclaim_heap(GC_Gen* gc);
 
 void gc_gen_assign_free_area_to_mutators(GC_Gen* gc);
+
+void gc_gen_adjust_heap_size(GC_Gen* gc);
 
 void gc_gen_mode_adapt_init(GC_Gen *gc);
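Note (gc_gen_adjust_heap_size, declared above and defined in gen.cpp): after a non-minor collection, two consecutive collections whose survive ratio exceeds 0.3 extend the committed heap so that the surviving bytes would amount to a 0.125 ratio in the new size, capped at -Xmx. A worked example with hypothetical numbers:

    #include <cstdio>

    int main() {
      const unsigned long MB = 1024 * 1024;
      unsigned long committed = 128 * MB;
      unsigned long surviving = 48 * MB;                 /* MOS + LOS survivors */
      float ratio = (float)surviving / (float)committed; /* 0.375 > 0.3f threshold */
      unsigned long target = (unsigned long)((float)surviving / 0.125f); /* 384 MB */
      printf("survive ratio %.3f: extend %luMB -> %luMB (NOS grows by the difference)\n",
             ratio, committed / MB, target / MB);
      return 0;
    }

The extension is applied to NOS via blocked_space_extend(), which is why the gc_space.h hunk above also moves heap_end.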
Index: src/gen/gen_adapt.cpp
===================================================================
--- src/gen/gen_adapt.cpp	(revision 536884)
+++ src/gen/gen_adapt.cpp	(working copy)
@@ -25,8 +25,6 @@
 #define NOS_COPY_RESERVE_DELTA (GC_BLOCK_SIZE_BYTES<<1)
 /*Tune this number in case that MOS could be too small, so as to avoid or put off fall back.*/
 #define GC_MOS_MIN_EXTRA_REMAIN_SIZE (36*MB)
-/*Switch on this MACRO when we want lspace->survive_ratio to be sensitive.*/
-//#define NOS_SURVIVE_RATIO_SENSITIVE
 
 struct Mspace;
 void mspace_set_expected_threshold(Mspace* mspace, POINTER_SIZE_INT threshold);
@@ -213,8 +211,8 @@
 
   /*If major is caused by LOS, or collection kind is EXTEND_COLLECTION, all survive ratio is not updated.*/
   if((gc->cause != GC_CAUSE_LOS_IS_FULL) && (!gc_match_kind((GC*)gc, EXTEND_COLLECTION))){
-    POINTER_SIZE_INT major_survive_size = space_committed_size((Space*)mspace) - mos_free_size;
-    survive_ratio = (float)major_survive_size/(float)space_committed_size((Space*)mspace);
+    POINTER_SIZE_INT major_surviving_size = space_committed_size((Space*)mspace) - mos_free_size;
+    survive_ratio = (float)major_surviving_size/(float)space_committed_size((Space*)mspace);
     mspace->survive_ratio = survive_ratio;
   }
   /*For LOS_Shrink:*/
@@ -222,15 +220,10 @@
     POINTER_SIZE_INT mspace_size_threshold = (space_committed_size((Space*)mspace) + space_committed_size((Space*)fspace)) >> 1;
     mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold );
   }
-  #ifdef NOS_SURVIVE_RATIO_SENSITIVE
-  /*If this major is caused by fall back compaction, 
-    we must give fspace->survive_ratio a conservative and reasonable number to avoid next fall back.*/
-  //fspace->survive_ratio = mspace->survive_ratio;
-  /*In fallback compaction, the survive_ratio of mspace must be 1.*/
-  if(gc_match_kind((GC*)gc, FALLBACK_COLLECTION))
-    fspace->survive_ratio = 1;
-  
-  #endif
+  /*If this major is caused by fall back compaction, we must give fspace->survive_ratio
+   *a conservative and reasonable number to avoid the next fall back.
+   *In fallback compaction, the survive_ratio of mspace must be 1.*/
+  if(gc_match_kind((GC*)gc, FALLBACK_COLLECTION)) fspace->survive_ratio = 1;
   }else{
     /*Give a hint to mini_free_ratio. */
     if(fspace->num_collections == 1){
@@ -242,19 +235,17 @@
     fspace->time_collections += pause_time;
 
     POINTER_SIZE_INT free_size_threshold;
-        
-    POINTER_SIZE_INT minor_survive_size = last_total_free_size - total_free_size;
+
+    POINTER_SIZE_INT minor_surviving_size = last_total_free_size - total_free_size;
 
     float k = Tslow * fspace->num_collections/fspace->time_collections;
-    float m = ((float)minor_survive_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ));
+    float m = ((float)minor_surviving_size)*1.0f/((float)(SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ));
     float free_ratio_threshold = mini_free_ratio(k, m);
     free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * (SMax - GC_MOS_MIN_EXTRA_REMAIN_SIZE ) + GC_MOS_MIN_EXTRA_REMAIN_SIZE );
 
-    if ((mos_free_size + nos_free_size)< free_size_threshold) {
-      gc->force_major_collect = TRUE;
-    }
+    if ((mos_free_size + nos_free_size)< free_size_threshold) gc->force_major_collect = TRUE;
 
-    survive_ratio = (float)minor_survive_size/(float)space_committed_size((Space*)fspace);
+    survive_ratio = (float)minor_surviving_size/(float)space_committed_size((Space*)fspace);
     fspace->survive_ratio = survive_ratio;
     /*For LOS_Adaptive*/
     POINTER_SIZE_INT mspace_committed_size = space_committed_size((Space*)mspace);
@@ -265,9 +256,8 @@
       mspace_set_expected_threshold((Mspace *)mspace, mspace_size_threshold );
     }
   }
-  
-  gc->survive_ratio = (gc->survive_ratio + survive_ratio)/2.0f;
+  gc->survive_ratio = (gc->survive_ratio + survive_ratio)/2.0f;
 
   last_total_free_size = total_free_size;
 }
@@ -295,7 +285,9 @@
 
 #ifdef STATIC_NOS_MAPPING
   total_size = max_heap_size_bytes - lspace->committed_heap_size;
 #else
-  total_size = (POINTER_SIZE_INT)gc->heap_end - (POINTER_SIZE_INT)mspace->heap_start;
+  POINTER_SIZE_INT curr_heap_commit_end =
+    (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size;
+  total_size = curr_heap_commit_end - (POINTER_SIZE_INT)mspace->heap_start;
 #endif
 
   POINTER_SIZE_INT total_free = total_size - used_mos_size;
@@ -306,16 +298,15 @@
     POINTER_SIZE_INT nos_reserve_size;
     nos_reserve_size = (POINTER_SIZE_INT)(((float)total_free)/(1.0f + fspace->survive_ratio));
     /*NOS should not be zero, if there is only one block in non-los, i.e. in the former if sentence,
-      if total_free = GC_BLOCK_SIZE_BYTES, then the computed nos_reserve_size is between zero
-      and GC_BLOCK_SIZE_BYTES. In this case, we assign this block to NOS*/
+     *if total_free = GC_BLOCK_SIZE_BYTES, then the computed nos_reserve_size is between zero
+     *and GC_BLOCK_SIZE_BYTES. In this case, we assign this block to NOS*/
     if(nos_reserve_size <= GC_BLOCK_SIZE_BYTES)  nos_reserve_size = GC_BLOCK_SIZE_BYTES;
 
 #ifdef STATIC_NOS_MAPPING
     if(nos_reserve_size > fspace->reserved_heap_size) nos_reserve_size = fspace->reserved_heap_size;
 #endif
-    //To reserve some MOS space to avoid fallback situation. 
-    //But we need ensure nos has at least one block 
-    //if(new_nos_size > GC_MOS_MIN_EXTRA_REMAIN_SIZE) new_nos_size -= GC_MOS_MIN_EXTRA_REMAIN_SIZE ;
+    /*To reserve some MOS space to avoid the fallback situation,
+     *but we need to ensure NOS keeps at least one block. */
    POINTER_SIZE_INT reserve_in_mos = GC_MOS_MIN_EXTRA_REMAIN_SIZE;
    while (reserve_in_mos >= GC_BLOCK_SIZE_BYTES){
      if(nos_reserve_size >= reserve_in_mos + GC_BLOCK_SIZE_BYTES){
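Note (the hunk above): the rewritten comment describes a back-off: hold back GC_MOS_MIN_EXTRA_REMAIN_SIZE from NOS to delay fallback, but never leave NOS without a block. The loop body continues past this hunk's trailing context, so the sketch below is a plausible standalone reconstruction (constants hypothetical), not the patched source itself:

    #include <cstdio>

    int main() {
      const unsigned long KB = 1024, MB = 1024 * KB;
      const unsigned long GC_BLOCK_SIZE_BYTES = 32 * KB;          /* hypothetical */
      const unsigned long GC_MOS_MIN_EXTRA_REMAIN_SIZE = 36 * MB; /* as in gen_adapt.cpp */
      unsigned long nos_reserve_size = 5 * MB;     /* small free budget left for NOS */
      unsigned long reserve_in_mos = GC_MOS_MIN_EXTRA_REMAIN_SIZE;
      while (reserve_in_mos >= GC_BLOCK_SIZE_BYTES){
        if (nos_reserve_size >= reserve_in_mos + GC_BLOCK_SIZE_BYTES){
          nos_reserve_size -= reserve_in_mos;      /* donate the hold-back to MOS */
          break;
        }
        reserve_in_mos >>= 1;                      /* too greedy, halve and retry */
      }
      printf("nos keeps %luKB\n", nos_reserve_size / KB);  /* 512KB with these inputs */
      return 0;
    }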
@@ -342,7 +333,6 @@
 }
 
 #ifndef STATIC_NOS_MAPPING
-
 void gc_gen_adapt(GC_Gen* gc, int64 pause_time)
 {
   gc_decide_next_collect(gc, pause_time);
@@ -366,9 +356,10 @@
     return;
 
   /* below are ajustment */
+  POINTER_SIZE_INT curr_heap_commit_end =
+    (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + gc->committed_heap_size;
+  nos_boundary = (void*)(curr_heap_commit_end - new_nos_size);
 
-  nos_boundary = (void*)((POINTER_SIZE_INT)gc->heap_end - new_nos_size);
-
   fspace->heap_start = nos_boundary;
   fspace->blocks = (Block*)nos_boundary;
   fspace->committed_heap_size = new_nos_size;
@@ -394,8 +385,8 @@
   return;
 }
 
-#else /* ifndef STATIC_NOS_MAPPING */
-
+/* ifdef STATIC_NOS_MAPPING */
+#else
 void gc_gen_adapt(GC_Gen* gc, int64 pause_time)
 {
   gc_decide_next_collect(gc, pause_time);
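Note (gen_adapt.cpp): once the heap can commit less than it reserves, gc->heap_end denotes the end of the reserved range rather than the live heap, so both the total-size computation and the nos_boundary assignment are now derived from heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + committed_heap_size. A small sketch of the difference, with a hypothetical layout (addresses and sizes invented):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t MB = 1024 * 1024;
      uintptr_t heap_start = 0x20000000;
      uintptr_t reserved  = 512 * MB;   /* -Xmx: address range reserved */
      uintptr_t committed = 256 * MB;   /* what is actually committed so far */
      uintptr_t reserve_for_null = 0;   /* LOS_HEAD_RESERVE_FOR_HEAP_NULL, non-COMPRESS build */

      uintptr_t heap_end = heap_start + reserved;
      uintptr_t curr_heap_commit_end = heap_start + reserve_for_null + committed;
      uintptr_t new_nos_size = 64 * MB;

      /* the old nos_boundary landed a full 256MB beyond the committed heap */
      assert((heap_end - new_nos_size) - (curr_heap_commit_end - new_nos_size)
             == reserved - committed);
      return 0;
    }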
Index: src/mark_compact/mspace_slide_compact.cpp
===================================================================
--- src/mark_compact/mspace_slide_compact.cpp	(revision 536884)
+++ src/mark_compact/mspace_slide_compact.cpp	(working copy)
@@ -357,17 +357,8 @@
 
   if( ++old_num == num_active_collectors ){
     /* last collector's world here */
     /*Retune space tuner to insure the tuning size is not to great*/
-//    Boolean retune_result;
     if(gc->tuner->kind != TRANS_NOTHING) gc_space_retune(gc);
-//    if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS) printf("los shrink...\n");
-//    if(gc->tuner->kind == TRANS_FROM_MOS_TO_LOS) printf("los extend...\n");
-/*    if(!retune_result){
-      gc->collect_result = FALSE;
-      num_marking_collectors++;
-      return;
-    }*/
-
     assert(!(gc->tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
     /* prepare for next phase */
     gc_init_block_for_collectors(gc, mspace);
Index: src/mark_sweep/lspace.h
===================================================================
--- src/mark_sweep/lspace.h	(revision 536884)
+++ src/mark_sweep/lspace.h	(working copy)
@@ -27,9 +27,9 @@
 
 /*Fixme: This macro is for handling HEAP_NULL issues caused by JIT OPT*/
 #ifdef COMPRESS_REFERENCE
-  #define LOS_HEAD_RESERVE_FOR_HEAP_NULL (GC_BLOCK_SIZE_BYTES )
+  #define LOS_HEAD_RESERVE_FOR_HEAP_NULL ( SPACE_ALLOC_UNIT )
 #else
-  #define LOS_HEAD_RESERVE_FOR_HEAP_NULL (0*KB)
+  #define LOS_HEAD_RESERVE_FOR_HEAP_NULL ( 0*KB )
 #endif
 
 typedef struct Lspace{
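Note (lspace.h): with COMPRESS_REFERENCE, the HEAP_NULL head reserve grows from one GC block to one SPACE_ALLOC_UNIT. Committed sizes are allocation-unit multiples, so the reserve must be one too, or the heap_start + LOS_HEAD_RESERVE_FOR_HEAP_NULL + committed_heap_size arithmetic above would fall off alignment. A sketch with hypothetical sizes (64KB allocation unit, 32KB blocks):

    #include <cassert>

    int main() {
      const unsigned long KB = 1024;
      const unsigned long SPACE_ALLOC_UNIT = 64 * KB;    /* hypothetical */
      const unsigned long GC_BLOCK_SIZE_BYTES = 32 * KB; /* hypothetical */
      unsigned long old_reserve = GC_BLOCK_SIZE_BYTES;   /* old definition */
      unsigned long new_reserve = SPACE_ALLOC_UNIT;      /* new definition */
      assert(new_reserve % SPACE_ALLOC_UNIT == 0);  /* commit-end stays aligned */
      assert(old_reserve % SPACE_ALLOC_UNIT != 0);  /* old one broke alignment */
      return 0;
    }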