diff --git a/gcv4/src/garbage_collector.h b/gcv4/src/garbage_collector.h index 0e49c8d..d5de159 100644 --- a/gcv4/src/garbage_collector.h +++ b/gcv4/src/garbage_collector.h @@ -57,10 +57,6 @@ public: int gc_add_fresh_chunks(unsigned int); - tl::MemoryPoolMT& get_gcvt_pool() { - return gcvt_pool; - } - /// Considers statistics and makes a decision to resize the heap. void consider_heap_resize(int size_failed); @@ -265,8 +261,6 @@ #endif // GC_LIVE_OBJECT_LISTS_PER_COMPA ///////////////////////////////////////////////////////////////////////////////////////////////////// private: - tl::MemoryPoolMT gcvt_pool; - block_info * get_fresh_chunk_from_block_store(bool stay_above_waterline); block_info *get_free_chunk_from_global_gc_chunks(); diff --git a/gcv4/src/gc_for_vm.cpp b/gcv4/src/gc_for_vm.cpp index 0005085..0eb82ad 100644 --- a/gcv4/src/gc_for_vm.cpp +++ b/gcv4/src/gc_for_vm.cpp @@ -486,17 +486,6 @@ void gc_wrapup() heapTraceFinalize(); } - if (p_loaded_vtable_directory) { - p_loaded_vtable_directory->rewind(); - Partial_Reveal_VTable *vt_handle; - vt_handle = (Partial_Reveal_VTable *)p_loaded_vtable_directory->next(); - while(vt_handle != NULL) - { - STD_FREE(vt_handle->get_gcvt()); - vt_handle = (Partial_Reveal_VTable *)p_loaded_vtable_directory->next(); - } - delete p_loaded_vtable_directory; - } if (interior_pointer_table) delete interior_pointer_table; if (compressed_pointer_table) @@ -1370,7 +1359,7 @@ static int *build_slot_offset_array(Clas unsigned int size = (num_ref_fields+1) * sizeof (unsigned int); // malloc up the array if we need one. - int *new_ref_array = (int*) p_global_gc->get_gcvt_pool().alloc(size); + int *new_ref_array = (int*) class_alloc_via_classloader(ch, size); result = new_ref_array; for(idx = 0; idx < num_fields; idx++) { @@ -1486,7 +1475,9 @@ void gc_class_prepared (Class_Handle ch, assert(ch); assert(vth); Partial_Reveal_VTable *vt = (Partial_Reveal_VTable *)vth; - vt->set_gcvt((GC_VTable_Info *) STD_MALLOC(sizeof(GC_VTable_Info))); + void* p = class_alloc_via_classloader(ch, sizeof(GC_VTable_Info)); + vt->set_gcvt((GC_VTable_Info *) p); + assert(vt->get_gcvt()); memset((void *) vt->get_gcvt(), 0, sizeof(GC_VTable_Info)); vt->get_gcvt()->gc_clss = ch; vt->get_gcvt()->gc_class_properties = 0; // Clear the properties. @@ -1494,13 +1485,6 @@ void gc_class_prepared (Class_Handle ch, // Set the properties. gc_set_prop_alignment_mask(vt, class_get_alignment(ch)); - // Remember the VTable (vt) in a hash table so that delta_dynopt can check if it has a legal - // vtable. (see object_address_seems_valid for an example of its use.) - if (!p_loaded_vtable_directory) { - p_loaded_vtable_directory = new Hash_Table(); - } - p_loaded_vtable_directory->add_entry(vt); - if(class_is_array(ch)) { Class_Handle array_element_class = class_get_array_element_class(ch); // We have an array so not it. 
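The hunks above move per-class GC metadata off the C heap: build_slot_offset_array() and gc_class_prepared() now take their memory from the defining class loader via class_alloc_via_classloader(), so the STD_FREE() walk removed from gc_wrapup() becomes unnecessary (the loader's pool releases this memory when the loader itself goes away). A condensed sketch of the resulting allocation path, with the property and slot-offset setup elided:

void gc_class_prepared(Class_Handle ch, VTable_Handle vth)
{
    assert(ch);
    assert(vth);
    Partial_Reveal_VTable *vt = (Partial_Reveal_VTable *)vth;
    // Loader-pool allocation: reclaimed automatically when the class
    // loader is unloaded, so no explicit free is needed at VM shutdown.
    void *p = class_alloc_via_classloader(ch, sizeof(GC_VTable_Info));
    vt->set_gcvt((GC_VTable_Info *)p);
    assert(vt->get_gcvt());
    memset((void *)vt->get_gcvt(), 0, sizeof(GC_VTable_Info));
    // ... gc_clss / property / slot-offset setup as in the hunk above ...
}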
@@ -1589,8 +1573,7 @@ #endif //NUM_EXTRA_OBJ_HEADER_WORDS int gc_number_of_slots = vt->get_gcvt()->gc_number_of_slots; int* gc_ref_offset_array = vt->get_gcvt()->gc_ref_offset_array; - int* gc_strong_ref_offset_array = (int*) p_global_gc-> - get_gcvt_pool().alloc(gc_number_of_slots * sizeof (unsigned int)); + int* gc_strong_ref_offset_array = (int*) class_alloc_via_classloader(ch, gc_number_of_slots * sizeof (unsigned int)); assert(gc_strong_ref_offset_array); int i,j; diff --git a/gcv4/src/gc_globals.cpp b/gcv4/src/gc_globals.cpp index d437a2c..b66adc2 100644 --- a/gcv4/src/gc_globals.cpp +++ b/gcv4/src/gc_globals.cpp @@ -55,16 +55,6 @@ POINTER_SIZE_INT initial_heap_size_bytes POINTER_SIZE_INT final_heap_size_bytes = 0; // -// This remembered set has an entry per loaded class. -// It is used for determining valid vtable pointers -// when examining candidate objects. -// - -Hash_Table *p_loaded_vtable_directory = NULL; - - - -// // Global to specify the size differentiating // unsigned los_threshold_bytes = 0; diff --git a/gcv4/src/gc_header.h b/gcv4/src/gc_header.h index 5470355..f2e91c4 100644 --- a/gcv4/src/gc_header.h +++ b/gcv4/src/gc_header.h @@ -33,12 +33,6 @@ #include "gc_cout.h" #include "open/hythread_ext.h" #include "hash_table.h" -// -// This Hash_Table has an entry per loaded class. -// It is used for determining valid vtable pointers -// when examining candidate objects. -// -extern Hash_Table *p_loaded_vtable_directory; // Define USE_COMPRESSED_VTABLE_POINTERS here to enable compressed vtable // pointers within objects. @@ -70,8 +64,8 @@ #endif // !USE_COMPRESSED_VTABLE_POINTER typedef struct Partial_Reveal_Object { #ifdef USE_COMPRESSED_VTABLE_POINTERS - uint32 vt_offset; private: + uint32 vt_offset; Obj_Info_Type obj_info; public: @@ -86,12 +80,10 @@ public: return (struct Partial_Reveal_VTable *) (vt_offset + vtable_base); } struct Partial_Reveal_VTable *vt() { assert(vt_offset); return (struct Partial_Reveal_VTable *) (vt_offset + vtable_base); } - bool vt_valid() {return p_loaded_vtable_directory->is_present((void *) (vt_offset + vtable_base));} void set_vtable(Allocation_Handle ah) { // vtables are allocated from a fixed-size pool in the VM // see the details in mem_alloc.cpp, grep for vtable_data_pool. 
- assert(ah < 8*1048576); vt_offset = (uint32)ah; } @@ -139,16 +131,17 @@ public: } static uint64 max_supported_heap_size() { return (0x100000000) << forwarding_pointer_compression_shift(); } #else // !USE_COMPRESSED_VTABLE_POINTERS +private: struct Partial_Reveal_VTable *vt_raw; Obj_Info_Type obj_info; +public: Obj_Info_Type get_obj_info() { return obj_info; } void set_obj_info(Obj_Info_Type new_obj_info) { obj_info = new_obj_info; } Obj_Info_Type * obj_info_addr() { return &obj_info; } struct Partial_Reveal_VTable *vtraw() { return vt_raw; } struct Partial_Reveal_VTable *vt() { ASSERT(vt_raw, "incorrect object at " << this); return vt_raw; } - bool vt_valid() {return p_loaded_vtable_directory->is_present((void *)vt_raw);} void set_vtable(Allocation_Handle ah) { vt_raw = (struct Partial_Reveal_VTable *)ah; } struct Partial_Reveal_Object *get_forwarding_pointer() { @@ -180,7 +173,7 @@ #else // !USE_COMPRESSED_VTABLE_POINTERS } static uint64 max_supported_heap_size() { return ~((uint64)0); } #endif // !USE_COMPRESSED_VTABLE_POINTERS - + static POINTER_SIZE_INT vtable_base; static POINTER_SIZE_INT heap_base; diff --git a/gcv4/src/mark.h b/gcv4/src/mark.h index 32e4470..f146c56 100644 --- a/gcv4/src/mark.h +++ b/gcv4/src/mark.h @@ -121,7 +121,6 @@ static inline bool mark_object(Partial_R uint8 mask = (uint8) (1 << bit_index_into_byte); - vm_notify_live_object_class(p_obj->vt()->get_gcvt()->gc_clss); while (true) { uint8 old_val = *p_byte; uint8 final_val = (uint8) (old_val | mask); diff --git a/include/jit_import.h b/include/jit_import.h index eed1e76..736f1aa 100644 --- a/include/jit_import.h +++ b/include/jit_import.h @@ -245,13 +245,10 @@ typedef enum Code_Allocation_ActionEnum // heat values. The JIT is responsible for specifying ids that are unique // within the same method. // The first instruction of the chunk with id=0 is the entry point of the method. -// If the CAA_Allocate argument is specified, memory is allocated and a pointer +// DEPRECATED: If the CAA_Allocate argument is specified, memory is allocated and a pointer // to it is returned. If the CAA_Simulate argument is specified, no memory is -// actually allocated and the VM returns an address that would have been -// allocated if CAA_Allocate was specified and all the other arguments were -// the same. The VM may return NULL when CAA_Simulate is specified. This may -// for instance happen if multiple heat values were mapped to the same code -// pool or if the specified size would require a new code pool. +// allocated, which is the same as passing size = 0: the function only returns +// the current allocation address in the pool; no memory is reserved. VMEXPORT Byte * method_allocate_code_block(Method_Handle m, JIT_Handle j, diff --git a/include/open/types.h b/include/open/types.h index 01a25eb..bd6afe5 100644 --- a/include/open/types.h +++ b/include/open/types.h @@ -203,7 +203,6 @@ typedef struct Method *Method_Handle; typedef struct Method_Signature *Method_Signature_Handle; typedef struct TypeDesc *Type_Info_Handle; typedef POINTER_SIZE_INT Allocation_Handle; -typedef POINTER_SIZE_INT Runtime_Type_Handle; typedef void* NativeCodePtr; typedef struct ClassLoader* ClassLoaderHandle; typedef struct ManagedObject* ManagedPointer; diff --git a/include/open/vm.h b/include/open/vm.h index f2db97f..d24e586 100644 --- a/include/open/vm.h +++ b/include/open/vm.h @@ -119,10 +119,6 @@ VMEXPORT VTable_Handle class_get_vtable( // routines, given a class handle.
VMEXPORT Allocation_Handle class_get_allocation_handle(Class_Handle ch); -// Returns the allocation handle to be used for runtime type checks in -// JIT-generated code, given a class handle. -VMEXPORT Runtime_Type_Handle class_get_runtime_type_handle(Class_Handle ch); - // Returns the class handle corresponding to a given allocation handle. VMEXPORT Class_Handle allocation_handle_get_class(Allocation_Handle ah); @@ -195,6 +191,9 @@ VMEXPORT Method_Handle class_get_method( // Returns TRUE if all instances of this class are pinned. VMEXPORT Boolean class_is_pinned(Class_Handle ch); +// Allocates size bytes from the pool of the class's defining class loader. +VMEXPORT void* class_alloc_via_classloader(Class_Handle ch, int32 size); + // Returns TRUE if this is an array of primitives. VMEXPORT Boolean class_is_non_ref_array(Class_Handle ch); @@ -676,7 +675,7 @@ VMEXPORT POINTER_SIZE_INT vm_get_vtable_ // information in each object's header. This is typically used // by the JIT for generating type-checking code, e.g. for inlined // type checks or for inlining of virtual methods. -VMEXPORT unsigned vm_get_runtime_type_handle_width(); +VMEXPORT unsigned vm_get_vtable_ptr_size(); // Returns a printable signature. The character buffer is owned by the // caller. Call free_string_buffer to reclaim the memory. diff --git a/include/open/vm_gc.h b/include/open/vm_gc.h index 3e0bc2e..a3bc5ce 100644 --- a/include/open/vm_gc.h +++ b/include/open/vm_gc.h @@ -107,13 +107,6 @@ VMEXPORT void vm_classloader_iterate_obj VMEXPORT bool vm_iterate_object(Managed_Object_Handle object); /** - * GC calls this function for each live object it finds in heap. - * This is used for finding unreferenced class loaders for class - * unloading. - */ -VMEXPORT void vm_notify_live_object_class(Class_Handle); - -/** * GC calls this function to hint VM that finalizers may need to be run * and references enqueued. This method is guaranteed not to hold global * GC lock.
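For reference, the VM-side implementation of class_alloc_via_classloader() appears in C_Interface.cpp later in this patch; it simply forwards to ClassLoader::Alloc(). A minimal hypothetical caller, modeled on build_slot_offset_array() from the GC changes above (alloc_ref_offset_array is an illustrative name, not part of the patch):

// Allocate a per-class reference-offset array; because the memory comes
// from the defining loader's pool, it lives exactly as long as the class.
static int *alloc_ref_offset_array(Class_Handle ch, unsigned num_ref_fields)
{
    int32 size = (int32)((num_ref_fields + 1) * sizeof(int));
    int *arr = (int *)class_alloc_via_classloader(ch, size);
    assert(arr);
    return arr;
}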
diff --git a/jitrino/src/vm/drl/DrlVMInterface.h b/jitrino/src/vm/drl/DrlVMInterface.h index 74ba382..f861944 100644 --- a/jitrino/src/vm/drl/DrlVMInterface.h +++ b/jitrino/src/vm/drl/DrlVMInterface.h @@ -100,7 +100,7 @@ public: return class_is_initialized((Class_Handle)vmTypeHandle)?true:false; } void* getVTable(void* vmTypeHandle) { - return (void *) class_get_runtime_type_handle((Class_Handle)vmTypeHandle); + return (void *) class_get_vtable((Class_Handle)vmTypeHandle); } // @@ -424,7 +424,7 @@ public: return (vm_vtable_pointers_are_compressed() != 0); } uint32 getVTablePtrSize() { - return vm_get_runtime_type_handle_width(); + return vm_get_vtable_ptr_size(); } uint64 getVTableBase() { return vm_get_vtable_base(); diff --git a/port/include/lil_code_generator.h b/port/include/lil_code_generator.h index 0995a08..e0a6e78 100644 --- a/port/include/lil_code_generator.h +++ b/port/include/lil_code_generator.h @@ -25,6 +25,8 @@ #define _LIL_CODE_GENERATOR_H_ #include "lil.h" #include "vm_core_types.h" +#include "environment.h" +#include "mem_alloc.h" // This is an abstract base case for LIL code generators // Subclasses compile LIL into native code for a particular @@ -44,7 +46,8 @@ public: // The stub_name is for vtune support // Dump an ascii version of the compiled stub to stdout if dump_stub // If cs_stats is nonnull add the number of bytes of the compiled code to *cs_stats - NativeCodePtr compile(LilCodeStub* cs); + NativeCodePtr compile(LilCodeStub* cs, PoolManager* code_pool = + VM_Global_State::loader_env->GlobalCodeMemoryManager); protected: LilCodeGenerator(); @@ -52,7 +55,7 @@ protected: // allocates a chunk of memory for a LIL stub; the user-provided function // compile_main() should call this function instead of allocating memory // directly. - NativeCodePtr allocate_memory(size_t); + NativeCodePtr allocate_memory(size_t, PoolManager*); // generates compiled code for a LIL stub, and returns its address. The // size of the compiled stub is placed in stub_size. Called by the @@ -61,7 +64,7 @@ protected: // Each subclass of LilCodeGenerator should provide a platform-dependent // implementation of compile_main(). The memory area that holds the // compiled code should be allocated by calling allocate_memory(). 
- virtual NativeCodePtr compile_main(LilCodeStub* cs, size_t* stub_size) = 0; + virtual NativeCodePtr compile_main(LilCodeStub* cs, size_t* stub_size, PoolManager* code_pool) = 0; }; #endif // _LIL_CODE_GENERATOR_H_ diff --git a/port/src/lil/em64t/pim/include/lil_code_generator_em64t.h b/port/src/lil/em64t/pim/include/lil_code_generator_em64t.h index 207e94c..cbbb3c7 100644 --- a/port/src/lil/em64t/pim/include/lil_code_generator_em64t.h +++ b/port/src/lil/em64t/pim/include/lil_code_generator_em64t.h @@ -609,7 +609,7 @@ class LilCodeGeneratorEM64T : public Lil LilCodeGeneratorEM64T(); protected: - NativeCodePtr compile_main(LilCodeStub* , size_t*); + NativeCodePtr compile_main(LilCodeStub* , size_t*, PoolManager*); }; #endif // _LIL_CODE_GENERATOR_EM64T_ diff --git a/port/src/lil/em64t/pim/lil_code_generator_em64t.cpp b/port/src/lil/em64t/pim/lil_code_generator_em64t.cpp index 2a0b9b6..9481ad6 100644 --- a/port/src/lil/em64t/pim/lil_code_generator_em64t.cpp +++ b/port/src/lil/em64t/pim/lil_code_generator_em64t.cpp @@ -1622,7 +1622,7 @@ #endif // STUB_DEBUG LilCodeGeneratorEM64T::LilCodeGeneratorEM64T(): LilCodeGenerator() {} -NativeCodePtr LilCodeGeneratorEM64T::compile_main(LilCodeStub * cs, size_t * stub_size) { +NativeCodePtr LilCodeGeneratorEM64T::compile_main(LilCodeStub * cs, size_t * stub_size, PoolManager* code_pool) { // start a memory manager tl::MemoryPool m; // get context @@ -1631,7 +1631,7 @@ NativeCodePtr LilCodeGeneratorEM64T::com LcgEM64TCodeGen codegen(cs, *context, m); // copy generated code to the destination *stub_size = codegen.get_size(); - NativeCodePtr buffer = allocate_memory(*stub_size); + NativeCodePtr buffer = allocate_memory(*stub_size, code_pool); codegen.copy_stub(buffer); return buffer; diff --git a/port/src/lil/ia32/pim/include/lil_code_generator_ia32.h b/port/src/lil/ia32/pim/include/lil_code_generator_ia32.h index c80869b..bbd361a 100644 --- a/port/src/lil/ia32/pim/include/lil_code_generator_ia32.h +++ b/port/src/lil/ia32/pim/include/lil_code_generator_ia32.h @@ -32,7 +32,7 @@ class LilCodeGeneratorIa32 : public LilC LilCodeGeneratorIa32(); protected: - NativeCodePtr compile_main(LilCodeStub* , size_t*); + NativeCodePtr compile_main(LilCodeStub* , size_t*, PoolManager*); }; #endif // _LIL_CODE_GENERATOR_IA32_ diff --git a/port/src/lil/ia32/pim/lil_code_generator_ia32.cpp b/port/src/lil/ia32/pim/lil_code_generator_ia32.cpp index bc32c5b..d290427 100644 --- a/port/src/lil/ia32/pim/lil_code_generator_ia32.cpp +++ b/port/src/lil/ia32/pim/lil_code_generator_ia32.cpp @@ -1461,12 +1461,12 @@ static void main_pass(LilCodeStub* cs, t } -NativeCodePtr LilCodeGeneratorIa32::compile_main(LilCodeStub* cs, size_t* stub_size) +NativeCodePtr LilCodeGeneratorIa32::compile_main(LilCodeStub* cs, size_t* stub_size, PoolManager* code_pool) { LcgIa32PrePassInfo* data; tl::MemoryPool mem; size_t size = pre_pass(cs, &mem, &data); - NativeCodePtr buf = allocate_memory(size); + NativeCodePtr buf = allocate_memory(size, code_pool); main_pass(cs, &mem, buf, data, stub_size); return buf; } diff --git a/port/src/lil/ipf/pim/include/lil_code_generator_ipf.h b/port/src/lil/ipf/pim/include/lil_code_generator_ipf.h index e08e50e..3c712e0 100644 --- a/port/src/lil/ipf/pim/include/lil_code_generator_ipf.h +++ b/port/src/lil/ipf/pim/include/lil_code_generator_ipf.h @@ -37,7 +37,7 @@ public: LilCodeGeneratorIpf(); protected: - NativeCodePtr compile_main(LilCodeStub* , size_t*); + NativeCodePtr compile_main(LilCodeStub* , size_t*, PoolManager*); }; #endif // _LIL_CODE_GENERATOR_IPF_ diff --git 
a/port/src/lil/ipf/pim/lil_code_generator_ipf.cpp b/port/src/lil/ipf/pim/lil_code_generator_ipf.cpp index cc613a2..a4181c0 100644 --- a/port/src/lil/ipf/pim/lil_code_generator_ipf.cpp +++ b/port/src/lil/ipf/pim/lil_code_generator_ipf.cpp @@ -2113,7 +2113,7 @@ LilCodeGeneratorIpf::LilCodeGeneratorIpf { } -NativeCodePtr LilCodeGeneratorIpf::compile_main(LilCodeStub* cs, size_t* stub_size) { +NativeCodePtr LilCodeGeneratorIpf::compile_main(LilCodeStub* cs, size_t* stub_size, PoolManager* code_pool) { // start a memory manager tl::MemoryPool m; @@ -2131,7 +2131,7 @@ NativeCodePtr LilCodeGeneratorIpf::compi // get the goodies from the emitter emitter.flush_buffer(); *stub_size = emitter.get_size(); - NativeCodePtr buffer = allocate_memory(*stub_size); + NativeCodePtr buffer = allocate_memory(*stub_size, code_pool); emitter.copy((char*)buffer); flush_hw_cache((Byte*)buffer, *stub_size); sync_i_cache(); diff --git a/port/src/lil/lil_code_generator.cpp b/port/src/lil/lil_code_generator.cpp index 299f3e8..8d52a5f 100644 --- a/port/src/lil/lil_code_generator.cpp +++ b/port/src/lil/lil_code_generator.cpp @@ -54,10 +54,11 @@ LilCodeGenerator::LilCodeGenerator() { } -NativeCodePtr LilCodeGenerator::compile(LilCodeStub* cs) +NativeCodePtr LilCodeGenerator::compile(LilCodeStub* cs, PoolManager* code_pool) { + assert (code_pool); size_t stub_size; - NativeCodePtr stub = compile_main(cs, &stub_size); + NativeCodePtr stub = compile_main(cs, &stub_size, code_pool); lil_cs_set_code_size(cs, stub_size); compile_add_dynamic_generated_code_chunk("unknown", stub, stub_size); @@ -69,9 +70,10 @@ NativeCodePtr LilCodeGenerator::compile( } -NativeCodePtr LilCodeGenerator::allocate_memory(size_t size) +NativeCodePtr LilCodeGenerator::allocate_memory(size_t size, PoolManager* code_pool) { - NativeCodePtr buf = malloc_fixed_code_for_jit(size, DEFAULT_CODE_ALIGNMENT, CODE_BLOCK_HEAT_DEFAULT, CAA_Allocate); + assert(code_pool); + NativeCodePtr buf = code_pool->alloc(size, DEFAULT_CODE_ALIGNMENT, CAA_Allocate); // Check for 16-byte alignment assert((((POINTER_SIZE_INT)buf)&15)==0); diff --git a/vmcore/include/Class.h b/vmcore/include/Class.h index 7b5f26d..e727e4e 100644 --- a/vmcore/include/Class.h +++ b/vmcore/include/Class.h @@ -31,7 +31,7 @@ #include "open/gc.h" #include "port_malloc.h" #include "String_Pool.h" #include "vtable.h" - +#include "jit_intf.h" // forward declarations struct Class; @@ -765,7 +765,6 @@ enum AccessAndPropertiesFlags { ACC_ENUM = 0x4000 }; - /** VM representation of Java class. 
* This class contains methods for parsing classes, querying class properties, * setting external properties of a class (source file name, class file name), @@ -1767,6 +1766,9 @@ public: /** Updates throwing statistics for java/lang/Throwable decendants.*/ void class_thrown() { m_num_throws++; } + /** Allocates code memory from the pool of the class's defining class loader. */ + void* code_alloc(size_t size, size_t alignment, Code_Allocation_Action action); + /** Updates initialization check statistics.*/ void initialization_checked() { m_num_class_init_checks++; } diff --git a/vmcore/include/classloader.h b/vmcore/include/classloader.h index f36e879..d64442e 100644 --- a/vmcore/include/classloader.h +++ b/vmcore/include/classloader.h @@ -193,7 +193,6 @@ public: } Class* AllocateAndReportInstance(const Global_Env* env, Class* klass); Class* NewClass(const Global_Env* env, const String* name); - ManagedObject** RegisterClassInstance(const String* className, ManagedObject* instance); Package* ProvidePackage(Global_Env* env, const String *class_name, const char *jar); Class* DefineClass(Global_Env* env, const char* class_name, uint8* bytecode, unsigned offset, unsigned length, const String** res_name = NULL); @@ -266,6 +265,18 @@ public: Unlock(); return ptr; } + + PoolManager* GetCodePool() { + return CodeMemoryManager; + } + + inline void* CodeAlloc(size_t size, size_t alignment, Code_Allocation_Action action) { + return CodeMemoryManager->alloc(size, alignment, action); + } + inline void* VTableAlloc(size_t size, size_t alignment, Code_Allocation_Action action) { + return VM_Global_State::loader_env->VTableMemoryManager->alloc(size, alignment, action); + } + private: static Lock_Manager m_tableLock; static unsigned m_capacity; @@ -294,6 +305,7 @@ protected: unsigned m_fullSize; void* m_verifyData; apr_pool_t* pool; + PoolManager *CodeMemoryManager; // methods Class* WaitDefinition(Global_Env* env, const String* className); diff --git a/vmcore/include/environment.h b/vmcore/include/environment.h index 983ffbb..a9ff5e2 100644 --- a/vmcore/include/environment.h +++ b/vmcore/include/environment.h @@ -28,6 +28,7 @@ #include #include "open/hythread.h" #include "open/compmgr.h" #include "open/em_vm.h" +#include "mem_alloc.h" #include "String_Pool.h" #include "vm_core_types.h" @@ -50,6 +51,9 @@ struct Global_Env { void* portLib; // Classlib's port library DynamicCode* dcList; Assertion_Registry* assert_reg; + PoolManager* GlobalCodeMemoryManager; + PoolManager* VTableMemoryManager; + Method_Lookup_Table* vm_methods; hythread_library_t hythread_lib; String_Pool string_pool; // string table @@ -65,6 +69,7 @@ struct Global_Env { bool strings_are_compressed; // 2003-05-19: The VM searches the java.lang.String class for a "byte[] bvalue" field at startup, // as an indication that the Java class library supports compressed strings with 8-bit characters. bool use_large_pages; // 20040109 Use large pages for class-related data such as vtables. + size_t system_page_size; // system page size, selected according to use_large_pages bool verify_all; // psrebriy 20050815 Verify all classes including loaded by bootstrap class loader bool pin_interned_strings; // if true, interned strings are never moved @@ -192,7 +197,6 @@ struct Global_Env { // VTable for the java_lang_String class VTable* JavaLangString_VTable; - Allocation_Handle JavaLangString_allocation_handle; // Keeps uncaught exception for the thread which is destroying VM.
jthrowable uncaught_exception; diff --git a/vmcore/include/mem_alloc.h b/vmcore/include/mem_alloc.h index 22e23de..dc11f68 100644 --- a/vmcore/include/mem_alloc.h +++ b/vmcore/include/mem_alloc.h @@ -15,39 +15,71 @@ * limitations under the License. */ /** - * @author Intel, Alexei Fedotov + * @author Intel, Aleksey Ignatenko, Alexei Fedotov * @version $Revision: 1.1.2.1.4.3 $ */ #ifndef _MEM_ALLOC_H_ #define _MEM_ALLOC_H_ +#include "jit_import.h" #include "port_vmem.h" -typedef struct Pool_Descriptor { - Byte *start; // (misnamed) points to the next free byte in the pool - Byte *end; // end of the pool's memory region - size_t default_size; - bool is_code; - bool optimize_for_trace_cache; -#ifdef VM_STATS - uint64 num_allocations; - uint64 num_pool_allocations; - size_t total_pool_size; - size_t total_size_allocated; - uint64 num_resizes; - size_t current_alloc_size; -#endif //VM_STATS - port_vmem_t *descriptor; -} Pool_Descriptor; - - -extern Pool_Descriptor* jit_code_pool; -extern Pool_Descriptor* vtable_data_pool; - -extern unsigned system_page_size; -extern unsigned page_size_for_allocation; -extern size_t initial_code_pool_size; +#define DEFAULT_COMMOT_JIT_CODE_POOL_SIZE 32*1024 // pool is used for common stub code +#define DEFAULT_COMMOT_VTABLE_POOL_SIZE_NO_RESIZE 8*1024*1024 // used for compressed VTable pointers +#define DEFAULT_CLASSLOADER_VTABLE_POOL_SIZE 32*1024 +#define DEFAULT_CLASSLOADER_JIT_CODE_POOL_SIZE 64*1024 +#define DEFAULT_BOOTSTRAP_JIT_CODE_POOL_SIZE 256*1024 +#define DEFAULT_VTABLE_POOL_SIZE 128*1024 + +#define MEMORY_UTILIZATION_LIMIT 15 + +typedef struct PoolDescriptor { + Byte *_begin; // next free byte in memory chunk + Byte *_end; // end of memory chunk + size_t _size; // size of memory chunk + port_vmem_t* _descriptor; // for further memory deallocation + PoolDescriptor* _next; +} PoolDescriptor; + +// PoolManager is a thread-safe memory manager. +// A PoolDescriptor describes one memory chunk allocated inside the pool. +// There are two kinds of PoolDescriptor in a PoolManager: active and passive. +// PoolManager serves allocations from active PoolDescriptors; passive ones are already filled and are no longer used for allocation. +// The division into active and passive PoolDescriptors is based on MEMORY_UTILIZATION_LIMIT: +// a PoolDescriptor with less than 1/MEMORY_UTILIZATION_LIMIT of its size still free is considered passive; +// otherwise it is active (further allocations from it are allowed). + +class PoolManager { +public: + PoolManager(size_t initial_size, size_t page_size, bool use_large_pages, bool is_code, bool is_resize_allowed); + virtual ~PoolManager(); + + // alloc is synchronized inside the class + void* alloc(size_t size, size_t alignment, Code_Allocation_Action action); + inline Byte* get_pool_base(); + +protected: + PoolDescriptor* _active_pool; + PoolDescriptor* _passive_pool; + size_t _page_size; + bool _use_large_pages; + size_t _default_pool_size; + bool _is_code; + bool _is_resize_allowed; + + apr_pool_t* aux_pool; + apr_thread_mutex_t* aux_mutex; + + Byte *vtable_pool_start; // for compressed vtable pointers support only!
+ +protected: + inline PoolDescriptor* allocate_pool_storage(size_t size); // allocate memory for new PoolDescriptor + inline size_t round_up_to_page_size_multiple(size_t size); + inline void _lock(); + inline void _unlock(); +}; + #endif //_MEM_ALLOC_H_ diff --git a/vmcore/include/method_lookup.h b/vmcore/include/method_lookup.h index f13933f..a6a7e76 100644 --- a/vmcore/include/method_lookup.h +++ b/vmcore/include/method_lookup.h @@ -48,6 +48,7 @@ public: CodeChunkInfo *get(unsigned i); void add(CodeChunkInfo *m); + void remove(CodeChunkInfo *m); // Resembles add, but appends the new entry m at the end of the table. The new entry must have a starting address above all entries // in the table. This method does not acquire p_meth_addr_table_lock, so insertion must be protected by another lock or scheme. diff --git a/vmcore/include/nogc.h b/vmcore/include/nogc.h index db84da3..7682ea6 100644 --- a/vmcore/include/nogc.h +++ b/vmcore/include/nogc.h @@ -36,13 +36,6 @@ #else // !_IPF_ #define DEFAULT_CODE_ALIGNMENT 16 #endif // !_IPF_ -void vm_init_mem_alloc(); void *malloc_fixed_code_for_jit(size_t size, size_t alignment, unsigned heat, Code_Allocation_Action action); -// Allocate memory for vtable data. -void *allocate_vtable_data_from_pool(size_t size); - -// deallocate memory when finished -void vm_mem_dealloc(); - #endif // _NOGC_H_ diff --git a/vmcore/include/vm_stats.h b/vmcore/include/vm_stats.h index dfd0c40..7f8c3fa 100644 --- a/vmcore/include/vm_stats.h +++ b/vmcore/include/vm_stats.h @@ -248,6 +248,12 @@ public: Lock_Manager vm_stats_lock; apr_pool_t * vm_stats_pool; + // JIT and stub pools statistics + uint64 number_memoryblock_allocations; + uint64 total_memory_allocated; + uint64 total_memory_used; + uint64 number_memorymanager_created; + ~VM_Statistics(); static VM_Statistics & get_vm_stats(); diff --git a/vmcore/src/class_support/C_Interface.cpp b/vmcore/src/class_support/C_Interface.cpp index 479b18e..2f4d8a4 100644 --- a/vmcore/src/class_support/C_Interface.cpp +++ b/vmcore/src/class_support/C_Interface.cpp @@ -2140,6 +2140,14 @@ int class_get_referent_offset(Class_Hand return referent->get_offset(); } +void* class_alloc_via_classloader(Class_Handle ch, int32 size) +{ + assert(ch); + assert(size >= 0); + Class *clss = (Class *)ch; + assert (clss->get_class_loader()); + return clss->get_class_loader()->Alloc(size); +} //class_alloc_via_classloader unsigned class_get_alignment(Class_Handle ch) { @@ -2363,14 +2371,7 @@ Allocation_Handle class_get_allocation_h } -Runtime_Type_Handle class_get_runtime_type_handle(Class_Handle ch) -{ - assert(ch); - return (Runtime_Type_Handle)ch->get_allocation_handle(); -} - - -unsigned vm_get_runtime_type_handle_width() +unsigned vm_get_vtable_ptr_size() { if(vm_vtable_pointers_are_compressed()) { diff --git a/vmcore/src/class_support/Class.cpp b/vmcore/src/class_support/Class.cpp index 593f5ed..b7a9e62 100644 --- a/vmcore/src/class_support/Class.cpp +++ b/vmcore/src/class_support/Class.cpp @@ -37,6 +37,7 @@ #include "nogc.h" #include "vm_stats.h" #include "jit_intf_cpp.h" #include "type.h" +#include "cci.h" // // private static variable containing the id of the next class @@ -648,6 +649,13 @@ Method::Method() void Method::MethodClearInternals() { + CodeChunkInfo *jit_info; + for (jit_info = _jits; jit_info; jit_info = jit_info->_next) { + VM_Global_State::loader_env->vm_methods->remove(jit_info); + // ensure that jit_info was deleted + assert (!VM_Global_State::loader_env->vm_methods->find(jit_info->get_code_block_addr())); + } + if 
(_notify_recompiled_records != NULL) { Method_Change_Notification_Record *nr, *prev_nr; @@ -910,20 +918,10 @@ static void mark_classloader(ClassLoader } -void vm_notify_live_object_class(Class_Handle clss) -{ - if(!clss->is_reachable()) { - clss->mark_reachable(); - mark_classloader(clss->get_class_loader()); - } -} - // end pointers between struct Class and java.lang.Class //////////////////////////////////////////////////////////////////// - - //////////////////////////////////////////////////////////////////// // begin Support for compressed and raw reference pointers @@ -1182,3 +1180,11 @@ unsigned Class::calculate_size() return size; } + +void* Class::code_alloc(size_t size, size_t alignment, Code_Allocation_Action action) +{ + assert (m_class_loader); + return m_class_loader->CodeAlloc(size, alignment, action); +} + + diff --git a/vmcore/src/class_support/Environment.cpp b/vmcore/src/class_support/Environment.cpp index 443617b..ebc9b2d 100644 --- a/vmcore/src/class_support/Environment.cpp +++ b/vmcore/src/class_support/Environment.cpp @@ -111,7 +111,28 @@ #else // !_IPF_ #endif // !_IPF_ strings_are_compressed = false; + + // page size detection use_large_pages = false; + size_t *ps = port_vmem_page_sizes(); + if (ps[1] != 0 && use_large_pages) { + system_page_size = ps[1]; + } + else { + system_page_size = ps[0]; + } + + GlobalCodeMemoryManager = new PoolManager(DEFAULT_COMMOT_JIT_CODE_POOL_SIZE, system_page_size, use_large_pages, + true/*is_code*/, true/*is_resize_allowed*/); + if (vm_vtable_pointers_are_compressed()) { + VTableMemoryManager = new PoolManager(DEFAULT_COMMOT_VTABLE_POOL_SIZE_NO_RESIZE, system_page_size, use_large_pages, + false/*is_code*/, false/*is_resize_allowed*/); + } + else { + VTableMemoryManager = new PoolManager(DEFAULT_VTABLE_POOL_SIZE, system_page_size, use_large_pages, + false/*is_code*/, true/*is_resize_allowed*/); + } + verify_all = false; pin_interned_strings = false; @@ -162,7 +183,6 @@ #endif // !_IPF_ java_lang_reflect_Method_Class = NULL; JavaLangString_VTable = NULL; - JavaLangString_allocation_handle = 0; uncaught_exception = NULL; @@ -206,6 +226,11 @@ Global_Env::~Global_Env() compile_clear_dynamic_code_list(dcList); dcList = NULL; + delete GlobalCodeMemoryManager; + GlobalCodeMemoryManager = NULL; + delete VTableMemoryManager; + VTableMemoryManager = NULL; + hythread_lib_destroy(hythread_lib); } diff --git a/vmcore/src/class_support/Prepare.cpp b/vmcore/src/class_support/Prepare.cpp index 26745d4..129e428 100644 --- a/vmcore/src/class_support/Prepare.cpp +++ b/vmcore/src/class_support/Prepare.cpp @@ -901,7 +901,7 @@ void Class::create_vtable(unsigned n_vta unsigned vtable_size = VTABLE_OVERHEAD + n_vtable_entries * sizeof(void *); // Always allocate vtable data from vtable_data_pool - void* p_gc_hdr = allocate_vtable_data_from_pool(vtable_size); + void *p_gc_hdr = m_class_loader->VTableAlloc(vtable_size, 16, CAA_Allocate); #ifdef VM_STATS // For allocation statistics, include any rounding added to make each diff --git a/vmcore/src/class_support/classloader.cpp b/vmcore/src/class_support/classloader.cpp index 2577f89..617e623 100644 --- a/vmcore/src/class_support/classloader.cpp +++ b/vmcore/src/class_support/classloader.cpp @@ -50,6 +50,7 @@ #include #include "jarfile_util.h" #include "jni_utils.h" +#include "mem_alloc.h" unsigned ClassLoader::m_capacity = 0; unsigned ClassLoader::m_unloadedBytes = 0; @@ -88,23 +89,28 @@ bool ClassLoader::Initialize( ManagedObj if(!m_failedClasses) return false; m_javaTypes = new JavaTypes(); if(!m_javaTypes) 
return false; + + Global_Env *env = VM_Global_State::loader_env; + assert (env); + size_t code_pool_size = IsBootstrap() ? DEFAULT_BOOTSTRAP_JIT_CODE_POOL_SIZE : DEFAULT_CLASSLOADER_JIT_CODE_POOL_SIZE; + CodeMemoryManager = new PoolManager(code_pool_size, env->system_page_size, env->use_large_pages, true/*is_code*/, true/*is_resize_allowed*/); + if(!CodeMemoryManager) return false; + return true; } ClassLoader::~ClassLoader() { - apr_pool_destroy(pool); - - ManagedObject** ppc; - ReportedClasses* RepClasses = GetReportedClasses(); - ReportedClasses::iterator itc; - for (itc = RepClasses->begin(); itc != RepClasses->end(); itc++) + ClassTable::iterator it; + ClassTable* LoadedClasses = GetLoadedClasses(); + for (it = LoadedClasses->begin(); it != LoadedClasses->end(); it++) { - ppc = &itc->second; - assert(*ppc); - Class* c = jclass_to_struct_Class((jclass)ppc); + Class* c; + c = it->second; + assert(c); ClassClearInternals(c); } + if (GetLoadedClasses()) delete GetLoadedClasses(); if (GetFailedClasses()) @@ -127,6 +133,15 @@ ClassLoader::~ClassLoader() } if (m_package_table) delete m_package_table; + + for(NativeLibInfo* info = m_nativeLibraries; info;info = info->next ) { + natives_unload_library(info->handle); + } + + delete CodeMemoryManager; + CodeMemoryManager = NULL; + + apr_pool_destroy(pool); } void ClassLoader::LoadingClass::EnqueueInitiator(VM_thread* new_definer, ClassLoader* cl, const String* clsname) @@ -210,13 +225,6 @@ Class* ClassLoader::NewClass(const Globa return clss; } -ManagedObject** ClassLoader::RegisterClassInstance(const String* className, ManagedObject* instance) -{ - TRACE2("reported:newclass", "DIRECT: inserting class \"" << className->bytes - << "\" with key " << className << " and object " << instance); - return m_reportedClasses->Insert(className, instance); -} - Class* ClassLoader::DefineClass(Global_Env* env, const char* class_name, uint8* bytecode, unsigned offset, unsigned length, const String** res_name) diff --git a/vmcore/src/class_support/method.cpp b/vmcore/src/class_support/method.cpp index 710b828..c4577c1 100644 --- a/vmcore/src/class_support/method.cpp +++ b/vmcore/src/class_support/method.cpp @@ -295,7 +295,7 @@ #endif if (size == 0) { addr = NULL; } else { - addr = malloc_fixed_code_for_jit(size, alignment, heat, action); + addr = get_class()->code_alloc(size, alignment, action); } if (action == CAA_Simulate) { diff --git a/vmcore/src/class_support/method_lookup.cpp b/vmcore/src/class_support/method_lookup.cpp index 8e34da7..832217b 100644 --- a/vmcore/src/class_support/method_lookup.cpp +++ b/vmcore/src/class_support/method_lookup.cpp @@ -103,6 +103,58 @@ void Method_Lookup_Table::add(CodeChunkI p_meth_addr_table_lock->_unlock(); } //Method_Lookup_Table::add +#define USE_METHOD_LOOKUP_CACHE + +void Method_Lookup_Table::remove(CodeChunkInfo *m) +{ + void* addr = m->get_code_block_addr(); + if (addr == NULL) { + return; + } + +#ifdef USE_METHOD_LOOKUP_CACHE + // First remove from cache. 
+ for (unsigned i = 0; i < EIP_CACHE_SIZE; i++){ + if (_cache[i]){ + void *guess_start = _cache[i]->get_code_block_addr(); + void *guess_end = ((char *)_cache[i]->get_code_block_addr()) + _cache[i]->get_code_block_size(); + if ((addr >= guess_start) && (addr < guess_end)) { + _cache[i] = NULL; + } + } + } +#endif //USE_METHOD_LOOKUP_CACHE + + p_meth_addr_table_lock->_lock(); + + unsigned L = 0, R = _next_free_entry; + while (L < R) { + unsigned M = (L + R) / 2; + CodeChunkInfo *entry = _table[M]; + void *code_block_addr = entry->get_code_block_addr(); + size_t code_block_size = entry->get_code_block_size(); + void *code_end_addr = (void *)((char *)code_block_addr + code_block_size); + + if (addr < code_block_addr) { + R = M; + } else if (addr >= code_end_addr) { + // addr lies at or above this entry's end: search the upper half + L = M + 1; + } else { + // Found the entry at index M: shift the following entries one slot to the left to delete it + for (unsigned i = M; i < (_next_free_entry - 1); i++) { + _table[i] = _table[i+1]; + } + _next_free_entry--; + + p_meth_addr_table_lock->_unlock(); + return; + } + } + + p_meth_addr_table_lock->_unlock(); +} //Method_Lookup_Table::remove + void Method_Lookup_Table::append_unlocked(CodeChunkInfo *m) { @@ -156,8 +208,6 @@ unsigned Method_Lookup_Table::find_index -#define USE_METHOD_LOOKUP_CACHE - CodeChunkInfo *Method_Lookup_Table::find(void *addr, bool is_ip_past) { if (addr == NULL) { diff --git a/vmcore/src/init/vm_init.cpp b/vmcore/src/init/vm_init.cpp index 4c9f71d..09f2e90 100644 --- a/vmcore/src/init/vm_init.cpp +++ b/vmcore/src/init/vm_init.cpp @@ -331,7 +331,6 @@ static jint preload_classes(Global_Env * vm_env->strings_are_compressed = (class_lookup_field_recursive(vm_env->JavaLangString_Class, "bvalue", "[B") != NULL); vm_env->JavaLangString_VTable = vm_env->JavaLangString_Class->get_vtable(); - vm_env->JavaLangString_allocation_handle = vm_env->JavaLangString_Class->get_allocation_handle(); TRACE2("init", "preloading exceptions"); vm_env->java_lang_Throwable_Class = @@ -626,7 +625,6 @@ int vm_init1(JavaVM_Internal * java_vm, /* END: Property processing. */ // Initialize memory allocation. - vm_init_mem_alloc(); gc_init(); // TODO: change all uses of Class::heap_base to Slot::heap_base diff --git a/vmcore/src/init/vm_shutdown.cpp b/vmcore/src/init/vm_shutdown.cpp index c5efbe1..562ae04 100644 --- a/vmcore/src/init/vm_shutdown.cpp +++ b/vmcore/src/init/vm_shutdown.cpp @@ -69,7 +69,6 @@ #endif // Release global data. // TODO: move these data to VM space.
vm_uninitialize_critical_sections(); - vm_mem_dealloc(); } /** diff --git a/vmcore/src/jit/compile.cpp b/vmcore/src/jit/compile.cpp index 04b82ce..153e7e0 100644 --- a/vmcore/src/jit/compile.cpp +++ b/vmcore/src/jit/compile.cpp @@ -571,7 +571,7 @@ #endif //***** Now generate code assert(lil_is_valid(cs)); - NativeCodePtr addr = LilCodeGenerator::get_platform()->compile(cs); + NativeCodePtr addr = LilCodeGenerator::get_platform()->compile(cs, clss->get_class_loader()->GetCodePool()); #ifndef NDEBUG char buf[100]; diff --git a/vmcore/src/util/ia32/base/compile_IA32.cpp b/vmcore/src/util/ia32/base/compile_IA32.cpp index b652fac..a4abee2 100644 --- a/vmcore/src/util/ia32/base/compile_IA32.cpp +++ b/vmcore/src/util/ia32/base/compile_IA32.cpp @@ -225,8 +225,7 @@ NativeCodePtr compile_gen_compile_me(Met #ifdef VM_STATS ++VM_Statistics::get_vm_stats().num_compileme_generated; #endif - char * stub = (char *) malloc_fixed_code_for_jit(STUB_SIZE, - DEFAULT_CODE_ALIGNMENT, CODE_BLOCK_HEAT_DEFAULT, CAA_Allocate); + char * stub = (char *) method_get_class(method)->code_alloc(STUB_SIZE, DEFAULT_CODE_ALIGNMENT, CAA_Allocate); NativeCodePtr addr = stub; #ifndef NDEBUG memset(stub, 0xcc /*int 3*/, STUB_SIZE); diff --git a/vmcore/src/util/mem_alloc.cpp b/vmcore/src/util/mem_alloc.cpp index ca03e16..a1e0f30 100644 --- a/vmcore/src/util/mem_alloc.cpp +++ b/vmcore/src/util/mem_alloc.cpp @@ -6,6 +6,10 @@ * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * + + + + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software @@ -15,7 +19,7 @@ * limitations under the License. */ /** - * @author Intel, Alexei Fedotov + * @author Intel, Aleksey Ignatenko, Alexei Fedotov * @version $Revision: 1.1.2.1.4.3 $ */ @@ -24,7 +28,6 @@ #define LOG_DOMAIN "vm.core" #include "cxxlog.h" #include -#include #include "environment.h" #include "nogc.h" @@ -33,114 +36,123 @@ #include "mem_alloc.h" #include "vm_stats.h" #include "port_malloc.h" -static const unsigned default_data_pool_size = 512*1024; - -static Byte *vtable_pool_start = NULL; -static size_t default_initial_code_pool_size = 1024*1024; - -unsigned system_page_size = 0; -unsigned page_size_for_allocation = 0; -size_t initial_code_pool_size = 0; - -Pool_Descriptor* jit_code_pool = NULL; -Pool_Descriptor* vtable_data_pool = NULL; +//////////////////////////////////////////////////////////// +// memory allocation for stub code -static apr_pool_t* aux_pool; -static apr_thread_mutex_t* aux_mutex; -static apr_thread_mutex_t* vtable_mutex; -static apr_thread_mutex_t* jit_code_mutex; +void *malloc_fixed_code_for_jit(size_t size, size_t alignment, unsigned heat, Code_Allocation_Action action) +{ + return VM_Global_State::loader_env->GlobalCodeMemoryManager->alloc(size, alignment, action); +} //malloc_fixed_code_for_jit -// this vector is used to store ptrs of allocated memory to free it on exit -static std::vector m_allocated_memory_ptrs; +//////////////////////////////////////////////////////////////////////////// +////////////////////// PoolManager ////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////// +Byte* PoolManager::get_pool_base() +{ + return vtable_pool_start; +} -static size_t round_up_to_page_size_multiple(size_t size, size_t page_size) { - return ((size + page_size - 1) / page_size) * page_size; -}
//round_up_to_page_size_multiple + return ((size + _page_size - 1) / _page_size) * _page_size; +} +void PoolManager::_lock() +{ + VERIFY(APR_SUCCESS == apr_thread_mutex_lock(aux_mutex), \ + "Cannot lock the pool's mutex"); +} + +void PoolManager::_unlock() +{ + VERIFY(APR_SUCCESS == apr_thread_mutex_unlock(aux_mutex), \ + "Cannot unlock the pool's mutex"); +} +PoolManager::PoolManager(size_t initial_size, size_t page_size, bool use_large_pages, bool is_code, bool is_resize_allowed) : +_page_size(page_size), _use_large_pages(use_large_pages), _default_pool_size(initial_size), _is_code(is_code), _is_resize_allowed(is_resize_allowed) + { + VERIFY(APR_SUCCESS == apr_pool_create(&aux_pool, 0), \ + "Cannot initialize a memory pool"); + VERIFY(APR_SUCCESS == apr_thread_mutex_create(&aux_mutex, APR_THREAD_MUTEX_NESTED, aux_pool), \ + "Cannot initialize pool reallocation mutex"); + + _active_pool = allocate_pool_storage(_default_pool_size); + _passive_pool = NULL; + vtable_pool_start = _active_pool->_begin; + +#ifdef VM_STATS + VM_Statistics::get_vm_stats().number_memorymanager_created++; +#endif +} -static void allocate_pool_storage(Pool_Descriptor *p_pool, size_t size, size_t page_size) +PoolManager::~PoolManager() { - bool is_code = p_pool->is_code; - void *pool_storage = NULL; - - size = round_up_to_page_size_multiple(size, page_size); - unsigned int mem_protection = PORT_VMEM_MODE_READ | PORT_VMEM_MODE_WRITE; - if (is_code) { - mem_protection |= PORT_VMEM_MODE_EXECUTE; - } - size_t ps = (!is_code && VM_Global_State::loader_env->use_large_pages) ? - PORT_VMEM_PAGESIZE_LARGE : PORT_VMEM_PAGESIZE_DEFAULT; + PoolDescriptor* pDesc = NULL; - VERIFY(APR_SUCCESS == apr_thread_mutex_lock(aux_mutex), \ - "Cannot lock the pool reallocation mutex"); - apr_status_t status = port_vmem_reserve(&p_pool->descriptor, &pool_storage, - size, mem_protection, ps, aux_pool); - VERIFY(APR_SUCCESS == apr_thread_mutex_unlock(aux_mutex), \ - "Cannot unlock the pool reallocation mutex"); - if (APR_SUCCESS == status) { - status = port_vmem_commit(&pool_storage, size, p_pool->descriptor); + while (_passive_pool) + { + pDesc = _passive_pool; + port_vmem_release(pDesc->_descriptor); + _passive_pool = _passive_pool->_next; } - if (status != APR_SUCCESS || pool_storage == NULL) { - DIE("Cannot allocate pool storage: " << (void *)size - << " bytes of virtual memory for code or data.\n" - "Error code = " << status); - } - -#ifdef VM_STATS - p_pool->num_pool_allocations++; - p_pool->total_pool_size += size; - if (is_code) { - VM_Statistics::get_vm_stats().codemgr_total_code_pool_size += size; - } else { - VM_Statistics::get_vm_stats().codemgr_total_data_pool_size += size; + while (_active_pool) + { + pDesc = _active_pool; + port_vmem_release(pDesc->_descriptor); + _active_pool = _active_pool->_next; } -#endif //VM_STATS - - p_pool->start = (Byte*)pool_storage; - p_pool->end = ((Byte*)(pool_storage) + size); - m_allocated_memory_ptrs.push_back(p_pool->descriptor); -} //allocate_pool_storage - + VERIFY(APR_SUCCESS == apr_thread_mutex_destroy(aux_mutex), \ + "Cannot destroy the mutex"); + apr_pool_destroy(aux_pool); +} -static void init_pool(Pool_Descriptor *p_pool, size_t page_size, size_t init_size, bool is_code) +PoolDescriptor* PoolManager::allocate_pool_storage(size_t size) { - p_pool->default_size = (size_t)(round_up_to_page_size_multiple(init_size, page_size) + 0.5); - p_pool->is_code = is_code; + PoolDescriptor* pDesc = (PoolDescriptor*) apr_palloc(aux_pool, sizeof(PoolDescriptor)); + memset(pDesc, 0, 
sizeof(PoolDescriptor)); + + void *pool_storage = NULL; + size = round_up_to_page_size_multiple(size); + pDesc->_size = size; + unsigned int mem_protection = PORT_VMEM_MODE_READ | PORT_VMEM_MODE_WRITE; + if (_is_code) { + mem_protection |= PORT_VMEM_MODE_EXECUTE; + } + size_t ps = (!_is_code && _use_large_pages) ? + PORT_VMEM_PAGESIZE_LARGE : PORT_VMEM_PAGESIZE_DEFAULT; + + apr_status_t status = port_vmem_reserve(&pDesc->_descriptor, &pool_storage, + size, mem_protection, ps, aux_pool); + if (status != APR_SUCCESS) { + DIE("Cannot allocate pool storage: " << (void *)size + << " bytes of virtual memory for code or data.\n" + "Error code = " << status); + } + + status = port_vmem_commit(&pool_storage, size, pDesc->_descriptor); + if (status != APR_SUCCESS || pool_storage == NULL) { + DIE("Cannot allocate pool storage: " << (void *)size + << " bytes of virtual memory for code or data.\n" + "Error code = " << status); + } + #ifdef VM_STATS - p_pool->num_allocations = 0; - p_pool->num_pool_allocations = 0; - p_pool->total_pool_size = 0; - p_pool->total_size_allocated = 0; - p_pool->num_resizes = 0; - p_pool->current_alloc_size = p_pool->default_size; -#endif //VM_STATS -} //init_pool - - - -static void init_pools(size_t page_size) -{ - jit_code_pool = new Pool_Descriptor; - init_pool(jit_code_pool, page_size, initial_code_pool_size, /*is_code*/ true); - allocate_pool_storage(jit_code_pool, jit_code_pool->default_size, page_size); - - vtable_data_pool = new Pool_Descriptor; - // 20040511: The vtable pool must be bigger for jAppServer for compresses vtable pointers (can't be resized) - unsigned size = (vm_vtable_pointers_are_compressed() ? 8*1024*1024 : default_data_pool_size); - init_pool(vtable_data_pool, page_size, size, /*is_code*/ false); - allocate_pool_storage(vtable_data_pool, vtable_data_pool->default_size, page_size); -} //init_pools - - + VM_Statistics::get_vm_stats().number_memoryblock_allocations++; + VM_Statistics::get_vm_stats().total_memory_allocated += size; +#endif + + pDesc->_begin = (Byte*)pool_storage; + pDesc->_end = ((Byte*)(pool_storage) + size); + + return pDesc; +} -static void *allocate_from_pool(Pool_Descriptor *p_pool, size_t size, size_t alignment, size_t page_size, - bool is_resize_allowed, Code_Allocation_Action action) +void* PoolManager::alloc(size_t size, size_t alignment, Code_Allocation_Action action) { // Make sure alignment is a power of 2. assert((alignment & (alignment-1)) == 0); @@ -149,165 +161,86 @@ static void *allocate_from_pool(Pool_Des // align the requested size size = (size + mask) & ~mask; - Byte *pool_start = p_pool->start; // (misnamed) this actually points to the next free byte in the pool - pool_start = (Byte *) ((POINTER_SIZE_INT)(pool_start + mask) & ~(POINTER_SIZE_INT)mask); - Byte *pool_end = p_pool->end; - - size_t mem_left_in_pool = (pool_end - pool_start); - if (size > mem_left_in_pool) { - if (action == CAA_Simulate) { - // Return NULL if we're simulating the allocation and it would have caused a pool resize. 
- return NULL; - } - if (!is_resize_allowed && pool_start != NULL) { - DIE("Error: Resizing of the memory pool is not allowed.\n"); + // CAA_Simulate support: allocate nothing, just return the current aligned address + if (action == CAA_Simulate) + size = 0; + + _lock(); + + assert(_active_pool); + Byte *pool_start = _active_pool->_begin; + pool_start = (Byte *) ((POINTER_SIZE_INT)(pool_start + mask) & ~(POINTER_SIZE_INT)mask); + Byte *pool_end = _active_pool->_end; + + size_t mem_left_in_pool = (pool_end - pool_start); + while (size > mem_left_in_pool) { + if (!_is_resize_allowed) { + DIE("Error: VTable pool overflow, resize is not allowed. Please extend the VTable pool size.\n"); + // TODO: for the (is_resize_allowed == false) case, reserve a larger region up front + // and commit additional pieces of it on demand + } + + // memory utilization logic: + // switch to the next pool in the active list only if the request would consume + // less than 1/MEMORY_UTILIZATION_LIMIT of that pool's size + PoolDescriptor* pDesc = _active_pool->_next; + if (pDesc) + { + if ((size + mask)*MEMORY_UTILIZATION_LIMIT < (POINTER_SIZE_INT)(pDesc->_size)) + { + _active_pool->_next = _passive_pool; + _passive_pool = _active_pool; + _active_pool = pDesc; + + pool_start = _active_pool->_begin; + pool_start = (Byte *) ((POINTER_SIZE_INT)(pool_start + mask) & ~(POINTER_SIZE_INT)mask); + + break; + } } - assert(p_pool->default_size); - size_t new_pool_size = ((size > p_pool->default_size)? size : p_pool->default_size); + + assert(_default_pool_size); + size_t new_pool_size = ((size > _default_pool_size)? size : _default_pool_size); new_pool_size += mask; -#ifdef VM_STATS - p_pool->num_resizes++; - p_pool->current_alloc_size = new_pool_size; -#endif //VM_STATS - allocate_pool_storage(p_pool, new_pool_size, page_size); - pool_start = p_pool->start; + PoolDescriptor* p_pool = allocate_pool_storage(new_pool_size); + assert (p_pool); + + // memory utilization logic: + // keep the old pool in the active list only if more than 1/MEMORY_UTILIZATION_LIMIT of it is still free + if ((mem_left_in_pool * MEMORY_UTILIZATION_LIMIT) > _active_pool->_size) //put pool in _active_pool list + { + p_pool->_next = _active_pool; + _active_pool = p_pool; + } + else // put in _passive_pool list + { + p_pool->_next = _active_pool->_next; + _active_pool->_next = _passive_pool; + _passive_pool = _active_pool; + _active_pool = p_pool; + } + + pool_start = p_pool->_begin; pool_start = (Byte *) ((POINTER_SIZE_INT)(pool_start + mask) & ~(POINTER_SIZE_INT)mask); - pool_end = p_pool->end; - } + break; + } void *p = pool_start; - if (action != CAA_Simulate) { - // Don't update the pool start pointer if we're only simulating allocation. - p_pool->start = pool_start + size; - } -#ifdef VM_STATS - p_pool->num_allocations++; - p_pool->total_size_allocated += size; - if (p_pool->is_code) { - VM_Statistics::get_vm_stats().codemgr_total_code_allocated += size; - } else { - VM_Statistics::get_vm_stats().codemgr_total_data_allocated += size; - } -#endif //VM_STATS + _active_pool->_begin += size; + + _unlock(); + + #ifdef VM_STATS + VM_Statistics::get_vm_stats().total_memory_used += size; +#endif + return p; -} //allocate_from_pool - - -////////////////////////////////////////////////////////////////////////////////////////////// -// Beginning of publicly exported functions.
-////////////////////////////////////////////////////////////////////////////////////////////// - -//////////////////////////////////////////////////////////// -// begin allocating memory for code - -void *malloc_fixed_code_for_jit(size_t size, size_t alignment, unsigned heat, Code_Allocation_Action action) -{ - vm_init_mem_alloc(); - assert (jit_code_pool); - VERIFY(APR_SUCCESS == apr_thread_mutex_lock(jit_code_mutex), \ - "Cannot lock the jit mutex"); - void *p = allocate_from_pool(jit_code_pool, size, alignment, page_size_for_allocation, /*is_resize_allowed*/ true, action); - VERIFY(APR_SUCCESS == apr_thread_mutex_unlock(jit_code_mutex), \ - "Cannot unlock the jit mutex"); - return p; -} //malloc_fixed_code_for_jit - - -// end allocating memory for code -//////////////////////////////////////////////////////////// - - - -//////////////////////////////////////////////////////////////////////////////////////////////// -// -// begin memory allocation for class-related data structures such as class statics and vtables. - -void *allocate_vtable_data_from_pool(size_t size) -{ - bool is_resize_allowed = true; - if (vm_vtable_pointers_are_compressed()) { - is_resize_allowed = false; - } - assert (vtable_data_pool); - VERIFY(APR_SUCCESS == apr_thread_mutex_lock(vtable_mutex), \ - "Cannot lock the vtable mutex"); - void *p = allocate_from_pool(vtable_data_pool, size, 16, page_size_for_allocation, is_resize_allowed, CAA_Allocate); - VERIFY(APR_SUCCESS == apr_thread_mutex_unlock(vtable_mutex), \ - "Cannot unlock the vtable mutex"); - return p; -} //allocate_class_data_from_area - -// end allocating memory for data -// -//////////////////////////////////////////////////////////////////////////////////////////////// - - -void vm_init_mem_alloc() -{ - static int initialized = false; - if (initialized) { - return; - } - initialized = true; - - VERIFY(APR_SUCCESS == apr_pool_create(&aux_pool, 0), \ - "Cannot initialize a memory pool"); - VERIFY(APR_SUCCESS == apr_thread_mutex_create(&aux_mutex, APR_THREAD_MUTEX_NESTED, aux_pool), \ - "Cannot initialize pool reallocation mutex"); - VERIFY(APR_SUCCESS == apr_thread_mutex_create(&jit_code_mutex, APR_THREAD_MUTEX_NESTED, aux_pool), \ - "Cannot initialize jit table mutex"); - VERIFY(APR_SUCCESS == apr_thread_mutex_create(&vtable_mutex, APR_THREAD_MUTEX_NESTED, aux_pool), \ - "Cannot initialize vtable mutex"); - - size_t *ps = port_vmem_page_sizes(); - if (ps[1] != 0 && VM_Global_State::loader_env->use_large_pages) { - page_size_for_allocation = ps[1]; - } - else { - page_size_for_allocation = ps[0]; - } - - default_initial_code_pool_size = round_up_to_page_size_multiple(default_initial_code_pool_size, page_size_for_allocation); - initial_code_pool_size = default_initial_code_pool_size; - assert(initial_code_pool_size); - -#ifdef VM_STATS - VM_Statistics::get_vm_stats().codemgr_total_code_pool_size = 0; - VM_Statistics::get_vm_stats().codemgr_total_code_allocated = 0; - VM_Statistics::get_vm_stats().codemgr_total_data_pool_size = 0; - VM_Statistics::get_vm_stats().codemgr_total_data_allocated = 0; -#endif //VM_STATS - - init_pools(page_size_for_allocation); - vtable_pool_start = vtable_data_pool->start; -} //vm_init_mem_alloc - - -void vm_mem_dealloc() -{ - delete vtable_data_pool; - vtable_data_pool = NULL; - delete jit_code_pool; - jit_code_pool = NULL; - std::vector::iterator it; - for (it = m_allocated_memory_ptrs.begin(); it != m_allocated_memory_ptrs.end(); it++) - { - port_vmem_release(*it); - } - VERIFY(APR_SUCCESS == 
apr_thread_mutex_destroy(aux_mutex), \ - "Cannot destroy the mutex"); - VERIFY(APR_SUCCESS == apr_thread_mutex_destroy(jit_code_mutex), \ - "Cannot destroy the mutex"); - VERIFY(APR_SUCCESS == apr_thread_mutex_destroy(vtable_mutex), \ - "Cannot destroy the mutex"); - apr_pool_destroy(aux_pool); -} - - + } + POINTER_SIZE_INT vm_get_vtable_base() { - Byte *base = vtable_pool_start; - assert (base); // Subtract a small number (like 1) from the real base so that // no valid vtable offsets will ever be 0. - return (POINTER_SIZE_INT) (base - 8); + assert (VM_Global_State::loader_env->VTableMemoryManager); + assert (VM_Global_State::loader_env->VTableMemoryManager->get_pool_base()); + return (POINTER_SIZE_INT) (VM_Global_State::loader_env->VTableMemoryManager->get_pool_base() - 1); } //vm_get_vtable_base diff --git a/vmcore/src/util/vm_stats.cpp b/vmcore/src/util/vm_stats.cpp index 09a99b2..67f6753 100644 --- a/vmcore/src/util/vm_stats.cpp +++ b/vmcore/src/util/vm_stats.cpp @@ -299,6 +299,10 @@ #endif num_compileme_generated = 0; num_compileme_used = 0; + number_memoryblock_allocations = 0; + total_memory_allocated = 0; + total_memory_used = 0; + number_memorymanager_created = 0; num_statics_allocations = 0; num_nonempty_statics_allocations = 0; @@ -902,33 +906,11 @@ #endif printf("\n"); printf("Use_large_pages = %s\n", (VM_Global_State::loader_env->use_large_pages? "yes" : "no")); - printf("%11d ::::system_page_size\n", system_page_size); - printf("%11d ::::page_size_for_allocation\n", page_size_for_allocation); - printf("%11lu ::::init_pool_size\n", (unsigned long)initial_code_pool_size); - printf("%11" FMT64 "u ::::total_code_pool_size\n", codemgr_total_code_pool_size); - printf("%11" FMT64 "u ::::total_code_allocated\n", codemgr_total_code_allocated); - printf("%11" FMT64 "u ::::total_data_pool_size\n", codemgr_total_data_pool_size); - printf("%11" FMT64 "u ::::total_data_allocated\n", codemgr_total_data_allocated); -#ifdef VM_STATS - { // print jit_code_pool stats - printf(" jit code pool\n"); - printf("%11" FMT64 "u :::: num_allocations\n", jit_code_pool->num_allocations); - printf("%11" FMT64 "u :::: total_size_allocated\n", uint64(jit_code_pool->total_size_allocated)); - printf("%11" FMT64 "u :::: num_pool_allocations\n", jit_code_pool->num_pool_allocations); - printf("%11" FMT64 "u :::: total_pool_size\n", uint64(jit_code_pool->total_pool_size)); - printf("%11" FMT64 "u :::: num_resizes\n", jit_code_pool->num_resizes); - printf("%11" FMT64 "u :::: current_alloc_size\n", uint64(jit_code_pool->current_alloc_size)); - } - { // print vtable_data_pool stats - printf(" vtable data pool\n"); - printf("%11" FMT64 "u :::: num_allocations\n", vtable_data_pool->num_allocations); - printf("%11" FMT64 "u :::: total_size_allocated\n", uint64(vtable_data_pool->total_size_allocated)); - printf("%11" FMT64 "u :::: num_pool_allocations\n", vtable_data_pool->num_pool_allocations); - printf("%11" FMT64 "u :::: total_pool_size\n", uint64(vtable_data_pool->total_pool_size)); - printf("%11" FMT64 "u :::: num_resizes\n", vtable_data_pool->num_resizes); - printf("%11" FMT64 "u :::: current_alloc_size\n", uint64(vtable_data_pool->current_alloc_size)); - } - + printf("%11" FMT64 "u ::::number_memoryblock_allocations\n", number_memoryblock_allocations); + printf("%11" FMT64 "u ::::total_memory_allocated\n", total_memory_allocated); + printf("%11" FMT64 "u ::::total_memory_used\n", total_memory_used); + printf("%11" FMT64 "u ::::number_memorymanager_created\n", number_memorymanager_created); +#ifdef VM_STATS 
fflush(stdout); print_rt_function_stats();
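Taken together, the mem_alloc.cpp changes replace the two global Pool_Descriptor singletons with per-loader PoolManager instances. A simplified, single-threaded model of the allocation policy follows; this is an illustration only, not the VM code (the real class adds the aux_mutex lock, APR-backed reserve/commit, alignment, and CAA_Simulate handling):

#include <cassert>
#include <cstddef>

static const size_t MEMORY_UTILIZATION_LIMIT = 15;

struct Pool {
    char*  begin;   // next free byte in the chunk
    char*  end;     // end of the chunk
    size_t size;    // total chunk size
    Pool*  next;
};

// A pool is kept on the active list only while more than
// 1/MEMORY_UTILIZATION_LIMIT of it is still free; otherwise it is
// retired to the passive list and never allocated from again.
static bool still_active(const Pool* p) {
    size_t left = (size_t)(p->end - p->begin);
    return left * MEMORY_UTILIZATION_LIMIT > p->size;
}

// Bump-pointer allocation from the head pool of the active list.
static void* pool_alloc(Pool* active, size_t size) {
    assert(active);
    size_t left = (size_t)(active->end - active->begin);
    if (size > left)
        return 0;   // the real code maps a fresh chunk here and retires
                    // the old pool when still_active() reports it too full
    void* p = active->begin;
    active->begin += size;
    return p;
}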