Index: vm/include/open/types.h =================================================================== --- vm/include/open/types.h (revision 468306) +++ vm/include/open/types.h (working copy) @@ -203,7 +203,6 @@ typedef struct Method_Signature *Method_Signature_Handle; typedef struct TypeDesc *Type_Info_Handle; typedef POINTER_SIZE_INT Allocation_Handle; -typedef POINTER_SIZE_INT Runtime_Type_Handle; typedef void* NativeCodePtr; typedef struct ClassLoader* ClassLoaderHandle; typedef struct ManagedObject* ManagedPointer; Index: vm/include/open/vm.h =================================================================== --- vm/include/open/vm.h (revision 468306) +++ vm/include/open/vm.h (working copy) @@ -119,10 +119,6 @@ // routines, given a class handle. VMEXPORT Allocation_Handle class_get_allocation_handle(Class_Handle ch); -// Returns the allocation handle to be used for runtime type checks in -// JIT-generated code, given a class handle. -VMEXPORT Runtime_Type_Handle class_get_runtime_type_handle(Class_Handle ch); - // Returns the class handle corresponding to a given allocation handle. VMEXPORT Class_Handle allocation_handle_get_class(Allocation_Handle ah); @@ -195,6 +191,9 @@ // Returns TRUE if all instances of this class are pinned. VMEXPORT Boolean class_is_pinned(Class_Handle ch); +// Allocates size bytes from the pool of the class loader that defined the given class. +VMEXPORT void* class_alloc_via_classloader(Class_Handle ch, int32 size); + // Returns TRUE if this is an array of primitives. VMEXPORT Boolean class_is_non_ref_array(Class_Handle ch); @@ -676,7 +675,7 @@ // information in each object's header. This is typically used // by the JIT for generating type-checking code, e.g. for inlined // type checks or for inlining of virtual methods. -VMEXPORT unsigned vm_get_runtime_type_handle_width(); +VMEXPORT unsigned vm_get_vtable_ptr_size(); // Returns a printable signature. The character buffer is owned by the // caller. Call free_string_buffer to reclaim the memory. Index: vm/include/jit_import.h =================================================================== --- vm/include/jit_import.h (revision 468306) +++ vm/include/jit_import.h (working copy) @@ -245,13 +245,10 @@ // heat values. The JIT is responsible for specifying ids that are unique // within the same method. // The first instruction of the chunk with id=0 is the entry point of the method. -// If the CAA_Allocate argument is specified, memory is allocated and a pointer +// DEPRECATED: If the CAA_Allocate argument is specified, memory is allocated and a pointer // to it is returned. If the CAA_Simulate argument is specified, no memory is -// actually allocated and the VM returns an address that would have been -// allocated if CAA_Allocate was specified and all the other arguments were -// the same. The VM may return NULL when CAA_Simulate is specified. This may -// for instance happen if multiple heat values were mapped to the same code -// pool or if the specified size would require a new code pool. +// allocated (the same as passing size = 0); the function only returns the current +// allocation address in the pool. VMEXPORT Byte * method_allocate_code_block(Method_Handle m, JIT_Handle j, Index: vm/include/jit_import_rt.h =================================================================== --- vm/include/jit_import_rt.h (revision 468306) +++ vm/include/jit_import_rt.h (working copy) @@ -46,6 +46,7 @@ // collection. 
VMEXPORT void vm_enumerate_root_reference(Managed_Object_Handle *ref, Boolean is_pinned); +VMEXPORT void vm_enumerate_weakroot_reference(Managed_Object_Handle *ref, Boolean is_pinned); // Resembles vm_enumerate_root_reference() but is passed the address of // a slot containing a compressed reference. VMEXPORT void vm_enumerate_compressed_root_reference(uint32 *ref, Boolean is_pinned); Index: vm/port/include/lil_code_generator.h =================================================================== --- vm/port/include/lil_code_generator.h (revision 468306) +++ vm/port/include/lil_code_generator.h (working copy) @@ -25,6 +25,8 @@ #include "lil.h" #include "vm_core_types.h" +#include "environment.h" +#include "mem_alloc.h" // This is an abstract base case for LIL code generators // Subclasses compile LIL into native code for a particular @@ -44,7 +46,8 @@ // The stub_name is for vtune support // Dump an ascii version of the compiled stub to stdout if dump_stub // If cs_stats is nonnull add the number of bytes of the compiled code to *cs_stats - NativeCodePtr compile(LilCodeStub* cs); + NativeCodePtr compile(LilCodeStub* cs, PoolManager* code_pool = + VM_Global_State::loader_env->GlobalCodeMemoryManager); protected: LilCodeGenerator(); @@ -52,7 +55,7 @@ // allocates a chunk of memory for a LIL stub; the user-provided function // compile_main() should call this function instead of allocating memory // directly. - NativeCodePtr allocate_memory(size_t); + NativeCodePtr allocate_memory(size_t, PoolManager*); // generates compiled code for a LIL stub, and returns its address. The // size of the compiled stub is placed in stub_size. Called by the @@ -61,7 +64,7 @@ // Each subclass of LilCodeGenerator should provide a platform-dependent // implementation of compile_main(). The memory area that holds the // compiled code should be allocated by calling allocate_memory(). 
- virtual NativeCodePtr compile_main(LilCodeStub* cs, size_t* stub_size) = 0; + virtual NativeCodePtr compile_main(LilCodeStub* cs, size_t* stub_size, PoolManager* code_pool) = 0; }; #endif // _LIL_CODE_GENERATOR_H_ Index: vm/port/src/lil/em64t/pim/lil_code_generator_em64t.cpp =================================================================== --- vm/port/src/lil/em64t/pim/lil_code_generator_em64t.cpp (revision 468306) +++ vm/port/src/lil/em64t/pim/lil_code_generator_em64t.cpp (working copy) @@ -1622,7 +1622,7 @@ LilCodeGeneratorEM64T::LilCodeGeneratorEM64T(): LilCodeGenerator() {} -NativeCodePtr LilCodeGeneratorEM64T::compile_main(LilCodeStub * cs, size_t * stub_size) { +NativeCodePtr LilCodeGeneratorEM64T::compile_main(LilCodeStub * cs, size_t * stub_size, PoolManager* code_pool) { // start a memory manager tl::MemoryPool m; // get context @@ -1631,7 +1631,7 @@ LcgEM64TCodeGen codegen(cs, *context, m); // copy generated code to the destination *stub_size = codegen.get_size(); - NativeCodePtr buffer = allocate_memory(*stub_size); + NativeCodePtr buffer = allocate_memory(*stub_size, code_pool); codegen.copy_stub(buffer); return buffer; Index: vm/port/src/lil/em64t/pim/include/lil_code_generator_em64t.h =================================================================== --- vm/port/src/lil/em64t/pim/include/lil_code_generator_em64t.h (revision 468306) +++ vm/port/src/lil/em64t/pim/include/lil_code_generator_em64t.h (working copy) @@ -609,7 +609,7 @@ LilCodeGeneratorEM64T(); protected: - NativeCodePtr compile_main(LilCodeStub* , size_t*); + NativeCodePtr compile_main(LilCodeStub* , size_t*, PoolManager*); }; #endif // _LIL_CODE_GENERATOR_EM64T_ Index: vm/port/src/lil/lil_code_generator.cpp =================================================================== --- vm/port/src/lil/lil_code_generator.cpp (revision 468306) +++ vm/port/src/lil/lil_code_generator.cpp (working copy) @@ -54,10 +54,11 @@ { } -NativeCodePtr LilCodeGenerator::compile(LilCodeStub* cs) +NativeCodePtr LilCodeGenerator::compile(LilCodeStub* cs, PoolManager* code_pool) { + assert (code_pool); size_t stub_size; - NativeCodePtr stub = compile_main(cs, &stub_size); + NativeCodePtr stub = compile_main(cs, &stub_size, code_pool); lil_cs_set_code_size(cs, stub_size); compile_add_dynamic_generated_code_chunk("unknown", stub, stub_size); @@ -69,9 +70,10 @@ } -NativeCodePtr LilCodeGenerator::allocate_memory(size_t size) +NativeCodePtr LilCodeGenerator::allocate_memory(size_t size, PoolManager* code_pool) { - NativeCodePtr buf = malloc_fixed_code_for_jit(size, DEFAULT_CODE_ALIGNMENT, CODE_BLOCK_HEAT_DEFAULT, CAA_Allocate); + assert(code_pool); + NativeCodePtr buf = code_pool->alloc(size, DEFAULT_CODE_ALIGNMENT, CAA_Allocate); // Check for 16-byte alignment assert((((POINTER_SIZE_INT)buf)&15)==0); Index: vm/port/src/lil/ia32/pim/lil_code_generator_ia32.cpp =================================================================== --- vm/port/src/lil/ia32/pim/lil_code_generator_ia32.cpp (revision 468306) +++ vm/port/src/lil/ia32/pim/lil_code_generator_ia32.cpp (working copy) @@ -1461,12 +1461,12 @@ } -NativeCodePtr LilCodeGeneratorIa32::compile_main(LilCodeStub* cs, size_t* stub_size) +NativeCodePtr LilCodeGeneratorIa32::compile_main(LilCodeStub* cs, size_t* stub_size, PoolManager* code_pool) { LcgIa32PrePassInfo* data; tl::MemoryPool mem; size_t size = pre_pass(cs, &mem, &data); - NativeCodePtr buf = allocate_memory(size); + NativeCodePtr buf = allocate_memory(size, code_pool); main_pass(cs, &mem, buf, data, stub_size); return buf; } Index: 
vm/port/src/lil/ia32/pim/include/lil_code_generator_ia32.h =================================================================== --- vm/port/src/lil/ia32/pim/include/lil_code_generator_ia32.h (revision 468306) +++ vm/port/src/lil/ia32/pim/include/lil_code_generator_ia32.h (working copy) @@ -32,7 +32,7 @@ LilCodeGeneratorIa32(); protected: - NativeCodePtr compile_main(LilCodeStub* , size_t*); + NativeCodePtr compile_main(LilCodeStub* , size_t*, PoolManager*); }; #endif // _LIL_CODE_GENERATOR_IA32_ Index: vm/port/src/lil/ipf/pim/lil_code_generator_ipf.cpp =================================================================== --- vm/port/src/lil/ipf/pim/lil_code_generator_ipf.cpp (revision 468306) +++ vm/port/src/lil/ipf/pim/lil_code_generator_ipf.cpp (working copy) @@ -2113,7 +2113,7 @@ { } -NativeCodePtr LilCodeGeneratorIpf::compile_main(LilCodeStub* cs, size_t* stub_size) { +NativeCodePtr LilCodeGeneratorIpf::compile_main(LilCodeStub* cs, size_t* stub_size, PoolManager* code_pool) { // start a memory manager tl::MemoryPool m; @@ -2131,7 +2131,7 @@ // get the goodies from the emitter emitter.flush_buffer(); *stub_size = emitter.get_size(); - NativeCodePtr buffer = allocate_memory(*stub_size); + NativeCodePtr buffer = allocate_memory(*stub_size, code_pool); emitter.copy((char*)buffer); flush_hw_cache((Byte*)buffer, *stub_size); sync_i_cache(); Index: vm/port/src/lil/ipf/pim/include/lil_code_generator_ipf.h =================================================================== --- vm/port/src/lil/ipf/pim/include/lil_code_generator_ipf.h (revision 468306) +++ vm/port/src/lil/ipf/pim/include/lil_code_generator_ipf.h (working copy) @@ -37,7 +37,7 @@ LilCodeGeneratorIpf(); protected: - NativeCodePtr compile_main(LilCodeStub* , size_t*); + NativeCodePtr compile_main(LilCodeStub* , size_t*, PoolManager*); }; #endif // _LIL_CODE_GENERATOR_IPF_ Index: vm/vmcore/include/nogc.h =================================================================== --- vm/vmcore/include/nogc.h (revision 468306) +++ vm/vmcore/include/nogc.h (working copy) @@ -36,13 +36,6 @@ #define DEFAULT_CODE_ALIGNMENT 16 #endif // !_IPF_ -void vm_init_mem_alloc(); void *malloc_fixed_code_for_jit(size_t size, size_t alignment, unsigned heat, Code_Allocation_Action action); -// Allocate memory for vtable data. -void *allocate_vtable_data_from_pool(size_t size); - -// deallocate memory when finished -void vm_mem_dealloc(); - #endif // _NOGC_H_ Index: vm/vmcore/include/mem_alloc.h =================================================================== --- vm/vmcore/include/mem_alloc.h (revision 468306) +++ vm/vmcore/include/mem_alloc.h (working copy) @@ -15,39 +15,70 @@ * limitations under the License. 
*/ /** - * @author Intel, Alexei Fedotov + * @author Intel, Aleksey Ignatenko, Alexei Fedotov * @version $Revision: 1.1.2.1.4.3 $ */ #ifndef _MEM_ALLOC_H_ #define _MEM_ALLOC_H_ +#include "jit_import.h" #include "port_vmem.h" -typedef struct Pool_Descriptor { - Byte *start; // (misnamed) points to the next free byte in the pool - Byte *end; // end of the pool's memory region - size_t default_size; - bool is_code; - bool optimize_for_trace_cache; -#ifdef VM_STATS - uint64 num_allocations; - uint64 num_pool_allocations; - size_t total_pool_size; - size_t total_size_allocated; - uint64 num_resizes; - size_t current_alloc_size; -#endif //VM_STATS - port_vmem_t *descriptor; -} Pool_Descriptor; +#define DEFAULT_COMMOT_JIT_CODE_POOL_SIZE 32*1024 // pool is used for common stub code +#define DEFAULT_COMMOT_VTABLE_POOL_SIZE_NO_RESIZE 8*1024*1024 // used for compressed VTable pointers +#define DEFAULT_CLASSLOADER_VTABLE_POOL_SIZE 32*1024 +#define DEFAULT_CLASSLOADER_JIT_CODE_POOL_SIZE 64*1024 +#define DEFAULT_BOOTSTRAP_JIT_CODE_POOL_SIZE 256*1024 +#define DEFAULT_BOOTSTRAP_VTABLE_POOL_SIZE 128*1024 +#define MEMORY_UTILIZATION_LIMIT 15 -extern Pool_Descriptor* jit_code_pool; -extern Pool_Descriptor* vtable_data_pool; +typedef struct PoolDescriptor { + Byte *_begin; // next free byte in memory chunk + Byte *_end; // end of memory chunk + size_t _size; // size of memory chunk + port_vmem_t* _descriptor; // for further memory deallocation + PoolDescriptor* _next; +} PoolDescriptor; -extern unsigned system_page_size; -extern unsigned page_size_for_allocation; -extern size_t initial_code_pool_size; +// PoolManager maintains two lists of PoolDescriptors (memory chunks): an active list and a passive list. +// Active chunks can be reused for future allocations; passive chunks cannot. The split is governed by +// MEMORY_UTILIZATION_LIMIT: a chunk is kept on the active list if its free space is more than +// MEMORY_UTILIZATION_LIMIT% of its initial size, otherwise it is moved to the passive list. +// Reuse of active chunks works as follows: when the current chunk cannot satisfy a request and the +// requested size is less than MEMORY_UTILIZATION_LIMIT% of the default chunk size, the current chunk is +// moved to the passive list and the next active chunk becomes current; otherwise a new chunk is allocated +// and becomes current. +class PoolManager { +public: + PoolManager(size_t initial_size, size_t page_size, bool use_large_pages, bool is_code, bool is_resize_allowed); + virtual ~PoolManager(); + + // alloc is synchronized inside the class + void* alloc(size_t size, size_t alignment, Code_Allocation_Action action); + +protected: + PoolDescriptor* _active_pool; + PoolDescriptor* _passive_pool; + size_t _page_size; + bool _use_large_pages; + size_t _default_pool_size; + bool _is_code; + bool _is_resize_allowed; + + apr_pool_t* aux_pool; + apr_thread_mutex_t* aux_mutex; + + Byte *vtable_pool_start; // for compressed vtable pointers support only! 
+ +protected: + inline PoolDescriptor* allocate_pool_storage(size_t size); // allocate memory for new PoolDescriptor + inline size_t round_up_to_page_size_multiple(size_t size); + inline void _lock(); + inline void _unlock(); +}; + + #endif //_MEM_ALLOC_H_ Index: vm/vmcore/include/vm_stats.h =================================================================== --- vm/vmcore/include/vm_stats.h (revision 468306) +++ vm/vmcore/include/vm_stats.h (working copy) @@ -234,6 +234,12 @@ Lock_Manager vm_stats_lock; apr_pool_t * vm_stats_pool; + // JIT and stub pools statistics + uint64 number_memoryblock_allocations; + uint64 total_memory_allocated; + uint64 total_memory_used; + uint64 number_memorymanager_created; + ~VM_Statistics(); static VM_Statistics & get_vm_stats(); Index: vm/vmcore/include/object_layout.h =================================================================== --- vm/vmcore/include/object_layout.h (revision 468306) +++ vm/vmcore/include/object_layout.h (working copy) @@ -57,9 +57,16 @@ VTable *vt() { assert(vt_offset); return vt_unsafe(); } uint32 get_obj_info() { return obj_info; } void set_obj_info(uint32 value) { obj_info = value; } + void set_vtable_unsafe(VTable* vt_ptr) { + vt_offset = (uint32)((POINTER_SIZE_INT)vt_ptr - vm_get_vtable_base()); + } static VTable *allocation_handle_to_vtable(Allocation_Handle ah) { return (VTable *) ((POINTER_SIZE_INT)ah + vm_get_vtable_base()); } + static Allocation_Handle vtable_to_allocation_handle(VTable *vt) { + assert(vt); + return (Allocation_Handle) ((POINTER_SIZE_INT)vt - vm_get_vtable_base()); + } static unsigned header_offset() { return sizeof(uint32); } static size_t get_size() { return sizeof(ManagedObjectCompressedVtablePtr); } static bool are_vtable_pointers_compressed() { return TRUE; } @@ -72,15 +79,40 @@ VTable *vt() { assert(vt_raw); return vt_unsafe(); } uint32 get_obj_info() { return (uint32) obj_info; } void set_obj_info(uint32 value) { obj_info = (uint32) value; } + void set_vtable_unsafe(VTable* vt_ptr) { + assert(vt_ptr); + vt_raw = vt_ptr; + } static VTable *allocation_handle_to_vtable(Allocation_Handle ah) { + assert (ah); return (VTable *) ah; } + static Allocation_Handle vtable_to_allocation_handle(VTable *vt) { + assert(vt); + return (Allocation_Handle)vt; + } static unsigned header_offset() { return sizeof(VTable *); } static size_t get_size() { return sizeof(ManagedObjectUncompressedVtablePtr); } static bool are_vtable_pointers_compressed() { return FALSE; } } ManagedObjectUncompressedVtablePtr; typedef struct ManagedObject { + static unsigned get_header_size(){ + if (are_vtable_pointers_compressed()) + return sizeof(ManagedObjectCompressedVtablePtr); + else + return sizeof(ManagedObjectUncompressedVtablePtr); + } + // get pointer to object data area + void *get_data_ptr() { + return (VTable *)((POINTER_SIZE_INT)this + get_header_size()); + } + void set_vtable_unsafe(VTable* vt_ptr) { + if (are_vtable_pointers_compressed()) + ((ManagedObjectCompressedVtablePtr *)this)->set_vtable_unsafe(vt_ptr); + else + ((ManagedObjectUncompressedVtablePtr *)this)->set_vtable_unsafe(vt_ptr); + } VTable *vt_unsafe() { if (are_vtable_pointers_compressed()) return ((ManagedObjectCompressedVtablePtr *)this)->vt_unsafe(); @@ -114,6 +146,10 @@ return are_vtable_pointers_compressed() ? ManagedObjectCompressedVtablePtr::allocation_handle_to_vtable(ah) : ManagedObjectUncompressedVtablePtr::allocation_handle_to_vtable(ah); } + static Allocation_Handle vtable_to_allocation_handle(VTable *vt) { + return are_vtable_pointers_compressed() ? 
+ ManagedObjectCompressedVtablePtr::vtable_to_allocation_handle(vt) : ManagedObjectUncompressedVtablePtr::vtable_to_allocation_handle(vt); + } static unsigned header_offset() { return are_vtable_pointers_compressed() ? ManagedObjectCompressedVtablePtr::header_offset() : ManagedObjectUncompressedVtablePtr::header_offset(); Index: vm/vmcore/include/version_svn_tag.h =================================================================== --- vm/vmcore/include/version_svn_tag.h (revision 468306) +++ vm/vmcore/include/version_svn_tag.h (working copy) @@ -18,6 +18,6 @@ #ifndef _VERSION_SVN_TAG_ #define _VERSION_SVN_TAG_ -#define VERSION_SVN_TAG "467316" +#define VERSION_SVN_TAG "468306" #endif // _VERSION_SVN_TAG_ Index: vm/vmcore/include/classloader.h =================================================================== --- vm/vmcore/include/classloader.h (revision 468306) +++ vm/vmcore/include/classloader.h (working copy) @@ -160,7 +160,7 @@ private: class FailedClasses : public MapEx {}; class LoadingClasses : public MapEx {}; - class ReportedClasses : public MapEx {}; + class ReportedClasses : public MapEx {}; class JavaTypes : public MapEx {}; @@ -169,7 +169,7 @@ ClassLoader() : m_loader(NULL), m_parent(NULL), m_package_table(NULL), m_loadedClasses(NULL), m_failedClasses(NULL), m_loadingClasses(NULL), m_reportedClasses(NULL), m_javaTypes(NULL), m_nativeLibraries(NULL), - m_markBit(0), m_unloading(false), m_fullSize(0), m_verifyData(NULL) + m_markBit(false), m_fullSize(0), m_verifyData(NULL) { apr_pool_create(&pool, 0); } @@ -185,17 +185,22 @@ Class** klass = m_loadedClasses->Lookup(name); return klass?*klass:NULL; } - void InsertClass(Class* clss) { - LMAutoUnlock aulock(&m_lock); - m_loadedClasses->Insert(clss->name, clss); - m_initiatedClasses->Insert(clss->name, clss); + + void RemoveFromReported(const String* name){ + Class** clss = m_reportedClasses->Lookup(name); + if(clss) { + m_reportedClasses->Remove(name); + } } - Class* AllocateAndReportInstance(const Global_Env* env, Class* klass); + + bool InsertClass(Class* clss); + Class* AllocateAndReportInstance(const Global_Env* env, Class* klass, bool b_report_class_handle = true); Class* NewClass(const Global_Env* env, const String* name); ManagedObject** RegisterClassInstance(const String* className, ManagedObject* instance); Class* DefineClass(Global_Env* env, const char* class_name, uint8* bytecode, unsigned offset, unsigned length, const String** res_name = NULL); Class* LoadClass( Global_Env* UNREF env, const String* UNREF name); + Class* CreateVTableClass(String*, int); Class* LoadVerifyAndPrepareClass( Global_Env* env, const String* name); virtual void ReportException(const char* exn_name, std::stringstream& message_stream); virtual void ReportFailedClass(Class* klass, const char* exnclass, std::stringstream& exnmsg); @@ -229,8 +234,15 @@ void FailedLoadingClass(const String* className); public: - bool IsBootstrap() { return m_loader == NULL; } - void Mark() { m_markBit = 1; } + bool IsBootstrap() { +#ifdef _DEBUG + if (m_loader){ + assert (this != (const ClassLoader*)VM_Global_State::loader_env->bootstrap_class_loader); + } +#endif + return m_loader == NULL; + } + void Mark() { m_markBit = true; } bool NotMarked() { return (m_markBit == 0); } unsigned GetFullSize(); ManagedObject* GetLoader() { return m_loader; } @@ -248,7 +260,7 @@ static ClassLoader* FindByObject( ManagedObject* loader ); // ppervov: NOTE: LookupLoader has side effect of adding 'loader' to the collection VMEXPORT static ClassLoader* LookupLoader( ManagedObject* 
loader ); - static void UnloadClassLoader( ManagedObject* loader ); + static void UnloadClassLoader( ClassLoader* loader ); static void gc_enumerate(); static void ClearMarkBits(); static void StartUnloading(); @@ -266,6 +278,15 @@ Unlock(); return ptr; } + + PoolManager* GetCodePool(){ + return CodeMemoryManager; + } + + inline void* CodeAlloc(size_t size, size_t alignment, Code_Allocation_Action action) { + return CodeMemoryManager->alloc(size, alignment, action); + } + private: static Lock_Manager m_tableLock; static unsigned m_capacity; @@ -284,16 +305,18 @@ ClassTable* m_initiatedClasses; FailedClasses* m_failedClasses; LoadingClasses* m_loadingClasses; - ReportedClasses* m_reportedClasses; + // m_reportedClasses used to store classes for the moment jlc is created for some class and before it is placed to m_loaded_classes + ReportedClasses* m_reportedClasses; JavaTypes* m_javaTypes; NativeLibraryList m_nativeLibraries; Lock_Manager m_lock; Lock_Manager m_types_cache_lock; - unsigned m_markBit:1; - unsigned m_unloading; + // m_markBit=true forces strong j.l.Classloader enumeration, m_markBit=false -- weak enumeration + bool m_markBit; unsigned m_fullSize; void* m_verifyData; apr_pool_t* pool; + PoolManager *CodeMemoryManager; // methods Class* WaitDefinition(Global_Env* env, const String* className); Index: vm/vmcore/include/environment.h =================================================================== --- vm/vmcore/include/environment.h (revision 468306) +++ vm/vmcore/include/environment.h (working copy) @@ -28,6 +28,7 @@ #include "open/hythread.h" #include "open/compmgr.h" #include "open/em_vm.h" +#include "mem_alloc.h" #include "String_Pool.h" #include "vm_core_types.h" @@ -50,6 +51,7 @@ void* portLib; // Classlib's port library DynamicCode* dcList; Assertion_Registry* assert_reg; + PoolManager* GlobalCodeMemoryManager; Method_Lookup_Table* vm_methods; hythread_library_t hythread_lib; String_Pool string_pool; // string table @@ -65,6 +67,7 @@ bool strings_are_compressed; // 2003-05-19: The VM searches the java.lang.String class for a "byte[] bvalue" field at startup, // as an indication that the Java class library supports compressed strings with 8-bit characters. bool use_large_pages; // 20040109 Use large pages for class-related data such as vtables. + size_t system_page_size; // system page size according to use_large_pages value bool verify_all; // psrebriy 20050815 Verify all classes including loaded by bootstrap class loader bool pin_interned_strings; // if true, interned strings are never moved @@ -83,6 +86,7 @@ String* VoidBooleanDescriptor_String; String* Clonable_String; String* Serializable_String; + String* VTableBaseObject_String; String* JavaLangReflectMethod_String; String* JavaLangNullPointerException_String; @@ -129,6 +133,7 @@ Class* JavaLangObject_Class; Class* JavaLangString_Class; Class* JavaLangClass_Class; + Class* VTableVTable_Class; Class* java_lang_Throwable_Class; Class* java_lang_StackTraceElement_Class; @@ -171,7 +176,6 @@ // VTable for the java_lang_String class VTable* JavaLangString_VTable; - Allocation_Handle JavaLangString_allocation_handle; // Keeps uncaught exception for the thread which is destroying VM. 
jthrowable uncaught_exception; Index: vm/vmcore/include/Class.h =================================================================== --- vm/vmcore/include/Class.h (revision 468306) +++ vm/vmcore/include/Class.h (working copy) @@ -383,7 +383,14 @@ #define GC_BYTES_IN_VTABLE (sizeof(void *)) +#if defined _IPF_ || defined _EM64T_ // CHECK ON THAT MODE!!! + #define VTABLE_FIELDS_NUMBER 12 // number of 32bit fields in struct VTable - 11.5 jlong fields actually +#else // _IA32_ + #define VTABLE_FIELDS_NUMBER 12 // number of 32bit fields in struct VTable +#endif +#define JLC_REF_NUMBER 3 //order of jlc field inside vtable class +// update VTABLE_FIELDS_NUMBER when you change struct VTable!!! typedef struct VTable { Byte _gc_private_information[GC_BYTES_IN_VTABLE]; @@ -395,6 +402,7 @@ uint32 class_properties; + ManagedObject *jlC; // reference inside object is ManagedObject * physically // Offset from the top by CLASS_ALLOCATED_SIZE_OFFSET // The number of bytes allocated for this object. It is the same as // instance_data_size with the constraint bit cleared. This includes @@ -414,8 +422,12 @@ #define VTABLE_OVERHEAD (sizeof(VTable) - sizeof(void *)) // The "- sizeof(void *)" part subtracts out the "unsigned char *methods[1]" contribution. -VTable *create_vtable(Class *p_class, unsigned n_vtable_entries); +VTable *create_vtable(Class *p_class, void* vt_ptr = NULL); +void *create_vtable_sequence(Class *p_class); +unsigned get_vtable_fields_number(Class* clss); +size_t get_vtable_size(Class* clss); + } // extern "C" @@ -506,6 +518,8 @@ ClassLoader* class_loader; + ManagedObject* vtObj; // TEMPORARY - used for enumeration + getting vtObj from class, change to (vtable-8) + // // Does it represent a primitive type? // @@ -538,6 +552,10 @@ unsigned is_verified : 2; // + // Is this class sythetic VTable class + unsigned is_VT_class : 1; + + // // Can instances of this class be allocated using a fast inline sequence containing // no calls to other routines. // @@ -753,15 +771,13 @@ // Required for fast enumeration of error objects Const_Pool* m_failedResolution; - // struct Class accessibility - unsigned m_markBit:1; - // verify data void *verify_data; AnnotationTable * annotations; } Class; // typedef struct Class +void* code_alloc(Class* p_class, size_t size, size_t alignment, Code_Allocation_Action action); } // extern "C" @@ -840,7 +856,7 @@ // // preparation phase of class loading // -bool class_prepare(Global_Env* env, Class *clss); +bool class_prepare(Global_Env* env, Class *clss, void* p_vt_container = NULL); @@ -859,7 +875,6 @@ unsigned class_calculate_size(const Class*); void mark_classloader(ClassLoader*); -VMEXPORT void vm_notify_live_object_class(Class_Handle); // // execute static initializer of class Index: vm/vmcore/include/method_lookup.h =================================================================== --- vm/vmcore/include/method_lookup.h (revision 468306) +++ vm/vmcore/include/method_lookup.h (working copy) @@ -48,6 +48,7 @@ CodeChunkInfo *get(unsigned i); void add(CodeChunkInfo *m); + void remove(CodeChunkInfo *m); // Resembles add, but appends the new entry m at the end of the table. The new entry must have a starting address above all entries // in the table. This method does not acquire p_meth_addr_table_lock, so insertion must be protected by another lock or scheme. 
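[Illustrative sketch, not part of the patch: it shows the allocation chain behind the declarations above - code_alloc() added to Class.h, ClassLoader::CodeAlloc()/GetCodePool() in classloader.h, and PoolManager::alloc() in mem_alloc.h. The helper name allocate_stub_for and the stub_size parameter are made up for the example; only entry points introduced by this patch are used.]

    // After this patch, JIT code and stubs are carved out of the code pool owned by the
    // class loader that defined the class, instead of the global malloc_fixed_code_for_jit() pool.
    #include "Class.h"   // code_alloc(), per the declaration added by this patch
    #include "nogc.h"    // DEFAULT_CODE_ALIGNMENT

    static Byte* allocate_stub_for(Class* clss, size_t stub_size)
    {
        // Class.cpp: code_alloc() forwards to the defining class loader:
        //     p_class->class_loader->CodeAlloc(size, alignment, action)
        // classloader.h: CodeAlloc() delegates to the loader's PoolManager:
        //     CodeMemoryManager->alloc(size, alignment, action)
        return (Byte*)code_alloc(clss, stub_size, DEFAULT_CODE_ALIGNMENT, CAA_Allocate);
    }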
Index: vm/vmcore/src/jit/compile.cpp =================================================================== --- vm/vmcore/src/jit/compile.cpp (revision 468306) +++ vm/vmcore/src/jit/compile.cpp (working copy) @@ -570,7 +570,7 @@ //***** Now generate code assert(lil_is_valid(cs)); - NativeCodePtr addr = LilCodeGenerator::get_platform()->compile(cs); + NativeCodePtr addr = LilCodeGenerator::get_platform()->compile(cs, clss->class_loader->GetCodePool()); #ifndef NDEBUG char buf[100]; Index: vm/vmcore/src/gc/dll_gc.cpp =================================================================== --- vm/vmcore/src/gc/dll_gc.cpp (revision 468306) +++ vm/vmcore/src/gc/dll_gc.cpp (working copy) @@ -173,6 +173,7 @@ (apr_dso_handle_sym_t)default_gc_supports_compressed_references); gc_add_root_set_entry = (void (*)(Managed_Object_Handle *ref, Boolean is_pinned)) getFunction(handle, "gc_add_root_set_entry", dllName); + gc_add_weak_root_set_entry = (void (*)(Managed_Object_Handle *ref, Boolean is_pinned, Boolean short_ref)) getFunction(handle, "gc_add_weak_root_set_entry", dllName); gc_add_compressed_root_set_entry = (void (*)(uint32 *ref, Boolean is_pinned)) getFunctionOptional(handle, "gc_add_compressed_root_set_entry", Index: vm/vmcore/src/gc/root_set_enum_common.cpp =================================================================== --- vm/vmcore/src/gc/root_set_enum_common.cpp (revision 468306) +++ vm/vmcore/src/gc/root_set_enum_common.cpp (working copy) @@ -64,56 +64,100 @@ // Enumerate all globally visible classes and their static fields. +static void vm_enumerate_jlc(Class* c, bool b_weak = false) +{ + assert (VM_Global_State::loader_env->system_class_loader); + assert (*c->class_handle); + if (!b_weak) { + vm_enumerate_root_reference((void**)c->class_handle, FALSE); + } + else { + vm_enumerate_weakroot_reference((void**)c->class_handle, FALSE); + } +} + +static void vm_enumerate_static_fields_process(Class* c) +{ + if (c->p_error) { + vm_enumerate_root_reference( + (void**)&c->p_error ,FALSE); + } + + Const_Pool* cp = c->m_failedResolution; + while(cp) { + vm_enumerate_root_reference((void**)(&(cp->error.cause)), FALSE); + cp = cp->error.next; + } + // Finally enumerate the static fields of the class + unsigned n_fields = c->n_fields; + if((c->state == ST_Prepared) + || (c->state == ST_Initializing) + || (c->state == ST_Initialized)) { + // Class has been prepared, so we can iterate over all its fields. + for(unsigned i = 0; i < n_fields; i++) { + Field *f = &c->fields[i]; + if(f->is_static()) { + char desc0 = f->get_descriptor()->bytes[0]; + if(desc0 == 'L' || desc0 == '[') { + // The field is static and it is a reference. + if (VM_Global_State::loader_env->compress_references) { + vm_enumerate_compressed_root_reference((uint32 *)f->get_address(), FALSE); + } else { + vm_enumerate_root_reference((void **)f->get_address(), FALSE); + } + } + } + } + } +} + static void vm_enumerate_static_fields() { TRACE2("enumeration", "vm_enumerate_static_fields()"); Global_Env *global_env = VM_Global_State::loader_env; - ManagedObject** ppc; GlobalClassLoaderIterator ClIterator; ClassLoader *cl = ClIterator.first(); while(cl) { + // 1. enum all reported classes as strong roots + // 2. 
enum all loaded classes as weak roots GlobalClassLoaderIterator::ClassIterator itc; GlobalClassLoaderIterator::ReportedClasses RepClasses = cl->GetReportedClasses(); for (itc = RepClasses->begin(); itc != RepClasses->end(); itc++) { - ppc = &itc->second; - assert(*ppc); - Class* c = jclass_to_struct_Class((jclass)ppc); + Class* c = itc->second; + { + vm_enumerate_jlc(c); + if (c->vtable) + { + vm_enumerate_root_reference((void**)&c->vtObj, FALSE); + } + vm_enumerate_static_fields_process(c); + } - if (c->p_error) { - vm_enumerate_root_reference( - (void**)&c->p_error ,FALSE); - } - vm_enumerate_root_reference((void**)ppc, FALSE); - Const_Pool* cp = c->m_failedResolution; - while(cp) { - vm_enumerate_root_reference((void**)(&(cp->error.cause)), FALSE); - cp = cp->error.next; - } - // Finally enumerate the static fields of the class - unsigned n_fields = c->n_fields; - if((c->state == ST_Prepared) - || (c->state == ST_Initializing) - || (c->state == ST_Initialized)) { - // Class has been prepared, so we can iterate over all its fields. - for(unsigned i = 0; i < n_fields; i++) { - Field *f = &c->fields[i]; - if(f->is_static()) { - char desc0 = f->get_descriptor()->bytes[0]; - if(desc0 == 'L' || desc0 == '[') { - // The field is static and it is a reference. - if (global_env->compress_references) { - vm_enumerate_compressed_root_reference((uint32 *)f->get_address(), FALSE); - } else { - vm_enumerate_root_reference((void **)f->get_address(), FALSE); - } - } - } + } + + ClassTable* ct = cl->GetLoadedClasses(); + ClassTable::iterator it; + for (it = ct->begin(); it != ct->end(); it++) + { + Class* c = it->second; + { + if (cl->IsBootstrap()) + { + vm_enumerate_jlc(c); + if (c->vtable) + vm_enumerate_root_reference((void**)&c->vtObj, FALSE); } + else + { + vm_enumerate_jlc(c, true/*weak*/); + } + vm_enumerate_static_fields_process(c); } } + cl = ClIterator.next(); } } //vm_enumerate_static_fields @@ -148,7 +192,29 @@ } //vm_enumerate_root_reference +void +vm_enumerate_weakroot_reference(void **ref, Boolean is_pinned) +{ + if (get_global_safepoint_status() == enumerate_the_universe) { +#if _DEBUG + if (VM_Global_State::loader_env->compress_references) { + // 20030324 DEBUG: verify the slot whose reference is being passed. + ManagedObject **p_obj = (ManagedObject **)ref; + ManagedObject* obj = *p_obj; + assert(obj != NULL); // See the comment at the top of the procedure. + if ((void *)obj != Class::heap_base) { + assert(((POINTER_SIZE_INT)Class::heap_base <= (POINTER_SIZE_INT)obj) && ((POINTER_SIZE_INT)obj <= (POINTER_SIZE_INT)Class::heap_end)); + (obj->vt())->clss->name->bytes; + } + } +#endif // _DEBUG + + gc_add_weak_root_set_entry((Managed_Object_Handle *)ref, is_pinned, FALSE); + } +} //vm_enumerate_root_reference + + // Resembles vm_enumerate_root_reference() but is passed the address of a uint32 slot containing a compressed reference. VMEXPORT void vm_enumerate_compressed_root_reference(uint32 *ref, Boolean is_pinned) { @@ -262,6 +328,22 @@ << "." 
<< method_get_name(cci->get_method()) << method_get_descriptor(cci->get_method())); cci->get_jit()->get_root_set_from_stack_frame(cci->get_method(), 0, si_get_jit_context(si)); + ClassLoader* cl = cci->get_method()->get_class()->class_loader; + assert (cl); + mark_classloader(cl); + if (cci->has_inline_info()) { + JIT *jit = cci->get_jit(); + NativeCodePtr ip = si_get_ip(si); + uint32 inlined_depth = si_get_inline_depth(si); + uint32 offset = (POINTER_SIZE_INT)ip - (POINTER_SIZE_INT)cci->get_code_block_addr(); + for (uint32 i = 0; i < inlined_depth; i++) { + Method* m = jit->get_inlined_method(cci->get_inline_info(), offset, i); + assert (m); + cl = m->get_class()->class_loader; + assert (cl); + mark_classloader(cl); + } + } TRACE2("enumeration", "enumerated eip=" << (void *) si_get_ip(si) << " is_first=" << !si_get_jit_context(si)->is_ip_past << " " << class_get_name(method_get_class(cci->get_method())) @@ -275,6 +357,12 @@ << (m2n_get_method(si_get_m2n(si)) ? method_get_name(m2n_get_method(si_get_m2n(si))) : "") << (m2n_get_method(si_get_m2n(si)) ? method_get_descriptor(m2n_get_method(si_get_m2n(si))) : "")); oh_enumerate_handles(m2n_get_local_handles(si_get_m2n(si))); + Method* m = m2n_get_method(si_get_m2n(si)); + if (m) { + ClassLoader* cl = m->get_class()->class_loader; + assert (cl); + mark_classloader(cl); + } } si_goto_previous(si, false); } Index: vm/vmcore/src/kernel_classes/javasrc/java/lang/Class.java =================================================================== --- vm/vmcore/src/kernel_classes/javasrc/java/lang/Class.java (revision 468306) +++ vm/vmcore/src/kernel_classes/javasrc/java/lang/Class.java (working copy) @@ -398,6 +398,9 @@ .copyConstructor(getDeclaredConstructorInternal(argumentTypes)); } + public ClassLoader definingLoader; + public Object classVTInstance; + /** * @com.intel.drl.spec_ref */ Index: vm/vmcore/src/kernel_classes/javasrc/java/lang/ClassLoader.java =================================================================== --- vm/vmcore/src/kernel_classes/javasrc/java/lang/ClassLoader.java (revision 468306) +++ vm/vmcore/src/kernel_classes/javasrc/java/lang/ClassLoader.java (working copy) @@ -106,6 +106,12 @@ */ private final HashMap definedPackages; + /* + * The following mapping is used , where binaryClassName - class name, + * clazz - corresponding class. + */ + Hashtable loadedClasses = new Hashtable(); + /** * The following mapping is used , where binaryClassName - class name, * clazz - corresponding class. 
@@ -378,6 +384,17 @@ /** * @com.intel.drl.spec_ref */ + public void addToClassRegistry(String name, Class clazz) + { + synchronized (loadedClasses) + { + loadedClasses.put(name, clazz); // unloading + } + } + + /** + * @com.intel.drl.spec_ref + */ protected final Class defineClass(String name, ByteBuffer b, ProtectionDomain protectionDomain) throws ClassFormatError { byte[] data = b.array(); @@ -416,6 +433,7 @@ } Class clazz = VMClassRegistry .defineClass(name, this, data, offset, len); + clazz.definingLoader = this; clazz.setProtectionDomain(domain); if (certs != null) { packageCertificates.put(packageName, certs); Index: vm/vmcore/src/class_support/Prepare.cpp =================================================================== --- vm/vmcore/src/class_support/Prepare.cpp (revision 468306) +++ vm/vmcore/src/class_support/Prepare.cpp (working copy) @@ -289,7 +289,8 @@ #endif // not POINTER64 if ((clss->static_data_size%8)!=0) { clss->static_data_size += 4; - assert((clss->static_data_size%8)==0); + //assert((clss->static_data_size%8)==0); // disabled to enable compact_fields - probably not needed + // when static data is aligned on a 4 byte boundary } } @@ -363,6 +364,14 @@ assert(clss->state != ST_InstanceSizeComputed); bool do_field_compaction = Class::compact_fields; bool do_field_sorting = Class::sort_fields; + // Force do_field_compaction = true and do_field_sorting = false for synthetic VTable classes + // so that the VT object's field layout exactly mirrors struct VTable + Global_Env* env = VM_Global_State::loader_env; + if (clss->is_VT_class) + { + do_field_sorting = false; + do_field_compaction = true; + } // Create a temporary array of pointers to the class's fields. We do this to support sorting the fields // by size if the command line option "-sort_fields" is given, and because elements of the clss->fields array @@ -523,9 +532,11 @@ //extern bool bootstrapped; //// gloss over bootstrap inconsistency //if (bootstrapped == true) { - if(env->InBootstrap()) { - assert(clss->n_instance_refs == 0); - } + // This check no longer holds: a VTable class has one reference field (jlC), + // so n_instance_refs may be nonzero during bootstrap - the check needs to be redone. 
+ //if(env->InBootstrap()) { + // assert(clss->n_instance_refs == 0); + //} } assert (current_index <= clss->n_instance_refs); @@ -1276,7 +1287,7 @@ // // -bool class_prepare(Global_Env* env, Class *clss) +bool class_prepare(Global_Env* env, Class *clss, void* p_vt_mem) { ASSERT_RAISE_AREA; // fast path @@ -1473,20 +1484,21 @@ // code address (a stub or jitted code) // // - clss->vtable = create_vtable(clss, n_vtable_entries); + if (!p_vt_mem) + { + p_vt_mem = create_vtable_sequence(clss); + if (p_vt_mem == NULL){ + assert(false); + return false; + } + } + clss->vtable = create_vtable(clss, p_vt_mem); for (i = 0; i < n_vtable_entries; i++) { // need to populate with pointers to stubs or compiled code clss->vtable->methods[i] = NULL; // for now } - if (vm_vtable_pointers_are_compressed()) - { - clss->allocation_handle = (Allocation_Handle) ((POINTER_SIZE_INT)clss->vtable - vm_get_vtable_base()); - } - else - { - clss->allocation_handle = (Allocation_Handle) clss->vtable; - } clss->vtable->clss = clss; + clss->allocation_handle = ManagedObject::vtable_to_allocation_handle(clss->vtable); // Set the vtable entries to point to the code address (a stub or jitted code) point_class_vtable_entries_to_stubs(clss); Index: vm/vmcore/src/class_support/C_Interface.cpp =================================================================== --- vm/vmcore/src/class_support/C_Interface.cpp (revision 468306) +++ vm/vmcore/src/class_support/C_Interface.cpp (working copy) @@ -2136,6 +2136,14 @@ } } //class_is_pinned +void* class_alloc_via_classloader(Class_Handle ch, int32 size) +{ + assert(ch); + assert(size >= 0); + Class *clss = (Class *)ch; + assert (clss->class_loader); + return clss->class_loader->Alloc(size); +} //class_alloc_via_classloader Boolean class_is_finalizable(Class_Handle ch) @@ -2380,16 +2388,7 @@ Class_Handle allocation_handle_get_class(Allocation_Handle ah) { assert(ah); - VTable *vt; - - if (vm_vtable_pointers_are_compressed()) - { - vt = (VTable *) ((POINTER_SIZE_INT)ah + vm_get_vtable_base()); - } - else - { - vt = (VTable *) ah; - } + VTable *vt = ManagedObject::allocation_handle_to_vtable(ah); return (Class_Handle) vt->clss; } @@ -2401,15 +2400,8 @@ } -Runtime_Type_Handle class_get_runtime_type_handle(Class_Handle ch) +unsigned vm_get_vtable_ptr_size() { - assert(ch); - return (Runtime_Type_Handle) ((Class *) ch)->allocation_handle; -} - - -unsigned vm_get_runtime_type_handle_width() -{ if (vm_vtable_pointers_are_compressed()) { return sizeof(uint32); Index: vm/vmcore/src/class_support/Environment.cpp =================================================================== --- vm/vmcore/src/class_support/Environment.cpp (revision 468306) +++ vm/vmcore/src/class_support/Environment.cpp (working copy) @@ -80,6 +80,7 @@ EnqueueName_String = string_pool.lookup("enqueue"); Clonable_String = string_pool.lookup("java/lang/Cloneable"); Serializable_String = string_pool.lookup("java/io/Serializable"); + VTableBaseObject_String = string_pool.lookup("VTableBase"); Length_String = string_pool.lookup("length"); LoadClass_String = string_pool.lookup("loadClass"); @@ -102,7 +103,21 @@ #endif // !_IPF_ strings_are_compressed = false; + + // page size detection use_large_pages = false; + size_t *ps = port_vmem_page_sizes(); + if (ps[1] != 0 && use_large_pages) { + system_page_size = ps[1]; + } + else { + system_page_size = ps[0]; + } + + GlobalCodeMemoryManager = new PoolManager(DEFAULT_COMMOT_JIT_CODE_POOL_SIZE, system_page_size, use_large_pages, + true/*is_code*/, true/*is_resize_allowed*/); + + verify_all = 
false; pin_interned_strings = false; @@ -128,6 +143,7 @@ JavaLangObject_Class = NULL; JavaLangString_Class = NULL; JavaLangClass_Class = NULL; + VTableVTable_Class = NULL; java_lang_Throwable_Class = NULL; java_lang_Error_Class = NULL; java_lang_ThreadDeathError_Class = NULL; @@ -153,7 +169,6 @@ java_lang_reflect_Method_Class = NULL; JavaLangString_VTable = NULL; - JavaLangString_allocation_handle = 0; uncaught_exception = NULL; @@ -197,6 +212,9 @@ compile_clear_dynamic_code_list(dcList); dcList = NULL; + delete GlobalCodeMemoryManager; + GlobalCodeMemoryManager = NULL; + hythread_lib_destroy(hythread_lib); } Index: vm/vmcore/src/class_support/Class.cpp =================================================================== --- vm/vmcore/src/class_support/Class.cpp (revision 468306) +++ vm/vmcore/src/class_support/Class.cpp (working copy) @@ -31,6 +31,10 @@ #include "compile.h" #include "open/gc.h" #include "nogc.h" +#include "heap.h" +#include "classloader.h" +#include "method_lookup.h" +#include "interpreter_imports.h" // 20020923 Total number of allocations and total number of bytes for class-related data structures. // This includes any rounding added to make each item aligned (current alignment is to the next 16 byte boundary). @@ -240,6 +244,13 @@ void Method::MethodClearInternals() { + CodeChunkInfo *jit_info; + for (jit_info = _jits; jit_info; jit_info = jit_info->_next) { + VM_Global_State::loader_env->vm_methods->remove(jit_info); + // ensure that jit_info was deleted + assert (!VM_Global_State::loader_env->vm_methods->find(jit_info->get_code_block_addr())); + } + if (_notify_recompiled_records != NULL) { Method_Change_Notification_Record *nr, *prev_nr; @@ -533,14 +544,6 @@ return clss->simple_name; } -void vm_notify_live_object_class(Class_Handle clss) -{ - if(!clss->m_markBit) { - clss->m_markBit = 1; - mark_classloader(clss->class_loader); - } -} - // end pointers between struct Class and java.lang.Class //////////////////////////////////////////////////////////////////// @@ -618,29 +621,145 @@ // end Support for compressed and raw reference pointers //////////////////////////////////////////////////////////////////// +// allocate memory for jit&stub code +void* code_alloc(Class* p_class, size_t size, size_t alignment, Code_Allocation_Action action) +{ + assert (p_class); + assert (p_class->class_loader); + return p_class->class_loader->CodeAlloc(size, alignment, action); +} +#define VTABLE_TAIL_STRING "$$$VT" +/* + * create_vtable_sequence precreates vtable for class + * p_class parameter is class for which vtable is created +*/ +void* create_vtable_sequence(Class *p_class) +{ + Global_Env* env = VM_Global_State::loader_env; + // create local handles to enumerate vtable_for_VT and VT_for_object objects + // before we set them to clss->allocation_handle and enumerate them via class + tmn_suspend_disable(); + ObjectHandle vtVTableHandle = oh_allocate_local_handle(); + assert (vtVTableHandle); + // create VTable for VT + // VTable for VT class is equial to env->VTableVTable_Class as having the same size + vtVTableHandle->object = (ManagedObject*)gc_alloc(env->VTableVTable_Class->instance_data_size, + env->VTableVTable_Class->allocation_handle, vm_get_gc_thread_local()); + tmn_suspend_enable(); -VTable *create_vtable(Class *p_class, unsigned n_vtable_entries) -{ - unsigned vtable_size = VTABLE_OVERHEAD + n_vtable_entries * sizeof(void *); + if (vtVTableHandle->object == NULL) + { + assert (false); + REPORT_FAILED_CLASS_CLASS(env->VTableVTable_Class->class_loader, 
env->VTableVTable_Class, + "java/lang/OutOfMemoryError", + "Failed to create vtable_sequence for class " << env->VTableVTable_Class->name->bytes); + return NULL; + } + gc_pin_object((Managed_Object_Handle*)vtVTableHandle); + memset(vtVTableHandle->object->get_data_ptr(), 0, VTABLE_OVERHEAD); - // Always allocate vtable data from vtable_data_pool - void *p_gc_hdr = allocate_vtable_data_from_pool(vtable_size); + // Create VT for class + unsigned vt_clss_name_len = p_class->name->len + strlen(VTABLE_TAIL_STRING); + char* buf = (char*) STD_ALLOCA(vt_clss_name_len + 1); + sprintf(buf, "%s%s", p_class->name->bytes, VTABLE_TAIL_STRING); + buf[vt_clss_name_len] = 0; + String* vt_clss_name = env->string_pool.lookup(buf, vt_clss_name_len); + Class* VT_clss = p_class->class_loader->CreateVTableClass(vt_clss_name, get_vtable_fields_number(p_class)); + if (VT_clss == NULL){ + assert (false); + REPORT_FAILED_CLASS_CLASS(VT_clss->class_loader, VT_clss, + "java/lang/OutOfMemoryError", + "Failed to create vtable class " << vt_clss_name->bytes); + return NULL; + } + if(!class_prepare(env, VT_clss, vtVTableHandle->object->get_data_ptr())) { + assert (false); + return NULL; + } + int vt_size = get_vtable_size(p_class); + assert(vt_size + ManagedObject::get_header_size() == VT_clss->instance_data_size); + + tmn_suspend_disable(); + ObjectHandle vtVTHandle = oh_allocate_local_handle(); + assert (vtVTHandle); + vtVTHandle->object = (ManagedObject*)gc_alloc(VT_clss->instance_data_size, + VT_clss->allocation_handle, vm_get_gc_thread_local()); + tmn_suspend_enable(); + + if (vtVTHandle->object == NULL) + { + assert (false); + REPORT_FAILED_CLASS_CLASS(VT_clss->class_loader, VT_clss, + "java/lang/OutOfMemoryError", + "Failed to create vtable_sequence for class " << VT_clss->name->bytes); + return NULL; + } + gc_pin_object((Managed_Object_Handle*)vtVTHandle); + + memset(vtVTHandle->object->get_data_ptr(), 0, vt_size); + p_class->vtObj = vtVTHandle->object; // TEMPORARY - use allocation_handle instead!!! will be set later into allocation_handle + + // SET reference to VTable object in jlc: jlc.classVTInstance = vtVT + // skip BS classses as VTable is always enumerated for them + if (!p_class->class_loader->IsBootstrap()) + { + assert (env->JavaLangClass_Class); + assert (p_class->class_handle); + JNIEnv *jenv = get_jni_native_intf(); + tmn_suspend_disable(); + Field *field = class_lookup_field_recursive(env->JavaLangClass_Class, "classVTInstance", "Ljava/lang/Object;"); + assert (field); + Byte *java_ref = (Byte *)*p_class->class_handle; + ManagedObject **field_addr = (ManagedObject **)(java_ref + field->get_offset()); + STORE_REFERENCE((ManagedObject *)java_ref, field_addr, vtVTHandle->object); + tmn_suspend_enable(); + } + #ifdef VM_STATS - // For allocation statistics, include any rounding added to make each item aligned (current alignment is to the next 16 byte boundary). - unsigned num_bytes = (vtable_size + 15) & ~15; - // 20020923 Total number of allocations and total number of bytes for class-related data structures. 
- Class::num_vtable_allocations++; - Class::total_vtable_bytes += num_bytes; + unsigned vtables_allocated_size_total = vt_size + VTABLE_OVERHEAD + 2*ManagedObject::get_header_size(); + Class::num_vtable_allocations += 2; + Class::total_vtable_bytes += vtables_allocated_size_total; #endif - assert(p_gc_hdr); - memset(p_gc_hdr, 0, vtable_size); + return vtVTHandle->object->get_data_ptr(); +} + +/* + * get_vtable_fields_number returns the number of method entries (virtual and interface) + * that are placed in the methods array of the class's VTable +*/ +unsigned get_vtable_fields_number(Class* clss) +{ + return (clss->n_virtual_method_entries + clss->n_intfc_method_entries); +} + +/* + * get_vtable_size returns the size of the VTable in bytes for the given class. + * The function must be kept in sync with VTABLE_FIELDS_NUMBER. + * --- SYSTEM DEPENDENT --- method entries are jint-sized on IA32 and jlong-sized on _IPF_ and _EM64T_ +*/ +size_t get_vtable_size(Class* clss) +{ + size_t sz; +#if defined _IPF_ || defined _EM64T_ + sz = get_vtable_fields_number(clss)*sizeof(jlong) + VTABLE_OVERHEAD; +#else // _IA32_ + sz = get_vtable_fields_number(clss)*sizeof(jint) + VTABLE_OVERHEAD; +#endif + return sz; +} + + +VTable *create_vtable(Class *p_class, void* p_gc_hdr) +{ + assert (p_gc_hdr); + assert (p_class); + Global_Env* env = VM_Global_State::loader_env; VTable *vtable = (VTable *)p_gc_hdr; - if(p_class && p_class->super_class) { + if(p_class->super_class) { p_class->depth = p_class->super_class->depth + 1; memcpy(&vtable->superclasses, &p_class->super_class->vtable->superclasses, @@ -659,6 +778,40 @@ { p_class->is_suitable_for_fast_instanceof = 1; } + + if (!env->InBootstrap()){ + tmn_suspend_disable(); + vtable->jlC = *p_class->class_handle; + tmn_suspend_enable(); + } + else { + // in bootstrap mode jlC is set later, in create_instance_for_class; + // class_handle is NULL during bootstrap + assert (!p_class->class_handle); + vtable->jlC = NULL; + } + +#ifdef _DEBUG + // Consistency check of the VTable structure: verify that the jlC field of struct VTable + // is reflected by the vt_jlc reference field of the VT object. + // Skip bootstrap classes, which do not yet have a j.l.Class at prepare time. + if (p_class->class_handle) + { + tmn_suspend_disable(); + ManagedObject* VT_obj = (ManagedObject*)((POINTER_SIZE_INT)vtable - ManagedObject::get_header_size()); + assert (vtable->jlC); + JNIEnv *jenv = get_jni_native_intf(); + ObjectHandle vt_handle = oh_allocate_local_handle(); + vt_handle->object= VT_obj; + Field *field = class_lookup_field_recursive(VT_obj->vt()->clss, "vt_jlc", "Ljava/lang/Class;"); + assert (field); + assert (!field->is_static()); + tmn_suspend_enable(); + jobject res = GetObjectField(jenv, (jobject)vt_handle/*&VT_obj*/, (jfieldID)field/*referentField*/); + assert (res->object == *p_class->class_handle); + } +#endif + return vtable; } //create_vtable Index: vm/vmcore/src/class_support/method_lookup.cpp =================================================================== --- vm/vmcore/src/class_support/method_lookup.cpp (revision 468306) +++ vm/vmcore/src/class_support/method_lookup.cpp (working copy) @@ -101,7 +101,59 @@ p_meth_addr_table_lock->_unlock(); } //Method_Lookup_Table::add +#define USE_METHOD_LOOKUP_CACHE +void Method_Lookup_Table::remove(CodeChunkInfo *m) +{ + void* addr = m->get_code_block_addr(); + if (addr == NULL) { + return; + } + +#ifdef USE_METHOD_LOOKUP_CACHE + // First remove from cache. 
+ for (unsigned i = 0; i < EIP_CACHE_SIZE; i++){ + if (_cache[i]){ + void *guess_start = _cache[i]->get_code_block_addr(); + void *guess_end = ((char *)_cache[i]->get_code_block_addr()) + _cache[i]->get_code_block_size(); + if ((addr >= guess_start) && (addr < guess_end)) { + _cache[i] = NULL; + } + } + } +#endif //USE_METHOD_LOOKUP_CACHE + + p_meth_addr_table_lock->_lock(); + + unsigned L = 0, R = _next_free_entry; + while (L < R) { + unsigned M = (L + R) / 2; + CodeChunkInfo *entry = _table[M]; + void *code_block_addr = entry->get_code_block_addr(); + size_t code_block_size = entry->get_code_block_size(); + void *code_end_addr = (void *)((char *)code_block_addr + code_block_size); + + if (addr < code_block_addr) { + R = M; + } else if (addr >= code_end_addr) { + L = M + 1; + } else { + // Found the matching entry: shift the following entries one slot to the left to remove it + for (unsigned i = M; i < (_next_free_entry - 1); i++) { + _table[i] = _table[i+1]; + } + _next_free_entry--; + + p_meth_addr_table_lock->_unlock(); + return; + } + } + + p_meth_addr_table_lock->_unlock(); +} //Method_Lookup_Table::remove + + void Method_Lookup_Table::append_unlocked(CodeChunkInfo *m) { void *code_block_addr = m->get_code_block_addr(); @@ -154,8 +206,6 @@ -#define USE_METHOD_LOOKUP_CACHE - CodeChunkInfo *Method_Lookup_Table::find(void *addr, bool is_ip_past) { if (addr == NULL) { Index: vm/vmcore/src/class_support/method.cpp =================================================================== --- vm/vmcore/src/class_support/method.cpp (revision 468306) +++ vm/vmcore/src/class_support/method.cpp (working copy) @@ -334,7 +334,7 @@ if (size == 0) { addr = NULL; } else { - addr = malloc_fixed_code_for_jit(size, alignment, heat, action); + addr = code_alloc(get_class(), size, alignment, action); } if (action == CAA_Simulate) { Index: vm/vmcore/src/class_support/classloader.cpp =================================================================== --- vm/vmcore/src/class_support/classloader.cpp (revision 468306) +++ vm/vmcore/src/class_support/classloader.cpp (working copy) @@ -24,6 +24,7 @@ #include "vm_log.h" #include +#include <list> #include "classloader.h" #include "object_layout.h" @@ -51,6 +52,7 @@ #include "jarfile_util.h" #include "jni_utils.h" +#include "mem_alloc.h" // // private static variable containing the id of the next class // @@ -73,7 +75,7 @@ if(cl->GetLoader() && cl->NotMarked()) { TRACE2("classloader.unloading.markloader", " Marking loader " << cl << " (" << (void*)cl->GetLoader() << " : " - << ((VTable*)(*(unsigned**)(cl->GetLoader())))->clss->name->bytes << ")"); + << cl->GetLoader()->vt_unsafe()->clss->name->bytes << ")"); cl->Mark(); } } @@ -120,23 +122,28 @@ if(!m_failedClasses) return false; m_javaTypes = new JavaTypes(); if(!m_javaTypes) return false; + + Global_Env *env = VM_Global_State::loader_env; + assert (env); + size_t code_pool_size = IsBootstrap() ? 
DEFAULT_BOOTSTRAP_JIT_CODE_POOL_SIZE : DEFAULT_CLASSLOADER_JIT_CODE_POOL_SIZE; + CodeMemoryManager = new PoolManager(code_pool_size, env->system_page_size, env->use_large_pages, true/*is_code*/, true/*is_resize_allowed*/); + if(!CodeMemoryManager) return false; + return true; } ClassLoader::~ClassLoader() { - apr_pool_destroy(pool); - - ManagedObject** ppc; - ReportedClasses* RepClasses = GetReportedClasses(); - ReportedClasses::iterator itc; - for (itc = RepClasses->begin(); itc != RepClasses->end(); itc++) + ClassTable::iterator it; + ClassTable* LoadedClasses = GetLoadedClasses(); + for (it = LoadedClasses->begin(); it != LoadedClasses->end(); it++) { - ppc = &itc->second; - assert(*ppc); - Class* c = jclass_to_struct_Class((jclass)ppc); + Class* c; + c = it->second; + assert(c); ClassClearInternals(c); } + if (GetLoadedClasses()) delete GetLoadedClasses(); if (GetFailedClasses()) @@ -159,6 +166,15 @@ } if (m_package_table) delete m_package_table; + + for(NativeLibInfo* info = m_nativeLibraries; info;info = info->next ) { + natives_unload_library(info->handle); + } + + delete CodeMemoryManager; + CodeMemoryManager = NULL; + + apr_pool_destroy(pool); } void ClassLoader::LoadingClass::EnqueueInitiator(VM_thread* new_definer, ClassLoader* cl, const String* clsname) @@ -237,13 +253,6 @@ return InitClassFields(env, clss, name); } -ManagedObject** ClassLoader::RegisterClassInstance(const String* className, ManagedObject* instance) -{ - TRACE2("reported:newclass", "DIRECT: inserting class \"" << className->bytes - << "\" with key " << className << " and object " << instance); - return m_reportedClasses->Insert(className, instance); -} - Class* ClassLoader::DefineClass(Global_Env* env, const char* class_name, uint8* bytecode, unsigned offset, unsigned length, const String** res_name) @@ -337,7 +346,11 @@ clss->package = ProvidePackage(env, className, NULL); - InsertClass(clss); + if (!InsertClass(clss)){ + assert (false); + FailedLoadingClass(className); + return NULL; + } SuccessLoadingClass(className); //bool doNotNotifyBaseClasses = // false if class is either j/l/Object, j/io/Serializable, or j/l/Class @@ -558,6 +571,8 @@ ClassLoader* ClassLoader::FindByObject(ManagedObject* loader) { LMAutoUnlock aulock( &(ClassLoader::m_tableLock) ); + //check that loader != NULL because cl->m_loader could be == NULL in case that class loader is to be unloaded + assert(loader); ClassLoader* cl; for(unsigned i = 0; i < m_nextEntry; i++) { @@ -579,14 +594,15 @@ } -void ClassLoader::UnloadClassLoader( ManagedObject* loader ) +void ClassLoader::UnloadClassLoader( ClassLoader* loader ) { + TRACE2("unloading", "Unloading class loader: " << (void*)loader); LMAutoUnlock aulock( &(ClassLoader::m_tableLock) ); unsigned i; for(i = 0; i < m_nextEntry; i++) { ClassLoader* cl = m_table[i]; - if( loader == cl->m_loader ) break; + if( loader == cl ) break; } if (i == m_nextEntry) return; ClassLoader* cl = m_table[i]; @@ -610,15 +626,19 @@ } for(unsigned int i = 0; i < m_nextEntry; i++) { - if(m_table[i]->m_loader != NULL) { + assert (m_table[i]->m_loader); + if (m_table[i]->m_markBit) { vm_enumerate_root_reference((void**)(&(m_table[i]->m_loader)), FALSE); - // should enumerate errors for classes - for(fci = m_table[i]->m_failedClasses->begin(); - fci != m_table[i]->m_failedClasses->end(); fci++) - { - vm_enumerate_root_reference((void**)(&(fci->second.m_exception)), FALSE); - } } + else { + vm_enumerate_weakroot_reference((void**)(&(m_table[i]->m_loader)), FALSE); + } + // should enumerate errors for classes + for(fci = 
m_table[i]->m_failedClasses->begin(); + fci != m_table[i]->m_failedClasses->end(); fci++) + { + vm_enumerate_root_reference((void**)(&(fci->second.m_exception)), FALSE); + } } } @@ -630,25 +650,12 @@ ClassTable::iterator cti; unsigned i; for(i = 0; i < m_nextEntry; i++) { - if(m_table[i]->m_unloading) { - TRACE2("classloader.unloading.debug", " Skipping \"unloaded\" classloader " - << m_table[i] << " (" << m_table[i]->m_loader << " : " - << ((VTable*)(*(unsigned**)(m_table[i]->m_loader)))->clss->name->bytes << ")"); - continue; - } TRACE2("classloader.unloading.debug", " Clearing mark bits in classloader " << m_table[i] << " (" << m_table[i]->m_loader << " : " - << ((VTable*)(*(unsigned**)(m_table[i]->m_loader)))->clss->name->bytes << ") and its classes"); + << m_table[i]->m_loader->vt_unsafe()->clss->name->bytes << ") and its classes"); // clear mark bits in loader and classes m_table[i]->m_markBit = 0; - for(cti = m_table[i]->m_loadedClasses->begin(); - cti != m_table[i]->m_loadedClasses->end(); cti++) - { - if(cti->second->class_loader == m_table[i]) { - cti->second->m_markBit = 0; - } - } - } + } TRACE2("classloader.unloading.clear", "Finished clearing mark bits"); TRACE2("classloader.unloading.marking", "Starting mark loaders"); } @@ -659,27 +666,44 @@ TRACE2("classloader.unloading.marking", "Finished marking loaders"); TRACE2("classloader.unloading.do", "Start checking loaders ready to be unloaded"); LMAutoUnlock aulock( &(ClassLoader::m_tableLock) ); + std::list unloadinglist; unsigned i; for(i = 0; i < m_nextEntry; i++) { - if(m_table[i]->m_unloading) { - TRACE2("classloader.unloading.debug", " Skipping \"unloaded\" classloader " + if(m_table[i]->m_loader) { + TRACE2("classloader.unloading.debug", " Skipping live classloader " << m_table[i] << " (" << m_table[i]->m_loader << " : " - << ((VTable*)(*(unsigned**)(m_table[i]->m_loader)))->clss->name->bytes << ")"); + << m_table[i]->m_loader->vt_unsafe()->clss->name->bytes << ")"); continue; } - TRACE2("classloader.unloading.debug", " Scanning loader " - << m_table[i] << " (" << m_table[i]->m_loader << " : " - << ((VTable*)(*(unsigned**)(m_table[i]->m_loader)))->clss->name->bytes << ")"); - if(!m_table[i]->m_markBit) { - TRACE2("classloader.unloading.stats", " (!) Ready to unload classloader " - << m_table[i] << " (" << m_table[i]->m_loader << " : " - << ((VTable*)(*(unsigned**)(m_table[i]->m_loader)))->clss->name->bytes << ")"); - TRACE2("classloader.unloading.stats", " (!) This will free " - << m_table[i]->GetFullSize() << " bytes in C heap"); - m_table[i]->m_unloading = true; - m_unloadedBytes += m_table[i]->GetFullSize(); + TRACE2("classloader.unloading.stats", " Unloading classloader " + << m_table[i] << " (" << m_table[i] << ")"); + TRACE2("classloader.unloading.stats", " (!) 
This will free " + << m_table[i]->GetFullSize() << " bytes in C heap"); +#ifdef _DEBUG // check that all j.l.Classes inside j.l.ClassLoader are also unloaded + ClassTable* ct = m_table[i]->GetLoadedClasses(); + ClassTable::iterator it; + for (it = ct->begin(); it != ct->end(); it++) + { + Class* c = it->second; + if (*c->class_handle) + { + DIE("FAILED on unloading classloader: \n" << (void*)m_table[i] << + "live j.l.Class of unloaded class is detected: " << c->name->bytes); + assert (false); + } } +#endif + unloadinglist.push_back(m_table[i]); + m_unloadedBytes += m_table[i]->GetFullSize(); } + + // safely remove classloaders from m_table + std::list::iterator it; + for (it = unloadinglist.begin(); it != unloadinglist.end(); it++) + { + UnloadClassLoader(*it); + } + TRACE2("classloader.unloading.do", "Finished checking loaders"); } @@ -690,10 +714,9 @@ TRACE2("classloader.unloading.stats", "----------------------------------------------"); TRACE2("classloader.unloading.stats", "Class unloading statistics:"); for(i = 0; i < m_nextEntry; i++) { - if(m_table[i]->m_unloading) { + if(!m_table[i]->m_loader) { TRACE2("classloader.unloading.stats", " Class loader " - << m_table[i] << " (" << m_table[i]->m_loader << " : " - << ((VTable*)(*(unsigned**)(m_table[i]->m_loader)))->clss->name->bytes + << m_table[i] << " (" << m_table[i]->m_loader << ") contains " << m_table[i]->GetFullSize() << " bytes in C heap"); } } @@ -708,7 +731,6 @@ // skip the object iteration if it is not needed // (logging is not enabled and // class unloading is not yet implemented). - if (!is_info_enabled("class_unload")) return; Managed_Object_Handle obj; int nobjects = 0; @@ -725,11 +747,12 @@ ClassLoader* cl = new UserDefinedClassLoader(); TRACE2("classloader.unloading.add", "Adding class loader " << cl << " (" << loader << " : " - << ((VTable*)(*(unsigned**)(loader)))->clss->name->bytes << ")"); + << loader->vt_unsafe()->clss->name->bytes << ")"); cl->Initialize( loader ); if( m_capacity <= m_nextEntry ) ReallocateTable( m_capacity?(2*m_capacity):32 ); m_table[m_nextEntry++] = cl; + return cl; } @@ -1092,7 +1115,11 @@ return NULL; } - InsertClass(klass); + if (!InsertClass(klass)){ + assert (false); + FailedLoadingClass(classNameString); + return NULL; + } SuccessLoadingClass(classNameString); return klass; @@ -1101,7 +1128,7 @@ /** Adds Class* pointer to m_reportedClasses HashTable. * clss->name must not be NULL. 
*/ -Class* ClassLoader::AllocateAndReportInstance(const Global_Env* env, Class* clss) +Class* ClassLoader::AllocateAndReportInstance(const Global_Env* env, Class* clss, bool b_report_class_handle) { const String* name = clss->name; assert(name); @@ -1127,7 +1154,10 @@ } // add newly created java_lang_Class to reportable collection LMAutoUnlock aulock(&m_lock); - clss->class_handle = m_reportedClasses->Insert(name, new_java_lang_Class); + clss->class_handle = (ManagedObject**)Alloc(sizeof(ManagedObject**)); + *clss->class_handle = new_java_lang_Class; + if (b_report_class_handle) // skip for bs classes as it is done when they are already in m_loaded_classes + m_reportedClasses->Insert(name, clss); aulock.ForceUnlock(); TRACE("NewClass inserting class \"" << name->bytes << "\" with key " << name << " and object " << new_java_lang_Class); @@ -1879,6 +1909,68 @@ return clss; } // UserDefinedClassLoader::DoLoadClass +bool ClassLoader::InsertClass(Class* clss) { + if (!IsBootstrap()) // skip BS classes + { + Global_Env* env = VM_Global_State::loader_env; + jvalue args[3]; + ManagedObject* jstr; + + tmn_suspend_disable(); + if (env->compress_references) { + jstr = uncompress_compressed_reference(clss->name->intern.compressed_ref); + } else { + jstr = clss->name->intern.raw_ref; + } + ObjectHandle h = oh_allocate_local_handle(); + if (jstr != NULL) { + h->object = jstr; + } else { + h->object = vm_instantiate_cp_string_resolved((String*)clss->name); + } + args[1].l = h; + + if (exn_raised()) { + TRACE2("classloader", "OutOfMemoryError on class registering " << clss->name->bytes); + assert (false); + tmn_suspend_enable(); + return false; + } + + // this parameter + ObjectHandle hl = oh_allocate_local_handle(); + hl->object = m_loader; + args[0].l = hl; + + // jlc parameter + ObjectHandle chl = oh_allocate_local_handle(); + chl->object = *clss->class_handle; + args[2].l = chl; + + static String* acr_func_name = env->string_pool.lookup("addToClassRegistry"); + static String* acr_func_desc = env->string_pool.lookup("(Ljava/lang/String;Ljava/lang/Class;)V"); + + Method* method = class_lookup_method_recursive(m_loader->vt()->clss, acr_func_name, acr_func_desc); + assert(method); + + jvalue res; + vm_execute_java_method_array((jmethodID) method, &res, args); + + if(exn_raised()) { + assert (false); + tmn_suspend_enable(); + return false; + } + tmn_suspend_enable(); + } + + LMAutoUnlock aulock(&m_lock); + m_loadedClasses->Insert(clss->name, clss); + RemoveFromReported(clss->name); + return true; +} + + void BootstrapClassLoader::ReportAndExit(const char* exnclass, std::stringstream& exnmsg) { std::stringstream ss; @@ -2071,4 +2163,71 @@ ClassLoader::ReportException(exn_name, message_stream); } +/* + * CreateVTableClass fuction creates synthetic class vtable + * Arguments: + * clss_name - name of created class + * vtable_methods_number - number of methods inside vtable (methods array size) +*/ +Class* ClassLoader::CreateVTableClass(String* clss_name, int vtable_methods_number) +{ + Global_Env* env = VM_Global_State::loader_env; + + Class *clss = NewClass(env, clss_name); + clss->class_loader = this; + clss->access_flags = ACC_FINAL | ACC_SYNTHETIC; + clss->is_verified = 2; + /* FIELDS CONSTRUCTION*/ + // Current approach: + // fill VT class with fields according to struct VTable layout + // --- PLATFORM DEPENDENT --- + // jint fileds for _IA32_, jlong fileds for _IPF_ and _EM64T_ + // consider 32bit pointer as jint, and 64bit pointers as jlong + clss->n_fields = vtable_methods_number + 
VTABLE_FIELDS_NUMBER; + clss->fields = new Field[clss->n_fields]; + assert(clss->fields); + for (unsigned i = 0; i < clss->n_fields; i++) + { + // jlc ref field + if (i == JLC_REF_NUMBER) + { + static String* vt_jlc_name = env->string_pool.lookup("vt_jlc"); + static String* vt_jlc_desc = env->string_pool.lookup("Ljava/lang/Class;"); + clss->fields[i].set(clss, vt_jlc_name, vt_jlc_desc, ACC_FINAL | ACC_SYNTHETIC); + clss->fields[i].set_field_type_desc( + type_desc_create_from_java_descriptor("Ljava/lang/Class;", this)); + clss->fields[i].set_injected(); + continue; + } + + char vt_field_name[10] = {0}; + sprintf (vt_field_name, "vt_%d", i); + +#if defined _IPF_ || defined _EM64T_ + static String* LongDescriptor_String = env->string_pool.lookup("I"); + clss->fields[i].set(clss, env->string_pool.lookup(vt_field_name), + LongDescriptor_String, ACC_FINAL | ACC_SYNTHETIC); + clss->fields[i].set_field_type_desc( + type_desc_create_from_java_descriptor("J", NULL)); + clss->fields[i].set_injected(); +#else // IA32 + static String* IntDescriptor_String = env->string_pool.lookup("I"); + clss->fields[i].set(clss, env->string_pool.lookup(vt_field_name), + IntDescriptor_String, ACC_FINAL | ACC_SYNTHETIC); + clss->fields[i].set_field_type_desc( + type_desc_create_from_java_descriptor("I", NULL)); + clss->fields[i].set_injected(); +#endif + } + + clss->is_VT_class = true; + if (!InsertClass(clss)){ + assert (false); + FailedLoadingClass(clss_name); + return NULL; + } + + return clss; +} + Index: vm/vmcore/src/init/vm_init.cpp =================================================================== --- vm/vmcore/src/init/vm_init.cpp (revision 468306) +++ vm/vmcore/src/init/vm_init.cpp (working copy) @@ -119,9 +119,65 @@ // and set its "vm_class" field to point back to that structure. 
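An aside on the field layout CreateVTableClass builds above: each synthetic field is meant to span one native pointer, so the Java descriptor has to track pointer width ("I" on 32-bit targets, "J" on 64-bit ones). A minimal standalone sketch of that selection under the same platform guards; vtable_slot_descriptor is an illustrative helper, not a VM function:

    #include <cstdio>

    // Pick the field descriptor that matches the width of a native pointer:
    // "I" (jint) on IA32, "J" (jlong) on IPF/EM64T.
    static const char* vtable_slot_descriptor() {
    #if defined(_IPF_) || defined(_EM64T_)
        return "J";
    #else
        return "I";
    #endif
    }

    int main() {
        // Every vtable slot field of the synthetic class gets this descriptor.
        std::printf("vtable slot descriptor: %s\n", vtable_slot_descriptor());
        return 0;
    }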
void create_instance_for_class(Global_Env * vm_env, Class *clss) { - clss->class_loader->AllocateAndReportInstance(vm_env, clss); + clss->class_loader->AllocateAndReportInstance(vm_env, clss, false); + // set jlC to vtable - for non BS classes jlc is set in create_vtable + if (clss->vtable && clss != vm_env->VTableVTable_Class) // vtable = NULL for interfaces // skip vtable base - already set + { + assert (!clss->vtable->jlC); // used for BS classes only + clss->vtable->jlC = *clss->class_handle; + assert (!clss->class_loader->GetLoader()); + } } +/* + * Create VTable for VTables + * Function creates VTable object which vtable references to this object + * Required by automatic class unloading design +*/ +static void create_vtable_for_vtables(Global_Env *env) +{ + env->VTableVTable_Class = env->bootstrap_class_loader->CreateVTableClass(env->VTableBaseObject_String, 0); + if (!env->VTableVTable_Class){ + DIE("Could not create Vtable for Vtables class" << env->VTableBaseObject_String->bytes); + } +#ifdef _DEBUG + // integrity check: + // VTableforVTable has no methods => sizeof VTableforVTable->VTable = VTABLE_OVERHEAD + unsigned vt_size = get_vtable_size(env->VTableVTable_Class); + assert (vt_size == VTABLE_OVERHEAD); +#endif + + void* vt_buf = STD_ALLOCA(VTABLE_OVERHEAD); + assert(vt_buf); + memset(vt_buf, 0, VTABLE_OVERHEAD); + if(!class_prepare(env, env->VTableVTable_Class, vt_buf)) { + assert (false); + return; + } + + tmn_suspend_disable(); + ObjectHandle VTableVTableHandle = oh_allocate_local_handle(); + assert (VTableVTableHandle); + VTableVTableHandle->object = (ManagedObject*)gc_alloc(env->VTableVTable_Class->instance_data_size, + env->VTableVTable_Class->allocation_handle, vm_get_gc_thread_local()); + assert (VTableVTableHandle->object); + gc_pin_object((Managed_Object_Handle*)&VTableVTableHandle->object); + + // cycle VTable to itself + memcpy(VTableVTableHandle->object->get_data_ptr(), env->VTableVTable_Class->vtable, VTABLE_OVERHEAD); + env->VTableVTable_Class->vtable = (VTable*)VTableVTableHandle->object->get_data_ptr(); + env->VTableVTable_Class->allocation_handle = ManagedObject::vtable_to_allocation_handle(env->VTableVTable_Class->vtable); + VTableVTableHandle->object->set_vtable_unsafe(env->VTableVTable_Class->vtable); + + env->VTableVTable_Class->vtObj = VTableVTableHandle->object; + tmn_suspend_enable(); +#ifdef VM_STATS + unsigned vtables_allocated_size_total = VTABLE_OVERHEAD + ManagedObject::get_header_size(); + Class::num_vtable_allocations++; + Class::total_vtable_bytes += vtables_allocated_size_total; +#endif +} + /** * Loads DLLs. */ @@ -270,6 +326,7 @@ * j.l.reflect.AnnotatedElement, GenericDeclaration and Type as per Java 5 */ vm_env->StartVMBootstrap(); + create_vtable_for_vtables(vm_env); vm_env->JavaLangObject_Class = preload_class(vm_env, vm_env->JavaLangObject_String); vm_env->java_io_Serializable_Class = preload_class(vm_env, vm_env->Serializable_String); vm_env->JavaLangClass_Class = preload_class(vm_env, vm_env->JavaLangClass_String); @@ -344,7 +401,6 @@ vm_env->strings_are_compressed = (class_lookup_field_recursive(vm_env->JavaLangString_Class, "bvalue", "[B") != NULL); vm_env->JavaLangString_VTable = vm_env->JavaLangString_Class->vtable; - vm_env->JavaLangString_allocation_handle = vm_env->JavaLangString_Class->allocation_handle; TRACE2("init", "preloading exceptions"); vm_env->java_lang_Throwable_Class = @@ -638,7 +694,6 @@ /* END: Property processing. */ // Initialize memory allocation. 
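A toy model of the cycle create_vtable_for_vtables establishes above: the descriptor that describes all vtables must ultimately describe itself, so the prepared static copy is cloned into a managed-style allocation and re-pointed at itself. Descriptor is an illustrative type and malloc stands in for gc_alloc; this only sketches the idea, not the VM's object layout:

    #include <cassert>
    #include <cstdlib>
    #include <cstring>

    struct Descriptor {
        Descriptor* describes_me;   // every descriptor carries a descriptor pointer
    };

    int main() {
        Descriptor static_copy = { nullptr };

        // Clone the static copy into "heap" storage (gc_alloc in the real code).
        Descriptor* heap_copy = static_cast<Descriptor*>(std::malloc(sizeof(Descriptor)));
        std::memcpy(heap_copy, &static_copy, sizeof(Descriptor));

        // Close the cycle: the heap-resident copy describes itself.
        heap_copy->describes_me = heap_copy;
        assert(heap_copy->describes_me == heap_copy);

        std::free(heap_copy);
        return 0;
    }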
- vm_init_mem_alloc(); gc_init(); // TODO: find another way to initialize the following. Index: vm/vmcore/src/init/vm_shutdown.cpp =================================================================== --- vm/vmcore/src/init/vm_shutdown.cpp (revision 468306) +++ vm/vmcore/src/init/vm_shutdown.cpp (working copy) @@ -68,7 +68,6 @@ // Release global data. // TODO: move these data to VM space. vm_uninitialize_critical_sections(); - vm_mem_dealloc(); } /** Index: vm/vmcore/src/util/vm_stats.cpp =================================================================== --- vm/vmcore/src/util/vm_stats.cpp (revision 468306) +++ vm/vmcore/src/util/vm_stats.cpp (working copy) @@ -300,6 +300,10 @@ num_compileme_generated = 0; num_compileme_used = 0; + number_memoryblock_allocations = 0; + total_memory_allocated = 0; + total_memory_used = 0; + number_memorymanager_created = 0; // Enter the JIT RT support functions into a -> map. for (int i = 0; i < sizeof_jit_rt_function_entries; i++) { @@ -889,33 +893,11 @@ printf("\n"); printf("Use_large_pages = %s\n", (VM_Global_State::loader_env->use_large_pages? "yes" : "no")); - printf("%11d ::::system_page_size\n", system_page_size); - printf("%11d ::::page_size_for_allocation\n", page_size_for_allocation); - printf("%11lu ::::init_pool_size\n", (unsigned long)initial_code_pool_size); - printf("%11" FMT64 "u ::::total_code_pool_size\n", codemgr_total_code_pool_size); - printf("%11" FMT64 "u ::::total_code_allocated\n", codemgr_total_code_allocated); - printf("%11" FMT64 "u ::::total_data_pool_size\n", codemgr_total_data_pool_size); - printf("%11" FMT64 "u ::::total_data_allocated\n", codemgr_total_data_allocated); -#ifdef VM_STATS - { // print jit_code_pool stats - printf(" jit code pool\n"); - printf("%11" FMT64 "u :::: num_allocations\n", jit_code_pool->num_allocations); - printf("%11" FMT64 "u :::: total_size_allocated\n", uint64(jit_code_pool->total_size_allocated)); - printf("%11" FMT64 "u :::: num_pool_allocations\n", jit_code_pool->num_pool_allocations); - printf("%11" FMT64 "u :::: total_pool_size\n", uint64(jit_code_pool->total_pool_size)); - printf("%11" FMT64 "u :::: num_resizes\n", jit_code_pool->num_resizes); - printf("%11" FMT64 "u :::: current_alloc_size\n", uint64(jit_code_pool->current_alloc_size)); - } - { // print vtable_data_pool stats - printf(" vtable data pool\n"); - printf("%11" FMT64 "u :::: num_allocations\n", vtable_data_pool->num_allocations); - printf("%11" FMT64 "u :::: total_size_allocated\n", uint64(vtable_data_pool->total_size_allocated)); - printf("%11" FMT64 "u :::: num_pool_allocations\n", vtable_data_pool->num_pool_allocations); - printf("%11" FMT64 "u :::: total_pool_size\n", uint64(vtable_data_pool->total_pool_size)); - printf("%11" FMT64 "u :::: num_resizes\n", vtable_data_pool->num_resizes); - printf("%11" FMT64 "u :::: current_alloc_size\n", uint64(vtable_data_pool->current_alloc_size)); - } - + printf("%11" FMT64 "u ::::number_memoryblock_allocations\n", number_memoryblock_allocations); + printf("%11" FMT64 "u ::::total_memory_allocated\n", total_memory_allocated); + printf("%11" FMT64 "u ::::total_memory_used\n", total_memory_used); + printf("%11" FMT64 "u ::::number_memorymanager_created\n", number_memorymanager_created); +#ifdef VM_STATS fflush(stdout); print_rt_function_stats(); Index: vm/vmcore/src/util/natives_support.cpp =================================================================== --- vm/vmcore/src/util/natives_support.cpp (revision 468306) +++ vm/vmcore/src/util/natives_support.cpp (working copy) @@ -307,7 +307,7 
@@ { assert(pfound); - // FIXME find_call_JNI_OnUnload(pfound->handle); + find_call_JNI_OnUnload(pfound->handle); apr_dso_unload(pfound->handle); } // natives_unload_library Index: vm/vmcore/src/util/mem_alloc.cpp =================================================================== --- vm/vmcore/src/util/mem_alloc.cpp (revision 468306) +++ vm/vmcore/src/util/mem_alloc.cpp (working copy) @@ -6,6 +6,10 @@ * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * + + + + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software @@ -15,7 +19,7 @@ * limitations under the License. */ /** - * @author Intel, Alexei Fedotov + * @author Intel, Aleksey Ignatenko, Alexei Fedotov * @version $Revision: 1.1.2.1.4.3 $ */ @@ -24,7 +28,6 @@ #include "cxxlog.h" #include -#include #include "environment.h" #include "nogc.h" @@ -33,115 +36,119 @@ #include "vm_stats.h" #include "port_malloc.h" -static const unsigned default_data_pool_size = 512*1024; +//////////////////////////////////////////////////////////// +// allocation memory for code for stubs -static Byte *vtable_pool_start = NULL; -static size_t default_initial_code_pool_size = 1024*1024; +void *malloc_fixed_code_for_jit(size_t size, size_t alignment, unsigned heat, Code_Allocation_Action action) +{ + return VM_Global_State::loader_env->GlobalCodeMemoryManager->alloc(size, alignment, action); +} //malloc_fixed_code_for_jit -unsigned system_page_size = 0; -unsigned page_size_for_allocation = 0; -size_t initial_code_pool_size = 0; -Pool_Descriptor* jit_code_pool = NULL; -Pool_Descriptor* vtable_data_pool = NULL; +//////////////////////////////////////////////////////////////////////////// +//////////////////////MemoryManager //////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////// -static apr_pool_t* aux_pool; -static apr_thread_mutex_t* aux_mutex; -static apr_thread_mutex_t* vtable_mutex; -static apr_thread_mutex_t* jit_code_mutex; +size_t PoolManager::round_up_to_page_size_multiple(size_t size) +{ + return ((size + _page_size - 1) / _page_size) * _page_size; +} - -// this vector is used to store ptrs of allocated memory to free it on exit -static std::vector m_allocated_memory_ptrs; - - -static size_t round_up_to_page_size_multiple(size_t size, size_t page_size) +void PoolManager::_lock() { - return ((size + page_size - 1) / page_size) * page_size; -} //round_up_to_page_size_multiple + VERIFY(APR_SUCCESS == apr_thread_mutex_lock(aux_mutex), \ + "Cannot lock the pool's mutex"); +} + +void PoolManager::_unlock() +{ + VERIFY(APR_SUCCESS == apr_thread_mutex_unlock(aux_mutex), \ + "Cannot unlock the pool's mutex"); +} +PoolManager::PoolManager(size_t initial_size, size_t page_size, bool use_large_pages, bool is_code, bool is_resize_allowed) : +_page_size(page_size), _use_large_pages(use_large_pages), _default_pool_size(initial_size), _is_code(is_code), _is_resize_allowed(is_resize_allowed) + { + VERIFY(APR_SUCCESS == apr_pool_create(&aux_pool, 0), \ + "Cannot initialize a memory pool"); + VERIFY(APR_SUCCESS == apr_thread_mutex_create(&aux_mutex, APR_THREAD_MUTEX_NESTED, aux_pool), \ + "Cannot initialize pool reallocation mutex"); + + _active_pool = allocate_pool_storage(_default_pool_size); + _passive_pool = NULL; + vtable_pool_start = _active_pool->_begin; + +#ifdef VM_STATS + VM_Statistics::get_vm_stats().number_memorymanager_created++; +#endif +} - -static void 
allocate_pool_storage(Pool_Descriptor *p_pool, size_t size, size_t page_size) +PoolManager::~PoolManager() { - bool is_code = p_pool->is_code; - void *pool_storage = NULL; - - size = round_up_to_page_size_multiple(size, page_size); - unsigned int mem_protection = PORT_VMEM_MODE_READ | PORT_VMEM_MODE_WRITE; - if (is_code) { - mem_protection |= PORT_VMEM_MODE_EXECUTE; - } - size_t ps = (!is_code && VM_Global_State::loader_env->use_large_pages) ? - PORT_VMEM_PAGESIZE_LARGE : PORT_VMEM_PAGESIZE_DEFAULT; + PoolDescriptor* pDesc = NULL; - VERIFY(APR_SUCCESS == apr_thread_mutex_lock(aux_mutex), \ - "Cannot lock the pool reallocation mutex"); - apr_status_t status = port_vmem_reserve(&p_pool->descriptor, &pool_storage, - size, mem_protection, ps, aux_pool); - VERIFY(APR_SUCCESS == apr_thread_mutex_unlock(aux_mutex), \ - "Cannot unlock the pool reallocation mutex"); - if (APR_SUCCESS == status) { - status = port_vmem_commit(&pool_storage, size, p_pool->descriptor); + while (_passive_pool) + { + pDesc = _passive_pool; + port_vmem_release(pDesc->_descriptor); + _passive_pool = _passive_pool->_next; } + while (_active_pool) + { + pDesc = _active_pool; + port_vmem_release(pDesc->_descriptor); + _active_pool = _active_pool->_next; + } + + VERIFY(APR_SUCCESS == apr_thread_mutex_destroy(aux_mutex), \ + "Cannot destroy the mutex"); + apr_pool_destroy(aux_pool); +} + +PoolDescriptor* PoolManager::allocate_pool_storage(size_t size) +{ + PoolDescriptor* pDesc = (PoolDescriptor*) apr_palloc(aux_pool, sizeof(PoolDescriptor)); + memset(pDesc, 0, sizeof(PoolDescriptor)); + + void *pool_storage = NULL; + size = round_up_to_page_size_multiple(size); + pDesc->_size = size; + unsigned int mem_protection = PORT_VMEM_MODE_READ | PORT_VMEM_MODE_WRITE; + if (_is_code) { + mem_protection |= PORT_VMEM_MODE_EXECUTE; + } + size_t ps = (!_is_code && _use_large_pages) ? 
+ PORT_VMEM_PAGESIZE_LARGE : PORT_VMEM_PAGESIZE_DEFAULT; + + apr_status_t status = port_vmem_reserve(&pDesc->_descriptor, &pool_storage, + size, mem_protection, ps, aux_pool); + if (status != APR_SUCCESS) { + DIE("Cannot allocate pool storage: " << (void *)size + << " bytes of virtual memory for code or data.\n" + "Error code = " << status); + } + + status = port_vmem_commit(&pool_storage, size, pDesc->_descriptor); if (status != APR_SUCCESS || pool_storage == NULL) { DIE("Cannot allocate pool storage: " << (void *)size << " bytes of virtual memory for code or data.\n" "Error code = " << status); - } - + } + #ifdef VM_STATS - p_pool->num_pool_allocations++; - p_pool->total_pool_size += size; - if (is_code) { - VM_Statistics::get_vm_stats().codemgr_total_code_pool_size += size; - } else { - VM_Statistics::get_vm_stats().codemgr_total_data_pool_size += size; - } -#endif //VM_STATS + VM_Statistics::get_vm_stats().number_memoryblock_allocations++; + VM_Statistics::get_vm_stats().total_memory_allocated += size; +#endif + + pDesc->_begin = (Byte*)pool_storage; + pDesc->_end = ((Byte*)(pool_storage) + size); + + return pDesc; +} - p_pool->start = (Byte*)pool_storage; - p_pool->end = ((Byte*)(pool_storage) + size); - m_allocated_memory_ptrs.push_back(p_pool->descriptor); -} //allocate_pool_storage - - - -static void init_pool(Pool_Descriptor *p_pool, size_t page_size, size_t init_size, bool is_code) +void* PoolManager::alloc(size_t size, size_t alignment, Code_Allocation_Action action) { - p_pool->default_size = (size_t)(round_up_to_page_size_multiple(init_size, page_size) + 0.5); - p_pool->is_code = is_code; -#ifdef VM_STATS - p_pool->num_allocations = 0; - p_pool->num_pool_allocations = 0; - p_pool->total_pool_size = 0; - p_pool->total_size_allocated = 0; - p_pool->num_resizes = 0; - p_pool->current_alloc_size = p_pool->default_size; -#endif //VM_STATS -} //init_pool - - - -static void init_pools(size_t page_size) -{ - jit_code_pool = new Pool_Descriptor; - init_pool(jit_code_pool, page_size, initial_code_pool_size, /*is_code*/ true); - allocate_pool_storage(jit_code_pool, jit_code_pool->default_size, page_size); - - vtable_data_pool = new Pool_Descriptor; - // 20040511: The vtable pool must be bigger for jAppServer for compresses vtable pointers (can't be resized) - unsigned size = (vm_vtable_pointers_are_compressed() ? 8*1024*1024 : default_data_pool_size); - init_pool(vtable_data_pool, page_size, size, /*is_code*/ false); - allocate_pool_storage(vtable_data_pool, vtable_data_pool->default_size, page_size); -} //init_pools - - - -static void *allocate_from_pool(Pool_Descriptor *p_pool, size_t size, size_t alignment, size_t page_size, - bool is_resize_allowed, Code_Allocation_Action action) -{ // Make sure alignment is a power of 2. assert((alignment & (alignment-1)) == 0); size_t mask = alignment - 1; @@ -149,165 +156,84 @@ // align the requested size size = (size + mask) & ~mask; - Byte *pool_start = p_pool->start; // (misnamed) this actually points to the next free byte in the pool - pool_start = (Byte *) ((POINTER_SIZE_INT)(pool_start + mask) & ~(POINTER_SIZE_INT)mask); - Byte *pool_end = p_pool->end; + // CAA_Simulate functionality support + if (action == CAA_Simulate) + size = 0; - size_t mem_left_in_pool = (pool_end - pool_start); - if (size > mem_left_in_pool) { - if (action == CAA_Simulate) { - // Return NULL if we're simulating the allocation and it would have caused a pool resize. 
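PoolManager::allocate_pool_storage and PoolManager::alloc above rely on two small pieces of arithmetic: rounding a request up to a whole number of pages and aligning it to a power-of-two boundary. A small self-checking sketch of those computations, written as standalone helpers under assumed sizes rather than the VM's code:

    #include <cassert>
    #include <cstddef>

    // Round size up to a multiple of page_size.
    static size_t round_up_to_page_multiple(size_t size, size_t page_size) {
        return ((size + page_size - 1) / page_size) * page_size;
    }

    // Round size up to a power-of-two alignment using a bit mask.
    static size_t align_up(size_t size, size_t alignment) {
        assert((alignment & (alignment - 1)) == 0);   // must be a power of two
        size_t mask = alignment - 1;
        return (size + mask) & ~mask;
    }

    int main() {
        assert(round_up_to_page_multiple(5000, 4096) == 8192);
        assert(round_up_to_page_multiple(4096, 4096) == 4096);
        assert(align_up(100, 16) == 112);
        assert(align_up(96, 16)  == 96);
        return 0;
    }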
- return NULL; + _lock(); + + assert(_active_pool); + Byte *pool_start = _active_pool->_begin; + pool_start = (Byte *) ((POINTER_SIZE_INT)(pool_start + mask) & ~(POINTER_SIZE_INT)mask); + Byte *pool_end = _active_pool->_end; + + size_t mem_left_in_pool = (pool_end - pool_start); + while (size > mem_left_in_pool) { + if (!_is_resize_allowed) { + DIE("Error: VTable pool overflow, resize is not allowed. Please, extand VTable pool size.\n"); + // TODO: add functionality to commit additional part of memory if reserved enough: + // need for (is_resize_allowed = false) case - commit every time by little pieces + } + + // memory utilization logic + // check that required size less than MEMORY_UTILIZATION_LIMIT % of active memory block size - all active memory + // blocks have size more than MEMORY_UTILIZATION_LIMIT % of active memory block size + PoolDescriptor* pDesc = _active_pool->_next; + if (pDesc) + { + if ((size + mask)*MEMORY_UTILIZATION_LIMIT < (POINTER_SIZE_INT)(pDesc->_size)) + { + _active_pool->_next = _passive_pool; + _passive_pool = _active_pool; + _active_pool = pDesc; + + pool_start = _active_pool->_begin; + pool_start = (Byte *) ((POINTER_SIZE_INT)(pool_start + mask) & ~(POINTER_SIZE_INT)mask); + + break; + } } - if (!is_resize_allowed && pool_start != NULL) { - DIE("Error: Resizing of the memory pool is not allowed.\n"); - } - assert(p_pool->default_size); - size_t new_pool_size = ((size > p_pool->default_size)? size : p_pool->default_size); + + assert(_default_pool_size); + size_t new_pool_size = ((size > _default_pool_size)? size : _default_pool_size); new_pool_size += mask; -#ifdef VM_STATS - p_pool->num_resizes++; - p_pool->current_alloc_size = new_pool_size; -#endif //VM_STATS - allocate_pool_storage(p_pool, new_pool_size, page_size); - pool_start = p_pool->start; + PoolDescriptor* p_pool = allocate_pool_storage(new_pool_size); + assert (p_pool); + + // memory utilization logic + // left size of pool more than MEMORY_UTILIZATION_LIMIT % of the pool's size + if ((mem_left_in_pool * MEMORY_UTILIZATION_LIMIT) > _active_pool->_size) //put pool in _active_pool list + { + p_pool->_next = _active_pool; + _active_pool = p_pool; + } + else // put in _passive_pool list + { + p_pool->_next = _active_pool->_next; + _active_pool->_next = _passive_pool; + _passive_pool = _active_pool; + _active_pool = p_pool; + } + + pool_start = p_pool->_begin; pool_start = (Byte *) ((POINTER_SIZE_INT)(pool_start + mask) & ~(POINTER_SIZE_INT)mask); - pool_end = p_pool->end; - } + break; + } void *p = pool_start; - if (action != CAA_Simulate) { - // Don't update the pool start pointer if we're only simulating allocation. - p_pool->start = pool_start + size; - } -#ifdef VM_STATS - p_pool->num_allocations++; - p_pool->total_size_allocated += size; - if (p_pool->is_code) { - VM_Statistics::get_vm_stats().codemgr_total_code_allocated += size; - } else { - VM_Statistics::get_vm_stats().codemgr_total_data_allocated += size; - } -#endif //VM_STATS + _active_pool->_begin += size; + + _unlock(); + + #ifdef VM_STATS + VM_Statistics::get_vm_stats().total_memory_used += size; +#endif + return p; -} //allocate_from_pool - - -////////////////////////////////////////////////////////////////////////////////////////////// -// Beginning of publicly exported functions. 
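The active/passive split in PoolManager::alloc above hinges on one comparison: a pool that could not satisfy the current request stays on the active list only if the space left in it is still a useful fraction of its total size. A minimal sketch of that test; the threshold value below is an assumption for illustration, since MEMORY_UTILIZATION_LIMIT is defined elsewhere in the VM:

    #include <cassert>
    #include <cstddef>

    static const size_t kUtilizationLimit = 15;   // assumed value, for illustration only

    // Keep a pool on the active list only if its free space exceeds
    // 1/kUtilizationLimit of its total size; otherwise retire it to the passive list.
    static bool keep_pool_active(size_t bytes_left, size_t pool_size) {
        return bytes_left * kUtilizationLimit > pool_size;
    }

    int main() {
        // A 1 MB pool with 256 KB left can still serve smaller requests...
        assert(keep_pool_active(256 * 1024, 1024 * 1024));
        // ...but one with only 4 KB left is effectively full and goes passive.
        assert(!keep_pool_active(4 * 1024, 1024 * 1024));
        return 0;
    }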
-////////////////////////////////////////////////////////////////////////////////////////////// - -//////////////////////////////////////////////////////////// -// begin allocating memory for code - -void *malloc_fixed_code_for_jit(size_t size, size_t alignment, unsigned heat, Code_Allocation_Action action) -{ - vm_init_mem_alloc(); - assert (jit_code_pool); - VERIFY(APR_SUCCESS == apr_thread_mutex_lock(jit_code_mutex), \ - "Cannot lock the jit mutex"); - void *p = allocate_from_pool(jit_code_pool, size, alignment, page_size_for_allocation, /*is_resize_allowed*/ true, action); - VERIFY(APR_SUCCESS == apr_thread_mutex_unlock(jit_code_mutex), \ - "Cannot unlock the jit mutex"); - return p; -} //malloc_fixed_code_for_jit - - -// end allocating memory for code -//////////////////////////////////////////////////////////// - - - -//////////////////////////////////////////////////////////////////////////////////////////////// -// -// begin memory allocation for class-related data structures such as class statics and vtables. - -void *allocate_vtable_data_from_pool(size_t size) -{ - bool is_resize_allowed = true; - if (vm_vtable_pointers_are_compressed()) { - is_resize_allowed = false; - } - assert (vtable_data_pool); - VERIFY(APR_SUCCESS == apr_thread_mutex_lock(vtable_mutex), \ - "Cannot lock the vtable mutex"); - void *p = allocate_from_pool(vtable_data_pool, size, 16, page_size_for_allocation, is_resize_allowed, CAA_Allocate); - VERIFY(APR_SUCCESS == apr_thread_mutex_unlock(vtable_mutex), \ - "Cannot unlock the vtable mutex"); - return p; -} //allocate_class_data_from_area - -// end allocating memory for data -// -//////////////////////////////////////////////////////////////////////////////////////////////// - - -void vm_init_mem_alloc() -{ - static int initialized = false; - if (initialized) { - return; - } - initialized = true; - - VERIFY(APR_SUCCESS == apr_pool_create(&aux_pool, 0), \ - "Cannot initialize a memory pool"); - VERIFY(APR_SUCCESS == apr_thread_mutex_create(&aux_mutex, APR_THREAD_MUTEX_NESTED, aux_pool), \ - "Cannot initialize pool reallocation mutex"); - VERIFY(APR_SUCCESS == apr_thread_mutex_create(&jit_code_mutex, APR_THREAD_MUTEX_NESTED, aux_pool), \ - "Cannot initialize jit table mutex"); - VERIFY(APR_SUCCESS == apr_thread_mutex_create(&vtable_mutex, APR_THREAD_MUTEX_NESTED, aux_pool), \ - "Cannot initialize vtable mutex"); - - size_t *ps = port_vmem_page_sizes(); - if (ps[1] != 0 && VM_Global_State::loader_env->use_large_pages) { - page_size_for_allocation = ps[1]; - } - else { - page_size_for_allocation = ps[0]; - } - - default_initial_code_pool_size = round_up_to_page_size_multiple(default_initial_code_pool_size, page_size_for_allocation); - initial_code_pool_size = default_initial_code_pool_size; - assert(initial_code_pool_size); - -#ifdef VM_STATS - VM_Statistics::get_vm_stats().codemgr_total_code_pool_size = 0; - VM_Statistics::get_vm_stats().codemgr_total_code_allocated = 0; - VM_Statistics::get_vm_stats().codemgr_total_data_pool_size = 0; - VM_Statistics::get_vm_stats().codemgr_total_data_allocated = 0; -#endif //VM_STATS - - init_pools(page_size_for_allocation); - vtable_pool_start = vtable_data_pool->start; -} //vm_init_mem_alloc - - -void vm_mem_dealloc() -{ - delete vtable_data_pool; - vtable_data_pool = NULL; - delete jit_code_pool; - jit_code_pool = NULL; - std::vector::iterator it; - for (it = m_allocated_memory_ptrs.begin(); it != m_allocated_memory_ptrs.end(); it++) - { - port_vmem_release(*it); - } - VERIFY(APR_SUCCESS == 
apr_thread_mutex_destroy(aux_mutex), \ - "Cannot destroy the mutex"); - VERIFY(APR_SUCCESS == apr_thread_mutex_destroy(jit_code_mutex), \ - "Cannot destroy the mutex"); - VERIFY(APR_SUCCESS == apr_thread_mutex_destroy(vtable_mutex), \ - "Cannot destroy the mutex"); - apr_pool_destroy(aux_pool); -} - - + } + POINTER_SIZE_INT vm_get_vtable_base() { - Byte *base = vtable_pool_start; - assert (base); - // Subtract a small number (like 1) from the real base so that - // no valid vtable offsets will ever be 0. - return (POINTER_SIZE_INT) (base - 8); + // TEMPORARY for compilation on IA32 + //assert (false); + return (POINTER_SIZE_INT) (- 1); } //vm_get_vtable_base Index: vm/vmcore/src/util/ia32/base/compile_IA32.cpp =================================================================== --- vm/vmcore/src/util/ia32/base/compile_IA32.cpp (revision 468306) +++ vm/vmcore/src/util/ia32/base/compile_IA32.cpp (working copy) @@ -222,8 +222,7 @@ #ifdef VM_STATS ++VM_Statistics::get_vm_stats().num_compileme_generated; #endif - char * stub = (char *) malloc_fixed_code_for_jit(STUB_SIZE, - DEFAULT_CODE_ALIGNMENT, CODE_BLOCK_HEAT_DEFAULT, CAA_Allocate); + char * stub = (char *) code_alloc(method_get_class(method), STUB_SIZE, DEFAULT_CODE_ALIGNMENT, CAA_Allocate); NativeCodePtr addr = stub; #ifndef NDEBUG memset(stub, 0xcc /*int 3*/, STUB_SIZE); Index: vm/gcv4/src/gc_for_vm.cpp =================================================================== --- vm/gcv4/src/gc_for_vm.cpp (revision 468306) +++ vm/gcv4/src/gc_for_vm.cpp (working copy) @@ -497,17 +497,6 @@ heapTraceFinalize(); } - if (p_loaded_vtable_directory) { - p_loaded_vtable_directory->rewind(); - Partial_Reveal_VTable *vt_handle; - vt_handle = (Partial_Reveal_VTable *)p_loaded_vtable_directory->next(); - while(vt_handle != NULL) - { - STD_FREE(vt_handle->get_gcvt()); - vt_handle = (Partial_Reveal_VTable *)p_loaded_vtable_directory->next(); - } - delete p_loaded_vtable_directory; - } if (interior_pointer_table) delete interior_pointer_table; if (compressed_pointer_table) @@ -1381,7 +1370,7 @@ unsigned int size = (num_ref_fields+1) * sizeof (unsigned int); // malloc up the array if we need one. - int *new_ref_array = (int*) p_global_gc->get_gcvt_pool().alloc(size); + int *new_ref_array = (int*) class_alloc_via_classloader(ch, size); result = new_ref_array; for(idx = 0; idx < num_fields; idx++) { @@ -1497,7 +1486,9 @@ assert(ch); assert(vth); Partial_Reveal_VTable *vt = (Partial_Reveal_VTable *)vth; - vt->set_gcvt((GC_VTable_Info *) STD_MALLOC(sizeof(GC_VTable_Info))); + void* p = class_alloc_via_classloader(ch, sizeof(GC_VTable_Info)); + vt->set_gcvt((GC_VTable_Info *) p); + assert(vt->get_gcvt()); memset((void *) vt->get_gcvt(), 0, sizeof(GC_VTable_Info)); vt->get_gcvt()->gc_clss = ch; vt->get_gcvt()->gc_class_properties = 0; // Clear the properties. @@ -1505,13 +1496,6 @@ // Set the properties. gc_set_prop_alignment_mask(vt, class_get_alignment(ch)); - // Remember the VTable (vt) in a hash table so that delta_dynopt can check if it has a legal - // vtable. (see object_address_seems_valid for an example of its use.) - if (!p_loaded_vtable_directory) { - p_loaded_vtable_directory = new Hash_Table(); - } - p_loaded_vtable_directory->add_entry(vt); - if(class_is_array(ch)) { Class_Handle array_element_class = class_get_array_element_class(ch); // We have an array so not it. 
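The gc_for_vm.cpp hunks above drop STD_MALLOC and the shared gcvt_pool in favor of class_alloc_via_classloader, so per-class GC metadata shares the owning class loader's lifetime and is reclaimed with it. A toy arena illustrating that intent; LoaderArena and GCClassInfo are illustrative stand-ins, not VM types:

    #include <cstddef>
    #include <cstdlib>
    #include <vector>

    // Everything allocated through the arena is released in one shot when the
    // arena (standing in for a class loader's pool) is destroyed.
    class LoaderArena {
    public:
        ~LoaderArena() {
            for (void* p : blocks_) std::free(p);
        }
        void* alloc(size_t size) {
            void* p = std::malloc(size);
            blocks_.push_back(p);
            return p;             // callers never free; lifetime == arena lifetime
        }
    private:
        std::vector<void*> blocks_;
    };

    struct GCClassInfo { int ref_offsets[8]; };   // stands in for GC_VTable_Info

    int main() {
        {
            LoaderArena loader;   // one arena per class loader
            // GC-side metadata is carved out of the loader's arena instead of the
            // process heap, so unloading the loader reclaims it implicitly.
            GCClassInfo* info =
                static_cast<GCClassInfo*>(loader.alloc(sizeof(GCClassInfo)));
            info->ref_offsets[0] = 0;
        }   // arena destroyed here: all per-class GC metadata goes with it
        return 0;
    }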
@@ -1600,8 +1584,7 @@ int gc_number_of_slots = vt->get_gcvt()->gc_number_of_slots; int* gc_ref_offset_array = vt->get_gcvt()->gc_ref_offset_array; - int* gc_strong_ref_offset_array = (int*) p_global_gc-> - get_gcvt_pool().alloc(gc_number_of_slots * sizeof (unsigned int)); + int* gc_strong_ref_offset_array = (int*) class_alloc_via_classloader(ch, gc_number_of_slots * sizeof (unsigned int)); assert(gc_strong_ref_offset_array); int i,j; Index: vm/gcv4/src/garbage_collector.h =================================================================== --- vm/gcv4/src/garbage_collector.h (revision 468306) +++ vm/gcv4/src/garbage_collector.h (working copy) @@ -57,10 +57,6 @@ int gc_add_fresh_chunks(unsigned int); - tl::MemoryPoolMT& get_gcvt_pool() { - return gcvt_pool; - } - /// Considers statistics and makes a decision to resize the heap. void consider_heap_resize(int size_failed); @@ -265,8 +261,6 @@ ///////////////////////////////////////////////////////////////////////////////////////////////////// private: - tl::MemoryPoolMT gcvt_pool; - block_info * get_fresh_chunk_from_block_store(bool stay_above_waterline); block_info *get_free_chunk_from_global_gc_chunks(); Index: vm/gcv4/src/gc_header.h =================================================================== --- vm/gcv4/src/gc_header.h (revision 468306) +++ vm/gcv4/src/gc_header.h (working copy) @@ -33,12 +33,6 @@ #include "open/hythread_ext.h" #include "hash_table.h" -// -// This Hash_Table has an entry per loaded class. -// It is used for determining valid vtable pointers -// when examining candidate objects. -// -extern Hash_Table *p_loaded_vtable_directory; // Define USE_COMPRESSED_VTABLE_POINTERS here to enable compressed vtable // pointers within objects. @@ -70,8 +64,8 @@ typedef struct Partial_Reveal_Object { #ifdef USE_COMPRESSED_VTABLE_POINTERS - uint32 vt_offset; private: + uint32 vt_offset; Obj_Info_Type obj_info; public: @@ -86,12 +80,11 @@ return (struct Partial_Reveal_VTable *) (vt_offset + vtable_base); } struct Partial_Reveal_VTable *vt() { assert(vt_offset); return (struct Partial_Reveal_VTable *) (vt_offset + vtable_base); } - bool vt_valid() {return p_loaded_vtable_directory->is_present((void *) (vt_offset + vtable_base));} + struct Partial_Reveal_Object *vtobject() { return (Partial_Reveal_Object *)(vt_offset + heap_base); } void set_vtable(Allocation_Handle ah) { // vtables are allocated from a fixed-size pool in the VM // see the details in mem_alloc.cpp, grep for vtable_data_pool. 
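The compressed branch above resolves the single stored vt_offset against two different bases: vtable_base to reach the raw vtable data, heap_base (via the new vtobject()) to reach the managed object that carries it. The sketch below only illustrates that pointer arithmetic with assumed base arrays and an illustrative header type; it says nothing about the real base values:

    #include <cassert>
    #include <cstdint>

    static unsigned char vtable_area[64];   // stands in for the vtable pool / vtable_base
    static unsigned char heap_area[64];     // stands in for the managed heap / heap_base

    struct CompressedHeader {
        uint32_t vt_offset;   // one 32-bit offset, two interpretations
        unsigned char* vt()       const { return vtable_area + vt_offset; }
        unsigned char* vtobject() const { return heap_area  + vt_offset; }
    };

    int main() {
        CompressedHeader h{16};
        assert(h.vt()       == vtable_area + 16);   // raw vtable data
        assert(h.vtobject() == heap_area  + 16);    // object owning that vtable
        return 0;
    }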
- assert(ah < 8*1048576); vt_offset = (uint32)ah; } @@ -139,16 +132,18 @@ } static uint64 max_supported_heap_size() { return (0x100000000) << forwarding_pointer_compression_shift(); } #else // !USE_COMPRESSED_VTABLE_POINTERS +private: struct Partial_Reveal_VTable *vt_raw; Obj_Info_Type obj_info; +public: Obj_Info_Type get_obj_info() { return obj_info; } void set_obj_info(Obj_Info_Type new_obj_info) { obj_info = new_obj_info; } Obj_Info_Type * obj_info_addr() { return &obj_info; } struct Partial_Reveal_VTable *vtraw() { return vt_raw; } struct Partial_Reveal_VTable *vt() { ASSERT(vt_raw, "incorrect object at " << this); return vt_raw; } - bool vt_valid() {return p_loaded_vtable_directory->is_present((void *)vt_raw);} + struct Partial_Reveal_Object *vtobject() { return (Partial_Reveal_Object *)((POINTER_SIZE_INT)vt_raw - get_size()); } void set_vtable(Allocation_Handle ah) { vt_raw = (struct Partial_Reveal_VTable *)ah; } struct Partial_Reveal_Object *get_forwarding_pointer() { @@ -180,8 +175,9 @@ } static uint64 max_supported_heap_size() { return ~((uint64)0); } #endif // !USE_COMPRESSED_VTABLE_POINTERS - + static POINTER_SIZE_INT vtable_base; + static unsigned get_size(){ return sizeof(Partial_Reveal_Object);} static POINTER_SIZE_INT heap_base; public: Index: vm/gcv4/src/mark_scan.cpp =================================================================== --- vm/gcv4/src/mark_scan.cpp (revision 468306) +++ vm/gcv4/src/mark_scan.cpp (working copy) @@ -98,6 +98,12 @@ // Object had better be marked. assert(is_object_marked(p_obj) == true); + // Scan VTable object + Slot slot(NULL); + Partial_Reveal_Object* p_vtobj = p_obj->vtobject(); + slot.set((void*)&p_vtobj, false); + scan_slot(slot, gc_thread); + if (!p_obj->has_slots()) { gc_trace(p_obj, " scan_object(): object doesn't contain slots"); return; @@ -110,7 +116,6 @@ } int *offset_scanner = init_strong_object_scanner(p_obj); - Slot slot(NULL); while (true) { void *ref = p_get_ref(offset_scanner, p_obj); Index: vm/gcv4/src/mark.h =================================================================== --- vm/gcv4/src/mark.h (revision 468306) +++ vm/gcv4/src/mark.h (working copy) @@ -121,7 +121,6 @@ uint8 mask = (uint8) (1 << bit_index_into_byte); - vm_notify_live_object_class(p_obj->vt()->get_gcvt()->gc_clss); while (true) { uint8 old_val = *p_byte; uint8 final_val = (uint8) (old_val | mask); Index: vm/gcv4/src/gc_globals.cpp =================================================================== --- vm/gcv4/src/gc_globals.cpp (revision 468306) +++ vm/gcv4/src/gc_globals.cpp (working copy) @@ -55,16 +55,6 @@ POINTER_SIZE_INT final_heap_size_bytes = 0; // -// This remembered set has an entry per loaded class. -// It is used for determining valid vtable pointers -// when examining candidate objects. -// - -Hash_Table *p_loaded_vtable_directory = NULL; - - - -// // Global to specify the size differentiating // unsigned los_threshold_bytes = 0; Index: vm/interpreter/src/interpreter.cpp =================================================================== --- vm/interpreter/src/interpreter.cpp (revision 468306) +++ vm/interpreter/src/interpreter.cpp (working copy) @@ -3265,7 +3265,8 @@ // Setup locals and stack on C stack. 
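The mark_scan.cpp change above adds one extra slot per scanned object: the object backing its vtable. That is the hook that keeps a class (and, through it, its loader) reachable for as long as any instance is live. A minimal model of the effect, with illustrative types rather than the GC's:

    #include <cassert>
    #include <unordered_set>
    #include <vector>

    struct Obj {
        Obj* vtable_object;          // object that carries this object's vtable
        std::vector<Obj*> fields;    // ordinary reference fields
    };

    static void mark(Obj* o, std::unordered_set<Obj*>& marked) {
        if (!o || !marked.insert(o).second) return;
        mark(o->vtable_object, marked);   // the extra "slot" introduced by the patch
        for (Obj* f : o->fields) mark(f, marked);
    }

    int main() {
        Obj vt_for_vt{nullptr, {}};
        vt_for_vt.vtable_object = &vt_for_vt;   // self-describing, as bootstrapped above
        Obj class_vt{&vt_for_vt, {}};           // vtable object of some class
        Obj instance{&class_vt, {}};            // an instance of that class

        std::unordered_set<Obj*> marked;
        mark(&instance, marked);
        // Marking the instance also marks its class's vtable object and the vtable-for-vtables.
        assert(marked.count(&class_vt) && marked.count(&vt_for_vt));
        return 0;
    }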
SETUP_LOCALS_AND_STACK(frame, method); - + + frame.This = *(method->get_class()->class_handle); int args = method->get_num_arg_bytes() >> 2; for(int i = args-1; i >= 0; --i) { Index: vm/jitrino/src/vm/drl/DrlVMInterface.h =================================================================== --- vm/jitrino/src/vm/drl/DrlVMInterface.h (revision 468306) +++ vm/jitrino/src/vm/drl/DrlVMInterface.h (working copy) @@ -100,7 +100,7 @@ return class_is_initialized((Class_Handle)vmTypeHandle)?true:false; } void* getVTable(void* vmTypeHandle) { - return (void *) class_get_runtime_type_handle((Class_Handle)vmTypeHandle); + return (void *) class_get_vtable((Class_Handle)vmTypeHandle); } // @@ -424,7 +424,7 @@ return (vm_vtable_pointers_are_compressed() != 0); } uint32 getVTablePtrSize() { - return vm_get_runtime_type_handle_width(); + return vm_get_vtable_ptr_size(); } uint64 getVTableBase() { return vm_get_vtable_base();