Index: vm/thread/src/thread_native_thin_monitor.c
===================================================================
--- vm/thread/src/thread_native_thin_monitor.c (revision 553904)
+++ vm/thread/src/thread_native_thin_monitor.c (working copy)
@@ -48,7 +48,7 @@
 // lockword operations
 #define THREAD_ID(lockword) (lockword >> 16)
 #define IS_FAT_LOCK(lockword) (lockword >> 31)
-#define FAT_LOCK_ID(lockword) ((lockword << 1) >> 12)
+#define FAT_LOCK_ID(lockword) ((lockword >> FAT_LOCK_ID_OFFSET) & FAT_LOCK_ID_MASK)
 // lock reservation support
 #define RESERVED_BITMASK ((1<<10))
 #define IS_RESERVED(lockword) (0==(lockword & RESERVED_BITMASK))
@@ -58,7 +58,9 @@
 #define RECURSION_DEC(lockword_ptr, lockword) (*lockword_ptr=lockword - (1<<11))
 #define MAX_RECURSION 31
 
+#define FAT_TABLE(_x_) lock_table->tables[((U_32)(_x_))/FAT_TABLE_ENTRIES][((U_32)(_x_))%FAT_TABLE_ENTRIES]
+
 /*
  * Lock table which holds the omapping between LockID and fat lock (OS fat_monitor) pointer.
  */
 
@@ -770,7 +772,7 @@
     assert(lock_id >=0 && (U_32)lock_id < lock_table->size);
     // we don't need to protect this read, because monitor can't vanish or
     // be moved in lock table while we are doing get_fat_monitor
-    fat_monitor = lock_table->table[lock_id];
+    fat_monitor = FAT_TABLE(lock_id);
     return fat_monitor;
 }
 
@@ -780,50 +782,61 @@
 IDATA locktable_put_fat_monitor(hythread_monitor_t fat_monitor) {
     U_32 i = 0;
-    int mon_index;
+    U_32 mon_index;
     short free_slot_found = 0;
 
     if (lock_table == 0) {
         DIE (("Lock table not initialized!"));
     }
 
+    locktable_writer_enter();
+    mon_index = lock_table->array_cursor;
 
-    locktable_writer_enter();
     for(i =0; i < lock_table->size; i++) {
-        if (lock_table->table[lock_table->array_cursor] == 0) {
-            assert(lock_table->live_objs[lock_table->array_cursor] == 0);
-            lock_table->table[lock_table->array_cursor] = fat_monitor;
+        hythread_monitor_t* table;
+
+        if (mon_index == lock_table->size)
+            mon_index = 0;
+
+        table = lock_table->tables[mon_index / FAT_TABLE_ENTRIES];
+
+        if (table[mon_index % FAT_TABLE_ENTRIES] == 0) {
+            assert(lock_table->live_objs[mon_index] == 0);
+            table[mon_index % FAT_TABLE_ENTRIES] = fat_monitor;
             free_slot_found = 1;
             break;
         }
-        lock_table->array_cursor++;
-        if (lock_table->array_cursor == lock_table->size)
-            lock_table->array_cursor = 0;
+        ++mon_index;
     }
 
     if(!free_slot_found) {
-        int old_size;
+        U_32 old_size;
+        hythread_monitor_t* table;
+
+        if (lock_table->size >= MAX_FAT_LOCKS) {
+            DIE (("Fat monitor table overflow!"));
+        }
 
         old_size = lock_table->size;
-        lock_table->size += INITIAL_FAT_TABLE_ENTRIES;
-        lock_table->table = realloc(lock_table->table,
-            lock_table->size * sizeof(hythread_monitor_t));
-        assert(lock_table->table);
+        lock_table->size += FAT_TABLE_ENTRIES;
+        table = (hythread_monitor_t *)calloc(FAT_TABLE_ENTRIES,
+            sizeof(hythread_monitor_t));
+        assert(table);
+
+        lock_table->tables[old_size / FAT_TABLE_ENTRIES] = table;
 
         lock_table->live_objs = realloc(lock_table->live_objs,
-            lock_table->size * sizeof(unsigned char));
+                lock_table->size * sizeof(unsigned char));
        assert(lock_table->live_objs);
 
-        memset(lock_table->table + old_size, 0,
-            INITIAL_FAT_TABLE_ENTRIES * sizeof(hythread_monitor_t));
         memset(lock_table->live_objs + old_size, 0,
-            INITIAL_FAT_TABLE_ENTRIES * sizeof(unsigned char));
+            FAT_TABLE_ENTRIES * sizeof(unsigned char));
 
-        lock_table->array_cursor = old_size;
-        lock_table->table[lock_table->array_cursor] = fat_monitor;
+        table[0] = fat_monitor;
+        mon_index = old_size;
+    }
 
-    }
-    mon_index = lock_table->array_cursor;
+    lock_table->array_cursor = mon_index + 1;
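A minimal standalone sketch of the two-level indexing the FAT_TABLE(_x_) macro introduces may help readers of this file: the monitor ID's high part selects a fixed-size sub-table, its low part a slot inside it. Because a full sub-table is never realloc'd or moved, a published slot keeps its address, which is what keeps the lock-free read in locktable_get_fat_monitor safe. The names below (fat_table_sketch, fat_slot, fat_grow) and the void* payload are illustrative stand-ins, not Harmony code.

#include <assert.h>
#include <stdlib.h>

#define FAT_TABLE_ENTRIES (16 * 1024)
#define MAX_FAT_TABLES    64   /* illustration; the patch derives this from the 20-bit lock ID */

typedef struct {
    void **tables[MAX_FAT_TABLES];  /* lazily allocated, fixed-size sub-tables */
    size_t size;                    /* total slots across allocated sub-tables */
} fat_table_sketch;

/* Counterpart of the patch's FAT_TABLE(id): the ID's high part selects a
 * sub-table, the low part a slot within it. Returning a pointer to the
 * slot mirrors the macro's use as an lvalue (read and assign). */
static void **fat_slot(fat_table_sketch *t, size_t id)
{
    assert(id < t->size);
    return &t->tables[id / FAT_TABLE_ENTRIES][id % FAT_TABLE_ENTRIES];
}

/* Growth appends one sub-table; existing slots never move, unlike the
 * realloc-based scheme this patch replaces. */
static int fat_grow(fat_table_sketch *t)
{
    size_t next = t->size / FAT_TABLE_ENTRIES;
    if (next >= MAX_FAT_TABLES)
        return -1;  /* table limit reached */
    t->tables[next] = calloc(FAT_TABLE_ENTRIES, sizeof(void *));
    if (t->tables[next] == NULL)
        return -1;
    t->size += FAT_TABLE_ENTRIES;
    return 0;
}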
     locktable_writer_exit();
     return mon_index;
 }
@@ -850,9 +863,10 @@
     int old_slots_occupied = 0;
     int new_slots_occupied = 0;
 
-    for (i < lock_table->size)
-        if (lock_table->table[i])
-            old_slots_occupied++;
+    for (;i < lock_table->size; i++) {
+        if (FAT_TABLE(i))
+            old_slots_occupied++;
+    }
 #endif
 
     locktable_reader_enter();
@@ -870,7 +884,7 @@
 
     for(i = 0; i < lock_table->size; i++) {
         if (lock_table->live_objs[i]) {
-            assert(lock_table->table[i]);
+            assert(FAT_TABLE(i));
 #ifdef DEBUG_NATIVE_RESOURCE_COLLECTION
             new_slots_occupied++;
 #endif
@@ -878,9 +892,9 @@
             // reset the live array for the next major GC cycle
             lock_table->live_objs[i] = 0;
         } else {
-            if (lock_table->table[i]) {
-                hythread_monitor_destroy(lock_table->table[i]);
-                lock_table->table[i] = 0;
+            if (FAT_TABLE(i)) {
+                hythread_monitor_destroy(FAT_TABLE(i));
+                FAT_TABLE(i) = 0;
             }
         }
     }
@@ -899,8 +913,8 @@
 
     hythread_monitor_t m;
     DIE(("shouldn't get here"));
     assert(lock_id >=0 && (U_32)lock_id < lock_table->size);
-    m = lock_table->table[lock_id];
-    lock_table->table[lock_id] = NULL;
+    m = FAT_TABLE(lock_id);
+    FAT_TABLE(lock_id) = NULL;
     return m;
 }
Index: vm/thread/src/thread_private.h
===================================================================
--- vm/thread/src/thread_private.h (revision 553904)
+++ vm/thread/src/thread_private.h (working copy)
@@ -53,11 +53,17 @@
 #define MAX_OWNED_MONITOR_NUMBER 200 //FIXME: switch to dynamic resize
 #define FAST_LOCAL_STORAGE_SIZE 10
 
-#define INITIAL_FAT_TABLE_ENTRIES 16*1024 //make this table exapandible if workloads show it is necessary
+#define FAT_LOCK_ID_OFFSET 11 // fat lock ID offset within the lockword
+#define FAT_LOCK_ID_MASK 0xFFFFF // 20-bit mask applied after the shift to extract the fat lock ID
 
-#define HY_DEFAULT_STACKSIZE 512 * 1024 // if default stack size is not through -Xss parameter, it is 256kb
+#define FAT_TABLE_ENTRIES (16*1024) // the fat lock table expands by adding tables of this size
+#define MAX_FAT_LOCKS (FAT_LOCK_ID_MASK + 1) // max total count of fat locks
+// max count of sub-tables the lock table can expand to
+#define MAX_FAT_TABLES ((MAX_FAT_LOCKS + FAT_TABLE_ENTRIES - 1)/FAT_TABLE_ENTRIES)
 
+#define HY_DEFAULT_STACKSIZE (512 * 1024) // default stack size (512KB) when not set via the -Xss option
+
 #if !defined (_IPF_)
 //use lock reservation
 #define LOCK_RESERVATION
@@ -561,7 +567,7 @@
 
 typedef struct HyFatLockTable {
     // locktable itself
-    hythread_monitor_t *table;
+    hythread_monitor_t* tables[MAX_FAT_TABLES];
 
     // mutex guarding locktable
     hymutex_t mutex;
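The new constants encode a particular lockword layout; the sketch below is one reading of that layout, inferred only from the macros in this patch, with a self-check that the rewritten FAT_LOCK_ID extracts exactly the same 20 bits as the old ((lockword << 1) >> 12) expression, and that the derived limits work out to 64 sub-tables.

#include <assert.h>
#include <stdint.h>

/* Lockword layout as implied by the macros in this patch:
 *   bit  31     - fat lock flag (IS_FAT_LOCK)
 *   bits 16..30 - owner thread ID for thin locks (THREAD_ID)
 *   bits 11..30 - 20-bit fat monitor ID for fat locks (FAT_LOCK_ID)
 *   bits 11..15 - recursion count for thin locks
 *   bit  10     - reservation bit (RESERVED_BITMASK) */

#define FAT_LOCK_ID_OFFSET 11
#define FAT_LOCK_ID_MASK   0xFFFFF
#define FAT_TABLE_ENTRIES  (16*1024)
#define MAX_FAT_LOCKS      (FAT_LOCK_ID_MASK + 1)
#define MAX_FAT_TABLES     ((MAX_FAT_LOCKS + FAT_TABLE_ENTRIES - 1)/FAT_TABLE_ENTRIES)

int main(void)
{
    uint32_t lw = (1u << 31) | (12345u << FAT_LOCK_ID_OFFSET);

    /* The new formulation extracts the same bits as the old
     * ((lockword << 1) >> 12): shift out bit 31, keep bits 11..30. */
    assert(((lw >> FAT_LOCK_ID_OFFSET) & FAT_LOCK_ID_MASK) == ((lw << 1) >> 12));
    assert(((lw >> FAT_LOCK_ID_OFFSET) & FAT_LOCK_ID_MASK) == 12345u);

    /* 2^20 lock IDs split over 16K-entry sub-tables gives 64 tables. */
    assert(MAX_FAT_LOCKS == 1048576 && MAX_FAT_TABLES == 64);
    return 0;
}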
Index: vm/thread/src/thread_init.c
===================================================================
--- vm/thread/src/thread_init.c (revision 553904)
+++ vm/thread/src/thread_init.c (working copy)
@@ -282,15 +282,17 @@
     groups_count = 0;
 
     lock_table = (HyFatLockTable *) malloc (sizeof(HyFatLockTable));
-    lock_table->table = (hythread_monitor_t *)calloc(INITIAL_FAT_TABLE_ENTRIES,
+    memset(lock_table->tables, 0, sizeof(lock_table->tables));
+    lock_table->tables[0] = (hythread_monitor_t *)calloc(FAT_TABLE_ENTRIES,
                                          sizeof(hythread_monitor_t));
-    lock_table->live_objs = (unsigned char *)calloc(INITIAL_FAT_TABLE_ENTRIES,
+    lock_table->live_objs = (unsigned char *)calloc(FAT_TABLE_ENTRIES,
                                          sizeof(unsigned char));
-    lock_table->size = INITIAL_FAT_TABLE_ENTRIES;
+    lock_table->size = FAT_TABLE_ENTRIES;
     lock_table->array_cursor = 0;
 
     assert (lock_table);
-    assert (lock_table->table);
+    assert (lock_table->tables);
+    assert (lock_table->tables[0]);
     assert (lock_table->live_objs);
 
     if (hymutex_create(&lock_table->mutex, APR_THREAD_MUTEX_NESTED)) {
@@ -316,7 +318,9 @@
 static IDATA destroy_group_list() {
     hythread_group_t cur;
     IDATA status,status2;
+    int i;
 
+    // This method works only if there are no running threads.
     // there is no good way to kill running threads
     status=hythread_global_lock();
@@ -335,8 +339,11 @@
     }
 
     free(lock_table->live_objs);
-    free(lock_table->table);
+    for (i = 0; i < MAX_FAT_TABLES && lock_table->tables[i]; i++) {
+        free(lock_table->tables[i]);
+    }
+
     hymutex_destroy(&lock_table->mutex);
     hycond_destroy(&lock_table->write);
     hycond_destroy(&lock_table->read);
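Finally, a self-contained sketch of the init/teardown pairing the patch establishes (again with illustrative names, not Harmony types): only tables[0] is allocated at startup, growth fills tables[] left to right, so cleanup can stop at the first NULL entry, exactly as the new loop in destroy_group_list() does.

#include <stdlib.h>
#include <string.h>

#define FAT_TABLE_ENTRIES (16 * 1024)
#define MAX_FAT_TABLES    64 /* illustration */

typedef struct {
    void **tables[MAX_FAT_TABLES];
    size_t size;
} fat_table_sketch;

/* Mirrors the patched thread_init.c: zero the whole pointer array,
 * then allocate only the first sub-table. */
static int fat_table_init(fat_table_sketch *t)
{
    memset(t->tables, 0, sizeof(t->tables));
    t->tables[0] = calloc(FAT_TABLE_ENTRIES, sizeof(void *));
    t->size = FAT_TABLE_ENTRIES;
    return t->tables[0] != NULL ? 0 : -1;
}

/* Mirrors the new destroy_group_list() cleanup: sub-tables are filled
 * left to right, so the first NULL entry ends the loop. */
static void fat_table_destroy(fat_table_sketch *t)
{
    int i;
    for (i = 0; i < MAX_FAT_TABLES && t->tables[i]; i++)
        free(t->tables[i]);
    t->size = 0;
}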