Index: vm/thread/src/thread_native_thin_monitor.c
===================================================================
--- vm/thread/src/thread_native_thin_monitor.c	(revision 558712)
+++ vm/thread/src/thread_native_thin_monitor.c	(working copy)
@@ -53,7 +53,8 @@
 // lockword operations
 #define THREAD_ID(lockword) (lockword >> 16)
 #define IS_FAT_LOCK(lockword) (lockword >> 31)
-#define FAT_LOCK_ID(lockword) ((lockword << 1) >> 12)
+#define FAT_LOCK_ID(lockword) \
+    ((lockword >> HY_FAT_LOCK_ID_OFFSET) & HY_FAT_LOCK_ID_MASK)
 // lock reservation support
 #define RESERVED_BITMASK ((1<<10))
 #define IS_RESERVED(lockword) (0==(lockword & RESERVED_BITMASK))
@@ -63,7 +64,11 @@
 #define RECURSION_DEC(lockword_ptr, lockword) (*lockword_ptr=lockword - (1<<11))
 #define MAX_RECURSION 31
 
+#define FAT_LOCK(_x_) \
+    lock_table->tables[((U_32)(_x_))/HY_FAT_TABLE_ENTRIES]\
+                      [((U_32)(_x_))%HY_FAT_TABLE_ENTRIES]
+
 /*
  * Lock table which holds the omapping between LockID and fat lock (OS fat_monitor) pointer.
  */
@@ -775,7 +780,7 @@
     assert(lock_id >=0 && (U_32)lock_id < lock_table->size);
     // we don't need to protect this read, because monitor can't vanish or
     // be moved in lock table while we are doing get_fat_monitor
-    fat_monitor = lock_table->table[lock_id];
+    fat_monitor = FAT_LOCK(lock_id);
     return fat_monitor;
 }
 
@@ -785,50 +790,60 @@
 IDATA locktable_put_fat_monitor(hythread_monitor_t fat_monitor) {
     U_32 i = 0;
-    int mon_index;
+    U_32 mon_index;
     short free_slot_found = 0;
 
     if (lock_table == 0) {
         DIE (("Lock table not initialized!"));
     }
 
+    locktable_writer_enter();
+    mon_index = lock_table->array_cursor;
 
-    locktable_writer_enter();
     for(i =0; i < lock_table->size; i++) {
-        if (lock_table->table[lock_table->array_cursor] == 0) {
-            assert(lock_table->live_objs[lock_table->array_cursor] == 0);
-            lock_table->table[lock_table->array_cursor] = fat_monitor;
+        hythread_monitor_t* table;
+
+        if (mon_index == lock_table->size)
+            mon_index = 0;
+
+        table = lock_table->tables[mon_index / HY_FAT_TABLE_ENTRIES];
+
+        if (table[mon_index % HY_FAT_TABLE_ENTRIES] == 0) {
+            assert(lock_table->live_objs[mon_index] == 0);
+            table[mon_index % HY_FAT_TABLE_ENTRIES] = fat_monitor;
             free_slot_found = 1;
             break;
         }
-        lock_table->array_cursor++;
-        if (lock_table->array_cursor == lock_table->size)
-            lock_table->array_cursor = 0;
+        ++mon_index;
     }
 
     if(!free_slot_found) {
-        int old_size;
+        U_32 old_size;
+        hythread_monitor_t* table;
+
+        if (lock_table->size >= HY_MAX_FAT_LOCKS) {
+            DIE (("Fat monitor table limit exceeded!"));
+        }
 
         old_size = lock_table->size;
-        lock_table->size += INITIAL_FAT_TABLE_ENTRIES;
-        lock_table->table = realloc(lock_table->table,
-            lock_table->size * sizeof(hythread_monitor_t));
-        assert(lock_table->table);
+        lock_table->size += HY_FAT_TABLE_ENTRIES;
 
+        table = (hythread_monitor_t *)calloc(HY_FAT_TABLE_ENTRIES,
+            sizeof(hythread_monitor_t));
+        assert(table);
+        lock_table->tables[old_size / HY_FAT_TABLE_ENTRIES] = table;
 
-        lock_table->live_objs = realloc(lock_table->live_objs,
+        lock_table->live_objs = realloc(lock_table->live_objs,
             lock_table->size * sizeof(unsigned char));
         assert(lock_table->live_objs);
 
-        memset(lock_table->table + old_size, 0,
-            INITIAL_FAT_TABLE_ENTRIES * sizeof(hythread_monitor_t));
-        memset(lock_table->live_objs + old_size, 0,
-            INITIAL_FAT_TABLE_ENTRIES * sizeof(unsigned char));
+        memset(lock_table->live_objs + old_size, 0,
+            HY_FAT_TABLE_ENTRIES * sizeof(unsigned char));
 
-        lock_table->array_cursor = old_size;
-        lock_table->table[lock_table->array_cursor] = fat_monitor;
+        table[0] = fat_monitor;
+        mon_index = old_size;
+    }
 
-    }
-    mon_index = lock_table->array_cursor;
+    lock_table->array_cursor = mon_index + 1;
     locktable_writer_exit();
     return mon_index;
 }
@@ -855,9 +870,10 @@
     int old_slots_occupied = 0;
     int new_slots_occupied = 0;
 
-    for (i < lock_table->size)
-        if (lock_table->table[i])
-            old_slots_occupied++;
+    for (;i < lock_table->size; i++) {
+        if (FAT_LOCK(i))
+            old_slots_occupied++;
+    }
 #endif
 
     locktable_reader_enter();
@@ -875,7 +891,7 @@
 
     for(i = 0; i < lock_table->size; i++) {
         if (lock_table->live_objs[i]) {
-            assert(lock_table->table[i]);
+            assert(FAT_LOCK(i));
 #ifdef DEBUG_NATIVE_RESOURCE_COLLECTION
             new_slots_occupied++;
 #endif
@@ -883,9 +899,9 @@
             // reset the live array for the next major GC cycle
             lock_table->live_objs[i] = 0;
         } else {
-            if (lock_table->table[i]) {
-                hythread_monitor_destroy(lock_table->table[i]);
-                lock_table->table[i] = 0;
+            if (FAT_LOCK(i)) {
+                hythread_monitor_destroy(FAT_LOCK(i));
+                FAT_LOCK(i) = 0;
             }
         }
     }
@@ -904,8 +920,8 @@
     hythread_monitor_t m;
     DIE(("shouldn't get here"));
     assert(lock_id >=0 && (U_32)lock_id < lock_table->size);
-    m = lock_table->table[lock_id];
-    lock_table->table[lock_id] = NULL;
+    m = FAT_LOCK(lock_id);
+    FAT_LOCK(lock_id) = NULL;
     return m;
 }
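Note on the hunks above: the single realloc()-ed monitor array is replaced by a
directory of fixed-size leaf tables. A leaf never moves once allocated, so the
unsynchronized read in locktable_get_fat_monitor() stays valid while the table
grows; only appending a new leaf needs the writer lock. Below is a minimal
standalone sketch of the same two-level scheme; all demo_* names are
illustrative stand-ins, not identifiers from this patch.

#include <assert.h>
#include <stdlib.h>

#define DEMO_ENTRIES    (16 * 1024)   /* slots per leaf table */
#define DEMO_MAX_TABLES 64            /* directory capacity */

typedef void *demo_monitor_t;         /* stand-in for hythread_monitor_t */

typedef struct {
    demo_monitor_t *tables[DEMO_MAX_TABLES]; /* directory of leaf tables */
    unsigned        size;                    /* total slots allocated */
} demo_lock_table_t;

/* The id splits into a directory index (high part) and a slot index
 * (low part); this mirrors what the FAT_LOCK() macro computes. */
static demo_monitor_t *demo_slot(demo_lock_table_t *t, unsigned id) {
    assert(id < t->size);
    return &t->tables[id / DEMO_ENTRIES][id % DEMO_ENTRIES];
}

/* Growth allocates one new leaf and leaves existing slots at their old
 * addresses, unlike realloc() of a single flat array. */
static int demo_grow(demo_lock_table_t *t) {
    unsigned dir = t->size / DEMO_ENTRIES;
    if (dir >= DEMO_MAX_TABLES)
        return -1;                    /* directory full */
    t->tables[dir] = calloc(DEMO_ENTRIES, sizeof(demo_monitor_t));
    if (t->tables[dir] == NULL)
        return -1;
    t->size += DEMO_ENTRIES;
    return 0;
}

Because growth never invalidates addresses handed out earlier, a reader that
obtained a lock id before an expansion can keep dereferencing its slot; this is
the property the "we don't need to protect this read" comment relies on.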
Index: vm/thread/src/thread_private.h
===================================================================
--- vm/thread/src/thread_private.h	(revision 558712)
+++ vm/thread/src/thread_private.h	(working copy)
@@ -52,8 +52,15 @@
 
 #define FAST_LOCAL_STORAGE_SIZE 10
 
-#define INITIAL_FAT_TABLE_ENTRIES 16*1024 //make this table exapandible if workloads show it is necessary
+#define HY_FAT_LOCK_ID_OFFSET 11       // fat lock ID offset within the lockword
+#define HY_FAT_LOCK_ID_MASK 0xFFFFF    // nonzero bits (starting from bit 0) that mask the fat lock ID
+#define HY_FAT_TABLE_ENTRIES (16*1024) // the fat lock table is expandable by adding a new table
+#define HY_MAX_FAT_LOCKS (HY_FAT_LOCK_ID_MASK + 1) // max total count of fat locks
+// max count of tables for expansion
+#define HY_MAX_FAT_TABLES ((HY_MAX_FAT_LOCKS + HY_FAT_TABLE_ENTRIES - 1)/HY_FAT_TABLE_ENTRIES)
+
+
 #ifdef __cplusplus
 extern "C" {
 #endif /* __cplusplus */
@@ -421,7 +428,7 @@
 
 typedef struct HyFatLockTable {
     // locktable itself
-    hythread_monitor_t *table;
+    hythread_monitor_t* tables[HY_MAX_FAT_TABLES];
 
     // mutex guarding locktable
     hymutex_t mutex;
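The constants above imply this lockword layout (inferred from the macros in
thread_native_thin_monitor.c, not stated anywhere else in the patch): bit 31 is
the fat-lock flag and bits 11..30 hold a 20-bit fat lock ID, which is why
HY_MAX_FAT_LOCKS works out to 0x100000. A small worked example of the decoding:

#include <stdio.h>

#define HY_FAT_LOCK_ID_OFFSET 11
#define HY_FAT_LOCK_ID_MASK   0xFFFFF

int main(void) {
    /* Compose a fat lockword: flag at bit 31, id 12345 at bits 11..30. */
    unsigned lockword = (1u << 31) | (12345u << HY_FAT_LOCK_ID_OFFSET);
    unsigned is_fat   = lockword >> 31;
    unsigned id       = (lockword >> HY_FAT_LOCK_ID_OFFSET) & HY_FAT_LOCK_ID_MASK;
    printf("fat=%u id=%u\n", is_fat, id);   /* prints: fat=1 id=12345 */
    return 0;
}

The old FAT_LOCK_ID, ((lockword << 1) >> 12), extracted the same bit range by
shifting the flag bit out first; the shift-and-mask form makes the field width
explicit and keeps it in sync with HY_MAX_FAT_LOCKS.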
Index: vm/thread/src/thread_init.c
===================================================================
--- vm/thread/src/thread_init.c	(revision 558712)
+++ vm/thread/src/thread_init.c	(working copy)
@@ -282,15 +282,16 @@
     groups_count = 0;
 
     lock_table = (HyFatLockTable *) malloc (sizeof(HyFatLockTable));
-    lock_table->table = (hythread_monitor_t *)calloc(INITIAL_FAT_TABLE_ENTRIES,
+    memset(lock_table, 0, sizeof(HyFatLockTable));
+    lock_table->tables[0] = (hythread_monitor_t *)calloc(HY_FAT_TABLE_ENTRIES,
         sizeof(hythread_monitor_t));
-    lock_table->live_objs = (unsigned char *)calloc(INITIAL_FAT_TABLE_ENTRIES,
+    lock_table->live_objs = (unsigned char *)calloc(HY_FAT_TABLE_ENTRIES,
         sizeof(unsigned char));
-    lock_table->size = INITIAL_FAT_TABLE_ENTRIES;
+    lock_table->size = HY_FAT_TABLE_ENTRIES;
     lock_table->array_cursor = 0;
 
     assert (lock_table);
-    assert (lock_table->table);
+    assert (lock_table->tables[0]);
     assert (lock_table->live_objs);
 
     if (hymutex_create(&lock_table->mutex, APR_THREAD_MUTEX_NESTED)) {
@@ -316,6 +317,7 @@
 static IDATA destroy_group_list() {
     hythread_group_t cur;
     IDATA status,status2;
+    int i;
 
     // This method works only if there are no running threads.
     // there is no good way to kill running threads
@@ -335,8 +337,11 @@
     }
 
     free(lock_table->live_objs);
-    free(lock_table->table);
 
+    for (i = 0; i < HY_MAX_FAT_TABLES && lock_table->tables[i]; i++) {
+        free(lock_table->tables[i]);
+    }
+
     hymutex_destroy(&lock_table->mutex);
     hycond_destroy(&lock_table->write);
     hycond_destroy(&lock_table->read);
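For symmetry, a teardown counterpart to the demo sketch after the first file,
mirroring the loop added to destroy_group_list(): leaves are freed in directory
order, stopping at the first unallocated slot (the directory fills densely from
index 0, so a NULL entry marks the end). This extends the demo_lock_table_t
example above; demo_* names remain illustrative.

static void demo_destroy(demo_lock_table_t *t) {
    unsigned i;
    for (i = 0; i < DEMO_MAX_TABLES && t->tables[i] != NULL; i++) {
        free(t->tables[i]);
        t->tables[i] = NULL;
    }
    t->size = 0;
}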