FS-10167: fixed a couple of deadlock issues and a misconception about hash locking

Shane Bryldt 2017-04-17 11:10:05 -06:00
parent 37f9b2afdc
commit 8d4eac7f69
3 changed files with 23 additions and 15 deletions

View File

@@ -319,8 +319,8 @@ KS_DECLARE(void) blade_session_state_set(blade_session_t *bs, blade_session_stat
 	blade_handle_session_state_callbacks_execute(bs, BLADE_SESSION_STATE_CONDITION_PRE);
-	ks_cond_try_signal(bs->cond);
 	ks_mutex_unlock(bs->mutex);
+	ks_cond_try_signal(bs->cond);
 }

 KS_DECLARE(void) blade_session_hangup(blade_session_t *bs)
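
This reordering is the first of the two deadlock fixes. Assuming ks_cond_try_signal takes the condition's paired mutex with a trylock before signaling (an assumption; the libks internals are not part of this diff), calling it while bs->mutex is still held means the trylock can never succeed on a non-recursive mutex, so the wakeup is silently dropped and the waiter blocks indefinitely. A minimal pthread sketch of the same hazard, with hypothetical names:

#include <pthread.h>

/* Hypothetical stand-ins for the blade_session locking, not the real libks types. */
typedef struct {
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	int state;
} session_t;

/* "try_signal": signal only if the paired mutex can be taken right now. */
static void try_signal(session_t *s)
{
	if (pthread_mutex_trylock(&s->mutex) == 0) {
		pthread_cond_signal(&s->cond);
		pthread_mutex_unlock(&s->mutex);
	}
	/* else the signal is silently lost -- the missed wakeup fixed above */
}

static void state_set(session_t *s, int state)
{
	pthread_mutex_lock(&s->mutex);
	s->state = state;
	/* Broken order: try_signal(s) here could never acquire the trylock,
	 * because this thread still owns s->mutex. */
	pthread_mutex_unlock(&s->mutex);
	try_signal(s); /* fixed order: signal after releasing the mutex */
}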

View File

@@ -196,22 +196,23 @@ KS_DECLARE(ks_status_t) blade_handle_create(blade_handle_t **bhP, ks_pool_t *poo
 	bh->pool = pool;
 	bh->tpool = tpool;

-	ks_hash_create(&bh->transports, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool); // @todo this needs to be reviewed, NOLOCK is incorrect, but RWLOCK causes deadlock somewhere
+	ks_hash_create(&bh->transports, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
 	ks_assert(bh->transports);
-	ks_hash_create(&bh->spaces, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+	ks_hash_create(&bh->spaces, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
 	ks_assert(bh->spaces);
-	ks_hash_create(&bh->events, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+	ks_hash_create(&bh->events, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
 	ks_assert(bh->events);
-	ks_hash_create(&bh->connections, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+	ks_hash_create(&bh->connections, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
 	ks_assert(bh->connections);
-	ks_hash_create(&bh->sessions, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+	ks_hash_create(&bh->sessions, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
 	ks_assert(bh->sessions);
-	ks_hash_create(&bh->session_state_callbacks, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+	ks_hash_create(&bh->session_state_callbacks, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
 	ks_assert(bh->session_state_callbacks);
-	ks_hash_create(&bh->requests, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_NOLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
+	ks_hash_create(&bh->requests, KS_HASH_MODE_CASE_INSENSITIVE, KS_HASH_FLAG_RWLOCK | KS_HASH_FLAG_DUP_CHECK, bh->pool);
 	ks_assert(bh->requests);

 	*bhP = bh;
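
This flag swap is the "misconception about hash locking" from the commit title: KS_HASH_FLAG_NOLOCK leaves the table with no locking of its own, while KS_HASH_FLAG_RWLOCK (judging by its name and by the explicit ks_hash_read_lock/ks_hash_write_lock calls later in this diff) backs the hash with a read/write lock. With the actual deadlocks fixed elsewhere in this commit, RWLOCK becomes safe and the removed @todo becomes moot. A lookup sketch following the pattern in blade_handle_sessions_get below; the hash functions are the ones appearing in this commit, and ks_hash_read_unlock is assumed to be the counterpart of ks_hash_read_lock:

#include "ks.h" /* libks */

/* Hypothetical helper mirroring the blade_handle_sessions_get lookup pattern. */
static void *hash_lookup(ks_hash_t *hash, const char *key)
{
	void *value = NULL;
	ks_hash_read_lock(hash); /* meaningful because the hash was created with KS_HASH_FLAG_RWLOCK */
	value = ks_hash_search(hash, (void *)key, KS_UNLOCKED);
	ks_hash_read_unlock(hash); /* assumed counterpart of ks_hash_read_lock */
	return value;
}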
@@ -665,6 +666,13 @@ KS_DECLARE(blade_session_t *) blade_handle_sessions_get(blade_handle_t *bh, cons
 	ks_assert(bh);
 	ks_assert(sid);

+	// @todo consider using blade_session_t via reference counting, rather than locking a mutex to simulate a reference count to halt cleanups while in use
+	// using actual reference counting would mean that mutexes would not need to be held locked when looking up a session by id just to prevent cleanup,
+	// instead cleanup would automatically occur when the last reference is actually removed (which SHOULD be at the end of the state machine thread),
+	// which is safer than another thread potentially waiting on the write lock to release while it's being destroyed, or external code forgetting to unlock
+	// then use a short-lived mutex or rwl for accessing the content of the session while it is referenced
+	// this approach should also be used for blade_connection_t, which has a similar threaded state machine
+
 	ks_hash_read_lock(bh->sessions);
 	bs = ks_hash_search(bh->sessions, (void *)sid, KS_UNLOCKED);
 	if (bs && blade_session_read_lock(bs, KS_FALSE) != KS_STATUS_SUCCESS) bs = NULL;
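
The @todo block added above describes a reference-counting alternative to the current lock-to-pin scheme. A purely illustrative sketch of that design (hypothetical names, not a libks API): lookups take a reference instead of holding a lock, and destruction happens automatically when the last reference drops.

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative only, per the @todo above; names and layout are hypothetical. */
typedef struct session {
	atomic_int refs; /* starts at 1, held by the state machine thread */
	/* ... session contents, guarded by a short-lived mutex or rwlock ... */
} session_t;

static session_t *session_create(void)
{
	session_t *s = calloc(1, sizeof(*s));
	if (s) atomic_init(&s->refs, 1); /* the state machine thread's reference */
	return s;
}

static session_t *session_get(session_t *s)
{
	if (s) atomic_fetch_add(&s->refs, 1); /* pin without holding any global lock */
	return s;
}

static void session_release(session_t *s)
{
	/* Cleanup runs exactly once, when the final reference is dropped --
	 * which should be at the end of the state machine thread. */
	if (s && atomic_fetch_sub(&s->refs, 1) == 1) free(s);
}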
@@ -766,7 +774,7 @@ KS_DECLARE(ks_status_t) blade_handle_session_state_callback_unregister(blade_han
 	ks_hash_write_lock(bh->session_state_callbacks);
 	bhsscr = (blade_handle_session_state_callback_registration_t *)ks_hash_remove(bh->session_state_callbacks, (void *)id);
 	if (!bhsscr) ret = KS_STATUS_FAIL;
-	ks_hash_write_lock(bh->session_state_callbacks);
+	ks_hash_write_unlock(bh->session_state_callbacks);
 	if (bhsscr) blade_handle_session_state_callback_registration_destroy(&bhsscr);
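
This one-word fix is the likely culprit behind the removed @todo's "RWLOCK causes deadlock somewhere": ks_hash_write_lock was called a second time where the unlock belonged, so the thread blocked against its own write lock. The corrected bracket, in a generic pthread sketch:

#include <pthread.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static void unregister_entry(void)
{
	pthread_rwlock_wrlock(&lock);
	/* ... remove the entry from the table ... */
	pthread_rwlock_unlock(&lock); /* was a second wrlock: on a non-recursive
	                               * rwlock that blocks the thread against itself */
}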

View File

@@ -267,8 +267,6 @@ static void ks_list_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_clean
 		break;
 	case KS_MPCL_TEARDOWN:
 		ks_list_clear(l);
-		break;
-	case KS_MPCL_DESTROY:
 		ks_rwl_write_lock(l->lock);
 		for (unsigned int i = 0; i < l->spareelsnum; i++) ks_pool_free(l->pool, &l->spareels[i]);
 		l->spareelsnum = 0;
@@ -278,6 +276,8 @@ static void ks_list_cleanup(ks_pool_t *pool, void *ptr, void *arg, ks_pool_clean
 		ks_rwl_write_unlock(l->lock);
 		ks_rwl_destroy(&l->lock);
 		break;
+	case KS_MPCL_DESTROY:
+		break;
 	}
 }
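
These two hunks move the spare-element freeing and rwlock teardown out of the KS_MPCL_DESTROY case and into KS_MPCL_TEARDOWN (deleting the break and the case label lets TEARDOWN run straight through that code), leaving DESTROY a no-op. The apparent rationale, assuming the usual two-phase pool-cleanup contract (teardown fires while pool allocations are still valid, destroy fires as the pool itself goes away), is that ks_pool_free and ks_rwl_destroy must not run in a phase where the memory they touch may already be gone. A generic illustration of that split, with hypothetical names (the real callback signature is truncated above):

struct my_list;
enum cleanup_phase { CLEANUP_ANNOUNCE, CLEANUP_TEARDOWN, CLEANUP_DESTROY };

static void list_cleanup(struct my_list *l, enum cleanup_phase phase)
{
	(void)l;
	switch (phase) {
	case CLEANUP_ANNOUNCE:
		break;
	case CLEANUP_TEARDOWN:
		/* Free subordinate resources here: the backing allocations
		 * (spare elements, the rwlock) are still live in this phase. */
		break;
	case CLEANUP_DESTROY:
		/* Nothing left to do; pool memory may no longer be safe to touch. */
		break;
	}
}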
@@ -648,13 +648,13 @@ KS_DECLARE(int) ks_list_delete_at(ks_list_t *restrict l, unsigned int pos) {

 KS_DECLARE(int) ks_list_delete_iterator(ks_list_t *restrict l) {
 	struct ks_list_entry_s *delendo;

-	if (!l->iter_active || l->iter_pos >= l->numels) return -1;
+	if (!l->iter_active || l->iter_pos > l->numels) return -1;

 	ks_rwl_write_lock(l->lock);

-	delendo = ks_list_findpos(l, l->iter_pos);
-	ks_list_drop_elem(l, delendo, l->iter_pos);
+	delendo = ks_list_findpos(l, l->iter_pos - 1);
+	ks_list_drop_elem(l, delendo, l->iter_pos - 1);

 	l->numels--;
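
This final hunk fixes a matched pair of off-by-one errors. The guard and the lookups only make sense if iter_pos is 1-based, i.e. it counts elements already returned by the iterator while ks_list_findpos indexes from 0 (an inference from this diff, not stated in it): the old >= refused to delete the last element outright, and the old findpos/drop targeted the element after the one just returned. A worked example under that assumption:

/* list = [A, B, C], numels = 3; the iterator has just returned C, so iter_pos = 3.
 *
 *   old guard:  iter_pos >= numels  ->  3 >= 3, deletion refused entirely
 *   old target: findpos(l, 3)       ->  one past C (the wrong element regardless)
 *   new guard:  iter_pos > numels   ->  3 > 3 is false, deletion proceeds
 *   new target: findpos(l, 2)       ->  C, the element the iterator just returned
 */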