Mirror of https://github.com/asterisk/asterisk.git
taskpool: Add taskpool API, switch Stasis to using it.
This change introduces a new API called taskpool. This is a pool of taskprocessors. It provides the following functionality: 1. Task pushing to a pool of taskprocessors 2. Synchronous tasks 3. Serializers for execution ordering of tasks 4. Growing/shrinking of number of taskprocessors in pool This functionality already exists through the combination of threadpool+taskprocessors but through investigating I determined that this carries substantial overhead for short to medium duration tasks. The threadpool uses a single queue of work, and for management of threads it involves additional tasks. I wrote taskpool to eliminate the extra overhead and management as much as possible. Instead of a single queue of work each taskprocessor has its own queue and at push time a selector chooses the taskprocessor to queue the task to. Each taskprocessor also has its own thread like normal. This spreads out the tasks immediately and reduces contention on shared resources. Using the included efficiency tests the number of tasks that can be executed per second in a taskpool is 6-12 times more than an equivalent threadpool+taskprocessor setup. Stasis has been moved over to using this new API as it is a heavy consumer of threadpool+taskprocessors and produces a lot of tasks. UpgradeNote: The threadpool_* options in stasis.conf have now been deprecated though they continue to be read and used. They have been replaced with taskpool options that give greater control over the underlying taskpool used for stasis. DeveloperNote: The taskpool API has been added for common usage of a pool of taskprocessors. It is suggested to use this API instead of the threadpool+taskprocessor approach.
@@ -1211,103 +1211,6 @@ static int worker_set_state(struct worker_thread *worker, enum worker_state stat
 	return 0;
 }
 
-/*! Serializer group shutdown control object. */
-struct ast_serializer_shutdown_group {
-	/*! Shutdown thread waits on this conditional. */
-	ast_cond_t cond;
-	/*! Count of serializers needing to shutdown. */
-	int count;
-};
-
-static void serializer_shutdown_group_dtor(void *vdoomed)
-{
-	struct ast_serializer_shutdown_group *doomed = vdoomed;
-
-	ast_cond_destroy(&doomed->cond);
-}
-
-struct ast_serializer_shutdown_group *ast_serializer_shutdown_group_alloc(void)
-{
-	struct ast_serializer_shutdown_group *shutdown_group;
-
-	shutdown_group = ao2_alloc(sizeof(*shutdown_group), serializer_shutdown_group_dtor);
-	if (!shutdown_group) {
-		return NULL;
-	}
-	ast_cond_init(&shutdown_group->cond, NULL);
-	return shutdown_group;
-}
-
-int ast_serializer_shutdown_group_join(struct ast_serializer_shutdown_group *shutdown_group, int timeout)
-{
-	int remaining;
-	ast_mutex_t *lock;
-
-	if (!shutdown_group) {
-		return 0;
-	}
-
-	lock = ao2_object_get_lockaddr(shutdown_group);
-	ast_assert(lock != NULL);
-
-	ao2_lock(shutdown_group);
-	if (timeout) {
-		struct timeval start;
-		struct timespec end;
-
-		start = ast_tvnow();
-		end.tv_sec = start.tv_sec + timeout;
-		end.tv_nsec = start.tv_usec * 1000;
-		while (shutdown_group->count) {
-			if (ast_cond_timedwait(&shutdown_group->cond, lock, &end)) {
-				/* Error or timed out waiting for the count to reach zero. */
-				break;
-			}
-		}
-	} else {
-		while (shutdown_group->count) {
-			if (ast_cond_wait(&shutdown_group->cond, lock)) {
-				/* Error */
-				break;
-			}
-		}
-	}
-	remaining = shutdown_group->count;
-	ao2_unlock(shutdown_group);
-	return remaining;
-}
-
-/*!
- * \internal
- * \brief Increment the number of serializer members in the group.
- * \since 13.5.0
- *
- * \param shutdown_group Group shutdown controller.
- */
-static void serializer_shutdown_group_inc(struct ast_serializer_shutdown_group *shutdown_group)
-{
-	ao2_lock(shutdown_group);
-	++shutdown_group->count;
-	ao2_unlock(shutdown_group);
-}
-
-/*!
- * \internal
- * \brief Decrement the number of serializer members in the group.
- * \since 13.5.0
- *
- * \param shutdown_group Group shutdown controller.
- */
-static void serializer_shutdown_group_dec(struct ast_serializer_shutdown_group *shutdown_group)
-{
-	ao2_lock(shutdown_group);
-	--shutdown_group->count;
-	if (!shutdown_group->count) {
-		ast_cond_signal(&shutdown_group->cond);
-	}
-	ao2_unlock(shutdown_group);
-}
-
 struct serializer {
 	/*! Threadpool the serializer will use to process the jobs. */
 	struct ast_threadpool *pool;
@@ -1379,7 +1282,7 @@ static void serializer_shutdown(struct ast_taskprocessor_listener *listener)
 	struct serializer *ser = ast_taskprocessor_listener_get_user_data(listener);
 
 	if (ser->shutdown_group) {
-		serializer_shutdown_group_dec(ser->shutdown_group);
+		ast_serializer_shutdown_group_dec(ser->shutdown_group);
 	}
 	ao2_cleanup(ser);
 }
@@ -1418,7 +1321,7 @@ struct ast_taskprocessor *ast_threadpool_serializer_group(const char *name,
 		/* ser ref transferred to listener but not cleaned without tps */
 		ao2_ref(ser, -1);
 	} else if (shutdown_group) {
-		serializer_shutdown_group_inc(shutdown_group);
+		ast_serializer_shutdown_group_inc(shutdown_group);
 	}
 
 	ao2_ref(listener, -1);
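As a usage note for the serializer shutdown group code being moved above, here is a short sketch of the typical pattern with the threadpool serializer API: allocate a group, create serializers as members of it, and join the group at shutdown. The shutdown-group functions and ast_threadpool_serializer_group() appear in this diff; the other calls (ast_taskprocessor_push, ast_taskprocessor_unreference, ao2_cleanup) are written from memory of the existing Asterisk headers and should be treated as an approximation, not as part of this change.

/*
 * Minimal usage sketch of the serializer shutdown group pattern shown
 * in the diff above. Error handling is trimmed and the serializer name
 * is illustrative.
 */
#include "asterisk.h"
#include "asterisk/astobj2.h"
#include "asterisk/taskprocessor.h"
#include "asterisk/threadpool.h"

static int do_work(void *data)
{
	/* ... the serialized task body ... */
	return 0;
}

static int example(struct ast_threadpool *pool)
{
	struct ast_serializer_shutdown_group *shutdown_group;
	struct ast_taskprocessor *serializer;
	int remaining;

	/* One group can track any number of serializers. */
	shutdown_group = ast_serializer_shutdown_group_alloc();
	if (!shutdown_group) {
		return -1;
	}

	/* Serializers created with a group increment its member count. */
	serializer = ast_threadpool_serializer_group("example/serializer", pool, shutdown_group);
	if (!serializer) {
		ao2_cleanup(shutdown_group);
		return -1;
	}

	ast_taskprocessor_push(serializer, do_work, NULL);

	/* Releasing the serializer eventually decrements the group... */
	ast_taskprocessor_unreference(serializer);

	/* ...and join waits (up to 5 seconds here) for the count to reach
	 * zero, returning how many serializers had still not shut down. */
	remaining = ast_serializer_shutdown_group_join(shutdown_group, 5);
	ao2_cleanup(shutdown_group);

	return remaining ? -1 : 0;
}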