/*
  threadpool.c - part of lz4 project
  Copyright (C) Yann Collet 2023
  GPL v2 License

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License along
  with this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

  You can contact the author at :
  - LZ4 source repository : https://github.com/lz4/lz4
  - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/


/* ====== Dependencies ======= */
#include <assert.h>
#include "lz4conf.h"  /* LZ4IO_MULTITHREAD */
#include "threadpool.h"


/* ====== Compiler specifics ====== */
#if defined(_MSC_VER)
# pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
#endif


#if !LZ4IO_MULTITHREAD

/* ===================================================== */
/* Backup implementation with no multi-threading support */
/* ===================================================== */

/* Non-zero size, to ensure g_poolCtx != NULL */
struct TPool_s {
    int dummy;
};
static TPool g_poolCtx;

TPool* TPool_create(int numThreads, int queueSize) {
    (void)numThreads;
    (void)queueSize;
    return &g_poolCtx;
}

void TPool_free(TPool* ctx) {
    assert(!ctx || ctx == &g_poolCtx);
    (void)ctx;
}

void TPool_submitJob(TPool* ctx, void (*job_function)(void*), void* arg) {
    (void)ctx;
    job_function(arg);
}

void TPool_jobsCompleted(TPool* ctx) {
    assert(!ctx || ctx == &g_poolCtx);
    (void)ctx;
}
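

/* Example usage of the TPool API, whose contract is shared by all three
 * implementations in this file. Illustrative sketch only, not part of the
 * lz4 build; the job function and data layout below are hypothetical:
 *
 *     static void squareJob(void* opaque)
 *     {
 *         int* const v = (int*)opaque;
 *         *v = (*v) * (*v);
 *     }
 *
 *     int data[100];                            // filled by the caller
 *     TPool* const pool = TPool_create(4, 16);  // 4 workers, queue of 16
 *     if (pool != NULL) {
 *         int i;
 *         for (i = 0; i < 100; i++) {
 *             // each job writes a distinct element, so no locking is needed
 *             TPool_submitJob(pool, squareJob, &data[i]);  // blocks if queue is full
 *         }
 *         TPool_jobsCompleted(pool);  // returns once all submitted jobs are done
 *         TPool_free(pool);
 *     }
 */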


#elif defined(_WIN32)

/* Windows TPool implementation using Completion Ports */
#include <windows.h>
#include <stdlib.h>   /* malloc, calloc, free */

typedef struct TPool_s {
    HANDLE completionPort;
    HANDLE* workerThreads;
    int nbWorkers;
    int queueSize;
    LONG nbPendingJobs;
    HANDLE jobSlotAvail;     /* Semaphore, for queue size control */
    HANDLE allJobsCompleted; /* Event */
} TPool;
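
/* Synchronization scheme, as implemented below :
 * - a job is posted directly to the completion port; the function pointer
 *   travels in the completionKey and its argument in the lpOverlapped slot;
 * - jobSlotAvail is a counting semaphore initialized to queueSize + nbWorkers,
 *   the maximum number of jobs queued or running at any time; it is acquired
 *   on submission and released on completion, providing backpressure;
 * - nbPendingJobs counts submitted-but-unfinished jobs; when it drops to 0,
 *   workers set the allJobsCompleted event that TPool_jobsCompleted waits on.
 */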

void TPool_free(TPool* pool)
{
    if (!pool) return;

    /* Signal workers to exit by posting NULL completions */
    {   int i;
        for (i = 0; i < pool->nbWorkers; i++) {
            PostQueuedCompletionStatus(pool->completionPort, 0, 0, NULL);
    }   }

    /* workerThreads may be NULL if pool construction failed midway */
    if (pool->workerThreads != NULL) {
        /* Wait for worker threads to finish */
        WaitForMultipleObjects((DWORD)pool->nbWorkers, pool->workerThreads, TRUE, INFINITE);

        /* Close thread handles */
        {   int i;
            for (i = 0; i < pool->nbWorkers; i++) {
                CloseHandle(pool->workerThreads[i]);
        }   }
        free(pool->workerThreads);
    }
    CloseHandle(pool->completionPort);

    /* Clean up synchronization objects */
    CloseHandle(pool->jobSlotAvail);
    CloseHandle(pool->allJobsCompleted);

    free(pool);
}

static DWORD WINAPI WorkerThread(LPVOID lpParameter)
{
    TPool* const pool = (TPool*)lpParameter;
    DWORD bytesTransferred;
    ULONG_PTR completionKey;
    LPOVERLAPPED overlapped;

    while (GetQueuedCompletionStatus(pool->completionPort,
                                     &bytesTransferred, &completionKey,
                                     &overlapped, INFINITE)) {

        /* End signal */
        if (overlapped == NULL) { break; }

        /* Execute job */
        ((void (*)(void*))completionKey)(overlapped);

        /* Signal job completion */
        if (InterlockedDecrement(&pool->nbPendingJobs) == 0) {
            SetEvent(pool->allJobsCompleted);
        }
        ReleaseSemaphore(pool->jobSlotAvail, 1, NULL);
    }

    return 0;
}

TPool* TPool_create(int nbWorkers, int queueSize)
{
    TPool* pool;

    /* parameters sanitization */
    if (nbWorkers <= 0 || queueSize <= 0) return NULL;
    if (nbWorkers > LZ4_NBWORKERS_MAX) nbWorkers = LZ4_NBWORKERS_MAX;

    pool = calloc(1, sizeof(TPool));
    if (!pool) return NULL;

    /* Create completion port */
    pool->completionPort = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, nbWorkers);
    if (!pool->completionPort) { goto _cleanup; }

    /* Create worker threads.
     * calloc, so that slots of threads not yet created remain NULL */
    pool->nbWorkers = nbWorkers;
    pool->workerThreads = (HANDLE*)calloc((size_t)nbWorkers, sizeof(HANDLE));
    if (pool->workerThreads == NULL) { goto _cleanup; }

    {   int i;
        for (i = 0; i < nbWorkers; i++) {
            pool->workerThreads[i] = CreateThread(NULL, 0, WorkerThread, pool, 0, NULL);
            if (!pool->workerThreads[i]) { goto _cleanup; }
    }   }

    /* Initialize sync objects members */
    pool->queueSize = queueSize;
    pool->nbPendingJobs = 0;

    pool->jobSlotAvail = CreateSemaphore(NULL, queueSize+nbWorkers, queueSize+nbWorkers, NULL);
    if (!pool->jobSlotAvail) { goto _cleanup; }

    pool->allJobsCompleted = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!pool->allJobsCompleted) { goto _cleanup; }

    return pool;

_cleanup:
    TPool_free(pool);
    return NULL;
}


void TPool_submitJob(TPool* pool, void (*job_function)(void*), void* arg)
{
    assert(pool);

    /* Reserve a job slot (blocks for backpressure when the queue is full),
     * then register the new pending job */
    WaitForSingleObject(pool->jobSlotAvail, INFINITE);
    ResetEvent(pool->allJobsCompleted);
    InterlockedIncrement(&pool->nbPendingJobs);

    /* Post the job directly to the completion port */
    PostQueuedCompletionStatus(pool->completionPort,
                               0,                       /* Bytes transferred not used */
                               (ULONG_PTR)job_function, /* Store function pointer in completionKey */
                               (LPOVERLAPPED)arg);      /* Store argument in overlapped */
}

void TPool_jobsCompleted(TPool* pool)
{
    assert(pool);
    WaitForSingleObject(pool->allJobsCompleted, INFINITE);
}


#else

/* pthread availability assumed */
#include <stdlib.h>   /* malloc, free */
#include <pthread.h>  /* pthread_* */

/* A job is just a function with an opaque argument */
typedef struct TPool_job_s {
    void (*job_function)(void*);
    void *arg;
} TPool_job;

struct TPool_s {
    pthread_t* threads;
    size_t threadCapacity;
    size_t threadLimit;

    /* The queue is a circular buffer */
    TPool_job* queue;
    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The number of threads working on jobs */
    size_t numThreadsBusy;
    /* Indicates if the queue is empty */
    int queueEmpty;

    /* The mutex protects the queue */
    pthread_mutex_t queueMutex;
    /* Condition variable for pushers to wait on when the queue is full */
    pthread_cond_t queuePushCond;
    /* Condition variable for poppers to wait on when the queue is empty */
    pthread_cond_t queuePopCond;
    /* Indicates if the queue is shutting down */
    int shutdown;
};
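
/* Queue conventions, as maintained below : the buffer is allocated with one
 * slot more than the intended queue size, so that a full queue
 * ((queueTail + 1) % queueSize == queueHead) stays distinguishable from an
 * empty one (queueHead == queueTail). The queueEmpty flag caches the empty
 * state, and also covers the degenerate case : with an intended queue size
 * of 0 the buffer has a single slot, head always equals tail, and only the
 * flag tells whether a job is waiting. */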

static void TPool_shutdown(TPool* ctx);

void TPool_free(TPool* ctx) {
    if (!ctx) { return; }
    TPool_shutdown(ctx);
    pthread_mutex_destroy(&ctx->queueMutex);
    pthread_cond_destroy(&ctx->queuePushCond);
    pthread_cond_destroy(&ctx->queuePopCond);
    free(ctx->queue);
    free(ctx->threads);
    free(ctx);
}

static void* TPool_thread(void* opaque);

TPool* TPool_create(int nbThreads, int queueSize)
{
    TPool* ctx;
    /* Check parameters */
    if (nbThreads<1 || queueSize<1) { return NULL; }
    /* Allocate the context and zero initialize */
    ctx = (TPool*)calloc(1, sizeof(TPool));
    if (!ctx) { return NULL; }
    /* init pthread variables */
    {   int error = 0;
        error |= pthread_mutex_init(&ctx->queueMutex, NULL);
        error |= pthread_cond_init(&ctx->queuePushCond, NULL);
        error |= pthread_cond_init(&ctx->queuePopCond, NULL);
        if (error) { TPool_free(ctx); return NULL; }
    }
    /* Initialize the job queue.
     * It needs one extra space since one space is wasted to differentiate
     * empty and full queues.
     */
    ctx->queueSize = (size_t)queueSize + 1;
    ctx->queue = (TPool_job*)calloc(1, ctx->queueSize * sizeof(TPool_job));
    if (ctx->queue == NULL) {
        TPool_free(ctx);
        return NULL;
    }
    ctx->queueHead = 0;
    ctx->queueTail = 0;
    ctx->numThreadsBusy = 0;
    ctx->queueEmpty = 1;
    ctx->shutdown = 0;
    /* Allocate space for the thread handles */
    ctx->threads = (pthread_t*)calloc(1, (size_t)nbThreads * sizeof(pthread_t));
    if (ctx->threads == NULL) {
        TPool_free(ctx);
        return NULL;
    }
    ctx->threadCapacity = 0;
    /* Initialize the threads */
    {   int i;
        for (i = 0; i < nbThreads; ++i) {
            if (pthread_create(&ctx->threads[i], NULL, &TPool_thread, ctx)) {
                ctx->threadCapacity = (size_t)i;
                TPool_free(ctx);
                return NULL;
        }   }
        ctx->threadCapacity = (size_t)nbThreads;
        ctx->threadLimit = (size_t)nbThreads;
    }
    return ctx;
}

/* TPool_thread() :
 * Work thread for the thread pool.
 * Waits for jobs and executes them.
 * @return : NULL on failure, else non-NULL.
 */
static void* TPool_thread(void* opaque) {
    TPool* const ctx = (TPool*)opaque;
    if (!ctx) { return NULL; }
    for (;;) {
        /* Lock the mutex and wait for a non-empty queue or until shutdown */
        pthread_mutex_lock(&ctx->queueMutex);

        while ( ctx->queueEmpty
            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
            if (ctx->shutdown) {
                /* even if !queueEmpty (possible when numThreadsBusy >= threadLimit),
                 * a few threads may shut down while the queue is still non-empty,
                 * but enough threads will remain active to finish the queue */
                pthread_mutex_unlock(&ctx->queueMutex);
                return opaque;
            }
            pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
        }
        /* Pop a job off the queue */
        {   TPool_job const job = ctx->queue[ctx->queueHead];
            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
            ctx->numThreadsBusy++;
            ctx->queueEmpty = (ctx->queueHead == ctx->queueTail);
            /* Unlock the mutex, signal a pusher, and run the job */
            pthread_cond_signal(&ctx->queuePushCond);
            pthread_mutex_unlock(&ctx->queueMutex);

            job.job_function(job.arg);

            /* If the intended queue size was 0, signal after finishing job */
            pthread_mutex_lock(&ctx->queueMutex);
            ctx->numThreadsBusy--;
            pthread_cond_signal(&ctx->queuePushCond);
            pthread_mutex_unlock(&ctx->queueMutex);
        }
    }  /* for (;;) */
    assert(0);  /* Unreachable */
}

/*! TPool_shutdown() :
 *  Shutdown the queue, wake any sleeping threads, and join all of the threads.
 */
static void TPool_shutdown(TPool* ctx) {
    /* Shut down the queue */
    pthread_mutex_lock(&ctx->queueMutex);
    ctx->shutdown = 1;
    pthread_mutex_unlock(&ctx->queueMutex);
    /* Wake up sleeping threads */
    pthread_cond_broadcast(&ctx->queuePushCond);
    pthread_cond_broadcast(&ctx->queuePopCond);
    /* Join all of the threads */
    {   size_t i;
        for (i = 0; i < ctx->threadCapacity; ++i) {
            pthread_join(ctx->threads[i], NULL);  /* note : could fail */
    }   }
}

/*! TPool_jobsCompleted() :
 *  Waits for all queued jobs to finish executing.
 *  Note : workers signal queuePushCond after completing each job,
 *  so it doubles as a completion notification for this wait loop.
 */
void TPool_jobsCompleted(TPool* ctx) {
    pthread_mutex_lock(&ctx->queueMutex);
    while (!ctx->queueEmpty || ctx->numThreadsBusy > 0) {
        pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    pthread_mutex_unlock(&ctx->queueMutex);
}

/**
 * Returns 1 if the queue is full and 0 otherwise.
 *
 * When queueSize is 1 (pool was created with an intended queueSize of 0),
 * the queue only has room if a thread is free _and_ no job is waiting.
 */
static int isQueueFull(TPool const* ctx) {
    if (ctx->queueSize > 1) {
        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
    } else {
        return (ctx->numThreadsBusy == ctx->threadLimit) ||
               !ctx->queueEmpty;
    }
}
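
/* Worked example : with an intended queueSize of 3, ctx->queueSize == 4.
 * Starting empty (queueHead == queueTail == 0), three submissions move
 * queueTail to 3; isQueueFull() then reports (3 + 1) % 4 == 0 == queueHead,
 * i.e. full, and the fourth pusher blocks on queuePushCond. */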

static void
TPool_submitJob_internal(TPool* ctx, void (*job_function)(void*), void *arg)
{
    TPool_job job;
    job.job_function = job_function;
    job.arg = arg;
    assert(ctx != NULL);
    if (ctx->shutdown) return;

    ctx->queueEmpty = 0;
    ctx->queue[ctx->queueTail] = job;
    ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
    pthread_cond_signal(&ctx->queuePopCond);
}

void TPool_submitJob(TPool* ctx, void (*job_function)(void*), void* arg)
{
    assert(ctx != NULL);
    pthread_mutex_lock(&ctx->queueMutex);
    /* Wait until there is space in the queue for the new job */
    while (isQueueFull(ctx) && (!ctx->shutdown)) {
        pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    TPool_submitJob_internal(ctx, job_function, arg);
    pthread_mutex_unlock(&ctx->queueMutex);
}

#endif  /* LZ4IO_MULTITHREAD */