X-Git-Url: https://gerrit.automotivelinux.org/gerrit/gitweb?a=blobdiff_plain;f=src%2Fjobs.c;h=f5c9ddea2c66fc774925e8bd7b69e2ff3d904f07;hb=48df1b4c1e9d34ab8cafe0f496ddac299a00e00a;hp=c4f32244fc67767f42c3c4bac60873930a43e45b;hpb=feccdb76f572a5fad947475c21b5b9aff696b04b;p=src%2Fapp-framework-binder.git diff --git a/src/jobs.c b/src/jobs.c index c4f32244..f5c9ddea 100644 --- a/src/jobs.c +++ b/src/jobs.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016, 2017 "IoT.bzh" + * Copyright (C) 2016, 2017, 2018 "IoT.bzh" * Author José Bollo * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,328 +17,1095 @@ #define _GNU_SOURCE +#if defined(NO_JOBS_WATCHDOG) +# define HAS_WATCHDOG 0 +#else +# define HAS_WATCHDOG 1 +#endif + #include +#include #include #include +#include #include #include #include #include #include +#include + +#include +#include "fdev.h" +#if HAS_WATCHDOG +#include +#endif #include "jobs.h" #include "sig-monitor.h" #include "verbose.h" -/* control of threads */ +#if defined(REMOVE_SYSTEMD_EVENT) +#include "fdev-epoll.h" +#endif + +#define EVENT_TIMEOUT_TOP ((uint64_t)-1) +#define EVENT_TIMEOUT_CHILD ((uint64_t)10000) + +struct thread; + +/** Internal shortcut for callback */ +typedef void (*job_cb_t)(int, void*); + +/** Description of a pending job */ +struct job +{ + struct job *next; /**< link to the next job enqueued */ + const void *group; /**< group of the request */ + job_cb_t callback; /**< processing callback */ + void *arg; /**< argument */ + int timeout; /**< timeout in second for processing the request */ + unsigned blocked: 1; /**< is an other request blocking this one ? */ + unsigned dropped: 1; /**< is removed ? */ +}; + +/** Description of handled event loops */ +struct evloop +{ + unsigned state; /**< encoded state */ + int efd; /**< event notification */ + struct sd_event *sdev; /**< the systemd event loop */ + struct fdev *fdev; /**< handling of events */ + struct thread *holder; /**< holder of the evloop */ +}; + +#define EVLOOP_STATE_WAIT 1U +#define EVLOOP_STATE_RUN 2U + +/** Description of threads */ struct thread { - pthread_t tid; /* the thread id */ - unsigned stop: 1; /* stop request */ - unsigned ended: 1; /* ended status */ - unsigned works: 1; /* is it processing a job? */ + struct thread *next; /**< next thread of the list */ + struct thread *upper; /**< upper same thread */ + struct thread *nholder;/**< next holder for evloop */ + pthread_cond_t *cwhold;/**< condition wait for holding */ + struct job *job; /**< currently processed job */ + pthread_t tid; /**< the thread id */ + volatile unsigned stop: 1; /**< stop requested */ + volatile unsigned waits: 1; /**< is waiting? */ }; -/* describes pending job */ -struct job +/** + * Description of synchronous callback + */ +struct sync { - struct job *next; /* link to the next job enqueued */ - void *group; /* group of the request */ - void (*callback)(int,void*,void*,void*); /* processing callback */ - void *arg1; /* first arg */ - void *arg2; /* second arg */ - void *arg3; /* second arg */ - int timeout; /* timeout in second for processing the request */ - int blocked; /* is an other request blocking this one ? 
*/ + struct thread thread; /**< thread loop data */ + union { + void (*callback)(int, void*); /**< the synchronous callback */ + void (*enter)(int signum, void *closure, struct jobloop *jobloop); + /**< the entering synchronous routine */ + }; + void *arg; /**< the argument of the callback */ }; + /* synchronisation of threads */ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t cond = PTHREAD_COND_INITIALIZER; -/* queue of pending jobs */ -static struct job *first_job = NULL; - /* count allowed, started and running threads */ -static int allowed = 0; -static int started = 0; -static int running = 0; -static int remains = 0; +static int allowed = 0; /** allowed count of threads */ +static int started = 0; /** started count of threads */ +static int running = 0; /** running count of threads */ +static int remains = 0; /** allowed count of waiting jobs */ /* list of threads */ -static struct thread *threads = NULL; +static struct thread *threads; +static _Thread_local struct thread *current_thread; + +/* queue of pending jobs */ +static struct job *first_job; +static struct job *free_jobs; -/* add the job to the list */ -static inline void job_add(struct job *job) +/* event loop */ +static struct evloop evloop; + +#if defined(REMOVE_SYSTEMD_EVENT) +static struct fdev_epoll *fdevepoll; +static int waitevt; +#endif + +/** + * Create a new job with the given parameters + * @param group the group of the job + * @param timeout the timeout of the job (0 if none) + * @param callback the function that achieves the job + * @param arg the argument of the callback + * @return the created job unblock or NULL when no more memory + */ +static struct job *job_create( + const void *group, + int timeout, + job_cb_t callback, + void *arg) +{ + struct job *job; + + /* try recyle existing job */ + job = free_jobs; + if (job) + free_jobs = job->next; + else { + /* allocation without blocking */ + pthread_mutex_unlock(&mutex); + job = malloc(sizeof *job); + pthread_mutex_lock(&mutex); + if (!job) { + ERROR("out of memory"); + errno = ENOMEM; + goto end; + } + } + /* initialises the job */ + job->group = group; + job->timeout = timeout; + job->callback = callback; + job->arg = arg; + job->blocked = 0; + job->dropped = 0; +end: + return job; +} + +/** + * Adds 'job' at the end of the list of jobs, marking it + * as blocked if an other job with the same group is pending. + * @param job the job to add + */ +static void job_add(struct job *job) { - void *group = job->group; + const void *group; struct job *ijob, **pjob; + /* prepare to add */ + group = job->group; + job->next = NULL; + + /* search end and blockers */ pjob = &first_job; ijob = first_job; - group = job->group ? : job; while (ijob) { - if (ijob->group == group) + if (group && ijob->group == group) job->blocked = 1; pjob = &ijob->next; ijob = ijob->next; } + + /* queue the jobs */ *pjob = job; - job->next = NULL; remains--; } -/* get the next job to process or NULL if none */ +/** + * Get the next job to process or NULL if none. 
+ * @return the first job that isn't blocked or NULL + */ static inline struct job *job_get() { - struct job *job, **pjob; - pjob = &first_job; - job = first_job; - while (job && job->blocked) { - pjob = &job->next; + struct job *job = first_job; + while (job && job->blocked) job = job->next; - } - if (job) { - *pjob = job->next; + if (job) remains++; - } return job; } -/* unblock a group of job */ -static inline void job_unblock(void *group) +/** + * Releases the processed 'job': removes it + * from the list of jobs and unblock the first + * pending job of the same group if any. + * @param job the job to release + */ +static inline void job_release(struct job *job) { - struct job *job; + struct job *ijob, **pjob; + const void *group; - job = first_job; - while (job) { - if (job->group == group) { - job->blocked = 0; - break; - } - job = job->next; + /* first unqueue the job */ + pjob = &first_job; + ijob = first_job; + while (ijob != job) { + pjob = &ijob->next; + ijob = ijob->next; + } + *pjob = job->next; + + /* then unblock jobs of the same group */ + group = job->group; + if (group) { + ijob = job->next; + while (ijob && ijob->group != group) + ijob = ijob->next; + if (ijob) + ijob->blocked = 0; } + + /* recycle the job */ + job->next = free_jobs; + free_jobs = job; } -/* call the job */ -static inline void job_call(int signum, void *arg) +/** + * Monitored cancel callback for a job. + * This function is called by the monitor + * to cancel the job when the safe environment + * is set. + * @param signum 0 on normal flow or the number + * of the signal that interrupted the normal + * flow, isn't used + * @param arg the job to run + */ +static void job_cancel(int signum, void *arg) { struct job *job = arg; - job->callback(signum, job->arg1, job->arg2, job->arg3); + job->callback(SIGABRT, job->arg); } -/* cancel the job */ -static inline void job_cancel(int signum, void *arg) +#if defined(REMOVE_SYSTEMD_EVENT) +/** + * Gets a fdev_epoll item. + * @return a fdev_epoll or NULL in case of error + */ +static struct fdev_epoll *get_fdevepoll() { - struct job *job = arg; - job->callback(SIGABRT, job->arg1, job->arg2, job->arg3); + struct fdev_epoll *result; + + result = fdevepoll; + if (!result) + result = fdevepoll = fdev_epoll_create(); + + return result; +} +#endif + +/** + * Monitored normal callback for events. + * This function is called by the monitor + * to run the event loop when the safe environment + * is set. + * @param signum 0 on normal flow or the number + * of the signal that interrupted the normal + * flow + * @param arg the events to run + */ +static void evloop_run(int signum, void *arg) +{ + int rc; + struct sd_event *se; + + if (!signum) { + se = evloop.sdev; + rc = sd_event_prepare(se); + if (rc < 0) { + errno = -rc; + CRITICAL("sd_event_prepare returned an error (state: %d): %m", sd_event_get_state(se)); + abort(); + } else { + if (rc == 0) { + rc = sd_event_wait(se, (uint64_t)(int64_t)-1); + if (rc < 0) { + errno = -rc; + ERROR("sd_event_wait returned an error (state: %d): %m", sd_event_get_state(se)); + } + } + evloop.state = EVLOOP_STATE_RUN; + if (rc > 0) { + rc = sd_event_dispatch(se); + if (rc < 0) { + errno = -rc; + ERROR("sd_event_dispatch returned an error (state: %d): %m", sd_event_get_state(se)); + } + } + } + } } -/* main loop of processing threads */ -static void *thread_main_loop(void *data) +/** + * Internal callback for evloop management. + * The effect of this function is hidden: it exits + * the waiting poll if any. 
+ */ +static void evloop_on_efd_event() +{ + uint64_t x; + read(evloop.efd, &x, sizeof x); +} + +/** + * wakeup the event loop if needed by sending + * an event. + */ +static void evloop_wakeup() +{ + uint64_t x; + + if (evloop.state & EVLOOP_STATE_WAIT) { + x = 1; + write(evloop.efd, &x, sizeof x); + } +} + +/** + * Release the currently held event loop + */ +static void evloop_release() +{ + struct thread *nh, *ct = current_thread; + + if (evloop.holder == ct) { + nh = ct->nholder; + evloop.holder = nh; + if (nh) + pthread_cond_signal(nh->cwhold); + } +} + +/** + * get the eventloop for the current thread + */ +static int evloop_get() +{ + struct thread *ct = current_thread; + + if (evloop.holder) + return evloop.holder == ct; + + ct->nholder = NULL; + evloop.holder = ct; + return 1; +} + +/** + * acquire the eventloop for the current thread + */ +static void evloop_acquire() +{ + struct thread **pwait, *ct; + pthread_cond_t cond; + + /* try to get the evloop */ + if (!evloop_get()) { + /* failed, init waiting state */ + ct = current_thread; + ct->nholder = NULL; + ct->cwhold = &cond; + pthread_cond_init(&cond, NULL); + + /* queue current thread in holder list */ + pwait = &evloop.holder; + while (*pwait) + pwait = &(*pwait)->nholder; + *pwait = ct; + + /* wake up the evloop */ + evloop_wakeup(); + + /* wait to acquire the evloop */ + pthread_cond_wait(&cond, &mutex); + pthread_cond_destroy(&cond); + } +} + +#if defined(REMOVE_SYSTEMD_EVENT) +/** + * Monitored normal loop for waiting events. + * @param signum 0 on normal flow or the number + * of the signal that interrupted the normal + * flow + * @param arg the events to run + */ +static void monitored_wait_and_dispatch(int signum, void *arg) +{ + struct fdev_epoll *fdev_epoll = arg; + if (!signum) { + fdev_epoll_wait_and_dispatch(fdev_epoll, -1); + } +} +#endif + +/** + * Enter the thread + * @param me the description of the thread to enter + */ +static void thread_enter(volatile struct thread *me) +{ + /* initialize description of itself and link it in the list */ + me->tid = pthread_self(); + me->stop = 0; + me->waits = 0; + me->upper = current_thread; + me->next = threads; + threads = (struct thread*)me; + current_thread = (struct thread*)me; +} + +/** + * leave the thread + * @param me the description of the thread to leave + */ +static void thread_leave() +{ + struct thread **prv, *me; + + /* unlink the current thread and cleanup */ + me = current_thread; + prv = &threads; + while (*prv != me) + prv = &(*prv)->next; + *prv = me->next; + + current_thread = me->upper; +} + +/** + * Main processing loop of internal threads with processing jobs. + * The loop must be called with the mutex locked + * and it returns with the mutex locked. + * @param me the description of the thread to use + * TODO: how are timeout handled when reentering? + */ +static void thread_run_internal(volatile struct thread *me) { - struct thread *me = data; struct job *job; - me->works = 0; - me->ended = 0; - sig_monitor_init_timeouts(); - pthread_mutex_lock(&mutex); + /* enter thread */ + thread_enter(me); + + /* loop until stopped */ while (!me->stop) { + /* release the current event loop */ + evloop_release(); + /* get a job */ job = job_get(); - if (job == NULL && first_job != NULL && running == 0) { - /* sad situation!! should not happen */ - ERROR("threads are blocked!"); - job = first_job; - first_job = job->next; - } - if (job == NULL) { - /* no job... 
*/ - pthread_cond_wait(&cond, &mutex); - } else { + if (job) { + /* prepare running the job */ + job->blocked = 1; /* mark job as blocked */ + me->job = job; /* record the job (only for terminate) */ + /* run the job */ - running++; - me->works = 1; pthread_mutex_unlock(&mutex); - sig_monitor(job->timeout, job_call, job); + sig_monitor(job->timeout, job->callback, job->arg); pthread_mutex_lock(&mutex); - me->works = 0; + + /* release the run job */ + job_release(job); +#if !defined(REMOVE_SYSTEMD_EVENT) + /* no job, check event loop wait */ + } else if (evloop_get()) { + if (evloop.state != 0) { + /* busy ? */ + CRITICAL("Can't enter dispatch while in dispatch!"); + abort(); + } + /* run the events */ + evloop.state = EVLOOP_STATE_RUN|EVLOOP_STATE_WAIT; + pthread_mutex_unlock(&mutex); + sig_monitor(0, evloop_run, NULL); + pthread_mutex_lock(&mutex); + evloop.state = 0; + } else { + /* no job and no event loop */ running--; - if (job->group != NULL) - job_unblock(job->group); - free(job); + if (!running) + ERROR("Entering job deep sleep! Check your bindings."); + me->waits = 1; + pthread_cond_wait(&cond, &mutex); + me->waits = 0; + running++; +#else + } else if (waitevt) { + /* no job and not events */ + running--; + if (!running) + ERROR("Entering job deep sleep! Check your bindings."); + me->waits = 1; + pthread_cond_wait(&cond, &mutex); + me->waits = 0; + running++; + } else { + /* wait for events */ + waitevt = 1; + pthread_mutex_unlock(&mutex); + sig_monitor(0, monitored_wait_and_dispatch, get_fdevepoll()); + pthread_mutex_lock(&mutex); + waitevt = 0; +#endif } - } - me->ended = 1; - pthread_mutex_unlock(&mutex); + /* cleanup */ + evloop_release(); + thread_leave(); +} + +/** + * Main processing loop of external threads. + * The loop must be called with the mutex locked + * and it returns with the mutex locked. + * @param me the description of the thread to use + */ +static void thread_run_external(volatile struct thread *me) +{ + /* enter thread */ + thread_enter(me); + + /* loop until stopped */ + me->waits = 1; + while (!me->stop) + pthread_cond_wait(&cond, &mutex); + me->waits = 0; + thread_leave(); +} + +/** + * Root for created threads. + */ +static void thread_main() +{ + struct thread me; + + running++; + started++; + sig_monitor_init_timeouts(); + thread_run_internal(&me); sig_monitor_clean_timeouts(); - return me; + started--; + running--; } -/* start a new thread */ +/** + * Entry point for created threads. + * @param data not used + * @return NULL + */ +static void *thread_starter(void *data) +{ + pthread_mutex_lock(&mutex); + thread_main(); + pthread_mutex_unlock(&mutex); + return NULL; +} + +/** + * Starts a new thread + * @return 0 in case of success or -1 in case of error + */ static int start_one_thread() { - struct thread *t; + pthread_t tid; int rc; - assert(started < allowed); - - t = &threads[started++]; - t->stop = 0; - rc = pthread_create(&t->tid, NULL, thread_main_loop, t); + rc = pthread_create(&tid, NULL, thread_starter, NULL); if (rc != 0) { - started--; - errno = rc; + /* errno = rc; */ WARNING("not able to start thread: %m"); rc = -1; } return rc; } +/** + * Queues a new asynchronous job represented by 'callback' and 'arg' + * for the 'group' and the 'timeout'. + * Jobs are queued FIFO and are possibly executed in parallel + * concurrently except for job of the same group that are + * executed sequentially in FIFO order. + * @param group The group of the job or NULL when no group. 
+ * @param timeout The maximum execution time in seconds of the job + * or 0 for unlimited time. + * @param callback The function to execute for achieving the job. + * Its first parameter is either 0 on normal flow + * or the signal number that broke the normal flow. + * The remaining parameter is the parameter 'arg1' + * given here. + * @param arg The second argument for 'callback' + * @return 0 in case of success or -1 in case of error + */ int jobs_queue( - void *group, + const void *group, int timeout, void (*callback)(int, void*), void *arg) { - return jobs_queue3(group, timeout, (void(*)(int,void*,void*,void*))callback, arg, NULL, NULL); -} - -int jobs_queue2( - void *group, - int timeout, - void (*callback)(int, void*, void*), - void *arg1, - void *arg2) -{ - return jobs_queue3(group, timeout, (void(*)(int,void*,void*,void*))callback, arg1, arg2, NULL); -} - -/* queue the job to the 'callback' using a separate thread if available */ -int jobs_queue3( - void *group, - int timeout, - void (*callback)(int, void*, void *, void*), - void *arg1, - void *arg2, - void *arg3) -{ - const char *info; struct job *job; int rc; + pthread_mutex_lock(&mutex); + /* allocates the job */ - job = malloc(sizeof *job); - if (job == NULL) { - errno = ENOMEM; - info = "out of memory"; + job = job_create(group, timeout, callback, arg); + if (!job) goto error; - } - /* start a thread if needed */ - pthread_mutex_lock(&mutex); - if (remains == 0) { + /* check availability */ + if (remains <= 0) { + ERROR("can't process job with threads: too many jobs"); errno = EBUSY; - info = "too many jobs"; goto error2; } - if (started == running && started < allowed) { + + /* start a thread if needed */ + if (running == started && started < allowed) { + /* all threads are busy and a new can be started */ rc = start_one_thread(); if (rc < 0 && started == 0) { - /* failed to start threading */ - info = "can't start first thread"; + ERROR("can't start initial thread: %m"); goto error2; } } - /* fills and queues the job */ - job->group = group; - job->timeout = timeout; - job->callback = callback; - job->arg1 = arg1; - job->arg2 = arg2; - job->arg3 = arg3; - job->blocked = 0; + /* queues the job */ job_add(job); - pthread_mutex_unlock(&mutex); /* signal an existing job */ pthread_cond_signal(&cond); + pthread_mutex_unlock(&mutex); return 0; error2: - pthread_mutex_unlock(&mutex); - free(job); + job->next = free_jobs; + free_jobs = job; error: - ERROR("can't process job with threads: %s, %m", info); + pthread_mutex_unlock(&mutex); return -1; } -/* initialise the threads */ -int jobs_init(int allowed_count, int start_count, int waiter_count) +/** + * Internal helper function for 'jobs_enter'. + * @see jobs_enter, jobs_leave + */ +static void enter_cb(int signum, void *closure) +{ + struct sync *sync = closure; + sync->enter(signum, sync->arg, (void*)&sync->thread); +} + +/** + * Internal helper function for 'jobs_call'. + * @see jobs_call + */ +static void call_cb(int signum, void *closure) +{ + struct sync *sync = closure; + sync->callback(signum, sync->arg); + jobs_leave((void*)&sync->thread); +} + +/** + * Internal helper for synchronous jobs. It enters + * a new thread loop for evaluating the given job + * as recorded by the couple 'sync_cb' and 'sync'. 
+ * @see jobs_call, jobs_enter, jobs_leave + */ +static int do_sync( + const void *group, + int timeout, + void (*sync_cb)(int signum, void *closure), + struct sync *sync +) { - threads = calloc(allowed_count, sizeof *threads); - if (threads == NULL) { - errno = ENOMEM; - ERROR("can't allocate threads"); + struct job *job; + + pthread_mutex_lock(&mutex); + + /* allocates the job */ + job = job_create(group, timeout, sync_cb, sync); + if (!job) { + pthread_mutex_unlock(&mutex); return -1; } + /* queues the job */ + job_add(job); + + /* run until stopped */ + if (current_thread) + thread_run_internal(&sync->thread); + else + thread_run_external(&sync->thread); + pthread_mutex_unlock(&mutex); + return 0; +} + +/** + * Enter a synchronisation point: activates the job given by 'callback' + * and 'closure' using 'group' and 'timeout' to control sequencing and + * execution time. + * @param group the group for sequencing jobs + * @param timeout the time in seconds allocated to the job + * @param callback the callback that will handle the job. + * it receives 3 parameters: 'signum' that will be 0 + * on normal flow or the catched signal number in case + * of interrupted flow, the context 'closure' as given and + * a 'jobloop' reference that must be used when the job is + * terminated to unlock the current execution flow. + * @param closure the argument to the callback + * @return 0 on success or -1 in case of error + */ +int jobs_enter( + const void *group, + int timeout, + void (*callback)(int signum, void *closure, struct jobloop *jobloop), + void *closure +) +{ + struct sync sync; + + sync.enter = callback; + sync.arg = closure; + return do_sync(group, timeout, enter_cb, &sync); +} + +/** + * Unlocks the execution flow designed by 'jobloop'. + * @param jobloop indication of the flow to unlock + * @return 0 in case of success of -1 on error + */ +int jobs_leave(struct jobloop *jobloop) +{ + struct thread *t; + + pthread_mutex_lock(&mutex); + t = threads; + while (t && t != (struct thread*)jobloop) + t = t->next; + if (!t) { + errno = EINVAL; + } else { + t->stop = 1; + if (t->waits) + pthread_cond_broadcast(&cond); + else + evloop_wakeup(); + } + pthread_mutex_unlock(&mutex); + return -!t; +} + +/** + * Calls synchronously the job represented by 'callback' and 'arg1' + * for the 'group' and the 'timeout' and waits for its completion. + * @param group The group of the job or NULL when no group. + * @param timeout The maximum execution time in seconds of the job + * or 0 for unlimited time. + * @param callback The function to execute for achieving the job. + * Its first parameter is either 0 on normal flow + * or the signal number that broke the normal flow. + * The remaining parameter is the parameter 'arg1' + * given here. + * @param arg The second argument for 'callback' + * @return 0 in case of success or -1 in case of error + */ +int jobs_call( + const void *group, + int timeout, + void (*callback)(int, void*), + void *arg) +{ + struct sync sync; + + sync.callback = callback; + sync.arg = arg; + + return do_sync(group, timeout, call_cb, &sync); +} + +/** + * Internal callback for evloop management. + * The effect of this function is hidden: it exits + * the waiting poll if any. Then it wakes up a thread + * awaiting the evloop using signal. 
+ */ +static int on_evloop_efd(sd_event_source *s, int fd, uint32_t revents, void *userdata) +{ + evloop_on_efd_event(); + return 1; +} + +/* temporary hack */ +#if !defined(REMOVE_SYSTEMD_EVENT) +__attribute__((unused)) +#endif +static void evloop_callback(void *arg, uint32_t event, struct fdev *fdev) +{ + sig_monitor(0, evloop_run, arg); +} + +/** + * Gets a sd_event item for the current thread. + * @return a sd_event or NULL in case of error + */ +static struct sd_event *get_sd_event_locked() +{ + int rc; + + /* creates the evloop on need */ + if (!evloop.sdev) { + /* start the creation */ + evloop.state = 0; + /* creates the eventfd for waking up polls */ + evloop.efd = eventfd(0, EFD_CLOEXEC|EFD_SEMAPHORE); + if (evloop.efd < 0) { + ERROR("can't make eventfd for events"); + goto error1; + } + /* create the systemd event loop */ + rc = sd_event_new(&evloop.sdev); + if (rc < 0) { + ERROR("can't make new event loop"); + goto error2; + } + /* put the eventfd in the event loop */ + rc = sd_event_add_io(evloop.sdev, NULL, evloop.efd, EPOLLIN, on_evloop_efd, NULL); + if (rc < 0) { + ERROR("can't register eventfd"); +#if !defined(REMOVE_SYSTEMD_EVENT) + sd_event_unref(evloop.sdev); + evloop.sdev = NULL; +error2: + close(evloop.efd); +error1: + return NULL; + } +#else + goto error3; + } + /* handle the event loop */ + evloop.fdev = fdev_epoll_add(get_fdevepoll(), sd_event_get_fd(evloop.sdev)); + if (!evloop.fdev) { + ERROR("can't create fdev"); +error3: + sd_event_unref(evloop.sdev); +error2: + close(evloop.efd); +error1: + memset(&evloop, 0, sizeof evloop); + return NULL; + } + fdev_set_autoclose(evloop.fdev, 0); + fdev_set_events(evloop.fdev, EPOLLIN); + fdev_set_callback(evloop.fdev, evloop_callback, NULL); +#endif + } + + /* acquire the event loop */ + evloop_acquire(); + + return evloop.sdev; +} + +/** + * Gets a sd_event item for the current thread. + * @return a sd_event or NULL in case of error + */ +struct sd_event *jobs_get_sd_event() +{ + struct sd_event *result; + struct thread lt; + + /* ensure an existing thread environment */ + if (!current_thread) { + memset(<, 0, sizeof lt); + current_thread = < + } + + /* process */ + pthread_mutex_lock(&mutex); + result = get_sd_event_locked(); + pthread_mutex_unlock(&mutex); + + /* release the faked thread environment if needed */ + if (current_thread == <) { + /* + * Releasing it is needed because there is no way to guess + * when it has to be released really. But here is where it is + * hazardous: if the caller modifies the eventloop when it + * is waiting, there is no way to make the change effective. + * A workaround to achieve that goal is for the caller to + * require the event loop a second time after having modified it. + */ + NOTICE("Requiring sd_event loop out of binder callbacks is hazardous!"); + if (verbose_wants(Log_Level_Info)) + sig_monitor_dumpstack(); + evloop_release(); + current_thread = NULL; + } + + return result; +} + +#if defined(REMOVE_SYSTEMD_EVENT) +/** + * Gets the fdev_epoll item. + * @return a fdev_epoll or NULL in case of error + */ +struct fdev_epoll *jobs_get_fdev_epoll() +{ + struct fdev_epoll *result; + + pthread_mutex_lock(&mutex); + result = get_fdevepoll(); + pthread_mutex_unlock(&mutex); + + return result; +} +#endif + +/** + * Enter the jobs processing loop. + * @param allowed_count Maximum count of thread for jobs including this one + * @param start_count Count of thread to start now, must be lower. + * @param waiter_count Maximum count of jobs that can be waiting. 
+ * @param start The start routine to activate (can't be NULL) + * @return 0 in case of success or -1 in case of error. + */ +int jobs_start(int allowed_count, int start_count, int waiter_count, void (*start)(int signum, void* arg), void *arg) +{ + int rc, launched; + struct job *job; + + assert(allowed_count >= 1); + assert(start_count >= 0); + assert(waiter_count > 0); + assert(start_count <= allowed_count); + + rc = -1; + pthread_mutex_lock(&mutex); + + /* check whether already running */ + if (current_thread || allowed) { + ERROR("thread already started"); + errno = EINVAL; + goto error; + } + /* records the allowed count */ allowed = allowed_count; started = 0; running = 0; remains = waiter_count; - /* start at least one thread */ - pthread_mutex_lock(&mutex); - while (started < start_count && start_one_thread() == 0); - pthread_mutex_unlock(&mutex); +#if HAS_WATCHDOG + /* set the watchdog */ + if (sd_watchdog_enabled(0, NULL)) + sd_event_set_watchdog(get_sd_event_locked(), 1); +#endif - /* end */ - return -(started != start_count); + /* start at least one thread: the current one */ + launched = 1; + while (launched < start_count) { + if (start_one_thread() != 0) { + ERROR("Not all threads can be started"); + goto error; + } + launched++; + } + + /* queue the start job */ + job = job_create(NULL, 0, start, arg); + if (!job) + goto error; + job_add(job); + + /* run until end */ + thread_main(); + rc = 0; +error: + pthread_mutex_unlock(&mutex); + return rc; } -/* terminate all the threads and all pending requests */ +/** + * Terminate all the threads and cancel all pending jobs. + */ void jobs_terminate() { - int i, n; - struct job *job; + struct job *job, *head, *tail; + pthread_t me, *others; + struct thread *t; + int count; + + /* how am i? */ + me = pthread_self(); /* request all threads to stop */ pthread_mutex_lock(&mutex); allowed = 0; - n = started; - for (i = 0 ; i < n ; i++) - threads[i].stop = 1; - /* wait until all thread are terminated */ - while (started != 0) { - /* signal threads */ - pthread_mutex_unlock(&mutex); - pthread_cond_broadcast(&cond); - pthread_mutex_lock(&mutex); + /* count the number of threads */ + count = 0; + t = threads; + while (t) { + if (!t->upper && !pthread_equal(t->tid, me)) + count++; + t = t->next; + } - /* join the terminated threads */ - for (i = 0 ; i < n ; i++) { - if (threads[i].tid && threads[i].ended) { - pthread_join(threads[i].tid, NULL); - threads[i].tid = 0; - started--; - } - } + /* fill the array of threads */ + others = alloca(count * sizeof *others); + count = 0; + t = threads; + while (t) { + if (!t->upper && !pthread_equal(t->tid, me)) + others[count++] = t->tid; + t = t->next; + } + + /* stops the threads */ + t = threads; + while (t) { + t->stop = 1; + t = t->next; } + + /* wait the threads */ + pthread_cond_broadcast(&cond); pthread_mutex_unlock(&mutex); - free(threads); - - /* cancel pending jobs */ - while (first_job) { - job = first_job; - first_job = job->next; - sig_monitor(0, job_cancel, job); - free(job); + while (count) + pthread_join(others[--count], NULL); + pthread_mutex_lock(&mutex); + + /* cancel pending jobs of other threads */ + remains = 0; + head = first_job; + first_job = NULL; + tail = NULL; + while (head) { + /* unlink the job */ + job = head; + head = job->next; + + /* search if job is stacked for current */ + t = current_thread; + while (t && t->job != job) + t = t->upper; + if (t) { + /* yes, relink it at end */ + if (tail) + tail->next = job; + else + first_job = job; + tail = job; + job->next = NULL; 
+		} else {
+			/* no, cancel the job */
+			pthread_mutex_unlock(&mutex);
+			sig_monitor(0, job_cancel, job);
+			free(job);
+			pthread_mutex_lock(&mutex);
+		}
 	}
+	pthread_mutex_unlock(&mutex);
 }
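
Usage sketch (editorial addition, not part of the commit): the hunks above rework the public entry points jobs_start(), jobs_queue(), jobs_call() and jobs_terminate(). The minimal C example below shows how they are expected to fit together; it assumes that jobs.h exports these declarations and that calling jobs_terminate() from inside a job is permitted, and the names hello_job, quit_job and the chosen thread/queue counts are purely illustrative (return codes are ignored for brevity).

/*
 * Minimal usage sketch, assuming jobs.h declares the entry points
 * defined above (jobs_start, jobs_queue, jobs_call, jobs_terminate)
 * and that jobs_terminate() may be called from inside a job.
 */
#include <stdio.h>

#include "jobs.h"

/* a job callback: signum is 0 on normal flow, else the interrupting signal */
static void hello_job(int signum, void *arg)
{
	if (signum == 0)
		printf("job: %s\n", (const char*)arg);
	else
		printf("job %s aborted by signal %d\n", (const char*)arg, signum);
}

/* requests the end of the processing loop (assumed callable from a job) */
static void quit_job(int signum, void *arg)
{
	(void)signum;
	(void)arg;
	jobs_terminate();
}

/* start routine handed to jobs_start(): queued as the very first job */
static void start(int signum, void *arg)
{
	(void)arg;
	if (signum != 0)
		return;

	/* same group: executed sequentially in FIFO order */
	jobs_queue("group-A", 5, hello_job, "first");
	jobs_queue("group-A", 5, hello_job, "second");

	/* NULL group: may run concurrently with the jobs above */
	jobs_queue(NULL, 5, hello_job, "parallel");

	/* synchronous call: returns only once hello_job has completed */
	jobs_call(NULL, 5, hello_job, "synchronous");

	/* last job ends the processing loop, making jobs_start() return */
	jobs_queue(NULL, 0, quit_job, NULL);
}

int main(void)
{
	/* at most 4 threads (including this one), start only this one now,
	 * accept up to 16 waiting jobs; jobs_start() runs the loop itself */
	return jobs_start(4, 1, 16, start, NULL);
}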
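
The jobs_enter()/jobs_leave() pair added above blocks the calling flow until another flow releases the jobloop handed to the entering callback. The sketch below illustrates that pattern under the same assumption about jobs.h; struct pending, work_job and compute() are illustrative placeholders, not part of the commit.

/*
 * Sketch of the jobs_enter()/jobs_leave() pattern: compute() stands
 * for any real application work.
 */
#include "jobs.h"

struct pending {
	struct jobloop *jobloop; /* handed over by the entering callback */
	int result;
};

/* placeholder for real application work */
static int compute(void)
{
	return 42;
}

/* asynchronous job: does the work, then unblocks the waiting flow */
static void work_job(int signum, void *arg)
{
	struct pending *p = arg;
	p->result = (signum == 0) ? compute() : -1;
	jobs_leave(p->jobloop); /* releases the flow blocked in jobs_enter() */
}

/* entering callback: records the jobloop and triggers the work */
static void on_enter(int signum, void *closure, struct jobloop *jobloop)
{
	struct pending *p = closure;
	p->jobloop = jobloop;
	if (signum != 0 || jobs_queue(NULL, 10, work_job, p) < 0)
		jobs_leave(jobloop); /* on error, unblock immediately */
}

/* blocks the current flow until work_job() has called jobs_leave() */
static int wait_for_work(void)
{
	struct pending p = { .jobloop = NULL, .result = -1 };
	if (jobs_enter(NULL, 0, on_enter, &p) < 0)
		return -1;
	return p.result;
}

Keeping the jobloop pointer in the closure is what lets the completion path unblock exactly the flow that entered the synchronisation point.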
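
The diff also exposes the shared systemd event loop through jobs_get_sd_event(), together with a NOTICE that requiring it outside binder callbacks is hazardous. The sketch below shows one way a binder callback might attach a one-shot timer to that loop; it assumes the standard sd-event API (sd_event_now, sd_event_add_time) and the jobs.h declaration, and on_timer/arm_timer are illustrative names.

/*
 * Sketch: arming a one-shot timer on the loop returned by
 * jobs_get_sd_event(), preferably from inside a binder callback.
 */
#include <stdint.h>
#include <time.h>

#include <systemd/sd-event.h>

#include "jobs.h"

/* fires once, about one second after being armed */
static int on_timer(sd_event_source *s, uint64_t usec, void *userdata)
{
	(void)s;
	(void)usec;
	(void)userdata;
	/* ... application work ... */
	return 0;
}

/* returns 0 or a positive value on success, a negative errno-style code on error */
static int arm_timer(void)
{
	uint64_t now;
	struct sd_event *ev = jobs_get_sd_event();

	if (!ev || sd_event_now(ev, CLOCK_MONOTONIC, &now) < 0)
		return -1;
	return sd_event_add_time(ev, NULL, CLOCK_MONOTONIC,
				 now + 1000000, 0, on_timer, NULL);
}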