2 * Copyright (C) 2016-2019 "IoT.bzh"
3 * Author José Bollo <jose.bollo@iot.bzh>
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
26 #include <sys/syscall.h>
30 #include <sys/eventfd.h>
32 #include <systemd/sd-event.h>
36 #include "sig-monitor.h"
/* Timeout bounds used when dispatching the event loop: TOP is effectively
 * infinite (all bits set), CHILD bounds nested dispatches. Unit is not
 * visible in this excerpt — presumably microseconds as used by sd-event;
 * TODO confirm. */
40 #define EVENT_TIMEOUT_TOP ((uint64_t)-1)
41 #define EVENT_TIMEOUT_CHILD ((uint64_t)10000)
45 /** Internal shortcut for callback: (signum-or-zero, closure) */
46 typedef void (*job_cb_t)(int, void*);
48 /** Description of a pending job */
51 struct job *next; /**< link to the next job enqueued */
52 const void *group; /**< group of the request (NULL means no group/serialization) */
53 job_cb_t callback; /**< processing callback */
54 void *arg; /**< argument passed to the callback */
55 int timeout; /**< timeout in seconds for processing the request (0: unlimited) */
56 unsigned blocked: 1; /**< is another request of the same group blocking this one? */
57 unsigned dropped: 1; /**< is removed? */
60 /** Description of threads */
63 struct thread *next; /**< next thread of the list */
64 struct thread *upper; /**< enclosing (re-entered) record of the same OS thread, see thread_enter */
65 struct thread *nholder;/**< next holder waiting for the evloop (see evloop_acquire) */
66 pthread_cond_t *cwhold;/**< condition waited on for holding the evloop */
67 struct job *job; /**< currently processed job */
68 pthread_t tid; /**< the thread id */
69 volatile unsigned stop: 1; /**< stop requested */
70 volatile unsigned waits: 1; /**< is waiting? */
74 * Description of synchronous callback
78 struct thread thread; /**< thread loop data; its address doubles as the jobloop handle */
80 void (*callback)(int, void*); /**< the synchronous callback (used by jobs_call via call_cb) */
81 void (*enter)(int signum, void *closure, struct jobloop *jobloop);
82 /**< the entering synchronous routine (used by jobs_enter via enter_cb) */
84 void *arg; /**< the argument of the callback */
88 /* synchronisation of threads */
89 static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
90 static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
92 /* count allowed, started and running threads */
93 static int allowed = 0; /**< allowed count of threads */
94 static int started = 0; /**< started count of threads */
95 static int running = 0; /**< running count of threads */
96 static int remains = 0; /**< remaining count of waiting jobs allowed */
/* list of the thread records, and the per-OS-thread current record */
99 static struct thread *threads;
100 static _Thread_local struct thread *current_thread;
102 /* queue of pending jobs */
103 static struct job *first_job;
104 static struct job *free_jobs; /* recycled job structures (see job_create / job_release) */
/* the single event-loop manager, created lazily (see jobs_acquire_event_manager) */
107 static struct evmgr *evmgr;
110 * Create a new job with the given parameters
111 * @param group the group of the job
112 * @param timeout the timeout of the job (0 if none)
113 * @param callback the function that achieves the job
114 * @param arg the argument of the callback
115 * @return the created job, unblocked, or NULL when no more memory
117 static struct job *job_create(
125 /* try recycle existing job */
128 free_jobs = job->next;
/* no recyclable job: allocate a fresh one; the mutex is released
 * around malloc so other threads are not blocked by the allocation */
130 /* allocation without blocking */
131 pthread_mutex_unlock(&mutex);
132 job = malloc(sizeof *job);
133 pthread_mutex_lock(&mutex);
135 ERROR("out of memory");
140 /* initialises the job */
142 job->timeout = timeout;
143 job->callback = callback;
152 * Adds 'job' at the end of the list of jobs, marking it
153 * as blocked if another job with the same group is pending.
154 * @param job the job to add
156 static void job_add(struct job *job)
159 struct job *ijob, **pjob;
165 /* search end and blockers */
/* a NULL group never blocks: only jobs sharing a non-NULL group serialize */
169 if (group && ijob->group == group)
181 * Get the next job to process or NULL if none.
182 * @return the first job that isn't blocked or NULL
184 static inline struct job *job_get()
186 struct job *job = first_job;
/* skip jobs still blocked by an earlier job of their group */
187 while (job && job->blocked)
195 * Releases the processed 'job': removes it
196 * from the list of jobs and unblocks the first
197 * pending job of the same group if any.
198 * @param job the job to release
200 static inline void job_release(struct job *job)
202 struct job *ijob, **pjob;
205 /* first unqueue the job */
208 while (ijob != job) {
214 /* then unblock jobs of the same group */
218 while (ijob && ijob->group != group)
/* push the released structure on the free list for reuse by job_create */
224 /* recycle the job */
225 job->next = free_jobs;
230 * Monitored cancel callback for a job.
231 * This function is called by the monitor
232 * to cancel the job when the safe environment
234 * @param signum 0 on normal flow or the number
235 * of the signal that interrupted the normal
237 * @param arg the job to run
239 static void job_cancel(int signum, void *arg)
241 struct job *job = arg;
/* report cancellation by invoking the job's callback with SIGABRT */
242 job->callback(SIGABRT, job->arg);
246 * wakeup the event loop if needed by sending
/* NOTE(review): body not visible in this excerpt — presumably posts an
 * event (cf. the sys/eventfd.h include) to break the loop's wait; confirm. */
249 static void evloop_wakeup()
256 * Release the currently held event loop
258 static void evloop_release()
260 struct thread *nh, *ct = current_thread;
/* only effective when the current thread actually holds the evloop */
262 if (ct && evmgr && evmgr_release_if(evmgr, ct)) {
/* hand the evloop over to the next queued holder and wake it */
266 evmgr_try_hold(evmgr, nh);
267 pthread_cond_signal(nh->cwhold);
273 * get the eventloop for the current thread
/* returns non-zero when the evmgr exists and the current thread now holds it */
275 static int evloop_get()
277 return evmgr && evmgr_try_hold(evmgr, current_thread);
281 * acquire the eventloop for the current thread
283 static void evloop_acquire()
285 struct thread *pwait, *ct;
288 /* try to get the evloop */
290 /* failed, init waiting state */
/* NOTE(review): 'cond' here appears to be a condition declared on a line not
 * visible in this excerpt (it is initialized, waited on and destroyed below);
 * confirm it is a local, not the file-scope 'cond'. */
294 pthread_cond_init(&cond, NULL);
/* append the current thread at the tail of the holder waiting list */
296 /* queue current thread in holder list */
297 pwait = evmgr_holder(evmgr);
298 while (pwait->nholder)
299 pwait = pwait->nholder;
302 /* wake up the evloop */
305 /* wait to acquire the evloop */
306 pthread_cond_wait(&cond, &mutex);
307 pthread_cond_destroy(&cond);
313 * @param me the description of the thread to enter
315 static void thread_enter(volatile struct thread *me)
318 /* initialize description of itself and link it in the list */
319 me->tid = pthread_self();
/* remember the record being re-entered on this same OS thread, if any */
323 me->upper = current_thread;
/* push 'me' at head of the global list and make it current for this thread */
325 threads = (struct thread*)me;
326 current_thread = (struct thread*)me;
331 * @param me the description of the thread to leave
333 static void thread_leave()
335 struct thread **prv, *me;
337 /* unlink the current thread and cleanup */
/* restore the enclosing record saved by thread_enter */
344 current_thread = me->upper;
348 * Main processing loop of internal threads with processing jobs.
349 * The loop must be called with the mutex locked
350 * and it returns with the mutex locked.
351 * @param me the description of the thread to use
352 * TODO: how are timeout handled when reentering?
354 static void thread_run_internal(volatile struct thread *me)
361 /* loop until stopped */
363 /* release the current event loop */
369 /* prepare running the job */
370 job->blocked = 1; /* mark job as blocked */
371 me->job = job; /* record the job (only for terminate) */
/* run the job outside the lock, under signal/timeout monitoring */
374 pthread_mutex_unlock(&mutex);
375 sig_monitor(job->timeout, job->callback, job->arg);
376 pthread_mutex_lock(&mutex);
378 /* release the run job */
380 /* no job, check event loop wait */
381 } else if (evloop_get()) {
382 if (!evmgr_can_run(evmgr)) {
/* re-entering dispatch from within dispatch is forbidden */
384 CRITICAL("Can't enter dispatch while in dispatch!");
/* dispatch the event loop outside the lock, under signal monitoring */
388 pthread_mutex_unlock(&mutex);
389 sig_monitor(0, (void(*)(int,void*))evmgr_job_run, evmgr);
390 pthread_mutex_lock(&mutex);
392 /* no job and no event loop */
395 ERROR("Entering job deep sleep! Check your bindings.");
397 pthread_cond_wait(&cond, &mutex);
408 * Main processing loop of external threads.
409 * The loop must be called with the mutex locked
410 * and it returns with the mutex locked.
411 * @param me the description of the thread to use
413 static void thread_run_external(volatile struct thread *me)
418 /* loop until stopped */
/* external threads only sleep here until signalled on the shared condition */
421 pthread_cond_wait(&cond, &mutex);
427 * Root for created threads.
429 static void thread_main()
/* set up per-thread signal/timeout monitoring around the internal job loop */
435 sig_monitor_init_timeouts();
436 thread_run_internal(&me);
437 sig_monitor_clean_timeouts();
443 * Entry point for created threads.
444 * @param data not used
447 static void *thread_starter(void *data)
/* NOTE(review): the work between lock and unlock is on lines not visible
 * here — presumably runs thread_main with the mutex held as its contract
 * requires; confirm against the full file. */
449 pthread_mutex_lock(&mutex);
451 pthread_mutex_unlock(&mutex);
456 * Starts a new thread
457 * @return 0 in case of success or -1 in case of error
459 static int start_one_thread()
464 rc = pthread_create(&tid, NULL, thread_starter, NULL);
/* %m expands errno's message (GNU extension of the logging formatter) */
467 WARNING("not able to start thread: %m");
474 * Queues a new asynchronous job represented by 'callback' and 'arg'
475 * for the 'group' and the 'timeout'.
476 * Jobs are queued FIFO and are possibly executed in parallel
477 * concurrently except for jobs of the same group that are
478 * executed sequentially in FIFO order.
479 * @param group The group of the job or NULL when no group.
480 * @param timeout The maximum execution time in seconds of the job
481 * or 0 for unlimited time.
482 * @param callback The function to execute for achieving the job.
483 * Its first parameter is either 0 on normal flow
484 * or the signal number that broke the normal flow.
485 * The remaining parameter is the parameter 'arg1'
487 * @param arg The second argument for 'callback'
488 * @return 0 in case of success or -1 in case of error
493 void (*callback)(int, void*),
499 pthread_mutex_lock(&mutex);
501 /* allocates the job */
502 job = job_create(group, timeout, callback, arg);
/* refuse the job instead of letting the queue grow without bound */
506 /* check availability */
508 ERROR("can't process job with threads: too many jobs");
513 /* start a thread if needed */
514 if (running == started && started < allowed) {
515 /* all threads are busy and a new can be started */
516 rc = start_one_thread();
517 if (rc < 0 && started == 0) {
518 ERROR("can't start initial thread: %m");
526 /* signal an existing job */
527 pthread_cond_signal(&cond);
528 pthread_mutex_unlock(&mutex);
/* error path: return the unqueued job structure to the free list */
532 job->next = free_jobs;
535 pthread_mutex_unlock(&mutex);
540 * Internal helper function for 'jobs_enter'.
541 * @see jobs_enter, jobs_leave
543 static void enter_cb(int signum, void *closure)
545 struct sync *sync = closure;
/* forward to the user's entering routine; the embedded thread record
 * doubles as the opaque jobloop handle passed back to jobs_leave */
546 sync->enter(signum, sync->arg, (void*)&sync->thread);
550 * Internal helper function for 'jobs_call'.
553 static void call_cb(int signum, void *closure)
555 struct sync *sync = closure;
556 sync->callback(signum, sync->arg);
/* a plain synchronous call completes when the callback returns: leave the loop */
557 jobs_leave((void*)&sync->thread);
561 * Internal helper for synchronous jobs. It enters
562 * a new thread loop for evaluating the given job
563 * as recorded by the couple 'sync_cb' and 'sync'.
564 * @see jobs_call, jobs_enter, jobs_leave
569 void (*sync_cb)(int signum, void *closure),
575 pthread_mutex_lock(&mutex);
577 /* allocates the job */
578 job = job_create(group, timeout, sync_cb, sync);
580 pthread_mutex_unlock(&mutex);
587 /* run until stopped */
/* NOTE(review): the selection between the two loops below is on a line not
 * visible in this excerpt — presumably depends on whether the caller is one
 * of the module's managed threads; confirm against the full file. */
589 thread_run_internal(&sync->thread);
591 thread_run_external(&sync->thread);
592 pthread_mutex_unlock(&mutex);
597 * Enter a synchronisation point: activates the job given by 'callback'
598 * and 'closure' using 'group' and 'timeout' to control sequencing and
600 * @param group the group for sequencing jobs
601 * @param timeout the time in seconds allocated to the job
602 * @param callback the callback that will handle the job.
603 * it receives 3 parameters: 'signum' that will be 0
604 * on normal flow or the caught signal number in case
605 * of interrupted flow, the context 'closure' as given and
606 * a 'jobloop' reference that must be used when the job is
607 * terminated to unlock the current execution flow.
608 * @param closure the argument to the callback
609 * @return 0 on success or -1 in case of error
614 void (*callback)(int signum, void *closure, struct jobloop *jobloop),
620 sync.enter = callback;
622 return do_sync(group, timeout, enter_cb, &sync);
626 * Unlocks the execution flow designated by 'jobloop'.
627 * @param jobloop indication of the flow to unlock
628 * @return 0 in case of success or -1 on error
630 int jobs_leave(struct jobloop *jobloop)
634 pthread_mutex_lock(&mutex);
/* find the thread record matching the jobloop handle */
636 while (t && t != (struct thread*)jobloop)
/* wake every waiter so the matching loop can observe its stop request */
643 pthread_cond_broadcast(&cond);
647 pthread_mutex_unlock(&mutex);
652 * Calls synchronously the job represented by 'callback' and 'arg1'
653 * for the 'group' and the 'timeout' and waits for its completion.
654 * @param group The group of the job or NULL when no group.
655 * @param timeout The maximum execution time in seconds of the job
656 * or 0 for unlimited time.
657 * @param callback The function to execute for achieving the job.
658 * Its first parameter is either 0 on normal flow
659 * or the signal number that broke the normal flow.
660 * The remaining parameter is the parameter 'arg1'
662 * @param arg The second argument for 'callback'
663 * @return 0 in case of success or -1 in case of error
668 void (*callback)(int, void*),
673 sync.callback = callback;
676 return do_sync(group, timeout, call_cb, &sync);
680 * Ensure that the current running thread can control the event loop.
682 void jobs_acquire_event_manager()
686 /* ensure an existing thread environment */
687 if (!current_thread) {
/* not called from a managed thread: fake a thread record on the stack.
 * FIX: restored '&lt' — the address-of operator had been mangled to '<'
 * by an HTML-entity unescape ('&lt;' -> '<'), making these lines invalid C.
 * ('lt' is declared on a line not visible in this excerpt.) */
688 memset(&lt, 0, sizeof lt);
689 current_thread = &lt;
693 pthread_mutex_lock(&mutex);
695 /* creates the evloop on need */
697 evmgr_create(&evmgr);
699 /* acquire the event loop under lock */
704 pthread_mutex_unlock(&mutex);
706 /* release the faked thread environment if needed */
707 if (current_thread == &lt) {
709 * Releasing it is needed because there is no way to guess
710 * when it has to be released really. But here is where it is
711 * hazardous: if the caller modifies the eventloop when it
712 * is waiting, there is no way to make the change effective.
713 * A workaround to achieve that goal is for the caller to
714 * require the event loop a second time after having modified it.
716 NOTICE("Requiring event manager/loop from outside of binder's callback is hazardous!");
717 if (verbose_wants(Log_Level_Info))
718 sig_monitor_dumpstack();
720 current_thread = NULL;
725 * Enter the jobs processing loop.
726 * @param allowed_count Maximum count of thread for jobs including this one
727 * @param start_count Count of thread to start now, must be lower.
728 * @param waiter_count Maximum count of jobs that can be waiting.
729 * @param start The start routine to activate (can't be NULL)
/* @param arg The argument passed to the 'start' routine */
730 * @return 0 in case of success or -1 in case of error.
732 int jobs_start(int allowed_count, int start_count, int waiter_count, void (*start)(int signum, void* arg), void *arg)
737 assert(allowed_count >= 1);
738 assert(start_count >= 0);
739 assert(waiter_count > 0);
740 assert(start_count <= allowed_count);
743 pthread_mutex_lock(&mutex);
745 /* check whether already running */
746 if (current_thread || allowed) {
747 ERROR("thread already started");
752 /* records the allowed count */
753 allowed = allowed_count;
756 remains = waiter_count;
758 /* start at least one thread: the current one */
760 while (launched < start_count) {
761 if (start_one_thread() != 0) {
762 ERROR("Not all threads can be started");
/* queue the initial job (no group, no timeout) before entering the loop */
768 /* queue the start job */
769 job = job_create(NULL, 0, start, arg);
778 pthread_mutex_unlock(&mutex);
783 * Terminate all the threads and cancel all pending jobs.
785 void jobs_terminate()
787 struct job *job, *head, *tail;
788 pthread_t me, *others;
795 /* request all threads to stop */
796 pthread_mutex_lock(&mutex);
799 /* count the number of threads */
/* only root records (no 'upper') of OS threads other than the caller count */
803 if (!t->upper && !pthread_equal(t->tid, me))
808 /* fill the array of threads */
809 others = alloca(count * sizeof *others)	;
813 if (!t->upper && !pthread_equal(t->tid, me))
814 others[count++] = t->tid;
818 /* stops the threads */
825 /* wait the threads */
826 pthread_cond_broadcast(&cond);
/* join each other thread with the mutex released to avoid deadlock */
827 pthread_mutex_unlock(&mutex);
829 pthread_join(others[--count], NULL);
830 pthread_mutex_lock(&mutex);
832 /* cancel pending jobs of other threads */
842 /* search if job is stacked for current */
844 while (t && t->job != job)
847 /* yes, relink it at end */
/* cancel outside the lock, under signal monitoring (callback gets SIGABRT) */
855 /* no cancel the job */
856 pthread_mutex_unlock(&mutex);
857 sig_monitor(0, job_cancel, job);
859 pthread_mutex_lock(&mutex);
862 pthread_mutex_unlock(&mutex);