2 * Copyright (C) 2016-2019 "IoT.bzh"
3 * Author José Bollo <jose.bollo@iot.bzh>
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
26 #include <sys/syscall.h>
30 #include <sys/eventfd.h>
32 #include <systemd/sd-event.h>
35 #include "sig-monitor.h"
/* Timeout values for waiting on the event loop.
 * (uint64_t)-1 conventionally means "wait forever" for sd_event_wait. */
38 #define EVENT_TIMEOUT_TOP ((uint64_t)-1)
/* NOTE(review): 10000 — presumably microseconds for a child wait; confirm units at the call sites (not visible in this excerpt). */
39 #define EVENT_TIMEOUT_CHILD ((uint64_t)10000)
43 /** Internal shortcut for callback */
44 typedef void (*job_cb_t)(int, void*);
46 /** Description of a pending job */
49 struct job *next; /**< link to the next job enqueued */
50 const void *group; /**< group of the request */
51 job_cb_t callback; /**< processing callback */
52 void *arg; /**< argument */
53 int timeout; /**< timeout in seconds for processing the request */
54 unsigned blocked: 1; /**< is another request of the same group blocking this one? */
55 unsigned dropped: 1; /**< has the job been removed? */
58 /** Description of handled event loops */
61 unsigned state; /**< encoded state: OR of the EVLOOP_STATE_* flags below */
62 int efd; /**< eventfd used to wake up a blocked poll (see evloop_wakeup) */
63 struct sd_event *sdev; /**< the systemd event loop */
64 struct thread *holder; /**< holder of the evloop */
/** flag: a thread is (or is about to be) blocked polling the loop */
67 #define EVLOOP_STATE_WAIT 1U
/** flag: the loop is being run/dispatched (set before sd_event_dispatch) */
68 #define EVLOOP_STATE_RUN 2U
70 /** Description of threads */
73 struct thread *next; /**< next thread of the list */
74 struct thread *upper; /**< enclosing thread entry when re-entered on the same pthread (set from current_thread in thread_enter) */
75 struct thread *nholder;/**< next thread queued to hold the evloop (see evloop_acquire) */
76 pthread_cond_t *cwhold;/**< condition signalled when the evloop is handed over */
77 struct job *job; /**< currently processed job */
78 pthread_t tid; /**< the thread id */
79 volatile unsigned stop: 1; /**< stop requested */
80 volatile unsigned waits: 1; /**< is waiting? */
84 * Description of synchronous callback
88 struct thread thread; /**< thread loop data; its address doubles as the jobloop handle (see enter_cb/call_cb) */
90 void (*callback)(int, void*); /**< the synchronous callback (jobs_call path) */
91 void (*enter)(int signum, void *closure, struct jobloop *jobloop);
92 /**< the entering synchronous routine (jobs_enter path) */
94 void *arg; /**< the argument of the callback */
98 /* synchronisation of threads: one global mutex + condition protect all state below */
99 static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
100 static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
102 /* count allowed, started and running threads */
103 static int allowed = 0; /**< allowed count of threads */
104 static int started = 0; /**< started count of threads */
105 static int running = 0; /**< running count of threads */
106 static int remains = 0; /**< allowed count of waiting jobs */
108 /* list of threads */
109 static struct thread *threads;
/* per-pthread pointer to its innermost thread descriptor (NULL outside managed threads) */
110 static _Thread_local struct thread *current_thread;
112 /* queue of pending jobs */
113 static struct job *first_job;
114 static struct job *free_jobs; /* recycling list of released job structures */
/* the single shared event loop descriptor */
117 static struct evloop evloop;
120 * Create a new job with the given parameters
121 * @param group the group of the job
122 * @param timeout the timeout of the job (0 if none)
123 * @param callback the function that achieves the job
124 * @param arg the argument of the callback
125 * @return the created job, unblocked, or NULL when out of memory
127 static struct job *job_create(
135 /* try to recycle an existing job */
138 free_jobs = job->next;
140 /* none to recycle: allocate without keeping the mutex held */
141 pthread_mutex_unlock(&mutex);
142 job = malloc(sizeof *job);
143 pthread_mutex_lock(&mutex);
145 ERROR("out of memory");
150 /* initialises the job */
152 job->timeout = timeout;
153 job->callback = callback;
162 * Adds 'job' at the end of the list of jobs, marking it
163 * as blocked if another job with the same group is pending.
164 * @param job the job to add
166 static void job_add(struct job *job)
169 struct job *ijob, **pjob;
175 /* walk the queue to find the tail and any same-group blocker */
179 if (group && ijob->group == group)
191 * Get the next job to process or NULL if none.
192 * @return the first job that isn't blocked or NULL
194 static inline struct job *job_get()
196 struct job *job = first_job;
/* skip jobs blocked behind an in-flight job of the same group */
197 while (job && job->blocked)
205 * Releases the processed 'job': removes it
206 * from the list of jobs and unblocks the first
207 * pending job of the same group if any.
208 * @param job the job to release
210 static inline void job_release(struct job *job)
212 struct job *ijob, **pjob;
215 /* first unqueue the job */
218 while (ijob != job) {
224 /* then unblock the next pending job of the same group, if any */
228 while (ijob && ijob->group != group)
234 /* recycle the job structure onto the free list */
235 job->next = free_jobs;
240 * Monitored cancel callback for a job.
241 * This function is called by the monitor
242 * to cancel the job when the safe environment
244 * @param signum 0 on normal flow or the number
245 * of the signal that interrupted the normal
247 * @param arg the job to run
249 static void job_cancel(int signum, void *arg)
251 struct job *job = arg;
/* cancellation is signalled to the job as SIGABRT, regardless of 'signum' */
252 job->callback(SIGABRT, job->arg);
256 * Monitored normal callback for events.
257 * This function is called by the monitor
258 * to run the event loop when the safe environment
260 * @param signum 0 on normal flow or the number
261 * of the signal that interrupted the normal
263 * @param arg the events to run
265 static void evloop_run(int signum, void *arg)
272 rc = sd_event_prepare(se);
275 CRITICAL("sd_event_prepare returned an error (state: %d): %m", sd_event_get_state(se));
/* (uint64_t)-1 requests an unbounded wait; the eventfd wakes it up when needed */
279 rc = sd_event_wait(se, (uint64_t)(int64_t)-1);
282 ERROR("sd_event_wait returned an error (state: %d): %m", sd_event_get_state(se));
/* wait is over: drop the WAIT flag, keep RUN while dispatching */
285 evloop.state = EVLOOP_STATE_RUN;
287 rc = sd_event_dispatch(se);
290 ERROR("sd_event_dispatch returned an error (state: %d): %m", sd_event_get_state(se));
298 * Internal callback for evloop management.
299 * The effect of this function is hidden: it exits
300 * the waiting poll if any.
302 static void evloop_on_efd_event()
/* drain one eventfd tick; NOTE(review): the read() result is deliberately ignored here */
305 read(evloop.efd, &x, sizeof x);
309 * wakeup the event loop if needed by sending
/* writes a tick to the eventfd only when a thread is actually blocked polling */
312 static void evloop_wakeup()
316 if (evloop.state & EVLOOP_STATE_WAIT) {
318 write(evloop.efd, &x, sizeof x);
323 * Release the currently held event loop
/* hands the loop over to the next queued holder (nholder) and signals it */
325 static void evloop_release()
327 struct thread *nh, *ct = current_thread;
/* only the current holder may release */
329 if (ct && evloop.holder == ct) {
333 pthread_cond_signal(nh->cwhold);
338 * get the eventloop for the current thread
/* @return non-zero when the current thread now holds the evloop */
340 static int evloop_get()
342 struct thread *ct = current_thread;
345 return evloop.holder == ct;
356 * acquire the eventloop for the current thread
358 static void evloop_acquire()
360 struct thread **pwait, *ct;
363 /* try to get the evloop */
365 /* failed, init waiting state */
/* NOTE(review): this appears to init/destroy a local pthread_cond_t named like the
 * global 'cond' — confirm against the full source that it is a local, otherwise
 * re-initialising the global statically-initialised condition would be a bug. */
369 pthread_cond_init(&cond, NULL);
371 /* queue current thread at the tail of the holder list */
372 pwait = &evloop.holder;
374 pwait = &(*pwait)->nholder;
377 /* wake up the evloop */
380 /* wait to acquire the evloop */
381 pthread_cond_wait(&cond, &mutex);
382 pthread_cond_destroy(&cond);
388 * @param me the description of the thread to enter
390 static void thread_enter(volatile struct thread *me)
393 /* initialize description of itself and link it in the list */
394 me->tid = pthread_self();
/* remember the enclosing entry so nested runs on the same pthread can be unwound */
397 me->upper = current_thread;
399 threads = (struct thread*)me;
400 current_thread = (struct thread*)me;
405 * @param me the description of the thread to leave
407 static void thread_leave()
409 struct thread **prv, *me;
411 /* unlink the current thread and cleanup */
/* restore the enclosing entry (NULL at outermost level) */
418 current_thread = me->upper;
422 * Main processing loop of internal threads with processing jobs.
423 * The loop must be called with the mutex locked
424 * and it returns with the mutex locked.
425 * @param me the description of the thread to use
426 * TODO: how are timeouts handled when reentering?
428 static void thread_run_internal(volatile struct thread *me)
435 /* loop until stopped */
437 /* release the current event loop */
443 /* prepare running the job */
444 job->blocked = 1; /* mark job as blocked */
445 me->job = job; /* record the job (only for terminate) */
/* run the job outside the lock, under the signal monitor with its timeout */
448 pthread_mutex_unlock(&mutex);
449 sig_monitor(job->timeout, job->callback, job->arg);
450 pthread_mutex_lock(&mutex);
452 /* release the run job */
454 /* no job, check event loop wait */
455 } else if (evloop_get()) {
456 if (evloop.state != 0) {
458 CRITICAL("Can't enter dispatch while in dispatch!");
/* mark the loop as both running and waiting before polling it unlocked */
462 evloop.state = EVLOOP_STATE_RUN|EVLOOP_STATE_WAIT;
463 pthread_mutex_unlock(&mutex);
464 sig_monitor(0, evloop_run, NULL);
465 pthread_mutex_lock(&mutex);
468 /* no job and no event loop */
471 ERROR("Entering job deep sleep! Check your bindings.");
473 pthread_cond_wait(&cond, &mutex);
484 * Main processing loop of external threads.
485 * The loop must be called with the mutex locked
486 * and it returns with the mutex locked.
487 * @param me the description of the thread to use
489 static void thread_run_external(volatile struct thread *me)
494 /* loop until stopped */
/* external threads never run jobs themselves; they just wait for a broadcast */
497 pthread_cond_wait(&cond, &mutex);
503 * Root for created threads.
505 static void thread_main()
/* bracket the processing loop with signal-monitor timeout setup/teardown */
511 sig_monitor_init_timeouts();
512 thread_run_internal(&me);
513 sig_monitor_clean_timeouts();
519 * Entry point for created threads.
520 * @param data not used
523 static void *thread_starter(void *data)
525 pthread_mutex_lock(&mutex);
527 pthread_mutex_unlock(&mutex);
532 * Starts a new thread
533 * @return 0 in case of success or -1 in case of error
535 static int start_one_thread()
540 rc = pthread_create(&tid, NULL, thread_starter, NULL);
/* %m expands errno set by the failing pthread_create */
543 WARNING("not able to start thread: %m");
550 * Queues a new asynchronous job represented by 'callback' and 'arg'
551 * for the 'group' and the 'timeout'.
552 * Jobs are queued FIFO and are possibly executed in parallel
553 * concurrently except for jobs of the same group that are
554 * executed sequentially in FIFO order.
555 * @param group The group of the job or NULL when no group.
556 * @param timeout The maximum execution time in seconds of the job
557 * or 0 for unlimited time.
558 * @param callback The function to execute for achieving the job.
559 * Its first parameter is either 0 on normal flow
560 * or the signal number that broke the normal flow.
561 * The remaining parameter is the parameter 'arg1'
563 * @param arg The second argument for 'callback'
564 * @return 0 in case of success or -1 in case of error
569 void (*callback)(int, void*),
575 pthread_mutex_lock(&mutex);
577 /* allocates the job */
578 job = job_create(group, timeout, callback, arg);
582 /* check availability against the 'remains' waiting-job budget */
584 ERROR("can't process job with threads: too many jobs");
589 /* start a thread if needed */
590 if (running == started && started < allowed) {
591 /* all threads are busy and a new one can be started */
592 rc = start_one_thread();
593 if (rc < 0 && started == 0) {
594 ERROR("can't start initial thread: %m");
602 /* signal that a job is available to an existing thread */
603 pthread_cond_signal(&cond);
604 pthread_mutex_unlock(&mutex);
/* error path: recycle the unqueued job before returning */
608 job->next = free_jobs;
611 pthread_mutex_unlock(&mutex);
616 * Internal helper function for 'jobs_enter'.
617 * @see jobs_enter, jobs_leave
619 static void enter_cb(int signum, void *closure)
621 struct sync *sync = closure;
/* the sync's embedded thread address serves as the opaque jobloop handle */
622 sync->enter(signum, sync->arg, (void*)&sync->thread);
626 * Internal helper function for 'jobs_call'.
629 static void call_cb(int signum, void *closure)
631 struct sync *sync = closure;
632 sync->callback(signum, sync->arg);
/* unlike enter_cb, the flow is unlocked immediately after the callback returns */
633 jobs_leave((void*)&sync->thread);
637 * Internal helper for synchronous jobs. It enters
638 * a new thread loop for evaluating the given job
639 * as recorded by the couple 'sync_cb' and 'sync'.
640 * @see jobs_call, jobs_enter, jobs_leave
645 void (*sync_cb)(int signum, void *closure),
651 pthread_mutex_lock(&mutex);
653 /* allocates the job */
654 job = job_create(group, timeout, sync_cb, sync);
656 pthread_mutex_unlock(&mutex);
663 /* run until stopped: internal (job-processing) or external loop as appropriate */
665 thread_run_internal(&sync->thread);
667 thread_run_external(&sync->thread);
668 pthread_mutex_unlock(&mutex);
673 * Enter a synchronisation point: activates the job given by 'callback'
674 * and 'closure' using 'group' and 'timeout' to control sequencing and
676 * @param group the group for sequencing jobs
677 * @param timeout the time in seconds allocated to the job
678 * @param callback the callback that will handle the job.
679 * it receives 3 parameters: 'signum' that will be 0
680 * on normal flow or the caught signal number in case
681 * of interrupted flow, the context 'closure' as given and
682 * a 'jobloop' reference that must be used when the job is
683 * terminated to unlock the current execution flow.
684 * @param closure the argument to the callback
685 * @return 0 on success or -1 in case of error
690 void (*callback)(int signum, void *closure, struct jobloop *jobloop),
696 sync.enter = callback;
698 return do_sync(group, timeout, enter_cb, &sync);
702 * Unlocks the execution flow designated by 'jobloop'.
703 * @param jobloop indication of the flow to unlock
704 * @return 0 in case of success or -1 on error
706 int jobs_leave(struct jobloop *jobloop)
710 pthread_mutex_lock(&mutex);
/* the jobloop handle is really a struct thread*; find it in the thread list */
712 while (t && t != (struct thread*)jobloop)
719 pthread_cond_broadcast(&cond);
723 pthread_mutex_unlock(&mutex);
728 * Calls synchronously the job represented by 'callback' and 'arg1'
729 * for the 'group' and the 'timeout' and waits for its completion.
730 * @param group The group of the job or NULL when no group.
731 * @param timeout The maximum execution time in seconds of the job
732 * or 0 for unlimited time.
733 * @param callback The function to execute for achieving the job.
734 * Its first parameter is either 0 on normal flow
735 * or the signal number that broke the normal flow.
736 * The remaining parameter is the parameter 'arg1'
738 * @param arg The second argument for 'callback'
739 * @return 0 in case of success or -1 in case of error
744 void (*callback)(int, void*),
749 sync.callback = callback;
752 return do_sync(group, timeout, call_cb, &sync);
756 * Internal callback for evloop management.
757 * The effect of this function is hidden: it exits
758 * the waiting poll if any. Then it wakes up a thread
759 * awaiting the evloop using signal.
/* sd_event_add_io callback registered on the wakeup eventfd */
761 static int on_evloop_efd(sd_event_source *s, int fd, uint32_t revents, void *userdata)
763 evloop_on_efd_event();
768 * Gets a sd_event item for the current thread.
769 * Must be called with 'mutex' held.
770 * @return a sd_event or NULL in case of error
771 static struct sd_event *get_sd_event_locked()
775 /* create the evloop lazily, on first use */
777 /* start the creation */
779 /* creates the eventfd for waking up polls */
780 evloop.efd = eventfd(0, EFD_CLOEXEC|EFD_SEMAPHORE);
781 if (evloop.efd < 0) {
782 ERROR("can't make eventfd for events");
785 /* create the systemd event loop */
786 rc = sd_event_new(&evloop.sdev);
788 ERROR("can't make new event loop");
791 /* put the eventfd in the event loop */
792 rc = sd_event_add_io(evloop.sdev, NULL, evloop.efd, EPOLLIN, on_evloop_efd, NULL);
794 ERROR("can't register eventfd");
/* failure path: drop the reference taken by sd_event_new */
795 sd_event_unref(evloop.sdev);
804 /* acquire the event loop */
811 * Gets a sd_event item for the current thread.
812 * @return a sd_event or NULL in case of error
814 struct sd_event *jobs_get_sd_event()
816 struct sd_event *result;
819 /* ensure an existing thread environment */
820 if (!current_thread) {
/* NOTE(review): '<' below looks like an HTML-mangled '&' + 'lt' — the intent is
 * plainly the address of the local fake thread 'lt'; restore from the original. */
821 memset(<, 0, sizeof lt);
822 current_thread = <
826 pthread_mutex_lock(&mutex);
827 result = get_sd_event_locked();
828 pthread_mutex_unlock(&mutex);
830 /* release the faked thread environment if needed */
831 if (current_thread == <) {
833 * Releasing it is needed because there is no way to guess
834 * when it has to be released really. But here is where it is
835 * hazardous: if the caller modifies the eventloop when it
836 * is waiting, there is no way to make the change effective.
837 * A workaround to achieve that goal is for the caller to
838 * require the event loop a second time after having modified it.
840 NOTICE("Requiring sd_event loop out of binder callbacks is hazardous!");
841 if (verbose_wants(Log_Level_Info))
842 sig_monitor_dumpstack();
844 current_thread = NULL;
851 * Enter the jobs processing loop.
852 * @param allowed_count Maximum count of threads for jobs, including this one
853 * @param start_count Count of threads to start now; must not exceed allowed_count.
854 * @param waiter_count Maximum count of jobs that can be waiting.
855 * @param start The start routine to activate (can't be NULL)
856 * @return 0 in case of success or -1 in case of error.
858 int jobs_start(int allowed_count, int start_count, int waiter_count, void (*start)(int signum, void* arg), void *arg)
863 assert(allowed_count >= 1);
864 assert(start_count >= 0);
865 assert(waiter_count > 0);
866 assert(start_count <= allowed_count);
869 pthread_mutex_lock(&mutex);
871 /* check whether already running */
872 if (current_thread || allowed) {
873 ERROR("thread already started");
878 /* records the allowed count */
879 allowed = allowed_count;
882 remains = waiter_count;
884 /* start at least one thread: the current one */
886 while (launched < start_count) {
887 if (start_one_thread() != 0) {
888 ERROR("Not all threads can be started");
894 /* queue the start job */
895 job = job_create(NULL, 0, start, arg);
904 pthread_mutex_unlock(&mutex);
909 * Terminate all the threads and cancel all pending jobs.
911 void jobs_terminate()
913 struct job *job, *head, *tail;
914 pthread_t me, *others;
921 /* request all threads to stop */
922 pthread_mutex_lock(&mutex);
925 /* count the number of threads */
/* only outermost (upper == NULL) threads other than the caller are joined */
929 if (!t->upper && !pthread_equal(t->tid, me))
934 /* fill the array of threads */
935 others = alloca(count * sizeof *others);
939 if (!t->upper && !pthread_equal(t->tid, me))
940 others[count++] = t->tid;
944 /* stops the threads */
951 /* wait for the threads */
952 pthread_cond_broadcast(&cond);
/* join outside the lock to avoid deadlocking exiting threads */
953 pthread_mutex_unlock(&mutex);
955 pthread_join(others[--count], NULL);
956 pthread_mutex_lock(&mutex);
958 /* cancel pending jobs of other threads */
968 /* search if job is stacked for current */
970 while (t && t->job != job)
973 /* yes, relink it at end */
981 /* no, cancel the job */
982 pthread_mutex_unlock(&mutex);
983 sig_monitor(0, job_cancel, job);
985 pthread_mutex_lock(&mutex);
988 pthread_mutex_unlock(&mutex);