+ /* queues the job */
+ job_add(job);
+
+ /* run until stopped */
+ thread_run(&sync->thread);
+ pthread_mutex_unlock(&mutex);
+ return 0;
+}
+
+/**
+ * Enter a synchronisation point: activates the job given by 'callback'
+ * and 'closure' using 'group' and 'timeout' to control sequencing and
+ * execution time.
+ * @param group the group for sequencing jobs
+ * @param timeout the time in seconds allocated to the job
+ * @param callback the callback that will handle the job.
+ *                 it receives 3 parameters: 'signum' that will be 0
+ *                 on normal flow or the caught signal number in case
+ *                 of interrupted flow, the context 'closure' as given and
+ *                 a 'jobloop' reference that must be used when the job is
+ *                 terminated to unlock the current execution flow.
+ * @param closure the argument to the callback
+ * @return 0 on success or -1 in case of error
+ */
+int jobs_enter(
+	const void *group,
+	int timeout,
+	void (*callback)(int signum, void *closure, struct jobloop *jobloop),
+	void *closure
+)
+{
+	/* pack the job description for the synchronous runner */
+	struct sync sync = {
+		.enter = callback,
+		.arg = closure
+	};
+
+	return do_sync(group, timeout, enter_cb, &sync);
+}
+
+/**
+ * Internal callback for evloop management.
+ * The effect of this function is hidden: it exits
+ * the waiting poll if any. Then it wakes up a thread
+ * awaiting the evloop using signal.
+ * @param s the event source (unused)
+ * @param fd the file descriptor that fired (unused, 'evloop->efd' is used)
+ * @param revents the received poll events (unused)
+ * @param userdata the 'struct evloop' registered with sd_event_add_io
+ * @return 1 to keep the event source enabled
+ */
+static int on_evloop_efd(sd_event_source *s, int fd, uint32_t revents, void *userdata)
+{
+	uint64_t x;
+	struct evloop *evloop = userdata;
+
+	/* drain the eventfd counter; retry if a signal interrupts the read,
+	 * otherwise the counter stays non-zero and the poll fires again */
+	while (read(evloop->efd, &x, sizeof x) < 0 && errno == EINTR);
+
+	/* wake up threads blocked on the evloop condition */
+	pthread_mutex_lock(&mutex);
+	pthread_cond_broadcast(&evloop->cond);
+	pthread_mutex_unlock(&mutex);
+	return 1;
+}
+
+/**
+ * unlock the event loop if needed by sending
+ * an event.
+ * Must be called with 'mutex' held: pthread_cond_wait below
+ * releases and reacquires that mutex.
+ * @param el the event loop to unlock
+ * @param wait wait the unlocked state of the event loop
+ */
+static void unlock_evloop(struct evloop *el, int wait)
+{
+	/* wait for a modifiable event loop */
+	while (__atomic_load_n(&el->state, __ATOMIC_RELAXED) & EVLOOP_STATE_WAIT) {
+		uint64_t x = 1;
+		/* kick the eventfd to break the poll; retry on signal
+		 * interruption so the wake-up is not silently lost */
+		while (write(el->efd, &x, sizeof x) < 0 && errno == EINTR);
+		if (!wait)
+			break;
+		/* sleep until on_evloop_efd broadcasts the condition */
+		pthread_cond_wait(&el->cond, &mutex);
+	}
+}
+
+/**
+ * Unlocks the execution flow designed by 'jobloop'.
+ * @param jobloop indication of the flow to unlock
+ * @return 0 in case of success or -1 on error
+ */
+int jobs_leave(struct jobloop *jobloop)
+{
+	struct thread *thr;
+	int i;
+
+	pthread_mutex_lock(&mutex);
+
+	/* search the thread that matches 'jobloop' */
+	for (thr = threads ; thr ; thr = thr->next)
+		if (thr == (struct thread*)jobloop)
+			break;
+
+	if (!thr) {
+		/* not a known flow */
+		errno = EINVAL;
+	} else {
+		/* ask the flow to stop */
+		thr->stop = 1;
+		if (thr->waits) {
+			/* the thread sleeps on the global condition: wake it */
+			pthread_cond_broadcast(&cond);
+		} else {
+			/* the thread may hold an event loop: break its poll */
+			for (i = 0 ; i < (int)(sizeof evloop / sizeof *evloop) ; i++) {
+				if (evloop[i].holder == thr) {
+					unlock_evloop(&evloop[i], 0);
+					break;
+				}
+			}
+		}
+	}
+	pthread_mutex_unlock(&mutex);
+	return thr ? 0 : -1;
+}
+
+/**
+ * Calls synchronously the job represented by 'callback' and 'arg1'
+ * for the 'group' and the 'timeout' and waits for its completion.
+ * @param group The group of the job or NULL when no group.
+ * @param timeout The maximum execution time in seconds of the job
+ *                or 0 for unlimited time.
+ * @param callback The function to execute for achieving the job.
+ *                 Its first parameter is either 0 on normal flow
+ *                 or the signal number that broke the normal flow.
+ *                 The remaining parameter is the parameter 'arg1'
+ *                 given here.
+ * @param arg The second argument for 'callback'
+ * @return 0 in case of success or -1 in case of error
+ */
+int jobs_call(
+	const void *group,
+	int timeout,
+	void (*callback)(int, void*),
+	void *arg)
+{
+	/* pack the job description for the synchronous runner */
+	struct sync sync = {
+		.callback = callback,
+		.arg = arg
+	};
+
+	return do_sync(group, timeout, call_cb, &sync);
+}
+
+/* temporary hack */
+#if !defined(REMOVE_SYSTEMD_EVENT)
+/* with systemd events the fdev path is not used, so this callback
+ * is never registered: silence the -Wunused-function warning */
+__attribute__((unused))
+#endif
+/* fdev callback: runs the event loop under signal monitoring
+ * (the 'event' and 'fdev' parameters are unused) */
+static void evloop_callback(void *arg, uint32_t event, struct fdev *fdev)
+{
+	sig_monitor(0, evloop_run, arg);
+}
+
+/**
+ * Gets a sd_event item for the current thread.
+ * Must be called with 'mutex' held.
+ * Lazily creates the event loop (eventfd + sd_event) on first use,
+ * then attaches it to the calling thread and waits until the loop
+ * is modifiable.
+ * NOTE(review): the error paths below are interleaved with the
+ * REMOVE_SYSTEMD_EVENT preprocessor switch; the goto labels belong
+ * to different branches depending on the build configuration.
+ * @return a sd_event or NULL in case of error
+ */
+static struct sd_event *get_sd_event_locked()
+{
+	struct evloop *el;
+	int rc;
+
+	/* creates the evloop on need */
+	el = &evloop[0];
+	if (!el->sdev) {
+		/* start the creation */
+		el->state = 0;
+		/* creates the eventfd for waking up polls */
+		el->efd = eventfd(0, EFD_CLOEXEC);
+		if (el->efd < 0) {
+			ERROR("can't make eventfd for events");
+			goto error1;
+		}
+		/* create the systemd event loop */
+		rc = sd_event_new(&el->sdev);
+		if (rc < 0) {
+			ERROR("can't make new event loop");
+			goto error2;
+		}
+		/* put the eventfd in the event loop */
+		rc = sd_event_add_io(el->sdev, NULL, el->efd, EPOLLIN, on_evloop_efd, el);
+		if (rc < 0) {
+			ERROR("can't register eventfd");
+#if !defined(REMOVE_SYSTEMD_EVENT)
+			sd_event_unref(el->sdev);
+			el->sdev = NULL;
+error2:
+			close(el->efd);
+error1:
+			return NULL;
+		}
+#else
+			goto error3;
+		}
+		/* handle the event loop */
+		el->fdev = fdev_epoll_add(get_fdevepoll(), sd_event_get_fd(el->sdev));
+		if (!el->fdev) {
+			ERROR("can't create fdev");
+error3:
+			sd_event_unref(el->sdev);
+error2:
+			close(el->efd);
+error1:
+			memset(el, 0, sizeof *el);
+			return NULL;
+		}
+		fdev_set_autoclose(el->fdev, 0);
+		fdev_set_events(el->fdev, EPOLLIN);
+		fdev_set_callback(el->fdev, evloop_callback, el);
+#endif
+	}
+
+	/* attach the event loop to the current thread */
+	if (current_evloop != el) {
+		/* release the previously attached loop, if any */
+		if (current_evloop) {
+			__atomic_and_fetch(&current_evloop->state, ~EVLOOP_STATE_LOCK, __ATOMIC_RELAXED);
+			__atomic_store_n(&current_evloop->holder, NULL, __ATOMIC_RELAXED);
+		}
+		current_evloop = el;
+		__atomic_or_fetch(&el->state, EVLOOP_STATE_LOCK, __ATOMIC_RELAXED);
+		__atomic_store_n(&el->holder, current_thread, __ATOMIC_RELAXED);
+	}
+
+	/* wait for a modifiable event loop */
+	unlock_evloop(el, 1);
+
+	return el->sdev;
+}
+
+/**
+ * Gets a sd_event item for the current thread.
+ * Locking wrapper around get_sd_event_locked.
+ * @return a sd_event or NULL in case of error
+ */
+struct sd_event *jobs_get_sd_event()
+{
+	struct sd_event *sdev;
+
+	pthread_mutex_lock(&mutex);
+	sdev = get_sd_event_locked();
+	pthread_mutex_unlock(&mutex);
+
+	return sdev;
+}
+
+#if defined(REMOVE_SYSTEMD_EVENT)
+/**
+ * Gets the fdev_epoll item.
+ * Locking wrapper around get_fdevepoll.
+ * @return a fdev_epoll or NULL in case of error
+ */
+struct fdev_epoll *jobs_get_fdev_epoll()
+{
+	struct fdev_epoll *fdevep;
+
+	pthread_mutex_lock(&mutex);
+	fdevep = get_fdevepoll();
+	pthread_mutex_unlock(&mutex);
+
+	return fdevep;
+}
+#endif
+
+/**
+ * Enter the jobs processing loop.
+ * @param allowed_count Maximum count of threads for jobs, including this one
+ * @param start_count Count of threads to start now; must not be greater
+ *                    than 'allowed_count'.
+ * @param waiter_count Maximum count of jobs that can be waiting.
+ * @param start The start routine to activate (can't be NULL)
+ * @param arg The argument passed to the routine 'start'
+ * @return 0 in case of success or -1 in case of error.
+ */
+int jobs_start(int allowed_count, int start_count, int waiter_count, void (*start)(int signum, void* arg), void *arg)
+{
+ int rc, launched;
+ struct thread me;
+ struct job *job;
+
+ assert(allowed_count >= 1);
+ assert(start_count >= 0);
+ assert(waiter_count > 0);
+ assert(start_count <= allowed_count);
+
+ rc = -1;
+ pthread_mutex_lock(&mutex);
+
+ /* check whether already running */
+ if (current_thread || allowed) {
+ ERROR("thread already started");
+ errno = EINVAL;
+ goto error;
+ }
+