+ /* queues the job */
+ job_add(job);
+
+ /* run until stopped */
+ thread_run(&sync->thread);
+ pthread_mutex_unlock(&mutex);
+ return 0;
+}
+
+/**
+ * Enter a synchronisation point: activates the job given by 'callback'
+ * and 'closure' using 'group' and 'timeout' to control sequencing and
+ * execution time.
+ * @param group the group for sequencing jobs
+ * @param timeout the time in seconds allocated to the job
+ * @param callback the callback that will handle the job.
+ *                 it receives 3 parameters: 'signum' that will be 0
+ *                 on normal flow or the caught signal number in case
+ *                 of interrupted flow, the context 'closure' as given and
+ *                 a 'jobloop' reference that must be used when the job is
+ *                 terminated to unlock the current execution flow.
+ * @param closure the argument to the callback
+ * @return 0 on success or -1 in case of error
+ */
+int jobs_enter(
+	const void *group,
+	int timeout,
+	void (*callback)(int signum, void *closure, struct jobloop *jobloop),
+	void *closure
+)
+{
+	/* record the entering callback and its context, then synchronise */
+	struct sync sync = { .enter = callback, .arg = closure };
+
+	return do_sync(group, timeout, enter_cb, &sync);
+}
+
+/**
+ * Unlocks the execution flow designed by 'jobloop'.
+ * @param jobloop indication of the flow to unlock
+ * @return 0 in case of success or -1 on error
+ */
+int jobs_leave(struct jobloop *jobloop)
+{
+	struct thread *t;
+
+	pthread_mutex_lock(&mutex);
+	/* scan the thread list for the one designated by 'jobloop' */
+	for (t = threads ; t != NULL && t != (struct thread*)jobloop ; t = t->next);
+	if (!t) {
+		/* not a known thread: invalid argument */
+		errno = EINVAL;
+	} else {
+		/* ask the flow to stop and wake it up */
+		t->stop = 1;
+		if (t->waits)
+			pthread_cond_broadcast(&cond);
+		else
+			evloop_wakeup();
+	}
+	pthread_mutex_unlock(&mutex);
+	return t ? 0 : -1;
+}
+
+/**
+ * Calls synchronously the job represented by 'callback' and 'arg'
+ * for the 'group' and the 'timeout' and waits for its completion.
+ * @param group The group of the job or NULL when no group.
+ * @param timeout The maximum execution time in seconds of the job
+ *                or 0 for unlimited time.
+ * @param callback The function to execute for achieving the job.
+ *                 Its first parameter is either 0 on normal flow
+ *                 or the signal number that broke the normal flow.
+ *                 The remaining parameter is the parameter 'arg'
+ *                 given here.
+ * @param arg The second argument for 'callback'
+ * @return 0 in case of success or -1 in case of error
+ */
+int jobs_call(
+	const void *group,
+	int timeout,
+	void (*callback)(int, void*),
+	void *arg)
+{
+	/* record the job callback and its argument, then synchronise */
+	struct sync sync = { .callback = callback, .arg = arg };
+
+	return do_sync(group, timeout, call_cb, &sync);
+}
+
+/**
+ * Internal sd-event io callback for evloop management, attached to
+ * the wakeup eventfd. Its visible effect is hidden: it exits the
+ * waiting poll if any, then wakes up a thread awaiting the evloop
+ * using signal.
+ */
+static int on_evloop_efd(sd_event_source *s, int fd, uint32_t revents, void *userdata)
+{
+	/* parameters are unneeded here: the eventfd only carries a wakeup */
+	(void)s;
+	(void)fd;
+	(void)revents;
+	(void)userdata;
+	evloop_on_efd_event();
+	return 1;
+}
+
+/* temporary hack */
+#if !defined(REMOVE_SYSTEMD_EVENT)
+__attribute__((unused))
+#endif
+static void evloop_callback(void *arg, uint32_t event, struct fdev *fdev)
+{
+	/* the event and fdev details are irrelevant: just run the loop */
+	(void)event;
+	(void)fdev;
+	/* run the event loop under signal monitoring, without time limit */
+	sig_monitor(0, evloop_run, arg);
+}
+
+/**
+ * Gets a sd_event item for the current thread, creating the shared
+ * event loop (struct evloop) lazily on first use.
+ * Must be called with 'mutex' held (see jobs_get_sd_event).
+ * NOTE(review): the error handling below is interleaved with the
+ * REMOVE_SYSTEMD_EVENT preprocessor switch, so the goto labels
+ * 'error1'/'error2' exist in both branches but with different
+ * cleanup bodies. Do not reorder statements here without checking
+ * both preprocessed variants.
+ * @return a sd_event or NULL in case of error
+ */
+static struct sd_event *get_sd_event_locked()
+{
+	int rc;
+
+	/* creates the evloop on need */
+	if (!evloop.sdev) {
+		/* start the creation */
+		evloop.state = 0;
+		/* creates the eventfd for waking up polls */
+		evloop.efd = eventfd(0, EFD_CLOEXEC|EFD_SEMAPHORE);
+		if (evloop.efd < 0) {
+			ERROR("can't make eventfd for events");
+			goto error1;
+		}
+		/* create the systemd event loop */
+		rc = sd_event_new(&evloop.sdev);
+		if (rc < 0) {
+			ERROR("can't make new event loop");
+			goto error2;
+		}
+		/* put the eventfd in the event loop */
+		rc = sd_event_add_io(evloop.sdev, NULL, evloop.efd, EPOLLIN, on_evloop_efd, NULL);
+		if (rc < 0) {
+			ERROR("can't register eventfd");
+#if !defined(REMOVE_SYSTEMD_EVENT)
+			/* systemd-event variant: undo the sd_event, close the efd.
+			 * NOTE(review): unlike the other branch, evloop is not
+			 * memset here, so evloop.efd keeps a stale (closed) value;
+			 * presumably harmless because a retry overwrites it —
+			 * confirm before relying on evloop.efd elsewhere. */
+			sd_event_unref(evloop.sdev);
+			evloop.sdev = NULL;
+error2:
+			close(evloop.efd);
+error1:
+			return NULL;
+		}
+#else
+			goto error3;
+		}
+		/* handle the event loop: watch the sd_event fd through fdev/epoll */
+		evloop.fdev = fdev_epoll_add(get_fdevepoll(), sd_event_get_fd(evloop.sdev));
+		if (!evloop.fdev) {
+			ERROR("can't create fdev");
+error3:
+			sd_event_unref(evloop.sdev);
+error2:
+			close(evloop.efd);
+error1:
+			/* this branch resets the whole evloop state on failure */
+			memset(&evloop, 0, sizeof evloop);
+			return NULL;
+		}
+		/* the sd_event fd is owned by sd_event: do not close it with the fdev */
+		fdev_set_autoclose(evloop.fdev, 0);
+		fdev_set_events(evloop.fdev, EPOLLIN);
+		fdev_set_callback(evloop.fdev, evloop_callback, NULL);
+#endif
+	}
+
+	/* acquire the event loop */
+	evloop_acquire();
+
+	return evloop.sdev;
+}
+
+/**
+ * Gets a sd_event item for the current thread.
+ * When called from outside of a job thread, a temporary fake
+ * thread environment is installed for the duration of the call.
+ * @return a sd_event or NULL in case of error
+ */
+struct sd_event *jobs_get_sd_event()
+{
+	struct sd_event *sdev;
+	struct thread fake;
+
+	/* ensure an existing thread environment */
+	if (!current_thread) {
+		memset(&fake, 0, sizeof fake);
+		current_thread = &fake;
+	}
+
+	/* get the event loop under the lock */
+	pthread_mutex_lock(&mutex);
+	sdev = get_sd_event_locked();
+	pthread_mutex_unlock(&mutex);
+
+	/* release the faked thread environment if needed */
+	if (current_thread == &fake) {
+		/*
+		 * Releasing it is needed because there is no way to guess
+		 * when it has to be released really. But here is where it is
+		 * hazardous: if the caller modifies the eventloop when it
+		 * is waiting, there is no way to make the change effective.
+		 * A workaround to achieve that goal is for the caller to
+		 * require the event loop a second time after having modified it.
+		 */
+		NOTICE("Requiring sd_event loop out of binder callbacks is hazardous!");
+		if (verbose_wants(Log_Level_Info))
+			sig_monitor_dumpstack();
+		evloop_release();
+		current_thread = NULL;
+	}
+
+	return sdev;
+}
+
+#if defined(REMOVE_SYSTEMD_EVENT)
+/**
+ * Gets the fdev_epoll item, serialised by the global mutex.
+ * @return a fdev_epoll or NULL in case of error
+ */
+struct fdev_epoll *jobs_get_fdev_epoll()
+{
+	struct fdev_epoll *fdev_epoll;
+
+	/* take the lock only around the shared accessor */
+	pthread_mutex_lock(&mutex);
+	fdev_epoll = get_fdevepoll();
+	pthread_mutex_unlock(&mutex);
+	return fdev_epoll;
+}
+#endif
+
+/**
+ * Enter the jobs processing loop.
+ * @param allowed_count Maximum count of thread for jobs including this one
+ * @param start_count Count of thread to start now, must be lower.
+ * @param waiter_count Maximum count of jobs that can be waiting.
+ * @param start The start routine to activate (can't be NULL)
+ * @return 0 in case of success or -1 in case of error.
+ */
+int jobs_start(int allowed_count, int start_count, int waiter_count, void (*start)(int signum, void* arg), void *arg)
+{
+ int rc, launched;
+ struct thread me;
+ struct job *job;
+
+ assert(allowed_count >= 1);
+ assert(start_count >= 0);
+ assert(waiter_count > 0);
+ assert(start_count <= allowed_count);
+
+ rc = -1;
+ pthread_mutex_lock(&mutex);
+
+ /* check whether already running */
+ if (current_thread || allowed) {
+ ERROR("thread already started");
+ errno = EINVAL;
+ goto error;
+ }
+