Update copyright dates
[src/app-framework-binder.git] / src / jobs.c
index d71e5d0..c2a2ec3 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016, 2017, 2018 "IoT.bzh"
+ * Copyright (C) 2015-2020 "IoT.bzh"
  * Author José Bollo <jose.bollo@iot.bzh>
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
 
 #define _GNU_SOURCE
 
-#if defined(NO_JOBS_WATCHDOG)
-#   define HAS_WATCHDOG 0
-#else
-#   define HAS_WATCHDOG 1
-#endif
-
 #include <stdlib.h>
 #include <stdint.h>
 #include <unistd.h>
 #include <sys/eventfd.h>
 
 #include <systemd/sd-event.h>
-#include "fdev.h"
-#if HAS_WATCHDOG
-#include <systemd/sd-daemon.h>
-#endif
 
 #include "jobs.h"
-#include "fdev-epoll.h"
+#include "evmgr.h"
 #include "sig-monitor.h"
 #include "verbose.h"
-
-#if 0
-#define _alert_ "do you really want to remove signal monitoring?"
-#define sig_monitor_init_timeouts()  ((void)0)
-#define sig_monitor_clean_timeouts() ((void)0)
-#define sig_monitor(to,cb,arg)       (cb(0,arg))
-#endif
+#include "systemd.h"
 
 #define EVENT_TIMEOUT_TOP      ((uint64_t)-1)
 #define EVENT_TIMEOUT_CHILD    ((uint64_t)10000)
 /** Internal shortcut for callback */
 typedef void (*job_cb_t)(int, void*);
 
+/** starting mode for jobs */
+enum start_mode
+{
+       Start_Default,  /**< Start a thread if more than one job is pending */
+       Start_Urgent,   /**< Always start a thread */
+       Start_Lazy      /**< Never start a thread */
+};
+
 /** Description of a pending job */
 struct job
 {
@@ -71,33 +63,22 @@ struct job
        unsigned dropped: 1; /**< is removed ? */
 };
 
-/** Description of handled event loops */
-struct evloop
-{
-       unsigned state;        /**< encoded state */
-       int efd;               /**< event notification */
-       struct sd_event *sdev; /**< the systemd event loop */
-       pthread_cond_t  cond;  /**< condition */
-       struct fdev *fdev;     /**< handling of events */
-};
-
-#define EVLOOP_STATE_WAIT           1U
-#define EVLOOP_STATE_RUN            2U
-#define EVLOOP_STATE_LOCK           4U
-
 /** Description of threads */
 struct thread
 {
        struct thread *next;   /**< next thread of the list */
        struct thread *upper;  /**< upper same thread */
+       struct thread *nholder;/**< next holder for evloop */
+       pthread_cond_t *cwhold;/**< condition wait for holding */
        struct job *job;       /**< currently processed job */
        pthread_t tid;         /**< the thread id */
        volatile unsigned stop: 1;      /**< stop requested */
        volatile unsigned waits: 1;     /**< is waiting? */
+       volatile unsigned leaved: 1;    /**< was the loop left (by jobs_leave)? */
 };
 
 /**
- * Description of synchonous callback
+ * Description of synchronous callback
  */
 struct sync
 {
@@ -110,30 +91,31 @@ struct sync
        void *arg;              /**< the argument of the callback */
 };
 
-
 /* synchronisation of threads */
 static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
 
-/* count allowed, started and running threads */
-static int allowed = 0; /** allowed count of threads */
-static int started = 0; /** started count of threads */
-static int running = 0; /** running count of threads */
-static int remains = 0; /** allowed count of waiting jobs */
+/* counts for threads */
+static int allowed_thread_count = 0; /** allowed count of threads */
+static int started_thread_count = 0; /** started count of threads */
+static int busy_thread_count = 0;    /** count of busy threads */
 
 /* list of threads */
 static struct thread *threads;
 static _Thread_local struct thread *current_thread;
-static _Thread_local struct evloop *current_evloop;
+
+/* counts for jobs */
+static int remaining_job_count = 0;  /** count of jobs that can still be queued */
+static int allowed_job_count = 0;    /** allowed count of pending jobs */
 
 /* queue of pending jobs */
-static struct job *first_job;
-static struct job *free_jobs;
+static struct job *first_pending_job;
+static struct job *first_free_job;
 
 /* event loop */
-static struct evloop evloop[1];
-static struct fdev_epoll *fdevepoll;
-static int waitevt;
+static struct evmgr *evmgr;
+
+static void (*exit_handler)();
 
 /**
  * Create a new job with the given parameters
@@ -152,16 +134,17 @@ static struct job *job_create(
        struct job *job;
 
        /* try recyle existing job */
-       job = free_jobs;
+       job = first_free_job;
        if (job)
-               free_jobs = job->next;
+               first_free_job = job->next;
        else {
                /* allocation without blocking */
                pthread_mutex_unlock(&mutex);
                job = malloc(sizeof *job);
                pthread_mutex_lock(&mutex);
                if (!job) {
-                       errno = -ENOMEM;
+                       ERROR("out of memory");
+                       errno = ENOMEM;
                        goto end;
                }
        }
@@ -191,8 +174,8 @@ static void job_add(struct job *job)
        job->next = NULL;
 
        /* search end and blockers */
-       pjob = &first_job;
-       ijob = first_job;
+       pjob = &first_pending_job;
+       ijob = first_pending_job;
        while (ijob) {
                if (group && ijob->group == group)
                        job->blocked = 1;
@@ -202,6 +185,7 @@ static void job_add(struct job *job)
 
        /* queue the jobs */
        *pjob = job;
+       remaining_job_count--;
 }
 
 /**
@@ -210,9 +194,11 @@ static void job_add(struct job *job)
  */
 static inline struct job *job_get()
 {
-       struct job *job = first_job;
+       struct job *job = first_pending_job;
        while (job && job->blocked)
                job = job->next;
+       if (job)
+               remaining_job_count++;
        return job;
 }
 
@@ -228,8 +214,8 @@ static inline void job_release(struct job *job)
        const void *group;
 
        /* first unqueue the job */
-       pjob = &first_job;
-       ijob = first_job;
+       pjob = &first_pending_job;
+       ijob = first_pending_job;
        while (ijob != job) {
                pjob = &ijob->next;
                ijob = ijob->next;
@@ -247,8 +233,8 @@ static inline void job_release(struct job *job)
        }
 
        /* recycle the job */
-       job->next = free_jobs;
-       free_jobs = job;
+       job->next = first_free_job;
+       first_free_job = job;
 }
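
A brief illustration of the group semantics implemented by job_add(), job_get() and job_release() above may help: jobs queued with the same group pointer never run concurrently and keep their FIFO order, while jobs of other groups can run in parallel on other threads. The sketch below is not part of the patch; struct my_session and the callbacks are hypothetical.

    struct my_session { int counter; };

    static void bump(int signum, void *closure)
    {
            struct my_session *s = closure;
            if (signum == 0)
                    s->counter++;   /* safe: jobs of this group are serialized */
    }

    static void bump_twice(struct my_session *s)
    {
            /* the second job is queued blocked and only becomes runnable
             * when job_release() is called for the first one */
            jobs_queue(s, 0, bump, s);
            jobs_queue(s, 0, bump, s);
    }
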
 
 /**
@@ -261,6 +247,7 @@ static inline void job_release(struct job *job)
  *               flow, isn't used
  * @param arg    the job to run
  */
+__attribute__((unused))
 static void job_cancel(int signum, void *arg)
 {
        struct job *job = arg;
@@ -268,120 +255,131 @@ static void job_cancel(int signum, void *arg)
 }
 
 /**
- * Gets a fdev_epoll item.
- * @return a fdev_epoll or NULL in case of error
+ * Wake up the event loop, if needed, by sending
+ * it an event.
  */
-static struct fdev_epoll *get_fdevepoll()
+static void evloop_wakeup()
 {
-       struct fdev_epoll *result;
-
-       result = fdevepoll;
-       if (!result)
-               result = fdevepoll = fdev_epoll_create();
-
-       return result;
+       if (evmgr)
+               evmgr_wakeup(evmgr);
 }
 
 /**
- * Monitored normal callback for events.
- * This function is called by the monitor
- * to run the event loop when the safe environment
- * is set.
- * @param signum 0 on normal flow or the number
- *               of the signal that interrupted the normal
- *               flow
- * @param arg     the events to run
+ * Release the currently held event loop
  */
-static void evloop_run(int signum, void *arg)
+static void evloop_release()
 {
-       int rc;
-       struct sd_event *se;
-       struct evloop *el = arg;
-
-       if (!signum) {
-               current_evloop = el;
-               __atomic_store_n(&el->state, EVLOOP_STATE_LOCK|EVLOOP_STATE_RUN|EVLOOP_STATE_WAIT, __ATOMIC_RELAXED);
-               se = el->sdev;
-               rc = sd_event_prepare(se);
-               if (rc < 0) {
-                       errno = -rc;
-                       ERROR("sd_event_prepare returned an error (state: %d): %m", sd_event_get_state(se));
-               } else {
-                       if (rc == 0) {
-                               rc = sd_event_wait(se, (uint64_t)(int64_t)-1);
-                               if (rc < 0) {
-                                       errno = -rc;
-                                       ERROR("sd_event_wait returned an error (state: %d): %m", sd_event_get_state(se));
-                               }
-                       }
-                       __atomic_and_fetch(&el->state, ~(EVLOOP_STATE_WAIT), __ATOMIC_RELAXED);
-
-                       if (rc > 0) {
-                               rc = sd_event_dispatch(se);
-                               if (rc < 0) {
-                                       errno = -rc;
-                                       ERROR("sd_event_dispatch returned an error (state: %d): %m", sd_event_get_state(se));
-                               }
-                       }
+       struct thread *nh, *ct = current_thread;
+
+       if (ct && evmgr && evmgr_release_if(evmgr, ct)) {
+               nh = ct->nholder;
+               ct->nholder = 0;
+               if (nh) {
+                       evmgr_try_hold(evmgr, nh);
+                       pthread_cond_signal(nh->cwhold);
                }
        }
-       __atomic_and_fetch(&el->state, ~(EVLOOP_STATE_WAIT|EVLOOP_STATE_RUN), __ATOMIC_RELAXED);
 }
 
+/**
+ * Try to hold the event loop for the current thread; returns a non-zero value on success.
+ */
+static int evloop_get()
+{
+       return evmgr && evmgr_try_hold(evmgr, current_thread);
+}
 
 /**
- * Monitored normal loop for waiting events.
- * @param signum 0 on normal flow or the number
- *               of the signal that interrupted the normal
- *               flow
- * @param arg     the events to run
+ * Acquire the event loop for the current thread, waiting until it can be held.
  */
-static void monitored_wait_and_dispatch(int signum, void *arg)
+static void evloop_acquire()
 {
-       struct fdev_epoll *fdev_epoll = arg;
-       if (!signum) {
-               fdev_epoll_wait_and_dispatch(fdev_epoll, -1);
+       struct thread *pwait, *ct;
+       pthread_cond_t cond;
+
+       /* try to get the evloop */
+       if (!evloop_get()) {
+               /* failed, init waiting state */
+               ct = current_thread;
+               ct->nholder = NULL;
+               ct->cwhold = &cond;
+               pthread_cond_init(&cond, NULL);
+
+               /* queue current thread in holder list */
+               pwait = evmgr_holder(evmgr);
+               while (pwait->nholder)
+                       pwait = pwait->nholder;
+               pwait->nholder = ct;
+
+               /* wake up the evloop */
+               evloop_wakeup();
+
+               /* wait to acquire the evloop */
+               pthread_cond_wait(&cond, &mutex);
+               pthread_cond_destroy(&cond);
        }
 }
 
 /**
- * Main processing loop of threads processing jobs.
- * The loop must be called with the mutex locked
- * and it returns with the mutex locked.
- * @param me the description of the thread to use
- * TODO: how are timeout handled when reentering?
+ * Enter the thread
+ * @param me the description of the thread to enter
  */
-static void thread_run(volatile struct thread *me)
+static void thread_enter(volatile struct thread *me)
 {
-       struct thread **prv;
-       struct job *job;
-
+       evloop_release();
        /* initialize description of itself and link it in the list */
        me->tid = pthread_self();
        me->stop = 0;
        me->waits = 0;
+       me->leaved = 0;
+       me->nholder = 0;
        me->upper = current_thread;
-       if (!current_thread) {
-               started++;
-               sig_monitor_init_timeouts();
-       }
        me->next = threads;
        threads = (struct thread*)me;
        current_thread = (struct thread*)me;
+}
+
+/**
+ * Leave the current thread: unlink it from the list of threads
+ * and restore its upper thread as the current one.
+ */
+static void thread_leave()
+{
+       struct thread **prv, *me;
+
+       /* unlink the current thread and cleanup */
+       me = current_thread;
+       prv = &threads;
+       while (*prv != me)
+               prv = &(*prv)->next;
+       *prv = me->next;
+
+       current_thread = me->upper;
+}
+
+/**
+ * Main processing loop of internal threads processing jobs.
+ * The loop must be called with the mutex locked
+ * and it returns with the mutex locked.
+ * @param me the description of the thread to use
+ * TODO: how are timeout handled when reentering?
+ */
+static void thread_run_internal(volatile struct thread *me)
+{
+       struct job *job;
+
+       /* enter thread */
+       thread_enter(me);
 
        /* loop until stopped */
        while (!me->stop) {
-               /* release the event loop */
-               if (current_evloop) {
-                       __atomic_and_fetch(&current_evloop->state, ~EVLOOP_STATE_LOCK, __ATOMIC_RELAXED);
-                       current_evloop = NULL;
-               }
+               /* release the current event loop */
+               evloop_release();
 
                /* get a job */
                job = job_get();
                if (job) {
                        /* prepare running the job */
-                       remains++; /* increases count of job that can wait */
                        job->blocked = 1; /* mark job as blocked */
                        me->job = job; /* record the job (only for terminate) */
 
@@ -392,41 +390,67 @@ static void thread_run(volatile struct thread *me)
 
                        /* release the run job */
                        job_release(job);
-               } else if (waitevt) {
-                       /* no job and not events */
-                       running--;
-                       if (!running)
+               /* no job: try to hold and run the event loop */
+               } else if (evloop_get()) {
+                       if (!evmgr_can_run(evmgr)) {
+                               /* busy ? */
+                               CRITICAL("Can't enter dispatch while in dispatch!");
+                               abort();
+                       }
+                       /* run the events */
+                       evmgr_prepare_run(evmgr);
+                       pthread_mutex_unlock(&mutex);
+                       sig_monitor(0, (void(*)(int,void*))evmgr_job_run, evmgr);
+                       pthread_mutex_lock(&mutex);
+               } else {
+                       /* no job and no event loop */
+                       busy_thread_count--;
+                       if (!busy_thread_count)
                                ERROR("Entering job deep sleep! Check your bindings.");
                        me->waits = 1;
                        pthread_cond_wait(&cond, &mutex);
                        me->waits = 0;
-                       running++;
-               } else {
-                       /* wait for events */
-                       waitevt = 1;
-                       pthread_mutex_unlock(&mutex);
-                       sig_monitor(0, monitored_wait_and_dispatch, get_fdevepoll());
-                       pthread_mutex_lock(&mutex);
-                       waitevt = 0;
+                       busy_thread_count++;
                }
        }
+       /* cleanup */
+       evloop_release();
+       thread_leave();
+}
 
-       /* release the event loop */
-       if (current_evloop) {
-               __atomic_and_fetch(&current_evloop->state, ~EVLOOP_STATE_LOCK, __ATOMIC_RELAXED);
-               current_evloop = NULL;
-       }
+/**
+ * Main processing loop of external threads.
+ * The loop must be called with the mutex locked
+ * and it returns with the mutex locked.
+ * @param me the description of the thread to use
+ */
+static void thread_run_external(volatile struct thread *me)
+{
+       /* enter thread */
+       thread_enter(me);
 
-       /* unlink the current thread and cleanup */
-       prv = &threads;
-       while (*prv != me)
-               prv = &(*prv)->next;
-       *prv = me->next;
-       current_thread = me->upper;
-       if (!current_thread) {
-               sig_monitor_clean_timeouts();
-               started--;
-       }
+       /* loop until stopped */
+       me->waits = 1;
+       while (!me->stop)
+               pthread_cond_wait(&cond, &mutex);
+       me->waits = 0;
+       thread_leave();
+}
+
+/**
+ * Common main routine of the job processing threads.
+ */
+static void thread_main()
+{
+       struct thread me;
+
+       busy_thread_count++;
+       started_thread_count++;
+       sig_monitor_init_timeouts();
+       thread_run_internal(&me);
+       sig_monitor_clean_timeouts();
+       started_thread_count--;
+       busy_thread_count--;
 }
 
 /**
@@ -434,14 +458,10 @@ static void thread_run(volatile struct thread *me)
  * @param data not used
  * @return NULL
  */
-static void *thread_main(void *data)
+static void *thread_starter(void *data)
 {
-       struct thread me;
-
        pthread_mutex_lock(&mutex);
-       running++;
-       thread_run(&me);
-       running--;
+       thread_main();
        pthread_mutex_unlock(&mutex);
        return NULL;
 }
@@ -455,7 +475,7 @@ static int start_one_thread()
        pthread_t tid;
        int rc;
 
-       rc = pthread_create(&tid, NULL, thread_main, NULL);
+       rc = pthread_create(&tid, NULL, thread_starter, NULL);
        if (rc != 0) {
                /* errno = rc; */
                WARNING("not able to start thread: %m");
@@ -479,63 +499,175 @@ static int start_one_thread()
 *                 The remaining parameter is the parameter 'arg'
  *                 given here.
  * @param arg      The second argument for 'callback'
+ * @param start_mode The start mode for threads
  * @return 0 in case of success or -1 in case of error
  */
-int jobs_queue(
+static int queue_job_internal(
                const void *group,
                int timeout,
                void (*callback)(int, void*),
-               void *arg)
+               void *arg,
+               enum start_mode start_mode)
 {
-       const char *info;
        struct job *job;
-       int rc;
+       int rc, busy;
 
-       pthread_mutex_lock(&mutex);
+       /* check availability */
+       if (remaining_job_count <= 0) {
+               ERROR("can't process job with threads: too many jobs");
+               errno = EBUSY;
+               goto error;
+       }
 
        /* allocates the job */
        job = job_create(group, timeout, callback, arg);
-       if (!job) {
-               errno = ENOMEM;
-               info = "out of memory";
+       if (!job)
                goto error;
-       }
-
-       /* check availability */
-       if (remains == 0) {
-               errno = EBUSY;
-               info = "too many jobs";
-               goto error2;
-       }
 
        /* start a thread if needed */
-       if (running == started && started < allowed) {
+       busy = busy_thread_count == started_thread_count;
+       if (start_mode != Start_Lazy
+        && busy
+        && (start_mode == Start_Urgent || remaining_job_count + started_thread_count < allowed_job_count)
+        && started_thread_count < allowed_thread_count) {
                /* all threads are busy and a new one can be started */
                rc = start_one_thread();
-               if (rc < 0 && started == 0) {
-                       info = "can't start first thread";
+               if (rc < 0 && started_thread_count == 0) {
+                       ERROR("can't start initial thread: %m");
                        goto error2;
                }
+               busy = 0;
        }
 
        /* queues the job */
-       remains--;
        job_add(job);
 
-       /* signal an existing job */
+       /* wake up the event loop if needed */
+       if (busy)
+               evloop_wakeup();
+
        pthread_cond_signal(&cond);
-       pthread_mutex_unlock(&mutex);
        return 0;
 
 error2:
-       job->next = free_jobs;
-       free_jobs = job;
+       job->next = first_free_job;
+       first_free_job = job;
 error:
-       ERROR("can't process job with threads: %s, %m", info);
-       pthread_mutex_unlock(&mutex);
        return -1;
 }
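
For readability, the thread-start decision coded above can be restated as a standalone predicate. This is only a sketch mirroring the condition in queue_job_internal(), not part of the patch:

    static int should_start_thread(enum start_mode start_mode, int busy)
    {
            return start_mode != Start_Lazy                /* lazy never starts a thread */
                && busy                                    /* every started thread is busy */
                && (start_mode == Start_Urgent             /* urgent always starts one... */
                    || remaining_job_count + started_thread_count < allowed_job_count)
                && started_thread_count < allowed_thread_count; /* ...within the limit */
    }
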
 
+/**
+ * Queues a new asynchronous job represented by 'callback' and 'arg'
+ * for the 'group' and the 'timeout'.
+ * Jobs are queued FIFO and may be executed concurrently,
+ * except for jobs of the same group, which are
+ * executed sequentially in FIFO order.
+ * @param group    The group of the job or NULL when no group.
+ * @param timeout  The maximum execution time in seconds of the job
+ *                 or 0 for unlimited time.
+ * @param callback The function to execute for achieving the job.
+ *                 Its first parameter is either 0 on normal flow
+ *                 or the signal number that broke the normal flow.
+ *                 The remaining parameter is the parameter 'arg'
+ *                 given here.
+ * @param arg      The second argument for 'callback'
+ * @param start_mode The start mode for threads
+ * @return 0 in case of success or -1 in case of error
+ */
+static int queue_job(
+               const void *group,
+               int timeout,
+               void (*callback)(int, void*),
+               void *arg,
+               enum start_mode start_mode)
+{
+       int rc;
+
+       pthread_mutex_lock(&mutex);
+       rc = queue_job_internal(group, timeout, callback, arg, start_mode);
+       pthread_mutex_unlock(&mutex);
+       return rc;
+
+}
+
+/**
+ * Queues a new asynchronous job represented by 'callback' and 'arg'
+ * for the 'group' and the 'timeout'.
+ * Jobs are queued FIFO and may be executed concurrently,
+ * except for jobs of the same group, which are
+ * executed sequentially in FIFO order.
+ * @param group    The group of the job or NULL when no group.
+ * @param timeout  The maximum execution time in seconds of the job
+ *                 or 0 for unlimited time.
+ * @param callback The function to execute for achieving the job.
+ *                 Its first parameter is either 0 on normal flow
+ *                 or the signal number that broke the normal flow.
+ *                 The remaining parameter is the parameter 'arg'
+ *                 given here.
+ * @param arg      The second argument for 'callback'
+ * @return 0 in case of success or -1 in case of error
+ */
+int jobs_queue(
+               const void *group,
+               int timeout,
+               void (*callback)(int, void*),
+               void *arg)
+{
+       return queue_job(group, timeout, callback, arg, Start_Default);
+}
+
+/**
+ * Queues lazily a new asynchronous job represented by 'callback' and 'arg'
+ * for the 'group' and the 'timeout'.
+ * Jobs are queued FIFO and may be executed concurrently,
+ * except for jobs of the same group, which are
+ * executed sequentially in FIFO order.
+ * @param group    The group of the job or NULL when no group.
+ * @param timeout  The maximum execution time in seconds of the job
+ *                 or 0 for unlimited time.
+ * @param callback The function to execute for achieving the job.
+ *                 Its first parameter is either 0 on normal flow
+ *                 or the signal number that broke the normal flow.
+ *                 The remaining parameter is the parameter 'arg'
+ *                 given here.
+ * @param arg      The second argument for 'callback'
+ * @return 0 in case of success or -1 in case of error
+ */
+int jobs_queue_lazy(
+               const void *group,
+               int timeout,
+               void (*callback)(int, void*),
+               void *arg)
+{
+       return queue_job(group, timeout, callback, arg, Start_Lazy);
+}
+
+/**
+ * Queues urgently a new asynchronous job represented by 'callback' and 'arg'
+ * for the 'group' and the 'timeout'.
+ * Jobs are queued FIFO and may be executed concurrently,
+ * except for jobs of the same group, which are
+ * executed sequentially in FIFO order.
+ * @param group    The group of the job or NULL when no group.
+ * @param timeout  The maximum execution time in seconds of the job
+ *                 or 0 for unlimited time.
+ * @param callback The function to execute for achieving the job.
+ *                 Its first parameter is either 0 on normal flow
+ *                 or the signal number that broke the normal flow.
+ *                 The remaining parameter is the parameter 'arg'
+ *                 given here.
+ * @param arg      The second argument for 'callback'
+ * @return 0 in case of success or -1 in case of error
+ */
+int jobs_queue_urgent(
+               const void *group,
+               int timeout,
+               void (*callback)(int, void*),
+               void *arg)
+{
+       return queue_job(group, timeout, callback, arg, Start_Urgent);
+}
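
A usage sketch for the three public entry points above; the callback, its closure and the error handling are illustrative only. The three variants share the same FIFO-per-group semantics and differ only in whether queueing may start a new thread.

    static void do_work(int signum, void *closure)
    {
            if (signum != 0)
                    return; /* interrupted by a signal, e.g. after the timeout */
            /* ... do the real work on 'closure' ... */
    }

    static void submit(void *closure)
    {
            /* default mode: may start a thread when all threads are busy */
            if (jobs_queue(closure, 5, do_work, closure) < 0)
                    ERROR("can't queue the work: %m");

            /* lazy mode: never starts a thread, waits for a free one */
            jobs_queue_lazy(NULL, 0, do_work, closure);

            /* urgent mode: starts a thread whenever allowed */
            jobs_queue_urgent(NULL, 0, do_work, closure);
    }
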
+
 /**
  * Internal helper function for 'jobs_enter'.
  * @see jobs_enter, jobs_leave
@@ -570,26 +702,24 @@ static int do_sync(
                struct sync *sync
 )
 {
-       struct job *job;
+       int rc;
 
        pthread_mutex_lock(&mutex);
 
-       /* allocates the job */
-       job = job_create(group, timeout, sync_cb, sync);
-       if (!job) {
-               ERROR("out of memory");
-               errno = ENOMEM;
-               pthread_mutex_unlock(&mutex);
-               return -1;
+       rc = queue_job_internal(group, timeout, sync_cb, sync, Start_Default);
+       if (rc == 0) {
+               /* run until stopped */
+               if (current_thread)
+                       thread_run_internal(&sync->thread);
+               else
+                       thread_run_external(&sync->thread);
+               if (!sync->thread.leaved) {
+                       errno = EINTR;
+                       rc = -1;
+               }
        }
-
-       /* queues the job */
-       job_add(job);
-
-       /* run until stopped */
-       thread_run(&sync->thread);
        pthread_mutex_unlock(&mutex);
-       return 0;
+       return rc;
 }
 
 /**
@@ -637,9 +767,12 @@ int jobs_leave(struct jobloop *jobloop)
        if (!t) {
                errno = EINVAL;
        } else {
+               t->leaved = 1;
                t->stop = 1;
                if (t->waits)
                        pthread_cond_broadcast(&cond);
+               else
+                       evloop_wakeup();
        }
        pthread_mutex_unlock(&mutex);
        return -!t;
@@ -674,124 +807,48 @@ int jobs_call(
 }
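
The synchronous pattern served by do_sync(), jobs_leave() and the new 'leaved' flag can be sketched as follows. The exact prototype of jobs_enter() is not visible in this hunk, so the callback shape (receiving the struct jobloop to pass to jobs_leave()) is an assumption; the rest is illustrative.

    static void enter_cb(int signum, void *closure, struct jobloop *jobloop)
    {
            int *result = closure;

            if (signum == 0)
                    *result = 42;           /* produce the awaited result */
            jobs_leave(jobloop);            /* unblock the caller of jobs_enter() */
    }

    static int call_and_wait()
    {
            int result = -1;

            /* queues enter_cb and processes jobs and/or the event loop on the
             * calling thread until jobs_leave() is called; returns -1 with
             * errno = EINTR if the loop was stopped without leaving */
            if (jobs_enter(NULL, 0, enter_cb, &result) < 0)
                    return -1;
            return result;
    }
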
 
 /**
- * Internal callback for evloop management.
- * The effect of this function is hidden: it exits
- * the waiting poll if any. Then it wakes up a thread
- * awaiting the evloop using signal.
+ * Ensure that the current running thread can control the event loop.
  */
-static int on_evloop_efd(sd_event_source *s, int fd, uint32_t revents, void *userdata)
+void jobs_acquire_event_manager()
 {
-       uint64_t x;
-       struct evloop *evloop = userdata;
-       read(evloop->efd, &x, sizeof x);
-       pthread_mutex_lock(&mutex);
-       pthread_cond_broadcast(&evloop->cond);  
-       pthread_mutex_unlock(&mutex);
-       return 1;
-}
+       struct thread lt;
 
-/* temporary hack */
-static void evloop_callback(void *arg, uint32_t event, struct fdev *fdev)
-{
-       sig_monitor(0, evloop_run, arg);
-}
-
-/**
- * Gets a sd_event item for the current thread.
- * @return a sd_event or NULL in case of error
- */
-static struct sd_event *get_sd_event_locked()
-{
-       struct evloop *el;
-       uint64_t x;
-       int rc;
-
-       /* creates the evloop on need */
-       el = &evloop[0];
-       if (!el->sdev) {
-               /* start the creation */
-               el->state = 0;
-               /* creates the eventfd for waking up polls */
-               el->efd = eventfd(0, EFD_CLOEXEC);
-               if (el->efd < 0) {
-                       ERROR("can't make eventfd for events");
-                       goto error1;
-               }
-               /* create the systemd event loop */
-               rc = sd_event_new(&el->sdev);
-               if (rc < 0) {
-                       ERROR("can't make new event loop");
-                       goto error2;
-               }
-               /* put the eventfd in the event loop */
-               rc = sd_event_add_io(el->sdev, NULL, el->efd, EPOLLIN, on_evloop_efd, el);
-               if (rc < 0) {
-                       ERROR("can't register eventfd");
-                       goto error3;
-               }
-               /* handle the event loop */
-               el->fdev = fdev_epoll_add(get_fdevepoll(), sd_event_get_fd(el->sdev));
-               if (!el->fdev) {
-                       ERROR("can't create fdev");
-error3:
-                       sd_event_unref(el->sdev);
-error2:
-                       close(el->efd);
-error1:
-                       memset(el, 0, sizeof *el);
-                       return NULL;
-               }
-               fdev_set_autoclose(el->fdev, 0);
-               fdev_set_events(el->fdev, EPOLLIN);
-               fdev_set_callback(el->fdev, evloop_callback, el);
-       }
-
-       /* attach the event loop to the current thread */
-       if (current_evloop != el) {
-               if (current_evloop)
-                       __atomic_and_fetch(&current_evloop->state, ~EVLOOP_STATE_LOCK, __ATOMIC_RELAXED);
-               current_evloop = el;
-               __atomic_or_fetch(&el->state, EVLOOP_STATE_LOCK, __ATOMIC_RELAXED);
-       }
-
-       /* wait for a modifiable event loop */
-       while (__atomic_load_n(&el->state, __ATOMIC_RELAXED) & EVLOOP_STATE_WAIT) {
-               x = 1;
-               write(el->efd, &x, sizeof x);
-               pthread_cond_wait(&el->cond, &mutex);
+       /* ensure an existing thread environment */
+       if (!current_thread) {
+               memset(&lt, 0, sizeof lt);
+               current_thread = &lt;
        }
 
-       return el->sdev;
-}
-
-/**
- * Gets a sd_event item for the current thread.
- * @return a sd_event or NULL in case of error
- */
-struct sd_event *jobs_get_sd_event()
-{
-       struct sd_event *result;
-
+       /* lock */
        pthread_mutex_lock(&mutex);
-       result = get_sd_event_locked();
-       pthread_mutex_unlock(&mutex);
 
-       return result;
-}
+       /* creates the evloop on need */
+       if (!evmgr)
+               evmgr_create(&evmgr);
 
-/**
- * Gets the fdev_epoll item.
- * @return a fdev_epoll or NULL in case of error
- */
-struct fdev_epoll *jobs_get_fdev_epoll()
-{
-       struct fdev_epoll *result;
+       /* acquire the event loop under lock */
+       if (evmgr)
+               evloop_acquire();
 
-       pthread_mutex_lock(&mutex);
-       result = get_fdevepoll();
+       /* unlock */
        pthread_mutex_unlock(&mutex);
 
-       return result;
+       /* release the faked thread environment if needed */
+       if (current_thread == &lt) {
+               /*
+                * Releasing it here is needed because there is no way to know
+                * when it should really be released. But this is where it is
+                * hazardous: if the caller modifies the event loop while it
+                * is waiting, there is no way to make the change effective.
+                * A workaround is for the caller to acquire the event loop
+                * a second time after having modified it.
+                */
+               NOTICE("Requiring event manager/loop from outside of binder's callback is hazardous!");
+               if (verbose_wants(Log_Level_Info))
+                       sig_monitor_dumpstack();
+               evloop_release();
+               current_thread = NULL;
+       }
 }
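
A hypothetical caller sketch for the function above: code running inside a binder job callback that wants to attach a timer to the shared event loop. The helper systemd_get_event_loop() is assumed to come from the newly included "systemd.h" (only the #include is visible in this patch), and <time.h> is assumed for CLOCK_MONOTONIC.

    static int my_attach_timer(sd_event_time_handler_t on_timer, void *closure)
    {
            struct sd_event *loop;
            uint64_t usec;

            /* ensure this thread currently holds/controls the event loop */
            jobs_acquire_event_manager();

            loop = systemd_get_event_loop();        /* assumed helper */
            if (!loop)
                    return -1;

            /* arm a one-shot timer one second from now */
            sd_event_now(loop, CLOCK_MONOTONIC, &usec);
            return sd_event_add_time(loop, NULL, CLOCK_MONOTONIC,
                                     usec + 1000000, 0, on_timer, closure);
    }
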
 
 /**
@@ -802,10 +859,14 @@ struct fdev_epoll *jobs_get_fdev_epoll()
  * @param start         The start routine to activate (can't be NULL)
  * @return 0 in case of success or -1 in case of error.
  */
-int jobs_start(int allowed_count, int start_count, int waiter_count, void (*start)(int signum, void* arg), void *arg)
+int jobs_start(
+       int allowed_count,
+       int start_count,
+       int waiter_count,
+       void (*start)(int signum, void* arg),
+       void *arg)
 {
        int rc, launched;
-       struct thread me;
        struct job *job;
 
        assert(allowed_count >= 1);
@@ -817,33 +878,22 @@ int jobs_start(int allowed_count, int start_count, int waiter_count, void (*star
        pthread_mutex_lock(&mutex);
 
        /* check whether already running */
-       if (current_thread || allowed) {
+       if (current_thread || allowed_thread_count) {
                ERROR("thread already started");
                errno = EINVAL;
                goto error;
        }
 
-       /* start */
-       if (sig_monitor_init() < 0) {
-               ERROR("failed to initialise signal handlers");
-               goto error;
-       }
-
        /* records the allowed count */
-       allowed = allowed_count;
-       started = 0;
-       running = 0;
-       remains = waiter_count;
-
-#if HAS_WATCHDOG
-       /* set the watchdog */
-       if (sd_watchdog_enabled(0, NULL))
-               sd_event_set_watchdog(get_sd_event_locked(), 1);
-#endif
-
-       /* start at least one thread */
-       launched = 0;
-       while ((launched + 1) < start_count) {
+       allowed_thread_count = allowed_count;
+       started_thread_count = 0;
+       busy_thread_count = 0;
+       remaining_job_count = waiter_count;
+       allowed_job_count = waiter_count;
+
+       /* start at least one thread: the current one */
+       launched = 1;
+       while (launched < start_count) {
                if (start_one_thread() != 0) {
                        ERROR("Not all threads can be started");
                        goto error;
@@ -853,57 +903,32 @@ int jobs_start(int allowed_count, int start_count, int waiter_count, void (*star
 
        /* queue the start job */
        job = job_create(NULL, 0, start, arg);
-       if (!job) {
-               ERROR("out of memory");
-               errno = ENOMEM;
+       if (!job)
                goto error;
-       }
        job_add(job);
-       remains--;
 
        /* run until end */
-       thread_run(&me);
+       thread_main();
        rc = 0;
 error:
        pthread_mutex_unlock(&mutex);
+       if (exit_handler)
+               exit_handler();
        return rc;
 }
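
A minimal start-up sketch for jobs_start(): it runs the job/event loop on the calling thread, returns when the threads stop (for instance after jobs_exit()), and then invokes the recorded exit handler if any. The names main_start and run_binder are illustrative.

    static void main_start(int signum, void *config)
    {
            if (signum != 0)
                    return;                 /* interrupted before running */
            /* queue the initial work here, e.g. with jobs_queue(...) */
    }

    static int run_binder(void *config)
    {
            /* allow up to 5 threads, start only the calling thread,
             * accept up to 20 pending jobs */
            return jobs_start(5, 1, 20, main_start, config);
    }
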
 
 /**
- * Terminate all the threads and cancel all pending jobs.
+ * Request the job threads to exit and record 'handler' to be called at exit if not NULL.
  */
-void jobs_terminate()
+void jobs_exit(void (*handler)())
 {
-       struct job *job, *head, *tail;
-       pthread_t me, *others;
        struct thread *t;
-       int count;
-
-       /* how am i? */
-       me = pthread_self();
 
        /* request all threads to stop */
        pthread_mutex_lock(&mutex);
-       allowed = 0;
 
-       /* count the number of threads */
-       count = 0;
-       t = threads;
-       while (t) {
-               if (!t->upper && !pthread_equal(t->tid, me))
-                       count++;
-               t = t->next;
-       }
-
-       /* fill the array of threads */
-       others = alloca(count * sizeof *others);
-       count = 0;
-       t = threads;
-       while (t) {
-               if (!t->upper && !pthread_equal(t->tid, me))
-                       others[count++] = t->tid;
-               t = t->next;
-       }
+       /* set the handler */
+       exit_handler = handler;
 
        /* stops the threads */
        t = threads;
@@ -912,43 +937,10 @@ void jobs_terminate()
                t = t->next;
        }
 
-       /* wait the threads */
+       /* wake up the threads */
+       evloop_wakeup();
        pthread_cond_broadcast(&cond);
-       pthread_mutex_unlock(&mutex);
-       while (count)
-               pthread_join(others[--count], NULL);
-       pthread_mutex_lock(&mutex);
 
-       /* cancel pending jobs of other threads */
-       remains = 0;
-       head = first_job;
-       first_job = NULL;
-       tail = NULL;
-       while (head) {
-               /* unlink the job */
-               job = head;
-               head = job->next;
-
-               /* search if job is stacked for current */
-               t = current_thread;
-               while (t && t->job != job)
-                       t = t->upper;
-               if (t) {
-                       /* yes, relink it at end */
-                       if (tail)
-                               tail->next = job;
-                       else
-                               first_job = job;
-                       tail = job;
-                       job->next = NULL;
-               } else {
-                       /* no cancel the job */
-                       pthread_mutex_unlock(&mutex);
-                       sig_monitor(0, job_cancel, job);
-                       free(job);
-                       pthread_mutex_lock(&mutex);
-               }
-       }
+       /* leave */
        pthread_mutex_unlock(&mutex);
 }
-
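
Finally, a shutdown sketch for jobs_exit() above: called from a job, it asks every thread to stop and records a handler that jobs_start() invokes once its loop unwinds. The names below are illustrative.

    static void on_jobs_exited()
    {
            /* runs after jobs_start() returns, through the exit_handler above */
    }

    static void request_shutdown(int signum, void *closure)
    {
            if (signum == 0)
                    jobs_exit(on_jobs_exited);
    }
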