/*
- * Copyright (C) 2016, 2017 "IoT.bzh"
+ * Copyright (C) 2016, 2017, 2018 "IoT.bzh"
* Author José Bollo <jose.bollo@iot.bzh>
*
* Licensed under the Apache License, Version 2.0 (the "License");
#define _GNU_SOURCE
+#if defined(NO_JOBS_WATCHDOG)
+# define HAS_WATCHDOG 0
+#else
+# define HAS_WATCHDOG 1
+#endif
+
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <signal.h>
+#include <string.h>
#include <time.h>
#include <sys/syscall.h>
#include <pthread.h>
#include <errno.h>
#include <assert.h>
+#include <sys/eventfd.h>
#include <systemd/sd-event.h>
+#include "fdev.h"
+#if HAS_WATCHDOG
+#include <systemd/sd-daemon.h>
+#endif
#include "jobs.h"
+#include "fdev-epoll.h"
#include "sig-monitor.h"
#include "verbose.h"
#if 0
-#define _alert_ "do you really want to remove monitoring?"
+#define _alert_ "do you really want to remove signal monitoring?"
#define sig_monitor_init_timeouts() ((void)0)
#define sig_monitor_clean_timeouts() ((void)0)
#define sig_monitor(to,cb,arg) (cb(0,arg))
struct job
{
struct job *next; /**< link to the next job enqueued */
- void *group; /**< group of the request */
+ const void *group; /**< group of the request */
job_cb_t callback; /**< processing callback */
void *arg; /**< argument */
int timeout; /**< timeout in second for processing the request */
};
/** Description of handled event loops */
-struct events
+struct evloop
{
- struct events *next;
- struct sd_event *event;
- uint64_t timeout;
- unsigned used: 1;
- unsigned runs: 1;
+ unsigned state; /**< encoded state */
+ int efd; /**< event notification */
+ struct sd_event *sdev; /**< the systemd event loop */
+ pthread_cond_t cond; /**< condition */
+ struct fdev *fdev; /**< handling of events */
};
+#define EVLOOP_STATE_WAIT 1U
+#define EVLOOP_STATE_RUN 2U
+#define EVLOOP_STATE_LOCK 4U
+
/** Description of threads */
struct thread
{
struct thread *next; /**< next thread of the list */
struct thread *upper; /**< upper same thread */
struct job *job; /**< currently processed job */
- struct events *events; /**< currently processed job */
pthread_t tid; /**< the thread id */
- unsigned stop: 1; /**< stop requested */
- unsigned lowered: 1; /**< has a lower same thread */
- unsigned waits: 1; /**< is waiting? */
+ volatile unsigned stop: 1; /**< stop requested */
+ volatile unsigned waits: 1; /**< is waiting? */
};
/**
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
-/* count allowed, started and waiting threads */
+/* count allowed, started and running threads */
static int allowed = 0; /** allowed count of threads */
static int started = 0; /** started count of threads */
-static int waiting = 0; /** waiting count of threads */
+static int running = 0; /** running count of threads */
static int remains = 0; /** allowed count of waiting jobs */
-static int nevents = 0; /** count of events */
/* list of threads */
static struct thread *threads;
-static _Thread_local struct thread *current;
+static _Thread_local struct thread *current_thread;
+static _Thread_local struct evloop *current_evloop;
/* queue of pending jobs */
static struct job *first_job;
-static struct events *first_events;
static struct job *free_jobs;
+/* event loop */
+static struct evloop evloop[1];
+static struct fdev_epoll *fdevepoll;
+static int waitevt;
+
/**
* Create a new job with the given parameters
* @param group the group of the job
 * @return the created job or NULL when no more memory
*/
static struct job *job_create(
- void *group,
+ const void *group,
int timeout,
job_cb_t callback,
void *arg)
if (job)
free_jobs = job->next;
else {
- /* allocation without blocking */
+ /* allocation without blocking */
pthread_mutex_unlock(&mutex);
job = malloc(sizeof *job);
pthread_mutex_lock(&mutex);
*/
static void job_add(struct job *job)
{
- void *group;
+ const void *group;
struct job *ijob, **pjob;
/* prepare to add */
return job;
}
-/**
- * Get the next events to process or NULL if none.
- * @return the first events that isn't running or NULL
- */
-static inline struct events *events_get()
-{
- struct events *events = first_events;
- while (events && events->used)
- events = events->next;
- return events;
-}
-
/**
* Releases the processed 'job': removes it
* from the list of jobs and unblock the first
static inline void job_release(struct job *job)
{
struct job *ijob, **pjob;
- void *group;
+ const void *group;
/* first unqueue the job */
pjob = &first_job;
job->callback(SIGABRT, job->arg);
}
+/**
+ * Gets a fdev_epoll item.
+ * @return a fdev_epoll or NULL in case of error
+ */
+static struct fdev_epoll *get_fdevepoll()
+{
+ struct fdev_epoll *result;
+
+ result = fdevepoll;
+ if (!result)
+ result = fdevepoll = fdev_epoll_create();
+
+ return result;
+}
+
/**
* Monitored normal callback for events.
* This function is called by the monitor
* flow
* @param arg the events to run
*/
-static void events_call(int signum, void *arg)
+static void evloop_run(int signum, void *arg)
{
- struct events *events = arg;
- if (!signum)
- sd_event_run(events->event, events->timeout);
+ int rc;
+ struct sd_event *se;
+ struct evloop *el = arg;
+
+ if (!signum) {
+ current_evloop = el;
+ __atomic_store_n(&el->state, EVLOOP_STATE_LOCK|EVLOOP_STATE_RUN|EVLOOP_STATE_WAIT, __ATOMIC_RELAXED);
+ se = el->sdev;
+ rc = sd_event_prepare(se);
+ if (rc < 0) {
+ errno = -rc;
+ ERROR("sd_event_prepare returned an error (state: %d): %m", sd_event_get_state(se));
+ } else {
+ if (rc == 0) {
+ rc = sd_event_wait(se, (uint64_t)(int64_t)-1);
+ if (rc < 0) {
+ errno = -rc;
+ ERROR("sd_event_wait returned an error (state: %d): %m", sd_event_get_state(se));
+ }
+ }
+ __atomic_and_fetch(&el->state, ~(EVLOOP_STATE_WAIT), __ATOMIC_RELAXED);
+
+ if (rc > 0) {
+ rc = sd_event_dispatch(se);
+ if (rc < 0) {
+ errno = -rc;
+ ERROR("sd_event_dispatch returned an error (state: %d): %m", sd_event_get_state(se));
+ }
+ }
+ }
+ }
+ __atomic_and_fetch(&el->state, ~(EVLOOP_STATE_WAIT|EVLOOP_STATE_RUN), __ATOMIC_RELAXED);
+}
+
+
+/**
+ * Monitored normal loop for waiting events.
+ * @param signum 0 on normal flow or the number
+ * of the signal that interrupted the normal
+ * flow
+ * @param arg the events to run
+ */
+static void monitored_wait_and_dispatch(int signum, void *arg)
+{
+ struct fdev_epoll *fdev_epoll = arg;
+ if (!signum) {
+ fdev_epoll_wait_and_dispatch(fdev_epoll, -1);
+ }
}
/**
*/
static void thread_run(volatile struct thread *me)
{
- struct thread **prv, *thr;
+ struct thread **prv;
struct job *job;
- struct events *events;
- uint64_t evto;
/* initialize description of itself and link it in the list */
me->tid = pthread_self();
me->stop = 0;
- me->lowered = 0;
me->waits = 0;
- me->upper = current;
- if (current) {
- current->lowered = 1;
- evto = EVENT_TIMEOUT_CHILD;
- me->events = current->events;
- } else {
+ me->upper = current_thread;
+ if (!current_thread) {
started++;
sig_monitor_init_timeouts();
- evto = EVENT_TIMEOUT_TOP;
- me->events = NULL;
}
me->next = threads;
threads = (struct thread*)me;
- current = (struct thread*)me;
+ current_thread = (struct thread*)me;
/* loop until stopped */
while (!me->stop) {
+ /* release the event loop */
+ if (current_evloop) {
+			__atomic_and_fetch(&current_evloop->state, ~EVLOOP_STATE_LOCK, __ATOMIC_RELAXED);
+ current_evloop = NULL;
+ }
+
/* get a job */
- job = job_get(first_job);
+ job = job_get();
if (job) {
/* prepare running the job */
remains++; /* increases count of job that can wait */
/* release the run job */
job_release(job);
-
- /* release event if any */
- events = me->events;
- if (events) {
- events->used = 0;
- me->events = NULL;
- }
+ } else if (waitevt) {
+			/* no job and no events */
+ running--;
+ if (!running)
+ ERROR("Entering job deep sleep! Check your bindings.");
+ me->waits = 1;
+ pthread_cond_wait(&cond, &mutex);
+ me->waits = 0;
+ running++;
} else {
- /* no job, check events */
- events = me->events;
- if (!events || events->runs)
- events = events_get();
- if (events) {
- /* run the events */
- events->used = 1;
- events->runs = 1;
- events->timeout = evto;
- me->events = events;
- pthread_mutex_unlock(&mutex);
- sig_monitor(0, events_call, events);
- pthread_mutex_lock(&mutex);
- events->used = 0;
- events->runs = 0;
- me->events = NULL;
- thr = me->upper;
- while (thr && thr->events == events) {
- thr->events = NULL;
- thr = thr->upper;
- }
- } else {
- /* no job and not events */
- waiting++;
- me->waits = 1;
- pthread_cond_wait(&cond, &mutex);
- me->waits = 0;
- waiting--;
- }
+ /* wait for events */
+ waitevt = 1;
+ pthread_mutex_unlock(&mutex);
+ sig_monitor(0, monitored_wait_and_dispatch, get_fdevepoll());
+ pthread_mutex_lock(&mutex);
+ waitevt = 0;
}
}
+ /* release the event loop */
+ if (current_evloop) {
+		__atomic_and_fetch(&current_evloop->state, ~EVLOOP_STATE_LOCK, __ATOMIC_RELAXED);
+ current_evloop = NULL;
+ }
+
/* unlink the current thread and cleanup */
prv = &threads;
while (*prv != me)
prv = &(*prv)->next;
*prv = me->next;
- current = me->upper;
- if (current) {
- current->lowered = 0;
- } else {
+ current_thread = me->upper;
+ if (!current_thread) {
sig_monitor_clean_timeouts();
started--;
}
struct thread me;
pthread_mutex_lock(&mutex);
+ running++;
thread_run(&me);
+ running--;
pthread_mutex_unlock(&mutex);
return NULL;
}
* @return 0 in case of success or -1 in case of error
*/
int jobs_queue(
- void *group,
+ const void *group,
int timeout,
void (*callback)(int, void*),
void *arg)
}
/* start a thread if needed */
- if (waiting == 0 && started < allowed) {
+ if (running == started && started < allowed) {
/* all threads are busy and a new can be started */
rc = start_one_thread();
if (rc < 0 && started == 0) {
* @see jobs_call, jobs_enter, jobs_leave
*/
static int do_sync(
- void *group,
+ const void *group,
int timeout,
void (*sync_cb)(int signum, void *closure),
struct sync *sync
* of interrupted flow, the context 'closure' as given and
* a 'jobloop' reference that must be used when the job is
* terminated to unlock the current execution flow.
- * @param arg the argument to the callback
+ * @param closure the argument to the callback
* @return 0 on success or -1 in case of error
*/
int jobs_enter(
- void *group,
+ const void *group,
int timeout,
void (*callback)(int signum, void *closure, struct jobloop *jobloop),
void *closure
* @return 0 in case of success or -1 in case of error
*/
int jobs_call(
- void *group,
+ const void *group,
int timeout,
void (*callback)(int, void*),
void *arg)
return do_sync(group, timeout, call_cb, &sync);
}
+/**
+ * Internal callback for evloop management.
+ * The effect of this function is hidden: it exits
+ * the waiting poll if any. Then it wakes up a thread
+ * awaiting the evloop using signal.
+ */
+static int on_evloop_efd(sd_event_source *s, int fd, uint32_t revents, void *userdata)
+{
+ uint64_t x;
+ struct evloop *evloop = userdata;
+ read(evloop->efd, &x, sizeof x);
+ pthread_mutex_lock(&mutex);
+ pthread_cond_broadcast(&evloop->cond);
+ pthread_mutex_unlock(&mutex);
+ return 1;
+}
+
+/* temporary hack */
+static void evloop_callback(void *arg, uint32_t event, struct fdev *fdev)
+{
+ sig_monitor(0, evloop_run, arg);
+}
+
/**
* Gets a sd_event item for the current thread.
* @return a sd_event or NULL in case of error
*/
-struct sd_event *jobs_get_sd_event()
+static struct sd_event *get_sd_event_locked()
{
- struct events *events;
- struct thread *me;
+ struct evloop *el;
+ uint64_t x;
int rc;
- pthread_mutex_lock(&mutex);
-
- /* search events on stack */
- me = current;
- while (me && !me->events)
- me = me->upper;
- if (me)
- /* return the stacked events */
- events = me->events;
- else {
- /* search an available events */
- events = events_get();
- if (!events) {
- /* not found, check if creation possible */
- if (nevents >= allowed) {
- ERROR("not possible to add a new event");
- events = NULL;
- } else {
- events = malloc(sizeof *events);
- if (events && (rc = sd_event_new(&events->event)) >= 0) {
- if (nevents < started || start_one_thread() >= 0) {
- events->used = 0;
- events->runs = 0;
- events->next = first_events;
- first_events = events;
- } else {
- ERROR("can't start thread for events");
- sd_event_unref(events->event);
- free(events);
- events = NULL;
- }
- } else {
- if (!events) {
- ERROR("out of memory");
- errno = ENOMEM;
- } else {
- free(events);
- ERROR("creation of sd_event failed: %m");
- events = NULL;
- errno = -rc;
- }
- }
- }
+ /* creates the evloop on need */
+ el = &evloop[0];
+ if (!el->sdev) {
+ /* start the creation */
+ el->state = 0;
+ /* creates the eventfd for waking up polls */
+ el->efd = eventfd(0, EFD_CLOEXEC);
+ if (el->efd < 0) {
+ ERROR("can't make eventfd for events");
+ goto error1;
}
- if (events) {
- me = current;
- if (me) {
- events->used = 1;
- me->events = events;
- } else {
- WARNING("event returned for unknown thread!");
- }
+ /* create the systemd event loop */
+ rc = sd_event_new(&el->sdev);
+ if (rc < 0) {
+ ERROR("can't make new event loop");
+ goto error2;
}
+ /* put the eventfd in the event loop */
+ rc = sd_event_add_io(el->sdev, NULL, el->efd, EPOLLIN, on_evloop_efd, el);
+ if (rc < 0) {
+ ERROR("can't register eventfd");
+ goto error3;
+ }
+ /* handle the event loop */
+ el->fdev = fdev_epoll_add(get_fdevepoll(), sd_event_get_fd(el->sdev));
+ if (!el->fdev) {
+ ERROR("can't create fdev");
+error3:
+ sd_event_unref(el->sdev);
+error2:
+ close(el->efd);
+error1:
+ memset(el, 0, sizeof *el);
+ return NULL;
+ }
+ fdev_set_autoclose(el->fdev, 0);
+ fdev_set_events(el->fdev, EPOLLIN);
+ fdev_set_callback(el->fdev, evloop_callback, el);
+ }
+
+ /* attach the event loop to the current thread */
+ if (current_evloop != el) {
+ if (current_evloop)
+			__atomic_and_fetch(&current_evloop->state, ~EVLOOP_STATE_LOCK, __ATOMIC_RELAXED);
+ current_evloop = el;
+ __atomic_or_fetch(&el->state, EVLOOP_STATE_LOCK, __ATOMIC_RELAXED);
+ }
+
+ /* wait for a modifiable event loop */
+ while (__atomic_load_n(&el->state, __ATOMIC_RELAXED) & EVLOOP_STATE_WAIT) {
+ x = 1;
+ write(el->efd, &x, sizeof x);
+ pthread_cond_wait(&el->cond, &mutex);
}
+
+ return el->sdev;
+}
+
+/**
+ * Gets a sd_event item for the current thread.
+ * @return a sd_event or NULL in case of error
+ */
+struct sd_event *jobs_get_sd_event()
+{
+ struct sd_event *result;
+
+ pthread_mutex_lock(&mutex);
+ result = get_sd_event_locked();
pthread_mutex_unlock(&mutex);
- return events ? events->event : NULL;
+
+ return result;
+}
+
+/**
+ * Gets the fdev_epoll item.
+ * @return a fdev_epoll or NULL in case of error
+ */
+struct fdev_epoll *jobs_get_fdev_epoll()
+{
+ struct fdev_epoll *result;
+
+ pthread_mutex_lock(&mutex);
+ result = get_fdevepoll();
+ pthread_mutex_unlock(&mutex);
+
+ return result;
}
/**
* @param start The start routine to activate (can't be NULL)
* @return 0 in case of success or -1 in case of error.
*/
-int jobs_start(int allowed_count, int start_count, int waiter_count, void (*start)(int signum))
+int jobs_start(int allowed_count, int start_count, int waiter_count, void (*start)(int signum, void* arg), void *arg)
{
int rc, launched;
struct thread me;
pthread_mutex_lock(&mutex);
/* check whether already running */
- if (current || allowed) {
+ if (current_thread || allowed) {
ERROR("thread already started");
errno = EINVAL;
goto error;
/* records the allowed count */
allowed = allowed_count;
started = 0;
- waiting = 0;
+ running = 0;
remains = waiter_count;
+#if HAS_WATCHDOG
+ /* set the watchdog */
+ if (sd_watchdog_enabled(0, NULL))
+ sd_event_set_watchdog(get_sd_event_locked(), 1);
+#endif
+
/* start at least one thread */
launched = 0;
while ((launched + 1) < start_count) {
}
/* queue the start job */
- job = job_create(NULL, 0, (job_cb_t)start, NULL);
+ job = job_create(NULL, 0, start, arg);
if (!job) {
ERROR("out of memory");
errno = ENOMEM;
head = job->next;
/* search if job is stacked for current */
- t = current;
+ t = current_thread;
while (t && t->job != job)
t = t->upper;
if (t) {