On 2013-07-21 10:42, Liu Ping Fan wrote:
> We will arm each AioContext with its own timer infrastructure. As the first
> step, we give each AioContext its own alarm_timer, so that each one
> can raise its deadline independently.
> Each thread with an AioContext will have a dedicated signal handler
> to trigger its own alarm_timer. All of the alarm timers are
> linked on a QSLIST for clock reset.
> 
> Signed-off-by: Liu Ping Fan <pingf...@linux.vnet.ibm.com>
> ----- To fix ------------
> sigaction(SIGALRM, &act, NULL) installs a process-wide handler, not a
> per-thread one, doesn't it?
> I think what I need is a timerfd for each thread, right?
> Will fix in next version.
> 
> ---
>  async.c              |   3 ++
>  include/block/aio.h  |   1 +
>  include/qemu/timer.h |   4 +-
>  main-loop.c          |   4 --
>  qemu-timer.c         | 106 
> +++++++++++++++++++++++++++++++++++++--------------
>  5 files changed, 85 insertions(+), 33 deletions(-)
> 
> diff --git a/async.c b/async.c
> index ba4072c..8209cea 100644
> --- a/async.c
> +++ b/async.c
> @@ -26,6 +26,7 @@
>  #include "block/aio.h"
>  #include "block/thread-pool.h"
>  #include "qemu/main-loop.h"
> +#include "qemu/timer.h"
>  
>  DEFINE_TLS(AioContext*, thread_aio_context);
>  
> @@ -206,6 +207,7 @@ aio_ctx_finalize(GSource     *source)
>      aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
>      event_notifier_cleanup(&ctx->notifier);
>      g_array_free(ctx->pollfds, TRUE);
> +    alarm_timer_destroy(ctx->alarm_timer);
>  }
>  
>  static GSourceFuncs aio_source_funcs = {
> @@ -245,6 +247,7 @@ AioContext *aio_context_new(void)
>      aio_set_event_notifier(ctx, &ctx->notifier, 
>                             (EventNotifierHandler *)
>                             event_notifier_test_and_clear, NULL);
> +    ctx->alarm_timer = alarm_timer_create(ctx);
>  
>      return ctx;
>  }
> diff --git a/include/block/aio.h b/include/block/aio.h
> index 04598b2..84537a2 100644
> --- a/include/block/aio.h
> +++ b/include/block/aio.h
> @@ -73,6 +73,7 @@ typedef struct AioContext {
>  
>      /* Thread pool for performing work and receiving completion callbacks */
>      struct ThreadPool *thread_pool;
> +    struct qemu_alarm_timer *alarm_timer;
>  } AioContext;
>  
>  /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
> diff --git a/include/qemu/timer.h b/include/qemu/timer.h
> index 9dd206c..4a72c99 100644
> --- a/include/qemu/timer.h
> +++ b/include/qemu/timer.h
> @@ -57,7 +57,9 @@ void qemu_run_timers(QEMUClock *clock);
>  void qemu_run_all_timers(void);
>  void configure_alarms(char const *opt);
>  void init_clocks(void);
> -int init_timer_alarm(void);
> +int init_timer_alarm(struct qemu_alarm_timer *t);
> +struct qemu_alarm_timer *alarm_timer_create(AioContext *ctx);
> +void alarm_timer_destroy(struct qemu_alarm_timer *alarm);
>  
>  int64_t cpu_get_ticks(void);
>  void cpu_enable_ticks(void);
> diff --git a/main-loop.c b/main-loop.c
> index 5fbdd4a..4a94a52 100644
> --- a/main-loop.c
> +++ b/main-loop.c
> @@ -131,10 +131,6 @@ int qemu_init_main_loop(void)
>      GSource *src;
>  
>      init_clocks();
> -    if (init_timer_alarm() < 0) {
> -        fprintf(stderr, "could not initialize alarm timer\n");
> -        exit(1);
> -    }
>  
>      ret = qemu_signal_init();
>      if (ret) {
> diff --git a/qemu-timer.c b/qemu-timer.c
> index 9500d12..32c70ed 100644
> --- a/qemu-timer.c
> +++ b/qemu-timer.c
> @@ -65,6 +65,8 @@ struct QEMUTimer {
>      int scale;
>  };
>  
> +typedef struct qemu_alarm_timer qemu_alarm_timer;
> +
>  struct qemu_alarm_timer {
>      char const *name;
>      int (*start)(struct qemu_alarm_timer *t);
> @@ -82,9 +84,43 @@ struct qemu_alarm_timer {
>      /* Was the nearest deadline timer modified (possibly by another thread)? 
> */
>      QemuMutex timer_modified_lock;
>      bool timer_modified;
> +    AioContext *ctx;
> +    /* protected by alarm_timer_list_lock */
> +    QSLIST_ENTRY(qemu_alarm_timer) next_alarm_timer;
>  };
>  
> -static struct qemu_alarm_timer *alarm_timer;
> +static QSLIST_HEAD(, qemu_alarm_timer) \
> +    alarm_timer_list = QSLIST_HEAD_INITIALIZER(alarm_timer_list);
> +/* innermost lock */
> +static QemuMutex alarm_timer_list_lock;
> +
> +struct qemu_alarm_timer *alarm_timer_create(AioContext *ctx)
> +{
> +    struct qemu_alarm_timer *t;
> +
> +    t = g_malloc0(sizeof(qemu_alarm_timer));
> +    init_timer_alarm(t);
> +    t->ctx = ctx;
> +    qemu_mutex_lock(&alarm_timer_list_lock);
> +    QSLIST_INSERT_HEAD(&alarm_timer_list, t, next_alarm_timer);
> +    qemu_mutex_unlock(&alarm_timer_list_lock);
> +    return t;
> +}
> +
> +void alarm_timer_destroy(struct qemu_alarm_timer *t)
> +{
> +    struct qemu_alarm_timer *var, *tvar;
> +
> +    t->stop(t);
> +    qemu_mutex_lock(&alarm_timer_list_lock);
> +    QSLIST_FOREACH_SAFE(var, &alarm_timer_list, next_alarm_timer, tvar) {
> +        if (tvar == t) {
> +            QSLIST_REMOVE_AFTER(var, next_alarm_timer);
> +        }
> +    }
> +    qemu_mutex_unlock(&alarm_timer_list_lock);
> +    g_free(t);
> +}
>  
>  static bool qemu_timer_expired_ns(QEMUTimer *timer_head, int64_t 
> current_time)
>  {
> @@ -114,7 +150,10 @@ static int64_t qemu_next_clock_deadline(QEMUClock 
> *clock, int64_t delta)
>      return MIN(next, delta);
>  }
>  
> -static int64_t qemu_next_alarm_deadline(void)
> +/* Soon this will be fixed: till now, timer list is not associated with
> + * AioContext, so @ctx has no effect on deadline currently.
> + */
> +static int64_t qemu_next_alarm_deadline(AioContext *ctx)
>  {
>      int64_t delta = INT64_MAX;
>  
> @@ -127,12 +166,23 @@ static int64_t qemu_next_alarm_deadline(void)
>  
>  static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
>  {
> -    int64_t nearest_delta_ns = qemu_next_alarm_deadline();
> +    int64_t nearest_delta_ns = qemu_next_alarm_deadline(t->ctx);
>      if (nearest_delta_ns < INT64_MAX) {
>          t->rearm(t, nearest_delta_ns);
>      }
>  }
>  
> +static void qemu_rearm_alarm_timers(void)
> +{
> +    struct qemu_alarm_timer *t;
> +
> +    qemu_mutex_lock(&alarm_timer_list_lock);
> +    QSLIST_FOREACH(t, &alarm_timer_list, next_alarm_timer) {
> +        qemu_rearm_alarm_timer(t);
> +    }
> +    qemu_mutex_unlock(&alarm_timer_list_lock);
> +}
> +
>  /* TODO: MIN_TIMER_REARM_NS should be optimized */
>  #define MIN_TIMER_REARM_NS 250000
>  
> @@ -262,7 +312,7 @@ void qemu_clock_enable(QEMUClock *clock, bool enabled)
>      bool old = clock->enabled;
>      clock->enabled = enabled;
>      if (enabled && !old) {
> -        qemu_rearm_alarm_timer(alarm_timer);
> +        qemu_rearm_alarm_timers();
>      }
>  }
>  
> @@ -355,6 +405,8 @@ void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time)
>  {
>      QEMUClock *clock = ts->clock;
>      QEMUTimer **pt, *t;
> +    AioContext *ctx = *tls_get_thread_aio_context();
> +    struct qemu_alarm_timer *alarm_timer = ctx->alarm_timer;
>  
>      qemu_del_timer(ts);
>  
> @@ -485,6 +537,8 @@ uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts)
>  void qemu_run_all_timers(void)
>  {
>      bool timer_modified;
> +    AioContext *ctx = *tls_get_thread_aio_context();
> +    struct qemu_alarm_timer *alarm_timer = ctx->alarm_timer;
>  
>      alarm_timer->pending = false;
>  
> @@ -515,13 +569,15 @@ static void CALLBACK host_alarm_handler(PVOID lpParam, 
> BOOLEAN unused)
>  static void host_alarm_handler(int host_signum)
>  #endif
>  {
> -    struct qemu_alarm_timer *t = alarm_timer;
> +    AioContext *ctx = *tls_get_thread_aio_context();
> +    struct qemu_alarm_timer *t = ctx->alarm_timer;
> +
>      if (!t)
>       return;
>  
>      t->expired = true;
>      t->pending = true;
> -    qemu_notify_event();
> +    aio_notify(ctx);
>  }
>  
>  #if defined(__linux__)
> @@ -774,37 +830,30 @@ static void win32_rearm_timer(struct qemu_alarm_timer 
> *t,
>  
>  #endif /* _WIN32 */
>  
> -static void quit_timers(void)
> -{
> -    struct qemu_alarm_timer *t = alarm_timer;
> -    alarm_timer = NULL;
> -    t->stop(t);
> -}
> -
>  #ifdef CONFIG_POSIX
>  static void reinit_timers(void)
>  {
> -    struct qemu_alarm_timer *t = alarm_timer;
> -    t->stop(t);
> -    if (t->start(t)) {
> -        fprintf(stderr, "Internal timer error: aborting\n");
> -        exit(1);
> +    struct qemu_alarm_timer *t;
> +
> +    qemu_mutex_lock(&alarm_timer_list_lock);
> +    QSLIST_FOREACH(t, &alarm_timer_list, next_alarm_timer) {
> +        t->stop(t);
> +        if (t->start(t)) {
> +            fprintf(stderr, "Internal timer error: aborting\n");
> +            exit(1);
> +        }
> +        qemu_rearm_alarm_timer(t);
>      }
> -    qemu_rearm_alarm_timer(t);
> +    qemu_mutex_unlock(&alarm_timer_list_lock);
>  }
>  #endif /* CONFIG_POSIX */
>  
> -int init_timer_alarm(void)
> +int init_timer_alarm(struct qemu_alarm_timer *t)
>  {
> -    struct qemu_alarm_timer *t = NULL;
>      int i, err = -1;
>  
> -    if (alarm_timer) {
> -        return 0;
> -    }
> -
>      for (i = 0; alarm_timers[i].name; i++) {
> -        t = &alarm_timers[i];
> +        *t = alarm_timers[i];
>  
>          err = t->start(t);
>          if (!err)
> @@ -818,14 +867,15 @@ int init_timer_alarm(void)
>  
>      qemu_mutex_init(&t->timer_modified_lock);
>  
> -    atexit(quit_timers);
>  #ifdef CONFIG_POSIX
>      pthread_atfork(NULL, NULL, reinit_timers);
>  #endif
> -    alarm_timer = t;
>      return 0;
>  
>  fail:
> +    fprintf(stderr, "could not initialize alarm timer\n");
> +    exit(1);
> +
>      return err;
>  }
>  
> 

This goes in a similar direction to what I have here locally.
However, as said in a different thread, you must not remove the main
alarm timers from the main loop; rather, just add the option for
additional alarm timers so that AIO etc. can define their private ones.

Jan

-- 
Siemens AG, Corporate Technology, CT RTC ITP SES-DE
Corporate Competence Center Embedded Linux

Reply via email to