Module: xenomai-3
Branch: wip/drivers
Commit: 596acc3ce2f77bab32bc36ec2666e79d4d5c00f4
URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=596acc3ce2f77bab32bc36ec2666e79d4d5c00f4

Author: Philippe Gerum <r...@xenomai.org>
Date:   Wed Nov 16 20:49:47 2016 +0100

cobalt/clock: add support for devices without percpu semantics

Some clock devices have no percpu semantics. Passing a NULL affinity
mask to xnclock_register() (and cobalt_clock_register() as well) now
causes all timers for such a clock to be maintained in a single
queue, which xnclock_tick() inspects exclusively regardless of the
current CPU.
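
For illustration, a minimal driver-side sketch of the new convention
(the clock variable and setup function are hypothetical; only
xnclock_register() and the meaning of its NULL affinity argument come
from this patch):

    #include <cobalt/kernel/clock.h>

    /* Assumed to be fully initialized elsewhere for the device. */
    static struct xnclock ext_clock;

    static int ext_clock_setup(void)
    {
            /*
             * Passing a NULL affinity mask declares that the
             * backing device has no percpu semantics: all timers
             * based on ext_clock are kept in a single global
             * queue (percpu slot #0).
             */
            return xnclock_register(&ext_clock, NULL);
    }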

---

 include/cobalt/kernel/timer.h |    9 ------
 kernel/cobalt/clock.c         |   65 ++++++++++++++++++++++++++++++-----------
 kernel/cobalt/timer.c         |   22 +++++++++-----
 3 files changed, 63 insertions(+), 33 deletions(-)

diff --git a/include/cobalt/kernel/timer.h b/include/cobalt/kernel/timer.h
index 700392e..86ac7a7 100644
--- a/include/cobalt/kernel/timer.h
+++ b/include/cobalt/kernel/timer.h
@@ -311,15 +311,6 @@ static inline struct xnsched *xntimer_sched(struct xntimer *timer)
                &tmd->q;                                                \
        })
 
-static inline xntimerq_t *xntimer_this_queue(struct xntimer *timer)
-{
-       struct xntimerdata *tmd;
-
-       tmd = xnclock_this_timerdata(xntimer_clock(timer));
-
-       return &tmd->q;
-}
-
 static inline unsigned long xntimer_gravity(struct xntimer *timer)
 {
        struct xnclock *clock = xntimer_clock(timer);
diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c
index a4d0df8..796358f 100644
--- a/kernel/cobalt/clock.c
+++ b/kernel/cobalt/clock.c
@@ -137,6 +137,10 @@ void xnclock_core_local_shot(struct xnsched *sched)
        if (sched->status & XNINTCK)
                return;
 
+       /*
+        * Assume the core clock device always has percpu semantics in
+        * SMP.
+        */
        tmd = xnclock_this_timerdata(&nkclock);
        h = xntimerq_head(&tmd->q);
        if (h == NULL)
@@ -334,7 +338,13 @@ int xnclock_get_default_cpu(struct xnclock *clock, int cpu)
         * suggested CPU does not receive events from this device,
         * return the first one which does.  We also account for the
         * dynamic set of real-time CPUs.
+        *
+        * A clock device with no percpu semantics causes this routine
+        * to return CPU0 unconditionally.
         */
+       if (cpumask_empty(&clock->affinity))
+               return 0;
+
        cpumask_and(&set, &clock->affinity, &cobalt_cpu_affinity);
        if (!cpumask_empty(&set) && !cpumask_test_cpu(cpu, &set))
                cpu = cpumask_first(&set);
@@ -620,7 +630,10 @@ static inline void cleanup_clock_proc(struct xnclock *clock) { }
  * @param clock The new clock to register.
  *
  * @param affinity The set of CPUs we may expect the backing clock
- * device to tick on.
+ * device to tick on. As a special case, passing a NULL affinity mask
+ * means that timer IRQs cannot be seen as percpu events, in which
+ * case all outstanding timers will be maintained in a single global
+ * queue instead of percpu timer queues.
  *
  * @coretags{secondary-only}
  */
@@ -633,14 +646,17 @@ int xnclock_register(struct xnclock *clock, const cpumask_t *affinity)
 
 #ifdef CONFIG_SMP
        /*
-        * A CPU affinity set is defined for each clock, enumerating
-        * the CPUs which can receive ticks from the backing clock
-        * device.  This set must be a subset of the real-time CPU
-        * set.
+        * A CPU affinity set may be defined for each clock,
+        * enumerating the CPUs which can receive ticks from the
+        * backing clock device.  When given, this set must be a
+        * subset of the real-time CPU set.
         */
-       cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus);
-       if (cpumask_empty(&clock->affinity))
-               return -EINVAL;
+       if (affinity) {
+               cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus);
+               if (cpumask_empty(&clock->affinity))
+                       return -EINVAL;
+       } else  /* No percpu semantics. */
+               cpumask_clear(&clock->affinity);
 #endif
 
        /* Allocate the percpu timer queue slot. */
@@ -651,7 +667,8 @@ int xnclock_register(struct xnclock *clock, const cpumask_t *affinity)
        /*
         * POLA: init all timer slots for the new clock, although some
         * of them might remain unused depending on the CPU affinity
-        * of the event source(s).
+        * of the event source(s). If the clock device has no percpu
+        * semantics, all timers will be queued to slot #0.
         */
        for_each_online_cpu(cpu) {
                tmd = xnclock_percpu_timerdata(clock, cpu);
@@ -712,18 +729,32 @@ EXPORT_SYMBOL_GPL(xnclock_deregister);
  *
  * @coretags{coreirq-only, atomic-entry}
  *
- * @note The current CPU must be part of the real-time affinity set,
- * otherwise weird things may happen.
+ * @note Unless the clock device has no percpu semantics, the current
+ * CPU must be part of the real-time affinity set; otherwise weird
+ * things may happen.
  */
 void xnclock_tick(struct xnclock *clock)
 {
-       xntimerq_t *timerq = &xnclock_this_timerdata(clock)->q;
        struct xnsched *sched = xnsched_current();
        struct xntimer *timer;
        xnsticks_t delta;
+       xntimerq_t *tmq;
        xnticks_t now;
        xntimerh_t *h;
 
+       atomic_only();
+
+       /*
+        * Some external clock devices may have no percpu semantics,
+        * in which case all timers are queued to slot #0.
+        */
+       if (config_enabled(CONFIG_XENO_OPT_EXTCLOCK) &&
+           clock != &nkclock &&
+           !cpumask_test_cpu(xnsched_cpu(sched), &clock->affinity))
+               tmq = &xnclock_percpu_timerdata(clock, 0)->q;
+       else
+               tmq = &xnclock_this_timerdata(clock)->q;
+
        /*
         * Optimisation: any local timer reprogramming triggered by
         * invoked timer handlers can wait until we leave the tick
@@ -732,7 +763,7 @@ void xnclock_tick(struct xnclock *clock)
        sched->status |= XNINTCK;
 
        now = xnclock_read_raw(clock);
-       while ((h = xntimerq_head(timerq)) != NULL) {
+       while ((h = xntimerq_head(tmq)) != NULL) {
                timer = container_of(h, struct xntimer, aplink);
                delta = (xnsticks_t)(xntimerh_date(&timer->aplink) - now);
                if (delta > 0)
@@ -740,7 +771,7 @@ void xnclock_tick(struct xnclock *clock)
 
                trace_cobalt_timer_expire(timer);
 
-               xntimer_dequeue(timer, timerq);
+               xntimer_dequeue(timer, tmq);
                xntimer_account_fired(timer);
 
                /*
@@ -805,14 +836,14 @@ void xnclock_tick(struct xnclock *clock)
                 * we have to do this now if required.
                 */
                if (unlikely(timer->sched != sched)) {
-                       timerq = xntimer_percpu_queue(timer);
-                       xntimer_enqueue(timer, timerq);
+                       tmq = xntimer_percpu_queue(timer);
+                       xntimer_enqueue(timer, tmq);
                        if (xntimer_heading_p(timer))
                                xnclock_remote_shot(clock, timer->sched);
                        continue;
                }
 #endif
-               xntimer_enqueue(timer, timerq);
+               xntimer_enqueue(timer, tmq);
        }
 
        sched->status &= ~XNINTCK;
diff --git a/kernel/cobalt/timer.c b/kernel/cobalt/timer.c
index 6263f5b..3a4732d 100644
--- a/kernel/cobalt/timer.c
+++ b/kernel/cobalt/timer.c
@@ -349,12 +349,15 @@ void __xntimer_init(struct xntimer *timer,
        timer->handler = handler;
        timer->interval_ns = 0;
        /*
-        * Unlike common IRQs, timer events are per-CPU by design. If
-        * the CPU the caller is affine to does not receive timer
+        * If the CPU the caller is affine to does not receive timer
         * events, or no affinity was specified (i.e. sched == NULL),
         * assign the timer to the first possible CPU which can
         * receive interrupt events from the clock device backing this
         * timer.
+        *
+        * If the clock device has no percpu semantics,
+        * xnclock_get_default_cpu() makes the timer affine to CPU0
+        * unconditionally.
         */
        cpu = xnclock_get_default_cpu(clock, sched ? xnsched_cpu(sched) : 0);
        timer->sched = xnsched_struct(cpu);
@@ -522,11 +525,15 @@ void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched)
         * This assertion triggers when the timer is migrated to a CPU
         * for which we do not expect any clock events/IRQs from the
         * associated clock device. If so, the timer would never fire
-        * since clock ticks would never happen on that CPU (timer
-        * queues are per-CPU constructs).
+        * since clock ticks would never happen on that CPU.
+        *
+        * A clock device with an empty affinity mask has no percpu
+        * semantics, which disables the check.
         */
-       XENO_WARN_ON_SMP(COBALT, !cpumask_test_cpu(xnsched_cpu(sched),
-                      &xntimer_clock(timer)->affinity));
+       XENO_WARN_ON_SMP(COBALT,
+                        !cpumask_empty(&xntimer_clock(timer)->affinity) &&
+                        !cpumask_test_cpu(xnsched_cpu(sched),
+                                          &xntimer_clock(timer)->affinity));
 
        if (timer->status & XNTIMER_RUNNING) {
                xntimer_stop(timer);
@@ -546,7 +553,8 @@ bool xntimer_set_sched(struct xntimer *timer,
 {
        /*
         * We may deny the request if the target CPU does not receive
-        * any event from the clock device backing the timer.
+        * any event from the clock device backing the timer, or if the
+        * clock device has no percpu semantics.
         */
        if (cpumask_test_cpu(xnsched_cpu(sched),
                             &xntimer_clock(timer)->affinity)) {
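
As a usage note, the timer-side effects can be sketched as follows
(hypothetical names again; xntimer_init(), xntimer_start(),
XN_RELATIVE and XN_INFINITE are pre-existing Cobalt interfaces, and
ext_clock is the clock registered with a NULL affinity mask above):

    static struct xntimer ext_timer;

    static void ext_timer_handler(struct xntimer *timer)
    {
            /*
             * Runs from xnclock_tick(&ext_clock). Since the clock
             * affinity mask is empty, __xntimer_init() made this
             * timer affine to CPU0 via xnclock_get_default_cpu(),
             * and xntimer_set_sched() denies any migration request.
             */
    }

    static void ext_timer_setup(void)
    {
            xntimer_init(&ext_timer, &ext_clock, ext_timer_handler,
                         NULL, XNTIMER_IGRAVITY);
            /* One-shot timeout, 1 ms ahead (assuming a clock with
             * nanosecond resolution). */
            xntimer_start(&ext_timer, 1000000, XN_INFINITE,
                          XN_RELATIVE);
    }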

