[Xenomai-git] Philippe Gerum : cobalt/clock: add support for devices without percpu semantics
Module: xenomai-3 Branch: master Commit: 596acc3ce2f77bab32bc36ec2666e79d4d5c00f4 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=596acc3ce2f77bab32bc36ec2666e79d4d5c00f4 Author: Philippe Gerum Date: Wed Nov 16 20:49:47 2016 +0100 cobalt/clock: add support for devices without percpu semantics Some clock devices have no percpu semantics. Passing a NULL affinity mask to xnclock_register() (and cobalt_clock_register() as well) now causes all timers for any such clock to be maintained into a single queue xnclock_tick() will inspect exclusively regardless of the current CPU. --- include/cobalt/kernel/timer.h |9 -- kernel/cobalt/clock.c | 65 ++--- kernel/cobalt/timer.c | 22 +- 3 files changed, 63 insertions(+), 33 deletions(-) diff --git a/include/cobalt/kernel/timer.h b/include/cobalt/kernel/timer.h index 700392e..86ac7a7 100644 --- a/include/cobalt/kernel/timer.h +++ b/include/cobalt/kernel/timer.h @@ -311,15 +311,6 @@ static inline struct xnsched *xntimer_sched(struct xntimer *timer) &tmd->q;\ }) -static inline xntimerq_t *xntimer_this_queue(struct xntimer *timer) -{ - struct xntimerdata *tmd; - - tmd = xnclock_this_timerdata(xntimer_clock(timer)); - - return &tmd->q; -} - static inline unsigned long xntimer_gravity(struct xntimer *timer) { struct xnclock *clock = xntimer_clock(timer); diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c index a4d0df8..796358f 100644 --- a/kernel/cobalt/clock.c +++ b/kernel/cobalt/clock.c @@ -137,6 +137,10 @@ void xnclock_core_local_shot(struct xnsched *sched) if (sched->status & XNINTCK) return; + /* +* Assume the core clock device always has percpu semantics in +* SMP. +*/ tmd = xnclock_this_timerdata(&nkclock); h = xntimerq_head(&tmd->q); if (h == NULL) @@ -334,7 +338,13 @@ int xnclock_get_default_cpu(struct xnclock *clock, int cpu) * suggested CPU does not receive events from this device, * return the first one which does. We also account for the * dynamic set of real-time CPUs. 
+* +* A clock device with no percpu semantics causes this routine +* to return CPU0 unconditionally. */ + if (cpumask_empty(&clock->affinity)) + return 0; + cpumask_and(&set, &clock->affinity, &cobalt_cpu_affinity); if (!cpumask_empty(&set) && !cpumask_test_cpu(cpu, &set)) cpu = cpumask_first(&set); @@ -620,7 +630,10 @@ static inline void cleanup_clock_proc(struct xnclock *clock) { } * @param clock The new clock to register. * * @param affinity The set of CPUs we may expect the backing clock - * device to tick on. + * device to tick on. As a special case, passing a NULL affinity mask + * means that timer IRQs cannot be seen as percpu events, in which + * case all outstanding timers will be maintained into a single global + * queue instead of percpu timer queues. * * @coretags{secondary-only} */ @@ -633,14 +646,17 @@ int xnclock_register(struct xnclock *clock, const cpumask_t *affinity) #ifdef CONFIG_SMP /* -* A CPU affinity set is defined for each clock, enumerating -* the CPUs which can receive ticks from the backing clock -* device. This set must be a subset of the real-time CPU -* set. +* A CPU affinity set may be defined for each clock, +* enumerating the CPUs which can receive ticks from the +* backing clock device. When given, this set must be a +* subset of the real-time CPU set. */ - cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus); - if (cpumask_empty(&clock->affinity)) - return -EINVAL; + if (affinity) { + cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus); + if (cpumask_empty(&clock->affinity)) + return -EINVAL; + } else /* No percpu semantics. */ + cpumask_clear(&clock->affinity); #endif /* Allocate the percpu timer queue slot. */ @@ -651,7 +667,8 @@ int xnclock_register(struct xnclock *clock, const cpumask_t *affinity) /* * POLA: init all timer slots for the new clock, although some * of them might remain unused depending on the CPU affinity -* of the event source(s). +* of the event source(s). 
If the clock device has no percpu +* semantics, all timers will be queued to slot #0. */ for_each_online_cpu(cpu) { tmd = xnclock_percpu_timerdata(clock, cpu); @@ -712,18 +729,32 @@ EXPORT_SYMBOL_GPL(xnclock_deregister); * * @coretags{coreirq-only, atomic-entry} * - * @note The current CPU must be part of the real-time affinity set, - * otherwise weird things may happen.
[Xenomai-git] Philippe Gerum : cobalt/clock: add support for devices without percpu semantics
Module: xenomai-3 Branch: wip/drivers Commit: 596acc3ce2f77bab32bc36ec2666e79d4d5c00f4 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=596acc3ce2f77bab32bc36ec2666e79d4d5c00f4 Author: Philippe Gerum Date: Wed Nov 16 20:49:47 2016 +0100 cobalt/clock: add support for devices without percpu semantics Some clock devices have no percpu semantics. Passing a NULL affinity mask to xnclock_register() (and cobalt_clock_register() as well) now causes all timers for any such clock to be maintained into a single queue xnclock_tick() will inspect exclusively regardless of the current CPU. --- include/cobalt/kernel/timer.h |9 -- kernel/cobalt/clock.c | 65 ++--- kernel/cobalt/timer.c | 22 +- 3 files changed, 63 insertions(+), 33 deletions(-) diff --git a/include/cobalt/kernel/timer.h b/include/cobalt/kernel/timer.h index 700392e..86ac7a7 100644 --- a/include/cobalt/kernel/timer.h +++ b/include/cobalt/kernel/timer.h @@ -311,15 +311,6 @@ static inline struct xnsched *xntimer_sched(struct xntimer *timer) &tmd->q;\ }) -static inline xntimerq_t *xntimer_this_queue(struct xntimer *timer) -{ - struct xntimerdata *tmd; - - tmd = xnclock_this_timerdata(xntimer_clock(timer)); - - return &tmd->q; -} - static inline unsigned long xntimer_gravity(struct xntimer *timer) { struct xnclock *clock = xntimer_clock(timer); diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c index a4d0df8..796358f 100644 --- a/kernel/cobalt/clock.c +++ b/kernel/cobalt/clock.c @@ -137,6 +137,10 @@ void xnclock_core_local_shot(struct xnsched *sched) if (sched->status & XNINTCK) return; + /* +* Assume the core clock device always has percpu semantics in +* SMP. +*/ tmd = xnclock_this_timerdata(&nkclock); h = xntimerq_head(&tmd->q); if (h == NULL) @@ -334,7 +338,13 @@ int xnclock_get_default_cpu(struct xnclock *clock, int cpu) * suggested CPU does not receive events from this device, * return the first one which does. We also account for the * dynamic set of real-time CPUs. 
+* +* A clock device with no percpu semantics causes this routine +* to return CPU0 unconditionally. */ + if (cpumask_empty(&clock->affinity)) + return 0; + cpumask_and(&set, &clock->affinity, &cobalt_cpu_affinity); if (!cpumask_empty(&set) && !cpumask_test_cpu(cpu, &set)) cpu = cpumask_first(&set); @@ -620,7 +630,10 @@ static inline void cleanup_clock_proc(struct xnclock *clock) { } * @param clock The new clock to register. * * @param affinity The set of CPUs we may expect the backing clock - * device to tick on. + * device to tick on. As a special case, passing a NULL affinity mask + * means that timer IRQs cannot be seen as percpu events, in which + * case all outstanding timers will be maintained into a single global + * queue instead of percpu timer queues. * * @coretags{secondary-only} */ @@ -633,14 +646,17 @@ int xnclock_register(struct xnclock *clock, const cpumask_t *affinity) #ifdef CONFIG_SMP /* -* A CPU affinity set is defined for each clock, enumerating -* the CPUs which can receive ticks from the backing clock -* device. This set must be a subset of the real-time CPU -* set. +* A CPU affinity set may be defined for each clock, +* enumerating the CPUs which can receive ticks from the +* backing clock device. When given, this set must be a +* subset of the real-time CPU set. */ - cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus); - if (cpumask_empty(&clock->affinity)) - return -EINVAL; + if (affinity) { + cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus); + if (cpumask_empty(&clock->affinity)) + return -EINVAL; + } else /* No percpu semantics. */ + cpumask_clear(&clock->affinity); #endif /* Allocate the percpu timer queue slot. */ @@ -651,7 +667,8 @@ int xnclock_register(struct xnclock *clock, const cpumask_t *affinity) /* * POLA: init all timer slots for the new clock, although some * of them might remain unused depending on the CPU affinity -* of the event source(s). +* of the event source(s). 
If the clock device has no percpu +* semantics, all timers will be queued to slot #0. */ for_each_online_cpu(cpu) { tmd = xnclock_percpu_timerdata(clock, cpu); @@ -712,18 +729,32 @@ EXPORT_SYMBOL_GPL(xnclock_deregister); * * @coretags{coreirq-only, atomic-entry} * - * @note The current CPU must be part of the real-time affinity set, - * otherwise weird things may happen.
[Xenomai-git] Philippe Gerum : cobalt/clock: add support for devices without percpu semantics
Module: xenomai-3 Branch: next Commit: 596acc3ce2f77bab32bc36ec2666e79d4d5c00f4 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=596acc3ce2f77bab32bc36ec2666e79d4d5c00f4 Author: Philippe Gerum Date: Wed Nov 16 20:49:47 2016 +0100 cobalt/clock: add support for devices without percpu semantics Some clock devices have no percpu semantics. Passing a NULL affinity mask to xnclock_register() (and cobalt_clock_register() as well) now causes all timers for any such clock to be maintained into a single queue xnclock_tick() will inspect exclusively regardless of the current CPU. --- include/cobalt/kernel/timer.h |9 -- kernel/cobalt/clock.c | 65 ++--- kernel/cobalt/timer.c | 22 +- 3 files changed, 63 insertions(+), 33 deletions(-) diff --git a/include/cobalt/kernel/timer.h b/include/cobalt/kernel/timer.h index 700392e..86ac7a7 100644 --- a/include/cobalt/kernel/timer.h +++ b/include/cobalt/kernel/timer.h @@ -311,15 +311,6 @@ static inline struct xnsched *xntimer_sched(struct xntimer *timer) &tmd->q;\ }) -static inline xntimerq_t *xntimer_this_queue(struct xntimer *timer) -{ - struct xntimerdata *tmd; - - tmd = xnclock_this_timerdata(xntimer_clock(timer)); - - return &tmd->q; -} - static inline unsigned long xntimer_gravity(struct xntimer *timer) { struct xnclock *clock = xntimer_clock(timer); diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c index a4d0df8..796358f 100644 --- a/kernel/cobalt/clock.c +++ b/kernel/cobalt/clock.c @@ -137,6 +137,10 @@ void xnclock_core_local_shot(struct xnsched *sched) if (sched->status & XNINTCK) return; + /* +* Assume the core clock device always has percpu semantics in +* SMP. +*/ tmd = xnclock_this_timerdata(&nkclock); h = xntimerq_head(&tmd->q); if (h == NULL) @@ -334,7 +338,13 @@ int xnclock_get_default_cpu(struct xnclock *clock, int cpu) * suggested CPU does not receive events from this device, * return the first one which does. We also account for the * dynamic set of real-time CPUs. 
+* +* A clock device with no percpu semantics causes this routine +* to return CPU0 unconditionally. */ + if (cpumask_empty(&clock->affinity)) + return 0; + cpumask_and(&set, &clock->affinity, &cobalt_cpu_affinity); if (!cpumask_empty(&set) && !cpumask_test_cpu(cpu, &set)) cpu = cpumask_first(&set); @@ -620,7 +630,10 @@ static inline void cleanup_clock_proc(struct xnclock *clock) { } * @param clock The new clock to register. * * @param affinity The set of CPUs we may expect the backing clock - * device to tick on. + * device to tick on. As a special case, passing a NULL affinity mask + * means that timer IRQs cannot be seen as percpu events, in which + * case all outstanding timers will be maintained into a single global + * queue instead of percpu timer queues. * * @coretags{secondary-only} */ @@ -633,14 +646,17 @@ int xnclock_register(struct xnclock *clock, const cpumask_t *affinity) #ifdef CONFIG_SMP /* -* A CPU affinity set is defined for each clock, enumerating -* the CPUs which can receive ticks from the backing clock -* device. This set must be a subset of the real-time CPU -* set. +* A CPU affinity set may be defined for each clock, +* enumerating the CPUs which can receive ticks from the +* backing clock device. When given, this set must be a +* subset of the real-time CPU set. */ - cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus); - if (cpumask_empty(&clock->affinity)) - return -EINVAL; + if (affinity) { + cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus); + if (cpumask_empty(&clock->affinity)) + return -EINVAL; + } else /* No percpu semantics. */ + cpumask_clear(&clock->affinity); #endif /* Allocate the percpu timer queue slot. */ @@ -651,7 +667,8 @@ int xnclock_register(struct xnclock *clock, const cpumask_t *affinity) /* * POLA: init all timer slots for the new clock, although some * of them might remain unused depending on the CPU affinity -* of the event source(s). +* of the event source(s). 
If the clock device has no percpu +* semantics, all timers will be queued to slot #0. */ for_each_online_cpu(cpu) { tmd = xnclock_percpu_timerdata(clock, cpu); @@ -712,18 +729,32 @@ EXPORT_SYMBOL_GPL(xnclock_deregister); * * @coretags{coreirq-only, atomic-entry} * - * @note The current CPU must be part of the real-time affinity set, - * otherwise weird things may happen.
[Xenomai-git] Philippe Gerum : cobalt/clock: add support for devices without percpu semantics
Module: xenomai-3 Branch: stable-3.0.x Commit: 596acc3ce2f77bab32bc36ec2666e79d4d5c00f4 URL: http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=596acc3ce2f77bab32bc36ec2666e79d4d5c00f4 Author: Philippe Gerum Date: Wed Nov 16 20:49:47 2016 +0100 cobalt/clock: add support for devices without percpu semantics Some clock devices have no percpu semantics. Passing a NULL affinity mask to xnclock_register() (and cobalt_clock_register() as well) now causes all timers for any such clock to be maintained into a single queue xnclock_tick() will inspect exclusively regardless of the current CPU. --- include/cobalt/kernel/timer.h |9 -- kernel/cobalt/clock.c | 65 ++--- kernel/cobalt/timer.c | 22 +- 3 files changed, 63 insertions(+), 33 deletions(-) diff --git a/include/cobalt/kernel/timer.h b/include/cobalt/kernel/timer.h index 700392e..86ac7a7 100644 --- a/include/cobalt/kernel/timer.h +++ b/include/cobalt/kernel/timer.h @@ -311,15 +311,6 @@ static inline struct xnsched *xntimer_sched(struct xntimer *timer) &tmd->q;\ }) -static inline xntimerq_t *xntimer_this_queue(struct xntimer *timer) -{ - struct xntimerdata *tmd; - - tmd = xnclock_this_timerdata(xntimer_clock(timer)); - - return &tmd->q; -} - static inline unsigned long xntimer_gravity(struct xntimer *timer) { struct xnclock *clock = xntimer_clock(timer); diff --git a/kernel/cobalt/clock.c b/kernel/cobalt/clock.c index a4d0df8..796358f 100644 --- a/kernel/cobalt/clock.c +++ b/kernel/cobalt/clock.c @@ -137,6 +137,10 @@ void xnclock_core_local_shot(struct xnsched *sched) if (sched->status & XNINTCK) return; + /* +* Assume the core clock device always has percpu semantics in +* SMP. +*/ tmd = xnclock_this_timerdata(&nkclock); h = xntimerq_head(&tmd->q); if (h == NULL) @@ -334,7 +338,13 @@ int xnclock_get_default_cpu(struct xnclock *clock, int cpu) * suggested CPU does not receive events from this device, * return the first one which does. We also account for the * dynamic set of real-time CPUs. 
+* +* A clock device with no percpu semantics causes this routine +* to return CPU0 unconditionally. */ + if (cpumask_empty(&clock->affinity)) + return 0; + cpumask_and(&set, &clock->affinity, &cobalt_cpu_affinity); if (!cpumask_empty(&set) && !cpumask_test_cpu(cpu, &set)) cpu = cpumask_first(&set); @@ -620,7 +630,10 @@ static inline void cleanup_clock_proc(struct xnclock *clock) { } * @param clock The new clock to register. * * @param affinity The set of CPUs we may expect the backing clock - * device to tick on. + * device to tick on. As a special case, passing a NULL affinity mask + * means that timer IRQs cannot be seen as percpu events, in which + * case all outstanding timers will be maintained into a single global + * queue instead of percpu timer queues. * * @coretags{secondary-only} */ @@ -633,14 +646,17 @@ int xnclock_register(struct xnclock *clock, const cpumask_t *affinity) #ifdef CONFIG_SMP /* -* A CPU affinity set is defined for each clock, enumerating -* the CPUs which can receive ticks from the backing clock -* device. This set must be a subset of the real-time CPU -* set. +* A CPU affinity set may be defined for each clock, +* enumerating the CPUs which can receive ticks from the +* backing clock device. When given, this set must be a +* subset of the real-time CPU set. */ - cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus); - if (cpumask_empty(&clock->affinity)) - return -EINVAL; + if (affinity) { + cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus); + if (cpumask_empty(&clock->affinity)) + return -EINVAL; + } else /* No percpu semantics. */ + cpumask_clear(&clock->affinity); #endif /* Allocate the percpu timer queue slot. */ @@ -651,7 +667,8 @@ int xnclock_register(struct xnclock *clock, const cpumask_t *affinity) /* * POLA: init all timer slots for the new clock, although some * of them might remain unused depending on the CPU affinity -* of the event source(s). +* of the event source(s). 
If the clock device has no percpu +* semantics, all timers will be queued to slot #0. */ for_each_online_cpu(cpu) { tmd = xnclock_percpu_timerdata(clock, cpu); @@ -712,18 +729,32 @@ EXPORT_SYMBOL_GPL(xnclock_deregister); * * @coretags{coreirq-only, atomic-entry} * - * @note The current CPU must be part of the real-time affinity set, - * otherwise weird things may happen.