From: "Luis R. Rodriguez"
Under special circumstances we may want to force
voluntary preemption even for CONFIG_PREEMPT=n
kernels with interrupts disabled. This adds a helper
to let us do that.
Cc: Borislav Petkov
Cc: David Vrabel
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
Cc: x...@kernel.org
Cc: Andy Lutomirski
Cc: Steven Rostedt
Cc: Masami Hiramatsu
Cc: Jan Beulich
Cc: linux-ker...@vger.kernel.org
Signed-off-by: Luis R. Rodriguez
---
include/linux/sched.h | 7 +++++++
kernel/sched/core.c | 10 ++++++++++
2 files changed, 17 insertions(+)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5e344bb..92da927 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2759,6 +2759,13 @@ static inline int signal_pending_state(long state, struct task_struct *p)
*/
extern int _cond_resched(void);
+/*
+ * Voluntarily preempting the kernel even for CONFIG_PREEMPT=n kernels
+ * on very special circumstances. This is to be used with interrupts
+ * disabled.
+ */
+extern int cond_resched_irq(void);
+
#define cond_resched() ({ \
__might_sleep(__FILE__, __LINE__, 0); \
_cond_resched();\
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 89e7283..573edb1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4239,6 +4239,16 @@ int __sched _cond_resched(void)
}
EXPORT_SYMBOL(_cond_resched);
+int __sched cond_resched_irq(void)
+{
+ if (should_resched()) {
+ preempt_schedule_irq();
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cond_resched_irq);
+
/*
* __cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock.
--
2.1.1
___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel