From: pan xinhui <[email protected]>

This supports fixing the lock holder preemption issue when running as a
guest.
Two interfaces are added:

	bool vcpu_is_preempted(int cpu);
	unsigned int vcpu_get_yield_count(int cpu);

An arch may need to implement either of them. Some spinners may also
need to call need_yield_to(int cpu, unsigned int old_yield_count) to
know whether they need to stop spinning.

Signed-off-by: Pan Xinhui <[email protected]>
---
 include/linux/sched.h | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e42ada..9c565d2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3293,6 +3293,40 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
+#ifdef arch_vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+	return arch_vcpu_is_preempted(cpu);
+}
+#else
+static inline bool vcpu_is_preempted(int cpu)
+{
+	return false;
+}
+#endif
+
+#ifdef arch_vcpu_get_yield_count
+static inline unsigned int vcpu_get_yield_count(int cpu)
+{
+	return arch_vcpu_get_yield_count(cpu);
+}
+#else
+static inline unsigned int vcpu_get_yield_count(int cpu)
+{
+	return 0;
+}
+#endif
+
+static inline bool
+need_yield_to(int vcpu, unsigned int old_yield_count)
+{
+	/* if we find the vcpu is preempted,
+	 * then we may want to kick it, IOW, yield to it
+	 */
+	return vcpu_is_preempted(vcpu) ||
+		(vcpu_get_yield_count(vcpu) != old_yield_count);
+}
+
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
--
2.4.11
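Not part of the patch, but for illustration, a minimal sketch of how a
spinner might use these interfaces. The example_lock structure, its
owner_cpu field and example_trylock() are hypothetical names made up
for this sketch; only vcpu_get_yield_count()/need_yield_to() from the
patch and the existing READ_ONCE()/cpu_relax() helpers are real.

/*
 * Illustration only, not part of the patch. example_lock, owner_cpu
 * and example_trylock() are hypothetical.
 */
static void example_spin(struct example_lock *lock)
{
	int owner = READ_ONCE(lock->owner_cpu);
	unsigned int yield_count = vcpu_get_yield_count(owner);

	while (!example_trylock(lock)) {
		/*
		 * Stop spinning once the holder's vcpu is preempted or
		 * its yield count has moved on; busy-waiting on a
		 * descheduled holder only wastes cycles.
		 */
		if (need_yield_to(owner, yield_count))
			break;
		cpu_relax();
	}
}

A real caller would presumably yield to (kick) the holder after
breaking out of the loop, e.g. via a hypervisor yield hint, and then
retry the lock.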

