Vcpu flags are checked and cleared atomically. Performance can be
improved with corresponding non-atomic versions since schedule.c
already has spin_locks in place.
Signed-off-by: Tianyang Chen
---
xen/common/sched_rt.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 1584d53..1a18f6d 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -936,7 +936,7 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu
*svc, s_time_t now)
if ( svc->cur_budget <= 0 )
{
svc->cur_budget = 0;
-set_bit(__RTDS_depleted, &svc->flags);
+__set_bit(__RTDS_depleted, &svc->flags);
}
/* TRACE */
@@ -1050,7 +1050,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now,
bool_t tasklet_work_sched
if ( snext != scurr &&
!is_idle_vcpu(current) &&
vcpu_runnable(current) )
-set_bit(__RTDS_delayed_runq_add, &svc->flags);
+__set_bit(__RTDS_delayed_runq_add, &svc->flags);
snext->last_start = now;
ret.time = -1; /* if an idle vcpu is picked */
@@ -1059,7 +1059,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now,
bool_t tasklet_work_sched
if ( snext != scurr )
{
q_remove(snext);
-set_bit(__RTDS_scheduled, &snext->flags);
+__set_bit(__RTDS_scheduled, &snext->flags);
}
if ( snext->vcpu->processor != cpu )
{
@@ -1093,7 +1093,7 @@ rt_vcpu_sleep(const struct scheduler *ops, struct vcpu
*vc)
replq_remove(ops, svc);
}
else if ( svc->flags & RTDS_delayed_runq_add )
-clear_bit(__RTDS_delayed_runq_add, &svc->flags);
+__clear_bit(__RTDS_delayed_runq_add, &svc->flags);
}
/*
@@ -1235,7 +1235,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
*/
if ( unlikely(svc->flags & RTDS_scheduled) )
{
-set_bit(__RTDS_delayed_runq_add, &svc->flags);
+__set_bit(__RTDS_delayed_runq_add, &svc->flags);
/*
* The vcpu is waking up already, and we didn't even had the time to
* remove its next replenishment event from the replenishment queue
@@ -1266,12 +1266,12 @@ rt_context_saved(const struct scheduler *ops, struct
vcpu *vc)
struct rt_vcpu *svc = rt_vcpu(vc);
spinlock_t *lock = vcpu_schedule_lock_irq(vc);
-clear_bit(__RTDS_scheduled, &svc->flags);
+__clear_bit(__RTDS_scheduled, &svc->flags);
/* not insert idle vcpu to runq */
if ( is_idle_vcpu(vc) )
goto out;
-if ( test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
+if ( __test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
likely(vcpu_runnable(vc)) )
{
runq_insert(ops, svc);
@@ -1447,7 +1447,7 @@ static void repl_timer_handler(void *data){
runq_tickle(ops, next_on_runq);
}
else if ( vcpu_on_q(svc) &&
- test_and_clear_bit(__RTDS_depleted, &svc->flags) )
+ __test_and_clear_bit(__RTDS_depleted, &svc->flags) )
runq_tickle(ops, svc);
list_del(&svc->replq_elem);
--
1.7.9.5
___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel