Hi Jan,
On 23/11/2020 13:28, Jan Beulich wrote:
Use {read,write}_atomic() to rule out problematic compiler transformations (torn, merged, or repeated accesses), in particular observing that these accesses don't all happen under a consistent lock.
Requested-by: Julien Grall <jul...@xen.org>
Signed-off-by: Jan Beulich <jbeul...@suse.com>
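As background on what these helpers buy us: read_atomic()/write_atomic() are Xen's per-architecture, size-generic single-access helpers. Conceptually they boil down to going through a volatile-qualified pointer, so the compiler must emit exactly one load or store and can neither merge, repeat, nor split the access; freedom from tearing additionally relies on the object being a naturally aligned scalar. A minimal, self-contained sketch of the idea (illustrative only, not Xen's actual implementation; the _u32-suffixed names are made up for this example):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-ins for Xen's read_atomic()/write_atomic().  The
 * volatile cast forces the compiler to emit exactly one access and
 * prevents it from re-reading, caching, or splitting the location.
 */
static inline uint32_t read_atomic_u32(const uint32_t *p)
{
    return *(const volatile uint32_t *)p;
}

static inline void write_atomic_u32(uint32_t *p, uint32_t val)
{
    *(volatile uint32_t *)p = val;
}

int main(void)
{
    uint32_t port = 0;

    write_atomic_u32(&port, 3);                               /* single store */
    printf("port = %u\n", (unsigned)read_atomic_u32(&port));  /* single load */

    return 0;
}

With that in mind, the hunks below are a mechanical substitution on the plain accesses to v->virq_to_evtchn[]; the values read and written are unchanged.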
Reviewed-by: Julien Grall <jgr...@amazon.com>
Cheers,
---
v3: New.
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -446,7 +446,7 @@ int evtchn_bind_virq(evtchn_bind_virq_t
     spin_lock(&d->event_lock);
-    if ( v->virq_to_evtchn[virq] != 0 )
+    if ( read_atomic(&v->virq_to_evtchn[virq]) )
         ERROR_EXIT(-EEXIST);
     if ( port != 0 )
@@ -474,7 +474,8 @@ int evtchn_bind_virq(evtchn_bind_virq_t
     evtchn_write_unlock(chn);
-    v->virq_to_evtchn[virq] = bind->port = port;
+    bind->port = port;
+    write_atomic(&v->virq_to_evtchn[virq], port);
  out:
     spin_unlock(&d->event_lock);
@@ -660,9 +661,9 @@ int evtchn_close(struct domain *d1, int
     case ECS_VIRQ:
         for_each_vcpu ( d1, v )
         {
-            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
+            if ( read_atomic(&v->virq_to_evtchn[chn1->u.virq]) != port1 )
                 continue;
-            v->virq_to_evtchn[chn1->u.virq] = 0;
+            write_atomic(&v->virq_to_evtchn[chn1->u.virq], 0);
             spin_barrier(&v->virq_lock);
         }
         break;
@@ -801,7 +802,7 @@ bool evtchn_virq_enabled(const struct vc
     if ( virq_is_global(virq) && v->vcpu_id )
         v = domain_vcpu(v->domain, 0);
-    return v->virq_to_evtchn[virq];
+    return read_atomic(&v->virq_to_evtchn[virq]);
 }
 void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq)
@@ -814,7 +815,7 @@ void send_guest_vcpu_virq(struct vcpu *v
     spin_lock_irqsave(&v->virq_lock, flags);
-    port = v->virq_to_evtchn[virq];
+    port = read_atomic(&v->virq_to_evtchn[virq]);
     if ( unlikely(port == 0) )
         goto out;
@@ -843,7 +844,7 @@ void send_guest_global_virq(struct domai
     spin_lock_irqsave(&v->virq_lock, flags);
-    port = v->virq_to_evtchn[virq];
+    port = read_atomic(&v->virq_to_evtchn[virq]);
     if ( unlikely(port == 0) )
         goto out;
--
Julien Grall