With the event channel lock no longer disabling interrupts, commit
52e1fc47abc3a0123 ("evtchn/Flask: pre-allocate node on send path") can
be reverted again.

Signed-off-by: Juergen Gross <jgr...@suse.com>
---
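For reference, a minimal sketch (not part of the patch) of the constraint
being dropped, using only identifiers visible in the hunks below:

    /*
     * Before: evtchn_send() acquired the per-channel lock with interrupts
     * disabled, so the Flask AVC could not allocate on that path; an early
     * xsm_evtchn_send(XSM_HOOK, ld, NULL) call let it pre-allocate one
     * per-CPU node instead, and avc_alloc_node() had to guard itself:
     */
    if ( !local_irq_is_enabled() )    /* old guard in avc_alloc_node() */
        goto out;

    /*
     * After: evtchn_read_lock() no longer disables interrupts, so
     * avc_alloc_node() can simply allocate in place:
     */
    node = xzalloc(struct avc_node);
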
 xen/common/event_channel.c  |  6 ---
 xen/include/xsm/xsm.h       |  1 -
 xen/xsm/flask/avc.c         | 78 ++++---------------------------------
 xen/xsm/flask/hooks.c       | 10 -----
 xen/xsm/flask/include/avc.h |  2 -
 5 files changed, 7 insertions(+), 90 deletions(-)

diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 43e3520df6..eacd96b92f 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -740,12 +740,6 @@ int evtchn_send(struct domain *ld, unsigned int lport)
     if ( !port_is_valid(ld, lport) )
         return -EINVAL;
 
-    /*
-     * As the call further down needs to avoid allocations (due to running
-     * with IRQs off), give XSM a chance to pre-allocate if needed.
-     */
-    xsm_evtchn_send(XSM_HOOK, ld, NULL);
-
     lchn = evtchn_from_port(ld, lport);
 
     evtchn_read_lock(lchn);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 358ec13ba8..7bd03d8817 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -59,7 +59,6 @@ struct xsm_operations {
     int (*evtchn_interdomain) (struct domain *d1, struct evtchn *chn1,
                                        struct domain *d2, struct evtchn *chn2);
     void (*evtchn_close_post) (struct evtchn *chn);
-    /* Note: Next hook may be called with 'chn' set to NULL. See call site. */
     int (*evtchn_send) (struct domain *d, struct evtchn *chn);
     int (*evtchn_status) (struct domain *d, struct evtchn *chn);
     int (*evtchn_reset) (struct domain *d1, struct domain *d2);
diff --git a/xen/xsm/flask/avc.c b/xen/xsm/flask/avc.c
index 2dfa1f4295..87ea38b7a0 100644
--- a/xen/xsm/flask/avc.c
+++ b/xen/xsm/flask/avc.c
@@ -24,9 +24,7 @@
 #include <xen/prefetch.h>
 #include <xen/kernel.h>
 #include <xen/sched.h>
-#include <xen/cpu.h>
 #include <xen/init.h>
-#include <xen/percpu.h>
 #include <xen/rcupdate.h>
 #include <asm/atomic.h>
 #include <asm/current.h>
@@ -343,79 +341,17 @@ static inline int avc_reclaim_node(void)
     return ecx;
 }
 
-static struct avc_node *new_node(void)
-{
-    struct avc_node *node = xzalloc(struct avc_node);
-
-    if ( node )
-    {
-        INIT_RCU_HEAD(&node->rhead);
-        INIT_HLIST_NODE(&node->list);
-        avc_cache_stats_incr(allocations);
-    }
-
-    return node;
-}
-
-/*
- * avc_has_perm_noaudit() may consume up to two nodes, which we may not be
- * able to obtain from the allocator at that point. Since this is merely
- * about caching earlier decisions, allow for (just) one pre-allocated node.
- */
-static DEFINE_PER_CPU(struct avc_node *, prealloc_node);
-
-void avc_prealloc(void)
-{
-    struct avc_node **prealloc = &this_cpu(prealloc_node);
-
-    if ( !*prealloc )
-        *prealloc = new_node();
-}
-
-static int cpu_callback(struct notifier_block *nfb, unsigned long action,
-                        void *hcpu)
-{
-    unsigned int cpu = (unsigned long)hcpu;
-    struct avc_node **prealloc = &per_cpu(prealloc_node, cpu);
-
-    if ( action == CPU_DEAD && *prealloc )
-    {
-        xfree(*prealloc);
-        *prealloc = NULL;
-        avc_cache_stats_incr(frees);
-    }
-
-    return NOTIFY_DONE;
-}
-
-static struct notifier_block cpu_nfb = {
-    .notifier_call = cpu_callback,
-    .priority = 99
-};
-
-static int __init cpu_nfb_init(void)
-{
-    register_cpu_notifier(&cpu_nfb);
-    return 0;
-}
-__initcall(cpu_nfb_init);
-
 static struct avc_node *avc_alloc_node(void)
 {
-    struct avc_node *node, **prealloc = &this_cpu(prealloc_node);
+    struct avc_node *node;
 
-    node = *prealloc;
-    *prealloc = NULL;
+    node = xzalloc(struct avc_node);
+    if (!node)
+        goto out;
 
-    if ( !node )
-    {
-        /* Must not call xmalloc() & Co with IRQs off. */
-        if ( !local_irq_is_enabled() )
-            goto out;
-        node = new_node();
-        if ( !node )
-            goto out;
-    }
+    INIT_RCU_HEAD(&node->rhead);
+    INIT_HLIST_NODE(&node->list);
+    avc_cache_stats_incr(allocations);
 
     atomic_inc(&avc_cache.active_nodes);
     if ( atomic_read(&avc_cache.active_nodes) > avc_cache_threshold )
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index de050cc9fe..19b0d9e3eb 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -281,16 +281,6 @@ static int flask_evtchn_send(struct domain *d, struct evtchn *chn)
 {
     int rc;
 
-    /*
-     * When called with non-NULL chn, memory allocation may not be permitted.
-     * Allow AVC to preallocate nodes as necessary upon early notification.
-     */
-    if ( !chn )
-    {
-        avc_prealloc();
-        return 0;
-    }
-
     switch ( chn->state )
     {
     case ECS_INTERDOMAIN:
diff --git a/xen/xsm/flask/include/avc.h b/xen/xsm/flask/include/avc.h
index 722919b762..c14bd07a2b 100644
--- a/xen/xsm/flask/include/avc.h
+++ b/xen/xsm/flask/include/avc.h
@@ -91,8 +91,6 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
 int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested,
                                              struct avc_audit_data *auditdata);
 
-void avc_prealloc(void);
-
 /* Exported to selinuxfs */
 struct xen_flask_hash_stats;
 int avc_get_hash_stats(struct xen_flask_hash_stats *arg);
-- 
2.26.2

