On Thu, Oct 20, 2016 at 11:29 AM, Cong Wang <xiyou.wangc...@gmail.com> wrote:
> On Thu, Oct 20, 2016 at 7:58 AM, Stephen Smalley <s...@tycho.nsa.gov> wrote:
>> On 10/20/2016 02:52 AM, Cong Wang wrote:
>>> A kernel warning inside __local_bh_enable_ip() was reported by people
>>> running SELinux; it is caused by some SELinux functions
>>> (indirectly) calling peernet2id() with IRQs disabled in process context,
>>> and the kernel complains when we re-enable BH while IRQs are disabled.
>>> Silence this warning by saving the IRQ context in peernet2id(); BH is
>>> still implicitly disabled.
>>
>> Not sure this suffices; kill_fasync() -> send_sigio() ->
>> send_sigio_to_task() -> sigio_perm() -> security_file_send_sigiotask()
>> -> selinux_file_send_sigiotask() -> ... -> audit_log() -> ... ->
>> peernet2id()
>
> Oh, this is a new one. kill_fasync() is called in an IRQ handler, so we
> actually do the multicast in IRQ context. It makes no sense: netlink
> multicast can be very expensive if we have many listeners.
>
> I am Cc'ing Richard, who added that multicast in audit_log_end(). It seems
> not easy to just move the multicast to a workqueue, since the skb is copied
> from the audit_buffer, which is freed immediately after that; we probably
> need another queue like audit_skb_queue.

Please let me know if the attached patch makes any sense to you, before
I give it a serious test.

Thanks!
diff --git a/kernel/audit.c b/kernel/audit.c
index f1ca116..cb2b31b 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -139,6 +139,7 @@ static int     audit_freelist_count;
 static LIST_HEAD(audit_freelist);
 
 static struct sk_buff_head audit_skb_queue;
+static struct sk_buff_head audit_skb_multicast_queue;
 /* queue of skbs to send to auditd when/if it comes back */
 static struct sk_buff_head audit_skb_hold_queue;
 static struct task_struct *kauditd_task;
@@ -468,7 +469,8 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb, 
gfp_t gfp_mask)
        if (!copy)
                return;
 
-       nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask);
+       skb_queue_tail(&audit_skb_multicast_queue, copy);
+       wake_up_interruptible(&kauditd_wait);
 }
 
 /*
@@ -509,6 +511,25 @@ static void flush_hold_queue(void)
        consume_skb(skb);
 }
 
+static void flush_multicast_queue(void)
+{
+       struct audit_net *aunet = net_generic(&init_net, audit_net_id);
+       struct sock *sock = aunet->nlsk;
+       struct sk_buff *skb;
+
+       if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
+               return;
+
+       skb = skb_dequeue(&audit_skb_multicast_queue);
+       if (likely(!skb))
+               return;
+
+       while (skb) {
+               nlmsg_multicast(sock, skb, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
+               skb = skb_dequeue(&audit_skb_multicast_queue);
+       }
+}
+
 static int kauditd_thread(void *dummy)
 {
        set_freezable();
@@ -517,6 +538,8 @@ static int kauditd_thread(void *dummy)
 
                flush_hold_queue();
 
+               flush_multicast_queue();
+
                skb = skb_dequeue(&audit_skb_queue);
 
                if (skb) {
@@ -530,7 +553,8 @@ static int kauditd_thread(void *dummy)
                        continue;
                }
 
-               wait_event_freezable(kauditd_wait, 
skb_queue_len(&audit_skb_queue));
+               wait_event_freezable(kauditd_wait, 
skb_queue_len(&audit_skb_queue)
+                                                  || 
skb_queue_len(&audit_skb_multicast_queue));
        }
        return 0;
 }

Reply via email to