From: Dexuan Cui <de...@microsoft.com>

With the recent introduction of the per-channel tasklet, we need to
update the way we handle the following three concurrency issues:

1. hv_process_channel_removal -> percpu_channel_deq vs.
   vmbus_chan_sched -> list_for_each_entry(..., percpu_list);

2. vmbus_process_offer -> percpu_channel_enq/deq vs. vmbus_chan_sched;

3. vmbus_close_internal vs. the per-channel tasklet vmbus_on_event.

The first two issues are handled by Stephen's recent patch
"vmbus: use rcu for per-cpu channel list", and the third is handled
by calling tasklet_disable() in vmbus_close_internal() here.
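
tasklet_disable() is sufficient for the third race because it bumps
the tasklet's disable count and then waits for any in-flight run of
the callback to finish. A minimal sketch of that serialization (not
the in-tree code; vmbus_close_sketch() is a hypothetical stand-in for
vmbus_close_internal()):

	#include <linux/interrupt.h>	/* tasklet_disable() */
	#include <linux/hyperv.h>	/* struct vmbus_channel */

	static int vmbus_close_sketch(struct vmbus_channel *channel)
	{
		/*
		 * After this returns, vmbus_on_event() is guaranteed
		 * not to be running, and it cannot be scheduled again
		 * until a matching tasklet_enable().
		 */
		tasklet_disable(&channel->callback_event);

		/* ... the ring_buffer pages can now be freed safely ... */

		return 0;
	}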

We no longer need the original hv_event_tasklet_disable/enable since
we now use a per-channel tasklet instead of the previous per-CPU
tasklet, and in fact we must remove them because of a side effect they
now have: vmbus_process_offer -> hv_event_tasklet_enable ->
tasklet_schedule starts the per-channel callback prematurely, causing
a NULL dereference (the channel may not have been fully configured to
run the callback yet).
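
Illustratively, the problematic path is:

	vmbus_process_offer()
	  -> hv_event_tasklet_enable()
	    -> tasklet_schedule()	/* unconditional */
	      -> vmbus_on_event()	/* may run before the channel is
					   fully configured: NULL deref */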

Fixes: 631e63a9f346 ("vmbus: change to per channel tasklet")

Signed-off-by: Dexuan Cui <de...@microsoft.com>
Cc: "K. Y. Srinivasan" <k...@microsoft.com>
Cc: Haiyang Zhang <haiya...@microsoft.com>
Cc: Stephen Hemminger <sthem...@microsoft.com>
Tested-by: Vitaly Kuznetsov <vkuzn...@redhat.com>
Signed-off-by: K. Y. Srinivasan <k...@microsoft.com>
---
 drivers/hv/channel.c      |   12 ++++--------
 drivers/hv/channel_mgmt.c |   19 -------------------
 include/linux/hyperv.h    |    3 ---
 3 files changed, 4 insertions(+), 30 deletions(-)

diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 81a80c8..a5f4c43 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -530,15 +530,13 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
        int ret;
 
        /*
-        * vmbus_on_event(), running in the tasklet, can race
+        * vmbus_on_event(), running in the per-channel tasklet, can race
         * with vmbus_close_internal() in the case of SMP guest, e.g., when
         * the former is accessing channel->inbound.ring_buffer, the latter
-        * could be freeing the ring_buffer pages.
-        *
-        * To resolve the race, we can serialize them by disabling the
-        * tasklet when the latter is running here.
+        * could be freeing the ring_buffer pages, so here we must stop it
+        * first.
         */
-       hv_event_tasklet_disable(channel);
+       tasklet_disable(&channel->callback_event);
 
        /*
         * In case a device driver's probe() fails (e.g.,
@@ -605,8 +603,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
                get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
 
 out:
-       hv_event_tasklet_enable(channel);
-
        return ret;
 }
 
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index d2cfa3e..bf846d0 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -382,19 +382,6 @@ static void vmbus_release_relid(u32 relid)
                       true);
 }
 
-void hv_event_tasklet_disable(struct vmbus_channel *channel)
-{
-       tasklet_disable(&channel->callback_event);
-}
-
-void hv_event_tasklet_enable(struct vmbus_channel *channel)
-{
-       tasklet_enable(&channel->callback_event);
-
-       /* In case there is any pending event */
-       tasklet_schedule(&channel->callback_event);
-}
-
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
 {
        unsigned long flags;
@@ -403,7 +390,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
        BUG_ON(!channel->rescind);
        BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
 
-       hv_event_tasklet_disable(channel);
        if (channel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(channel->target_cpu,
@@ -412,7 +398,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
                percpu_channel_deq(channel);
                put_cpu();
        }
-       hv_event_tasklet_enable(channel);
 
        if (channel->primary_channel == NULL) {
                list_del(&channel->listentry);
@@ -506,7 +491,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 
        init_vp_index(newchannel, dev_type);
 
-       hv_event_tasklet_disable(newchannel);
        if (newchannel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(newchannel->target_cpu,
@@ -516,7 +500,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
                percpu_channel_enq(newchannel);
                put_cpu();
        }
-       hv_event_tasklet_enable(newchannel);
 
        /*
         * This state is used to indicate a successful open
@@ -566,7 +549,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
        list_del(&newchannel->listentry);
        mutex_unlock(&vmbus_connection.channel_mutex);
 
-       hv_event_tasklet_disable(newchannel);
        if (newchannel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(newchannel->target_cpu,
@@ -575,7 +557,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
                percpu_channel_deq(newchannel);
                put_cpu();
        }
-       hv_event_tasklet_enable(newchannel);
 
        vmbus_release_relid(newchannel->offermsg.child_relid);
 
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index c4c7ae9..970771a 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1437,9 +1437,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
                                const int *srv_version, int srv_vercnt,
                                int *nego_fw_version, int *nego_srv_version);
 
-void hv_event_tasklet_disable(struct vmbus_channel *channel);
-void hv_event_tasklet_enable(struct vmbus_channel *channel);
-
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
 
 void vmbus_setevent(struct vmbus_channel *channel);
-- 
1.7.1
