Re: [Xen-devel] [PATCH v3 02/13] xen/pvcalls: implement frontend disconnect

2017-09-08 Thread Stefano Stabellini
On Fri, 11 Aug 2017, Boris Ostrovsky wrote:
> On 07/31/2017 06:57 PM, Stefano Stabellini wrote:
> > Introduce a data structure named pvcalls_bedata. It contains pointers to
> > the command ring, the event channel, a list of active sockets and a list
> > of passive sockets. Accesses to the lists are protected by a spin_lock.
> >
> > Introduce a waitqueue to allow waiting for a response on commands sent
> > to the backend.
> >
> > Introduce an array of struct xen_pvcalls_response to store command
> > responses.
> >
> > Implement pvcalls frontend removal function. Go through the list of
> > active and passive sockets and free them all, one at a time.
> >
> > Signed-off-by: Stefano Stabellini 
> > CC: boris.ostrov...@oracle.com
> > CC: jgr...@suse.com
> > ---
> >  drivers/xen/pvcalls-front.c | 51 +
> >  1 file changed, 51 insertions(+)
> >
> > diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
> > index a8d38c2..a126195 100644
> > --- a/drivers/xen/pvcalls-front.c
> > +++ b/drivers/xen/pvcalls-front.c
> > @@ -20,6 +20,29 @@
> >  #include 
> >  #include 
> >  
> > +#define PVCALLS_INVALID_ID UINT_MAX
> > +#define PVCALLS_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
> > +#define PVCALLS_NR_REQ_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
> > +
> > +struct pvcalls_bedata {
> > +   struct xen_pvcalls_front_ring ring;
> > +   grant_ref_t ref;
> > +   int irq;
> > +
> > +   struct list_head socket_mappings;
> > +   struct list_head socketpass_mappings;
> > +   spinlock_t pvcallss_lock;
> 
In the backend this is called socket_lock and (subjectively) it sounds
like a better name here too.

I'll rename it.


> > +
> > +   wait_queue_head_t inflight_req;
> > +   struct xen_pvcalls_response rsp[PVCALLS_NR_REQ_PER_RING];
> > +};
> > +static struct xenbus_device *pvcalls_front_dev;
> > +
> > +static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
> > +{
> > +   return IRQ_HANDLED;
> > +}
> > +
> >  static const struct xenbus_device_id pvcalls_front_ids[] = {
> > { "pvcalls" },
> > { "" }
> > @@ -27,6 +50,34 @@
> >  
> >  static int pvcalls_front_remove(struct xenbus_device *dev)
> >  {
> > +   struct pvcalls_bedata *bedata;
> > +   struct sock_mapping *map = NULL, *n;
> > +
> > +   bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
> > +
> > +   list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
> > +   mutex_lock(&map->active.in_mutex);
> > +   mutex_lock(&map->active.out_mutex);
> > +   pvcalls_front_free_map(bedata, map);
> > +   mutex_unlock(&map->active.out_mutex);
> > +   mutex_unlock(&map->active.in_mutex);
> > +   kfree(map);
> 
> I think this is the same issue as the one discussed for some other patch
> --- unlocking a mutex and then immediately freeing the structure that
> contains it.

Yes, I'll fix this too.
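
For context, one way to avoid unlocking a mutex that lives inside the object
being kfree()'d is to skip the per-map mutexes in the teardown path entirely
and instead wait for in-flight users through a reference count before freeing.
A rough sketch of that pattern (the refcount field and the helper name are
made up for illustration, not taken from this series):

    /*
     * Sketch only: assumes struct sock_mapping gains an atomic_t
     * "refcount" field that every user of the mapping increments before
     * touching it and decrements when done, and that the caller of this
     * helper holds the last remaining reference.
     */
    static void sock_mapping_teardown(struct pvcalls_bedata *bedata,
                                      struct sock_mapping *map)
    {
            /* Spin until no other context still references the mapping. */
            while (atomic_read(&map->refcount) > 1)
                    cpu_relax();

            /*
             * Nobody can take in_mutex/out_mutex anymore, so freeing the
             * mapping (and the mutexes embedded in it) is safe.
             */
            pvcalls_front_free_map(bedata, map);
            kfree(map);
    }

The trade-off is that every path using the map then has to take and drop the
reference around its accesses.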


> > +   }
> > +   list_for_each_entry_safe(map, n, &bedata->socketpass_mappings, list) {
> > +   spin_lock(&bedata->pvcallss_lock);
> > +   list_del_init(&map->list);
> > +   spin_unlock(&bedata->pvcallss_lock);
> > +   kfree(map);
> > +   }
> > +   if (bedata->irq > 0)
> > +   unbind_from_irqhandler(bedata->irq, dev);
> > +   if (bedata->ref >= 0)
> > +   gnttab_end_foreign_access(bedata->ref, 0, 0);
> > +   kfree(bedata->ring.sring);
> > +   kfree(bedata);
> > +   dev_set_drvdata(&dev->dev, NULL);
> > +   xenbus_switch_state(dev, XenbusStateClosed);
> 
> Should we first move the state to Closed and then free things up? Or it
> doesn't matter?

I believe that is already done by the xenbus driver: this function is
supposed to be called after the frontend state is set to Closing.
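
For reference, the xenbus core's generic remove path (roughly as it looked in
drivers/xen/xenbus/xenbus_probe.c around that time; simplified from memory,
not a verbatim copy) invokes the driver's remove callback and then moves the
device to Closed itself:

    int xenbus_dev_remove(struct device *_dev)
    {
            struct xenbus_device *dev = to_xenbus_device(_dev);
            struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);

            free_otherend_watch(dev);

            if (drv->remove)
                    drv->remove(dev);   /* pvcalls_front_remove() runs here */

            free_otherend_details(dev);

            xenbus_switch_state(dev, XenbusStateClosed);
            return 0;
    }

If that is still the case, the ordering between the frees and the explicit
switch to Closed inside pvcalls_front_remove() should not make a practical
difference.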


> > +   pvcalls_front_dev = NULL;
> > return 0;
> >  }
> >  
> 



Re: [Xen-devel] [PATCH v3 02/13] xen/pvcalls: implement frontend disconnect

2017-08-11 Thread Boris Ostrovsky
On 07/31/2017 06:57 PM, Stefano Stabellini wrote:
> Introduce a data structure named pvcalls_bedata. It contains pointers to
> the command ring, the event channel, a list of active sockets and a list
> of passive sockets. Accesses to the lists are protected by a spin_lock.
>
> Introduce a waitqueue to allow waiting for a response on commands sent
> to the backend.
>
> Introduce an array of struct xen_pvcalls_response to store command
> responses.
>
> Implement pvcalls frontend removal function. Go through the list of
> active and passive sockets and free them all, one at a time.
>
> Signed-off-by: Stefano Stabellini 
> CC: boris.ostrov...@oracle.com
> CC: jgr...@suse.com
> ---
>  drivers/xen/pvcalls-front.c | 51 +
>  1 file changed, 51 insertions(+)
>
> diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
> index a8d38c2..a126195 100644
> --- a/drivers/xen/pvcalls-front.c
> +++ b/drivers/xen/pvcalls-front.c
> @@ -20,6 +20,29 @@
>  #include 
>  #include 
>  
> +#define PVCALLS_INVALID_ID UINT_MAX
> +#define PVCALLS_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
> +#define PVCALLS_NR_REQ_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
> +
> +struct pvcalls_bedata {
> + struct xen_pvcalls_front_ring ring;
> + grant_ref_t ref;
> + int irq;
> +
> + struct list_head socket_mappings;
> + struct list_head socketpass_mappings;
> + spinlock_t pvcallss_lock;

In the backend this is called socket_lock and (subjectively) it sounds
like a better name here too.

> +
> + wait_queue_head_t inflight_req;
> + struct xen_pvcalls_response rsp[PVCALLS_NR_REQ_PER_RING];
> +};
> +static struct xenbus_device *pvcalls_front_dev;
> +
> +static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
> +{
> + return IRQ_HANDLED;
> +}
> +
>  static const struct xenbus_device_id pvcalls_front_ids[] = {
>   { "pvcalls" },
>   { "" }
> @@ -27,6 +50,34 @@
>  
>  static int pvcalls_front_remove(struct xenbus_device *dev)
>  {
> + struct pvcalls_bedata *bedata;
> + struct sock_mapping *map = NULL, *n;
> +
> + bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
> +
> + list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
> + mutex_lock(&map->active.in_mutex);
> + mutex_lock(&map->active.out_mutex);
> + pvcalls_front_free_map(bedata, map);
> + mutex_unlock(&map->active.out_mutex);
> + mutex_unlock(&map->active.in_mutex);
> + kfree(map);

I think this is the same issue as the one discussed for some other patch
--- unlocking a mutex and then immediately freeing the structure that
contains it.

> + }
> + list_for_each_entry_safe(map, n, &bedata->socketpass_mappings, list) {
> + spin_lock(&bedata->pvcallss_lock);
> + list_del_init(&map->list);
> + spin_unlock(&bedata->pvcallss_lock);
> + kfree(map);
> + }
> + if (bedata->irq > 0)
> + unbind_from_irqhandler(bedata->irq, dev);
> + if (bedata->ref >= 0)
> + gnttab_end_foreign_access(bedata->ref, 0, 0);
> + kfree(bedata->ring.sring);
> + kfree(bedata);
> + dev_set_drvdata(&dev->dev, NULL);
> + xenbus_switch_state(dev, XenbusStateClosed);

Should we first move the state to Closed and then free things up? Or it
doesn't matter?

-boris

> + pvcalls_front_dev = NULL;
>   return 0;
>  }
>  




[Xen-devel] [PATCH v3 02/13] xen/pvcalls: implement frontend disconnect

2017-07-31 Thread Stefano Stabellini
Introduce a data structure named pvcalls_bedata. It contains pointers to
the command ring, the event channel, a list of active sockets and a list
of passive sockets. Accesses to the lists are protected by a spin_lock.

Introduce a waitqueue to allow waiting for a response on commands sent
to the backend.

Introduce an array of struct xen_pvcalls_response to store command
responses.

Implement pvcalls frontend removal function. Go through the list of
active and passive sockets and free them all, one at a time.

Signed-off-by: Stefano Stabellini 
CC: boris.ostrov...@oracle.com
CC: jgr...@suse.com
---
 drivers/xen/pvcalls-front.c | 51 +
 1 file changed, 51 insertions(+)

diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index a8d38c2..a126195 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -20,6 +20,29 @@
 #include 
 #include 
 
+#define PVCALLS_INVALID_ID UINT_MAX
+#define PVCALLS_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
+#define PVCALLS_NR_REQ_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
+
+struct pvcalls_bedata {
+   struct xen_pvcalls_front_ring ring;
+   grant_ref_t ref;
+   int irq;
+
+   struct list_head socket_mappings;
+   struct list_head socketpass_mappings;
+   spinlock_t pvcallss_lock;
+
+   wait_queue_head_t inflight_req;
+   struct xen_pvcalls_response rsp[PVCALLS_NR_REQ_PER_RING];
+};
+static struct xenbus_device *pvcalls_front_dev;
+
+static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
+{
+   return IRQ_HANDLED;
+}
+
 static const struct xenbus_device_id pvcalls_front_ids[] = {
{ "pvcalls" },
{ "" }
@@ -27,6 +50,34 @@
 
 static int pvcalls_front_remove(struct xenbus_device *dev)
 {
+   struct pvcalls_bedata *bedata;
+   struct sock_mapping *map = NULL, *n;
+
+   bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+   list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
+   mutex_lock(&map->active.in_mutex);
+   mutex_lock(&map->active.out_mutex);
+   pvcalls_front_free_map(bedata, map);
+   mutex_unlock(&map->active.out_mutex);
+   mutex_unlock(&map->active.in_mutex);
+   kfree(map);
+   }
+   list_for_each_entry_safe(map, n, &bedata->socketpass_mappings, list) {
+   spin_lock(&bedata->pvcallss_lock);
+   list_del_init(&map->list);
+   spin_unlock(&bedata->pvcallss_lock);
+   kfree(map);
+   }
+   if (bedata->irq > 0)
+   unbind_from_irqhandler(bedata->irq, dev);
+   if (bedata->ref >= 0)
+   gnttab_end_foreign_access(bedata->ref, 0, 0);
+   kfree(bedata->ring.sring);
+   kfree(bedata);
+   dev_set_drvdata(&dev->dev, NULL);
+   xenbus_switch_state(dev, XenbusStateClosed);
+   pvcalls_front_dev = NULL;
return 0;
 }
 
-- 
1.9.1

