Re: [PATCH v2 1/2] xen/xenbus: avoid large structs and arrays on the stack

2020-07-02 Thread Boris Ostrovsky
On 7/1/20 8:16 AM, Juergen Gross wrote:
> xenbus_map_ring_valloc() and its sub-functions are putting quite large
> structs and arrays on the stack. This is problematic at runtime, but
> might also result in build failures (e.g. with clang, due to the
> -Werror,-Wframe-larger-than=... option being used).
>
> Fix that by moving most of the data from the stack into a dynamically
> allocated struct. Performance is not an issue here, as
> xenbus_map_ring_valloc() is used only when adding a new PV device to
> a backend driver.
>
> While at it, move some duplicated code from pv/hvm-specific mapping
> functions to the single caller.
>
> Reported-by: Arnd Bergmann 
> Signed-off-by: Juergen Gross 


Reviewed-by: Boris Ostrovsky 


[PATCH v2 1/2] xen/xenbus: avoid large structs and arrays on the stack

2020-07-01 Thread Juergen Gross
xenbus_map_ring_valloc() and its sub-functions are putting quite large
structs and arrays on the stack. This is problematic at runtime, but
might also result in build failures (e.g. with clang, due to the
-Werror,-Wframe-larger-than=... option being used).

Fix that by moving most of the data from the stack into a dynamically
allocated struct. Performance is not an issue here, as
xenbus_map_ring_valloc() is used only when adding a new PV device to
a backend driver.

While at it, move some duplicated code from pv/hvm-specific mapping
functions to the single caller.
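
As a minimal sketch of this stack-to-heap pattern (illustrative only, with
hypothetical names; assumes <linux/slab.h> for kzalloc()/kfree() and
<xen/xenbus.h> for XENBUS_MAX_RING_GRANTS):

struct map_bufs {		/* hypothetical example, not the patch's struct */
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
};

static int map_example(void)
{
	struct map_bufs *bufs;
	int err = 0;

	/* Allocate the working buffers on the heap instead of the stack. */
	bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	/* ... fill and use bufs->addrs / bufs->phys_addrs here ... */

	kfree(bufs);
	return err;
}

The actual patch below additionally carries the gnttab map/unmap arrays and
the xenbus_map_node in the same dynamically allocated struct.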

Reported-by: Arnd Bergmann 
Signed-off-by: Juergen Gross 
---
V2:
- shorten internal function names (Boris Ostrovsky)
---
 drivers/xen/xenbus/xenbus_client.c | 161 +++--
 1 file changed, 83 insertions(+), 78 deletions(-)

diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 040d2a43e8e3..9f8372079ecf 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -69,11 +69,27 @@ struct xenbus_map_node {
unsigned int   nr_handles;
 };
 
+struct map_ring_valloc {
+   struct xenbus_map_node *node;
+
+   /* Why do we need two arrays? See comment of __xenbus_map_ring */
+   union {
+   unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+   pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+   };
+   phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+
+   struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+   struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+
+   unsigned int idx;   /* HVM only. */
+};
+
 static DEFINE_SPINLOCK(xenbus_valloc_lock);
 static LIST_HEAD(xenbus_valloc_pages);
 
 struct xenbus_ring_ops {
-   int (*map)(struct xenbus_device *dev,
+   int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
   grant_ref_t *gnt_refs, unsigned int nr_grefs,
   void **vaddr);
int (*unmap)(struct xenbus_device *dev, void *vaddr);
@@ -449,12 +465,32 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
   unsigned int nr_grefs, void **vaddr)
 {
int err;
+   struct map_ring_valloc *info;
+
+   *vaddr = NULL;
+
+   if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+   return -EINVAL;
+
+   info = kzalloc(sizeof(*info), GFP_KERNEL);
+   if (!info)
+   return -ENOMEM;
+
+   info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
+   if (!info->node) {
+   err = -ENOMEM;
+   goto out;
+   }
+
+   err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
 
-   err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
/* Some hypervisors are buggy and can return 1. */
if (err > 0)
err = GNTST_general_error;
 
+ out:
+   kfree(info->node);
+   kfree(info);
return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
@@ -466,12 +502,10 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
 grant_ref_t *gnt_refs,
 unsigned int nr_grefs,
 grant_handle_t *handles,
-phys_addr_t *addrs,
+struct map_ring_valloc *info,
 unsigned int flags,
 bool *leaked)
 {
-   struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
-   struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
int i, j;
int err = GNTST_okay;
 
@@ -479,23 +513,22 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
return -EINVAL;
 
for (i = 0; i < nr_grefs; i++) {
-   memset(&map[i], 0, sizeof(map[i]));
-   gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
- dev->otherend_id);
+   gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
+ gnt_refs[i], dev->otherend_id);
handles[i] = INVALID_GRANT_HANDLE;
}
 
-   gnttab_batch_map(map, i);
+   gnttab_batch_map(info->map, i);
 
for (i = 0; i < nr_grefs; i++) {
-   if (map[i].status != GNTST_okay) {
-   err = map[i].status;
-   xenbus_dev_fatal(dev, map[i].status,
+   if (info->map[i].status != GNTST_okay) {
+   err = info->map[i].status;
+   xenbus_dev_fatal(dev, info->map[i].status,
 "mapping in shared page %d from domain 
%d",
 gnt_refs[i], dev->otherend_id);
goto fail;
} else
-   handles[i] = map[i].handle;
+   handles[i] = info->map[i].handle;
}
 
return GNTST_okay;
@@ -503,19 +536,19 @@ static