Re: [PATCH v2 3/5] android: binder: Move buffer out of area shared with user space

2017-08-24 Thread Dan Carpenter
On Wed, Aug 23, 2017 at 08:46:41AM -0700, Sherry Yang wrote:
> The binder driver allocates buffer metadata in a region that is mapped
> into user space.  This metadata contains kernel pointers.
> 
> This patch allocates buffer metadata on the kernel heap, which is not
> mapped into user space, and uses a pointer to refer to the mapped data.
> 

This feels like it has a security impact, right?  The original code is
an info leak?
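
For anyone following along, the layout change is roughly this (a
simplified sketch, not the exact definitions from the driver headers):

	/* Before: the metadata sits at the start of each buffer inside
	 * the mmap()ed region, so these kernel pointers are readable
	 * from user space. */
	struct binder_buffer {
		struct list_head entry;
		struct rb_node rb_node;
		/* ... flags, transaction, sizes ... */
		uint8_t data[0];	/* payload follows in the mapping */
	};

	/* After: the metadata is kzalloc()ed on the kernel heap and
	 * only ->data points into the mapped region. */
	struct binder_buffer {
		struct list_head entry;
		struct rb_node rb_node;
		/* ... flags, transaction, sizes ... */
		void *data;		/* payload address in the mapping */
	};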

> @@ -664,7 +679,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
>  
>   return 0;
>  
> -err_alloc_small_buf_failed:
> +err_alloc_buf_struct_failed:
>   kfree(alloc->pages);
>   alloc->pages = NULL;
>  err_alloc_pages_failed:

Not really related to your patch, but I was just looking at the
error handling here.  It looks like this with your patch applied.

   682  err_alloc_buf_struct_failed:
   683  kfree(alloc->pages);
   684  alloc->pages = NULL;
   685  err_alloc_pages_failed:
   686  mutex_lock(&binder_alloc_mmap_lock);
   687  vfree(alloc->buffer);

The vfree() here is supposed to release the resources from get_vm_area().
Why do people not use free_vm_area() instead?  It feels like we're
freeing "area->addr" but leaking "area" itself, though perhaps I have
misunderstood something.
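
To make the contrast concrete, here is a minimal sketch of the two
cleanup styles (a made-up demo function, error paths trimmed):

	#include <linux/vmalloc.h>

	static int demo(void)
	{
		struct vm_struct *area;

		area = get_vm_area(PAGE_SIZE, VM_ALLOC);
		if (!area)
			return -ENOMEM;

		/* ... map pages at area->addr and use the mapping ... */

		/* Explicit pairing: release the area by its descriptor. */
		free_vm_area(area);

		/*
		 * The binder style would be vfree(area->addr) instead:
		 * vfree() looks the vm_struct up again from the address
		 * before freeing, so presumably "area" itself is not
		 * leaked, but the pairing is much less obvious.
		 */
		return 0;
	}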

   688  alloc->buffer = NULL;
   689  err_get_vm_area_failed:
   690  err_already_mapped:
   691  mutex_unlock(&binder_alloc_mmap_lock);
   692  pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
   693 alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
   694  return ret;

regards,
dan carpenter


[PATCH v2 3/5] android: binder: Move buffer out of area shared with user space

2017-08-23 Thread Sherry Yang
The binder driver allocates buffer metadata in a region that is mapped
into user space.  This metadata contains kernel pointers.

This patch allocates buffer metadata on the kernel heap, which is not
mapped into user space, and uses a pointer to refer to the mapped data.

Signed-off-by: Sherry Yang 
---
 drivers/android/binder_alloc.c  | 144 +++-
 drivers/android/binder_alloc.h  |   2 +-
 drivers/android/binder_alloc_selftest.c |  11 ++-
 3 files changed, 90 insertions(+), 67 deletions(-)
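
A note on the zero-size padding hunk below: the allocated-buffers
rbtree is now keyed on buffer->data rather than on the metadata
struct's own address, so two back-to-back zero-sized allocations
would otherwise get identical keys.  A sketch of the failure mode this
guards against (hypothetical values, 64-bit):

	/*
	 * Without the padding, both buffers would have data == base + 0
	 * and the second rbtree insert would hit the BUG() in
	 * binder_insert_allocated_buffer_locked():
	 *
	 *   buf A: data = base + 0, requested size 0
	 *   buf B: data = base + 0, requested size 0   <-- duplicate key
	 *
	 * Padding each request to sizeof(void *) keeps the keys unique:
	 *
	 *   buf A: data = base + 0, padded size 8
	 *   buf B: data = base + 8
	 */
	size = max(size, sizeof(void *));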

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index f15af2b55a62..e96659215f25 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -62,9 +62,9 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
   struct binder_buffer *buffer)
 {
if (list_is_last(&buffer->entry, &alloc->buffers))
-   return alloc->buffer +
-  alloc->buffer_size - (void *)buffer->data;
-   return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
+   return (u8 *)alloc->buffer +
+   alloc->buffer_size - (u8 *)buffer->data;
+   return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -114,9 +114,9 @@ static void binder_insert_allocated_buffer_locked(
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
 
-   if (new_buffer < buffer)
+   if (new_buffer->data < buffer->data)
p = &parent->rb_left;
-   else if (new_buffer > buffer)
+   else if (new_buffer->data > buffer->data)
p = &parent->rb_right;
else
BUG();
@@ -131,18 +131,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 {
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
-   struct binder_buffer *kern_ptr;
+   void *kern_ptr;
 
-   kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
-   - offsetof(struct binder_buffer, data));
+   kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
 
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
 
-   if (kern_ptr < buffer)
+   if (kern_ptr < buffer->data)
n = n->rb_left;
-   else if (kern_ptr > buffer)
+   else if (kern_ptr > buffer->data)
n = n->rb_right;
else {
/*
@@ -330,6 +329,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
return ERR_PTR(-ENOSPC);
}
 
+   /* Pad 0-size buffers so they get assigned unique addresses */
+   size = max(size, sizeof(void *));
+
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
@@ -389,14 +391,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 
has_page_addr =
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-   if (n == NULL) {
-   if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-   buffer_size = size; /* no room for other buffers */
-   else
-   buffer_size = size + sizeof(struct binder_buffer);
-   }
+   WARN_ON(n && buffer_size != size);
end_page_addr =
-   (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+   (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
@@ -404,17 +401,25 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
if (ret)
return ERR_PTR(ret);
 
-   rb_erase(best_fit, &alloc->free_buffers);
-   buffer->free = 0;
-   buffer->free_in_progress = 0;
-   binder_insert_allocated_buffer_locked(alloc, buffer);
if (buffer_size != size) {
-   struct binder_buffer *new_buffer = (void *)buffer->data + size;
+   struct binder_buffer *new_buffer;
 
+   new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+   if (!new_buffer) {
+   pr_err("%s: %d failed to alloc new buffer struct\n",
+  __func__, alloc->pid);
+   goto err_alloc_buf_struct_failed;
+   }
+   new_buffer->data = (u8 *)buffer->data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_