On 02/16/2018 01:48 AM, Laura Abbott wrote:
> On 02/12/2018 02:33 PM, Alexey Skidanov wrote:
>> Current ion kernel mapping implementation uses vmap() to map previously
>> allocated buffers into kernel virtual address space.
>>
>> On 32-bit platforms, vmap() might fail on large enough buffers due to the
>> limited available vmalloc space. dma_buf_kmap() should guarantee that
>> only one page is mapped at a time.
>>
>> To fix this, kmap()/kmap_atomic() is used to implement the appropriate
>> interfaces.
>>
>> Signed-off-by: Alexey Skidanov <alexey.skida...@intel.com>
>> ---
>>   drivers/staging/android/ion/ion.c | 97 +++++++++++++++++++--------------------
>>   drivers/staging/android/ion/ion.h |  1 -
>>   2 files changed, 48 insertions(+), 50 deletions(-)
>>
>> diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
>> index 57e0d80..75830e3 100644
>> --- a/drivers/staging/android/ion/ion.c
>> +++ b/drivers/staging/android/ion/ion.c
>> @@ -27,6 +27,7 @@
>>   #include <linux/slab.h>
>>   #include <linux/uaccess.h>
>>   #include <linux/vmalloc.h>
>> +#include <linux/highmem.h>
>>     #include "ion.h"
>> @@ -119,8 +120,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
>>     void ion_buffer_destroy(struct ion_buffer *buffer)
>>   {
>> -    if (WARN_ON(buffer->kmap_cnt > 0))
>> -        buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
>>       buffer->heap->ops->free(buffer);
>>       kfree(buffer);
>>   }
>> @@ -140,34 +139,6 @@ static void _ion_buffer_destroy(struct ion_buffer *buffer)
>>           ion_buffer_destroy(buffer);
>>   }
>>   -static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
>> -{
>> -    void *vaddr;
>> -
>> -    if (buffer->kmap_cnt) {
>> -        buffer->kmap_cnt++;
>> -        return buffer->vaddr;
>> -    }
>> -    vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
>> -    if (WARN_ONCE(!vaddr,
>> -              "heap->ops->map_kernel should return ERR_PTR on error"))
>> -        return ERR_PTR(-EINVAL);
>> -    if (IS_ERR(vaddr))
>> -        return vaddr;
>> -    buffer->vaddr = vaddr;
>> -    buffer->kmap_cnt++;
>> -    return vaddr;
>> -}
>> -
>> -static void ion_buffer_kmap_put(struct ion_buffer *buffer)
>> -{
>> -    buffer->kmap_cnt--;
>> -    if (!buffer->kmap_cnt) {
>> -        buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
>> -        buffer->vaddr = NULL;
>> -    }
>> -}
>> -
>>   static struct sg_table *dup_sg_table(struct sg_table *table)
>>   {
>>       struct sg_table *new_table;
>> @@ -305,34 +276,68 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
>>       _ion_buffer_destroy(buffer);
>>   }
>>   +static inline int sg_page_count(struct scatterlist *sg)
>> +{
>> +    return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
>> +}
>> +
>> +static struct page *ion_dma_buf_get_page(struct sg_table *sg_table,
>> +                     unsigned long offset)
>> +{
>> +    struct page *page;
>> +    struct scatterlist *sg;
>> +    int i;
>> +    unsigned int nr_pages;
>> +
>> +    nr_pages = 0;
>> +    page = NULL;
>> +    /* Find the page with specified offset */
>> +    for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
>> +        if (nr_pages + sg_page_count(sg) > offset) {
>> +            page = nth_page(sg_page(sg), offset - nr_pages);
>> +            break;
>> +        }
>> +
>> +        nr_pages += sg_page_count(sg);
>> +    }
>> +
>> +    return page;
>> +}
>>   static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
>>   {
>>       struct ion_buffer *buffer = dmabuf->priv;
>>   -    return buffer->vaddr + offset * PAGE_SIZE;
>> +    return kmap(ion_dma_buf_get_page(buffer->sg_table, offset));
>>   }
> 
> This unfortunately doesn't work for uncached buffers. We need to create
> an uncached alias otherwise we break what little coherency that guarantees.
> I'm still convinced that we can't actually do the uncached buffers
> correctly and they should be removed...
> 
> Thanks,
> Laura
> 
I assume your concern is a possible cache coherency issue resulting from
memory accesses with different cache attributes, especially when these
accesses come from different contexts.

But dma-buf requires that CPU access be started with
dma_buf_begin_cpu_access() and ended with dma_buf_end_cpu_access() (and
with the appropriate ioctl calls from user space) to ensure that there
are no I/O coherency issues. That is, the appropriate cache lines are
invalidated before the access and flushed after it (in the case of
read/write access).

So it seems that at the end of each CPU access, the most up-to-date copy
of the buffer is in RAM.
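
Just to illustrate, the kernel consumer access sequence I have in mind
would look roughly like the sketch below (read_first_page() is only an
illustrative name, not anything from this patch; direction chosen for a
read):

#include <linux/dma-buf.h>

/*
 * Sketch of the dma-buf CPU access protocol: bracket the access with
 * begin/end_cpu_access so caches are synced before and after, and map
 * only one page at a time with dma_buf_kmap().
 */
static int read_first_page(struct dma_buf *dmabuf)
{
    void *vaddr;
    int ret;

    ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
    if (ret)
        return ret;

    /* dma_buf_kmap() maps exactly one page at the given page offset */
    vaddr = dma_buf_kmap(dmabuf, 0);
    if (vaddr) {
        /* ... read the page through vaddr ... */
        dma_buf_kunmap(dmabuf, 0, vaddr);
    }

    return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}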

Probably I'm wrong, but I don't see the issue.

>>     static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
>>                      void *ptr)
>>   {
>> +    kunmap(ptr);
>> +}
>> +
>> +static void *ion_dma_buf_kmap_atomic(struct dma_buf *dmabuf,
>> +                     unsigned long offset)
>> +{
>> +    struct ion_buffer *buffer = dmabuf->priv;
>> +
>> +    return kmap_atomic(ion_dma_buf_get_page(buffer->sg_table,
>> +                        offset));
>> +}
>> +
>> +static void ion_dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
>> +                      unsigned long offset,
>> +                      void *ptr)
>> +{
>> +    kunmap_atomic(ptr);
>>   }
>>     static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
>>                       enum dma_data_direction direction)
>>   {
>>       struct ion_buffer *buffer = dmabuf->priv;
>> -    void *vaddr;
>>       struct ion_dma_buf_attachment *a;
>>   -    /*
>> -     * TODO: Move this elsewhere because we don't always need a vaddr
>> -     */
>> -    if (buffer->heap->ops->map_kernel) {
>> -        mutex_lock(&buffer->lock);
>> -        vaddr = ion_buffer_kmap_get(buffer);
>> -        mutex_unlock(&buffer->lock);
>> -    }
>> -
>>       mutex_lock(&buffer->lock);
>>       list_for_each_entry(a, &buffer->attachments, list) {
>>           dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
>> @@ -349,12 +354,6 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
>>       struct ion_buffer *buffer = dmabuf->priv;
>>       struct ion_dma_buf_attachment *a;
>>   -    if (buffer->heap->ops->map_kernel) {
>> -        mutex_lock(&buffer->lock);
>> -        ion_buffer_kmap_put(buffer);
>> -        mutex_unlock(&buffer->lock);
>> -    }
>> -
>>       mutex_lock(&buffer->lock);
>>       list_for_each_entry(a, &buffer->attachments, list) {
>>           dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
>> @@ -374,8 +373,8 @@ static const struct dma_buf_ops dma_buf_ops = {
>>       .detach = ion_dma_buf_detatch,
>>       .begin_cpu_access = ion_dma_buf_begin_cpu_access,
>>       .end_cpu_access = ion_dma_buf_end_cpu_access,
>> -    .map_atomic = ion_dma_buf_kmap,
>> -    .unmap_atomic = ion_dma_buf_kunmap,
>> +    .map_atomic = ion_dma_buf_kmap_atomic,
>> +    .unmap_atomic = ion_dma_buf_kunmap_atomic,
>>       .map = ion_dma_buf_kmap,
>>       .unmap = ion_dma_buf_kunmap,
>>   };
>> diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
>> index a238f23..e2bbd74 100644
>> --- a/drivers/staging/android/ion/ion.h
>> +++ b/drivers/staging/android/ion/ion.h
>> @@ -72,7 +72,6 @@ struct ion_buffer {
>>       size_t size;
>>       void *priv_virt;
>>       struct mutex lock;
>> -    int kmap_cnt;
>>       void *vaddr;
>>       struct sg_table *sg_table;
>>       struct list_head attachments;
>>
> 
