Re: [PATCH v3 3/7] DSPBRIDGE: do not call follow_page

2010-12-20 Thread Felipe Contreras
On Thu, May 27, 2010 at 7:02 PM, Ohad Ben-Cohen o...@wizery.com wrote:
 Eliminate the call to follow_page. Instead, use the page
 information that was kept during the proc_map call.
 This also has the advantage that users can now only
 specify memory areas that were previously mapped.

 Signed-off-by: Ohad Ben-Cohen o...@wizery.com

I found another issue with this patch:

 -       if (memory_sync_vma((u32) pmpu_addr, ul_size, FlushMemType)) {
 +       /* find requested memory area in cached mapping information */
 +       map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
 +       if (!map_obj) {
 +               pr_err("%s: find_containing_mapping failed\n", __func__);
 +               status = -EFAULT;
 +               goto err_out;
 +       }
 +       if (memory_sync_page(map_obj, (u32) pmpu_addr, ul_size, ul_flags)) {
                pr_err("%s: InValid address parameters %p %x\n",
 -                      __func__, pmpu_addr, ul_size);
 +                              __func__, pmpu_addr, ul_size);
                status = -EFAULT;
        }

find_containing_mapping() is taking the lock for dmm_map_list,
however, nothing prevents the map_obj from being destroyed after that,
especially if kcalloc sleeps, and then an unmap happens. While doing
some stress testing I found there's a race condition that makes
exactly that happen.

I'm sending some patches to fix that.

-- 
Felipe Contreras
--
To unsubscribe from this list: send the line "unsubscribe linux-omap" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH v3 3/7] DSPBRIDGE: do not call follow_page

2010-07-25 Thread Felipe Contreras
Hi,

Just for the record, I found a problem in this patch. The next patch
in the series overrides it, so it's not that important, unless
somebody picks only this patch.

On Thu, May 27, 2010 at 7:02 PM, Ohad Ben-Cohen o...@wizery.com wrote:
 @@ -537,23 +606,30 @@ dsp_status proc_enum_nodes(void *hprocessor, void 
 **node_tab,
  }

  /* Cache operation against kernel address instead of users */
 -static int memory_sync_page(struct vm_area_struct *vma, unsigned long start,
 -                           ssize_t len, enum dsp_flushtype ftype)
 +static int memory_sync_page(struct dmm_map_object *map_obj,
 +               unsigned long start, ssize_t len, enum dsp_flushtype ftype)
  {
        struct page *page;
        void *kaddr;

[...]

 -       if (memory_sync_vma((u32) pmpu_addr, ul_size, FlushMemType)) {
 +       /* find requested memory area in cached mapping information */
 +       map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
 +       if (!map_obj) {
 +               pr_err("%s: find_containing_mapping failed\n", __func__);
 +               status = -EFAULT;
 +               goto err_out;
 +       }
 +       if (memory_sync_page(map_obj, (u32) pmpu_addr, ul_size, ul_flags)) {

It should be FlushMemType, not ul_flags.

                pr_err("%s: InValid address parameters %p %x\n",
 -                      __func__, pmpu_addr, ul_size);
 +                              __func__, pmpu_addr, ul_size);
                status = -EFAULT;
        }

-- 
Felipe Contreras
--
To unsubscribe from this list: send the line "unsubscribe linux-omap" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v3 3/7] DSPBRIDGE: do not call follow_page

2010-05-27 Thread Ohad Ben-Cohen
Eliminate the call to follow_page. Instead, use the page
information that was kept during the proc_map call.
This also has the advantage that users can now only
specify memory areas that were previously mapped.

Signed-off-by: Ohad Ben-Cohen o...@wizery.com
---
You can also reach me at   ohadb at ti dot com  .

 drivers/dsp/bridge/pmgr/dspapi.c |4 +-
 drivers/dsp/bridge/rmgr/proc.c   |  148 +-
 2 files changed, 99 insertions(+), 53 deletions(-)

diff --git a/drivers/dsp/bridge/pmgr/dspapi.c b/drivers/dsp/bridge/pmgr/dspapi.c
index 05ea853..cc64a99 100644
--- a/drivers/dsp/bridge/pmgr/dspapi.c
+++ b/drivers/dsp/bridge/pmgr/dspapi.c
@@ -688,7 +688,7 @@ u32 procwrap_flush_memory(union Trapped_Args *args, void 
*pr_ctxt)
PROC_WRITEBACK_INVALIDATE_MEM)
return -EINVAL;
 
-   status = proc_flush_memory(args->args_proc_flushmemory.hprocessor,
+   status = proc_flush_memory(pr_ctxt,
   args->args_proc_flushmemory.pmpu_addr,
   args->args_proc_flushmemory.ul_size,
   args->args_proc_flushmemory.ul_flags);
@@ -703,7 +703,7 @@ u32 procwrap_invalidate_memory(union Trapped_Args *args, 
void *pr_ctxt)
dsp_status status;
 
status =
-   proc_invalidate_memory(args->args_proc_invalidatememory.hprocessor,
+   proc_invalidate_memory(pr_ctxt,
   args->args_proc_invalidatememory.pmpu_addr,
   args->args_proc_invalidatememory.ul_size);
return status;
diff --git a/drivers/dsp/bridge/rmgr/proc.c b/drivers/dsp/bridge/rmgr/proc.c
index 37258c4..6628483 100644
--- a/drivers/dsp/bridge/rmgr/proc.c
+++ b/drivers/dsp/bridge/rmgr/proc.c
@@ -189,6 +189,75 @@ out:
spin_unlock(&pr_ctxt->dmm_map_lock);
 }
 
+static int match_containing_map_obj(struct dmm_map_object *map_obj,
+   u32 mpu_addr, u32 size)
+{
+   u32 map_obj_end = map_obj->mpu_addr + map_obj->size;
+
+   return mpu_addr >= map_obj->mpu_addr &&
+   mpu_addr + size <= map_obj_end;
+}
+
+static struct dmm_map_object *find_containing_mapping(
+   struct process_context *pr_ctxt,
+   u32 mpu_addr, u32 size)
+{
+   struct dmm_map_object *map_obj;
+   pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
+   mpu_addr, size);
+
+   spin_lock(&pr_ctxt->dmm_map_lock);
+   list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
+   pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
+   __func__,
+   map_obj->mpu_addr,
+   map_obj->dsp_addr,
+   map_obj->size);
+   if (match_containing_map_obj(map_obj, mpu_addr, size)) {
+   pr_debug("%s: match!\n", __func__);
+   goto out;
+   }
+
+   pr_debug("%s: no match!\n", __func__);
+   }
+
+   map_obj = NULL;
+out:
+   spin_unlock(&pr_ctxt->dmm_map_lock);
+   return map_obj;
+}
+
+static int find_first_page_in_cache(struct dmm_map_object *map_obj,
+   unsigned long mpu_addr)
+{
+   u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
+   u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
+   int pg_index = requested_base_page - mapped_base_page;
+
+   if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
+   pr_err("%s: failed (got %d)\n", __func__, pg_index);
+   return -1;
+   }
+
+   pr_debug("%s: first page is %d\n", __func__, pg_index);
+   return pg_index;
+}
+
+static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
+   int pg_i)
+{
+   pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
+   pg_i, map_obj->num_usr_pgs);
+
+   if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
+   pr_err("%s: requested pg_i %d is out of mapped range\n",
+   __func__, pg_i);
+   return NULL;
+   }
+
+   return map_obj->pages[pg_i];
+}
+
 /*
  *   proc_attach 
  *  Purpose:
@@ -537,23 +606,30 @@ dsp_status proc_enum_nodes(void *hprocessor, void 
**node_tab,
 }
 
 /* Cache operation against kernel address instead of users */
-static int memory_sync_page(struct vm_area_struct *vma, unsigned long start,
-   ssize_t len, enum dsp_flushtype ftype)
+static int memory_sync_page(struct dmm_map_object *map_obj,
+   unsigned long start, ssize_t len, enum dsp_flushtype ftype)
 {
struct page *page;
void *kaddr;
unsigned