[PATCH] drm/omap: Fix release of refill engine

2012-10-12 Thread Andy Gross
Please disregard this mail.  Wrong patch sent

On 10/12/2012 10:56 AM, Andy Gross wrote:
> During asynchronous refills, we don't wait for the refill to
> finish.  However, we cannot release the engine back to the idle
> list until it has actually completed the refill operation.  The
> engine release will now be done in the IRQ handler, but only
> for asynchronous refill operations.
>
> Synchronous refills will continue to release the engine after they
> unblock from waiting on the refill.
>
> Signed-off-by: Andy Gross 
> ---
>   drivers/staging/omapdrm/omap_dmm_priv.h  |5 ++-
>   drivers/staging/omapdrm/omap_dmm_tiler.c |   77 
> -
>   2 files changed, 57 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h 
> b/drivers/staging/omapdrm/omap_dmm_priv.h
> index 09ebc50..5ea73305 100644
> --- a/drivers/staging/omapdrm/omap_dmm_priv.h
> +++ b/drivers/staging/omapdrm/omap_dmm_priv.h
> @@ -141,6 +141,8 @@ struct refill_engine {
>   /* only one trans per engine for now */
>   struct dmm_txn txn;
>   
> + unsigned int async;
> +
>   wait_queue_head_t wait_for_refill;
>   
>   struct list_head idle_node;
> @@ -158,10 +160,11 @@ struct dmm {
>   dma_addr_t refill_pa;
>   
>   /* refill engines */
> - struct semaphore engine_sem;
> + wait_queue_head_t engine_queue;
>   struct list_head idle_head;
>   struct refill_engine *engines;
>   int num_engines;
> + atomic_t engine_counter;
>   
>   /* container information */
>   int container_width;
> diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c 
> b/drivers/staging/omapdrm/omap_dmm_tiler.c
> index fda9efc..eda2fce 100644
> --- a/drivers/staging/omapdrm/omap_dmm_tiler.c
> +++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
> @@ -29,7 +29,6 @@
>   #include <linux/mm.h>
>   #include <linux/time.h>
>   #include <linux/list.h>
> -#include <linux/semaphore.h>
>   
>   #include "omap_dmm_tiler.h"
>   #include "omap_dmm_priv.h"
> @@ -120,6 +119,18 @@ static int wait_status(struct refill_engine *engine, 
> uint32_t wait_mask)
>   return 0;
>   }
>   
> +static void release_engine(struct refill_engine *engine)
> +{
> + unsigned long flags;
> +
> + spin_lock_irqsave(&list_lock, flags);
> + list_add(&engine->idle_node, &omap_dmm->idle_head);
> + spin_unlock_irqrestore(&list_lock, flags);
> +
> + atomic_inc(&omap_dmm->engine_counter);
> + wake_up_interruptible(&omap_dmm->engine_queue);
> +}
> +
>   static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
>   {
>   struct dmm *dmm = arg;
> @@ -130,9 +141,13 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void 
> *arg)
>   writel(status, dmm->base + DMM_PAT_IRQSTATUS);
>   
>   for (i = 0; i < dmm->num_engines; i++) {
> - if (status & DMM_IRQSTAT_LST)
> + if (status & DMM_IRQSTAT_LST) {
>   wake_up_interruptible(&dmm->engines[i].wait_for_refill);
>
> + if (&dmm->engines[i].async)
> + release_engine(&dmm->engines[i]);
> + }
> +
>   status >>= 8;
>   }
>   
> @@ -146,17 +161,24 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, 
> struct tcm *tcm)
>   {
>   struct dmm_txn *txn = NULL;
>   struct refill_engine *engine = NULL;
> + int ret;
> + unsigned long flags;
> +
>   
> - down(&dmm->engine_sem);
> + /* wait until an engine is available */
> + ret = wait_event_interruptible(omap_dmm->engine_queue,
> + atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
> + if (ret)
> + return ERR_PTR(ret);
>   
>   /* grab an idle engine */
> - spin_lock(&list_lock);
> + spin_lock_irqsave(&list_lock, flags);
>   if (!list_empty(&dmm->idle_head)) {
>   engine = list_entry(dmm->idle_head.next, struct refill_engine,
>   idle_node);
>   list_del(&engine->idle_node);
>   }
> - spin_unlock(&list_lock);
> + spin_unlock_irqrestore(&list_lock, flags);
>   
>   BUG_ON(!engine);
>   
> @@ -174,7 +196,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, 
> struct tcm *tcm)
>* Add region to DMM transaction.  If pages or pages[i] is NULL, then the
>* corresponding slot is cleared (ie. dummy_pa is programmed)
>*/
> -static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
> +static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
>   struct page **pages, uint32_t npages, uint32_t roll)
>   {
>   dma_addr_t pat_pa = 0;
> @@ -208,7 +230,7 @@ static int dmm_txn_append(struct dmm_txn *txn, struct 
> pat_area *area,
>   
>   txn->last_pat = pat;
>   
> - return 0;
> + return;
>   }
>   
>   /**
> @@ -238,6 +260,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
>   goto cleanup;
>   }
>   
> + /* mark whether it is async to denote list management in IRQ handler */
> + engine->async = wait ? 0 : 1;
> +
>   /* kick reload */
>   writel(engine->refill_pa,
>   

[PATCH] drm/omap: Fix release of refill engine

2012-10-12 Thread Andy Gross
During asynchronous refills, we don't wait for the refill to
finish.  However, we cannot release the engine back to the idle
list until it has actually completed the refill operation.  The
engine release will now be done in the IRQ handler, but only
for asynchronous refill operations.

Synchronous refills will continue to release the engine after they
unblock from waiting on the refill.

Signed-off-by: Andy Gross 
---
 drivers/staging/omapdrm/omap_dmm_priv.h  |5 ++-
 drivers/staging/omapdrm/omap_dmm_tiler.c |   77 -
 2 files changed, 57 insertions(+), 25 deletions(-)

diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h 
b/drivers/staging/omapdrm/omap_dmm_priv.h
index 09ebc50..5ea73305 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -141,6 +141,8 @@ struct refill_engine {
/* only one trans per engine for now */
struct dmm_txn txn;

+   unsigned int async;
+
wait_queue_head_t wait_for_refill;

struct list_head idle_node;
@@ -158,10 +160,11 @@ struct dmm {
dma_addr_t refill_pa;

/* refill engines */
-   struct semaphore engine_sem;
+   wait_queue_head_t engine_queue;
struct list_head idle_head;
struct refill_engine *engines;
int num_engines;
+   atomic_t engine_counter;

/* container information */
int container_width;
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c 
b/drivers/staging/omapdrm/omap_dmm_tiler.c
index fda9efc..eda2fce 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -29,7 +29,6 @@
 #include <linux/mm.h>
 #include <linux/time.h>
 #include <linux/list.h>
-#include <linux/semaphore.h>

 #include "omap_dmm_tiler.h"
 #include "omap_dmm_priv.h"
@@ -120,6 +119,18 @@ static int wait_status(struct refill_engine *engine, 
uint32_t wait_mask)
return 0;
 }

+static void release_engine(struct refill_engine *engine)
+{
+   unsigned long flags;
+
+   spin_lock_irqsave(&list_lock, flags);
+   list_add(&engine->idle_node, &omap_dmm->idle_head);
+   spin_unlock_irqrestore(&list_lock, flags);
+
+   atomic_inc(&omap_dmm->engine_counter);
+   wake_up_interruptible(&omap_dmm->engine_queue);
+}
+
 static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
 {
struct dmm *dmm = arg;
@@ -130,9 +141,13 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
writel(status, dmm->base + DMM_PAT_IRQSTATUS);

for (i = 0; i < dmm->num_engines; i++) {
-   if (status & DMM_IRQSTAT_LST)
+   if (status & DMM_IRQSTAT_LST) {
	wake_up_interruptible(&dmm->engines[i].wait_for_refill);

+   if (&dmm->engines[i].async)
+   release_engine(&dmm->engines[i]);
+   }
+
status >>= 8;
}

@@ -146,17 +161,24 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, 
struct tcm *tcm)
 {
struct dmm_txn *txn = NULL;
struct refill_engine *engine = NULL;
+   int ret;
+   unsigned long flags;
+

-   down(&dmm->engine_sem);
+   /* wait until an engine is available */
+   ret = wait_event_interruptible(omap_dmm->engine_queue,
+   atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
+   if (ret)
+   return ERR_PTR(ret);

/* grab an idle engine */
-   spin_lock(&list_lock);
+   spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
	engine = list_entry(dmm->idle_head.next, struct refill_engine,
	idle_node);
	list_del(&engine->idle_node);
	}
-   spin_unlock(&list_lock);
+   spin_unlock_irqrestore(&list_lock, flags);

BUG_ON(!engine);

@@ -174,7 +196,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct 
tcm *tcm)
  * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
  * corresponding slot is cleared (ie. dummy_pa is programmed)
  */
-static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
struct page **pages, uint32_t npages, uint32_t roll)
 {
dma_addr_t pat_pa = 0;
@@ -208,7 +230,7 @@ static int dmm_txn_append(struct dmm_txn *txn, struct 
pat_area *area,

txn->last_pat = pat;

-   return 0;
+   return;
 }

 /**
@@ -238,6 +260,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
goto cleanup;
}

+   /* mark whether it is async to denote list management in IRQ handler */
+   engine->async = wait ? 0 : 1;
+
/* kick reload */
writel(engine->refill_pa,
dmm->base + reg[PAT_DESCR][engine->id]);
@@ -252,11 +277,10 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
}

 cleanup:
-   spin_lock(_lock);
-   list_add(>idle_node, >idle_head);
-   spin_unlock(_lock);
+   /* only place engine back on list if we are done with 

[PATCH] drm/omap: Fix release of refill engine

2012-10-12 Thread Andy Gross
On 10/12/2012 08:44 AM, Rob Clark wrote:
>
> US);
>
>  for (i = 0; i < dmm->num_engines; i++) {
> -   if (status & DMM_IRQSTAT_LST)
> +   if (status & DMM_IRQSTAT_LST) {
>  
> wake_up_interruptible(>engines[i].wait_for_refill);
>
> +   if (&dmm->engines[i].async)
> Are you sure about that & ?  That looks like a typo, rather than what you 
> want..

Good catch.  I'll fix that, retest, and resubmit.




[PATCH] drm/omap: Fix release of refill engine

2012-10-12 Thread Rob Clark
On Thu, Oct 11, 2012 at 11:08 PM, Andy Gross  wrote:
> During asynchronous refills, we don't wait for the refill to
> finish.  However, we cannot release the engine back to the idle
> list until it has actually completed the refill operation.  The
> engine release will now be done in the IRQ handler, but only
> for asynchronous refill operations.
>
> Synchronous refills will continue to release the engine after they
> unblock from waiting on the refill.
>
> Signed-off-by: Andy Gross 
> ---
>  drivers/staging/omapdrm/omap_dmm_priv.h  |5 ++-
>  drivers/staging/omapdrm/omap_dmm_tiler.c |   77 -
>  2 files changed, 57 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h 
> b/drivers/staging/omapdrm/omap_dmm_priv.h
> index 09ebc50..5ea73305 100644
> --- a/drivers/staging/omapdrm/omap_dmm_priv.h
> +++ b/drivers/staging/omapdrm/omap_dmm_priv.h
> @@ -141,6 +141,8 @@ struct refill_engine {
> /* only one trans per engine for now */
> struct dmm_txn txn;
>
> +   unsigned int async;
> +
> wait_queue_head_t wait_for_refill;
>
> struct list_head idle_node;
> @@ -158,10 +160,11 @@ struct dmm {
> dma_addr_t refill_pa;
>
> /* refill engines */
> -   struct semaphore engine_sem;
> +   wait_queue_head_t engine_queue;
> struct list_head idle_head;
> struct refill_engine *engines;
> int num_engines;
> +   atomic_t engine_counter;
>
> /* container information */
> int container_width;
> diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c 
> b/drivers/staging/omapdrm/omap_dmm_tiler.c
> index 7c19c5c..2fc9218 100644
> --- a/drivers/staging/omapdrm/omap_dmm_tiler.c
> +++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
> @@ -29,7 +29,6 @@
>  #include 
>  #include 
>  #include 
> -#include 
>
>  #include "omap_dmm_tiler.h"
>  #include "omap_dmm_priv.h"
> @@ -120,6 +119,18 @@ static int wait_status(struct refill_engine *engine, 
> uint32_t wait_mask)
> return 0;
>  }
>
> +static void release_engine(struct refill_engine *engine)
> +{
> +   unsigned long flags;
> +
> +   spin_lock_irqsave(_lock, flags);
> +   list_add(>idle_node, _dmm->idle_head);
> +   spin_unlock_irqrestore(_lock, flags);
> +
> +   atomic_inc(_dmm->engine_counter);
> +   wake_up_interruptible(_dmm->engine_queue);
> +}
> +
>  static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
>  {
> struct dmm *dmm = arg;
> @@ -130,9 +141,13 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void 
> *arg)
> writel(status, dmm->base + DMM_PAT_IRQSTATUS);
>
> for (i = 0; i < dmm->num_engines; i++) {
> -   if (status & DMM_IRQSTAT_LST)
> +   if (status & DMM_IRQSTAT_LST) {
> 
> wake_up_interruptible(>engines[i].wait_for_refill);
>
> +   if (&dmm->engines[i].async)

Are you sure about that & ?  That looks like a typo, rather than what you want..

> +   release_engine(>engines[i]);
> +   }
> +
> status >>= 8;
> }
>
> @@ -146,17 +161,24 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, 
> struct tcm *tcm)
>  {
> struct dmm_txn *txn = NULL;
> struct refill_engine *engine = NULL;
> +   int ret;
> +   unsigned long flags;
> +
>
> -   down(>engine_sem);
> +   /* wait until an engine is available */
> +   ret = wait_event_interruptible(omap_dmm->engine_queue,
> +   atomic_add_unless(_dmm->engine_counter, -1, 0));
> +   if (ret)
> +   return ERR_PTR(ret);
>
> /* grab an idle engine */
> -   spin_lock(_lock);
> +   spin_lock_irqsave(_lock, flags);
> if (!list_empty(>idle_head)) {
> engine = list_entry(dmm->idle_head.next, struct refill_engine,
> idle_node);
> list_del(>idle_node);
> }
> -   spin_unlock(_lock);
> +   spin_unlock_irqrestore(_lock, flags);
>
> BUG_ON(!engine);
>
> @@ -174,7 +196,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, 
> struct tcm *tcm)
>   * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
>   * corresponding slot is cleared (ie. dummy_pa is programmed)
>   */
> -static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
> +static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
> struct page **pages, uint32_t npages, uint32_t roll)
>  {
> dma_addr_t pat_pa = 0;
> @@ -208,7 +230,7 @@ static int dmm_txn_append(struct dmm_txn *txn, struct 
> pat_area *area,
>
> txn->last_pat = pat;
>
> -   return 0;
> +   return;
>  }
>
>  /**
> @@ -238,6 +260,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
> goto cleanup;
> }
>
> +   /* mark whether it is async to denote list management in IRQ handler 
> */
> +  

[PATCH] drm/omap: Fix release of refill engine

2012-10-12 Thread Andy Gross
During asynchronous refills, we don't wait for the refill to
finish.  However, we cannot release the engine back to the idle
list until it has actually completed the refill operation.  The
engine release will now be done in the IRQ handler, but only
for asynchronous refill operations.

Synchronous refills will continue to release the engine after they
unblock from waiting on the refill.

Signed-off-by: Andy Gross 
---
 drivers/staging/omapdrm/omap_dmm_priv.h  |5 ++-
 drivers/staging/omapdrm/omap_dmm_tiler.c |   77 -
 2 files changed, 57 insertions(+), 25 deletions(-)

diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h 
b/drivers/staging/omapdrm/omap_dmm_priv.h
index 09ebc50..5ea73305 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -141,6 +141,8 @@ struct refill_engine {
/* only one trans per engine for now */
struct dmm_txn txn;

+   unsigned int async;
+
wait_queue_head_t wait_for_refill;

struct list_head idle_node;
@@ -158,10 +160,11 @@ struct dmm {
dma_addr_t refill_pa;

/* refill engines */
-   struct semaphore engine_sem;
+   wait_queue_head_t engine_queue;
struct list_head idle_head;
struct refill_engine *engines;
int num_engines;
+   atomic_t engine_counter;

/* container information */
int container_width;
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c 
b/drivers/staging/omapdrm/omap_dmm_tiler.c
index 7c19c5c..2fc9218 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -29,7 +29,6 @@
 #include 
 #include 
 #include 
-#include 

 #include "omap_dmm_tiler.h"
 #include "omap_dmm_priv.h"
@@ -120,6 +119,18 @@ static int wait_status(struct refill_engine *engine, 
uint32_t wait_mask)
return 0;
 }

+static void release_engine(struct refill_engine *engine)
+{
+   unsigned long flags;
+
+   spin_lock_irqsave(_lock, flags);
+   list_add(>idle_node, _dmm->idle_head);
+   spin_unlock_irqrestore(_lock, flags);
+
+   atomic_inc(_dmm->engine_counter);
+   wake_up_interruptible(_dmm->engine_queue);
+}
+
 static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
 {
struct dmm *dmm = arg;
@@ -130,9 +141,13 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
writel(status, dmm->base + DMM_PAT_IRQSTATUS);

for (i = 0; i < dmm->num_engines; i++) {
-   if (status & DMM_IRQSTAT_LST)
+   if (status & DMM_IRQSTAT_LST) {
	wake_up_interruptible(&dmm->engines[i].wait_for_refill);

+   if (&dmm->engines[i].async)
+   release_engine(&dmm->engines[i]);
+   }
+
status >>= 8;
}

@@ -146,17 +161,24 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, 
struct tcm *tcm)
 {
struct dmm_txn *txn = NULL;
struct refill_engine *engine = NULL;
+   int ret;
+   unsigned long flags;
+

-   down(>engine_sem);
+   /* wait until an engine is available */
+   ret = wait_event_interruptible(omap_dmm->engine_queue,
+   atomic_add_unless(_dmm->engine_counter, -1, 0));
+   if (ret)
+   return ERR_PTR(ret);

/* grab an idle engine */
-   spin_lock(_lock);
+   spin_lock_irqsave(_lock, flags);
if (!list_empty(>idle_head)) {
engine = list_entry(dmm->idle_head.next, struct refill_engine,
idle_node);
list_del(>idle_node);
}
-   spin_unlock(_lock);
+   spin_unlock_irqrestore(_lock, flags);

BUG_ON(!engine);

@@ -174,7 +196,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct 
tcm *tcm)
  * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
  * corresponding slot is cleared (ie. dummy_pa is programmed)
  */
-static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
struct page **pages, uint32_t npages, uint32_t roll)
 {
dma_addr_t pat_pa = 0;
@@ -208,7 +230,7 @@ static int dmm_txn_append(struct dmm_txn *txn, struct 
pat_area *area,

txn->last_pat = pat;

-   return 0;
+   return;
 }

 /**
@@ -238,6 +260,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
goto cleanup;
}

+   /* mark whether it is async to denote list management in IRQ handler */
+   engine->async = wait ? 0 : 1;
+
/* kick reload */
writel(engine->refill_pa,
dmm->base + reg[PAT_DESCR][engine->id]);
@@ -252,11 +277,10 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
}

 cleanup:
-   spin_lock(_lock);
-   list_add(>idle_node, >idle_head);
-   spin_unlock(_lock);
+   /* only place engine back on list if we are done with 

Re: [PATCH] drm/omap: Fix release of refill engine

2012-10-12 Thread Rob Clark
On Thu, Oct 11, 2012 at 11:08 PM, Andy Gross andy.gr...@ti.com wrote:
 During asynchronous refills, we don't wait for the refill to
 finish.  However, we cannot release the engine back to the idle
 list until it has actually completed the refill operation.  The
 engine release will now be done in the IRQ handler, but only
 for asynchronous refill operations.

 Synchronous refills will continue to release the engine after they
 unblock from waiting on the refill.

 Signed-off-by: Andy Gross andy.gr...@ti.com
 ---
  drivers/staging/omapdrm/omap_dmm_priv.h  |5 ++-
  drivers/staging/omapdrm/omap_dmm_tiler.c |   77 -
  2 files changed, 57 insertions(+), 25 deletions(-)

 diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h 
 b/drivers/staging/omapdrm/omap_dmm_priv.h
 index 09ebc50..5ea73305 100644
 --- a/drivers/staging/omapdrm/omap_dmm_priv.h
 +++ b/drivers/staging/omapdrm/omap_dmm_priv.h
 @@ -141,6 +141,8 @@ struct refill_engine {
 /* only one trans per engine for now */
 struct dmm_txn txn;

 +   unsigned int async;
 +
 wait_queue_head_t wait_for_refill;

 struct list_head idle_node;
 @@ -158,10 +160,11 @@ struct dmm {
 dma_addr_t refill_pa;

 /* refill engines */
 -   struct semaphore engine_sem;
 +   wait_queue_head_t engine_queue;
 struct list_head idle_head;
 struct refill_engine *engines;
 int num_engines;
 +   atomic_t engine_counter;

 /* container information */
 int container_width;
 diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c 
 b/drivers/staging/omapdrm/omap_dmm_tiler.c
 index 7c19c5c..2fc9218 100644
 --- a/drivers/staging/omapdrm/omap_dmm_tiler.c
 +++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
 @@ -29,7 +29,6 @@
  #include linux/mm.h
  #include linux/time.h
  #include linux/list.h
 -#include linux/semaphore.h

  #include omap_dmm_tiler.h
  #include omap_dmm_priv.h
 @@ -120,6 +119,18 @@ static int wait_status(struct refill_engine *engine, 
 uint32_t wait_mask)
 return 0;
  }

 +static void release_engine(struct refill_engine *engine)
 +{
 +   unsigned long flags;
 +
 +   spin_lock_irqsave(&list_lock, flags);
 +   list_add(&engine->idle_node, &omap_dmm->idle_head);
 +   spin_unlock_irqrestore(&list_lock, flags);
 +
 +   atomic_inc(&omap_dmm->engine_counter);
 +   wake_up_interruptible(&omap_dmm->engine_queue);
 +}
 +
  static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
  {
 struct dmm *dmm = arg;
 @@ -130,9 +141,13 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void 
 *arg)
 writel(status, dmm-base + DMM_PAT_IRQSTATUS);

 for (i = 0; i < dmm->num_engines; i++) {
 -   if (status & DMM_IRQSTAT_LST)
 +   if (status & DMM_IRQSTAT_LST) {
  
 wake_up_interruptible(&dmm->engines[i].wait_for_refill);

 +   if (&dmm->engines[i].async)

Are you sure about that & ?  That looks like a typo, rather than what you want..

 +   release_engine(&dmm->engines[i]);
 +   }
 +
 status >>= 8;
 }

 @@ -146,17 +161,24 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, 
 struct tcm *tcm)
  {
 struct dmm_txn *txn = NULL;
 struct refill_engine *engine = NULL;
 +   int ret;
 +   unsigned long flags;
 +

 -   down(dmm-engine_sem);
 +   /* wait until an engine is available */
 +   ret = wait_event_interruptible(omap_dmm-engine_queue,
 +   atomic_add_unless(omap_dmm-engine_counter, -1, 0));
 +   if (ret)
 +   return ERR_PTR(ret);

 /* grab an idle engine */
 -   spin_lock(list_lock);
 +   spin_lock_irqsave(list_lock, flags);
 if (!list_empty(dmm-idle_head)) {
 engine = list_entry(dmm-idle_head.next, struct refill_engine,
 idle_node);
 list_del(engine-idle_node);
 }
 -   spin_unlock(list_lock);
 +   spin_unlock_irqrestore(list_lock, flags);

 BUG_ON(!engine);

 @@ -174,7 +196,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, 
 struct tcm *tcm)
   * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
   * corresponding slot is cleared (ie. dummy_pa is programmed)
   */
 -static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
 +static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
 struct page **pages, uint32_t npages, uint32_t roll)
  {
 dma_addr_t pat_pa = 0;
 @@ -208,7 +230,7 @@ static int dmm_txn_append(struct dmm_txn *txn, struct 
 pat_area *area,

 txn-last_pat = pat;

 -   return 0;
 +   return;
  }

  /**
 @@ -238,6 +260,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 goto cleanup;
 }

 +   /* mark whether it is async to denote list management in IRQ handler 
 */
 +   

Re: [PATCH] drm/omap: Fix release of refill engine

2012-10-12 Thread Andy Gross

On 10/12/2012 08:44 AM, Rob Clark wrote:


US);

 for (i = 0; i  dmm-num_engines; i++) {
-   if (status  DMM_IRQSTAT_LST)
+   if (status  DMM_IRQSTAT_LST) {
 
wake_up_interruptible(dmm-engines[i].wait_for_refill);

+   if (&dmm->engines[i].async)
Are you sure about that & ?  That looks like a typo, rather than what you want..


Good catch.  I'll fix that, retest, and resubmit.


___
dri-devel mailing list
dri-devel@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH] drm/omap: Fix release of refill engine

2012-10-12 Thread Andy Gross
During asynchronous refills, we don't wait for the refill to
finish.  However, we cannot release the engine back to the idle
list until it has actually completed the refill operation.  The
engine release will now be done in the IRQ handler, but only
for asynchronous refill operations.

Synchronous refills will continue to release the engine after they
unblock from waiting on the refill.

Signed-off-by: Andy Gross andy.gr...@ti.com
---
 drivers/staging/omapdrm/omap_dmm_priv.h  |5 ++-
 drivers/staging/omapdrm/omap_dmm_tiler.c |   77 -
 2 files changed, 57 insertions(+), 25 deletions(-)

diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h 
b/drivers/staging/omapdrm/omap_dmm_priv.h
index 09ebc50..5ea73305 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -141,6 +141,8 @@ struct refill_engine {
/* only one trans per engine for now */
struct dmm_txn txn;
 
+   unsigned int async;
+
wait_queue_head_t wait_for_refill;
 
struct list_head idle_node;
@@ -158,10 +160,11 @@ struct dmm {
dma_addr_t refill_pa;
 
/* refill engines */
-   struct semaphore engine_sem;
+   wait_queue_head_t engine_queue;
struct list_head idle_head;
struct refill_engine *engines;
int num_engines;
+   atomic_t engine_counter;
 
/* container information */
int container_width;
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c 
b/drivers/staging/omapdrm/omap_dmm_tiler.c
index fda9efc..eda2fce 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -29,7 +29,6 @@
 #include linux/mm.h
 #include linux/time.h
 #include linux/list.h
-#include linux/semaphore.h
 
 #include omap_dmm_tiler.h
 #include omap_dmm_priv.h
@@ -120,6 +119,18 @@ static int wait_status(struct refill_engine *engine, 
uint32_t wait_mask)
return 0;
 }
 
+static void release_engine(struct refill_engine *engine)
+{
+   unsigned long flags;
+
+   spin_lock_irqsave(list_lock, flags);
+   list_add(engine-idle_node, omap_dmm-idle_head);
+   spin_unlock_irqrestore(list_lock, flags);
+
+   atomic_inc(omap_dmm-engine_counter);
+   wake_up_interruptible(omap_dmm-engine_queue);
+}
+
 static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
 {
struct dmm *dmm = arg;
@@ -130,9 +141,13 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
writel(status, dmm-base + DMM_PAT_IRQSTATUS);
 
for (i = 0; i  dmm-num_engines; i++) {
-   if (status & DMM_IRQSTAT_LST)
+   if (status & DMM_IRQSTAT_LST) {
	wake_up_interruptible(&dmm->engines[i].wait_for_refill);
 
+   if (&dmm->engines[i].async)
+   release_engine(&dmm->engines[i]);
+   }
+
	status >>= 8;
}
 
@@ -146,17 +161,24 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, 
struct tcm *tcm)
 {
struct dmm_txn *txn = NULL;
struct refill_engine *engine = NULL;
+   int ret;
+   unsigned long flags;
+
 
-   down(dmm-engine_sem);
+   /* wait until an engine is available */
+   ret = wait_event_interruptible(omap_dmm-engine_queue,
+   atomic_add_unless(omap_dmm-engine_counter, -1, 0));
+   if (ret)
+   return ERR_PTR(ret);
 
/* grab an idle engine */
-   spin_lock(list_lock);
+   spin_lock_irqsave(list_lock, flags);
if (!list_empty(dmm-idle_head)) {
engine = list_entry(dmm-idle_head.next, struct refill_engine,
idle_node);
list_del(engine-idle_node);
}
-   spin_unlock(list_lock);
+   spin_unlock_irqrestore(list_lock, flags);
 
BUG_ON(!engine);
 
@@ -174,7 +196,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct 
tcm *tcm)
  * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
  * corresponding slot is cleared (ie. dummy_pa is programmed)
  */
-static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
struct page **pages, uint32_t npages, uint32_t roll)
 {
dma_addr_t pat_pa = 0;
@@ -208,7 +230,7 @@ static int dmm_txn_append(struct dmm_txn *txn, struct 
pat_area *area,
 
txn-last_pat = pat;
 
-   return 0;
+   return;
 }
 
 /**
@@ -238,6 +260,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
goto cleanup;
}
 
+   /* mark whether it is async to denote list management in IRQ handler */
+   engine-async = wait ? 0 : 1;
+
/* kick reload */
writel(engine-refill_pa,
dmm-base + reg[PAT_DESCR][engine-id]);
@@ -252,11 +277,10 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
}
 
 cleanup:
-   

Re: [PATCH] drm/omap: Fix release of refill engine

2012-10-12 Thread Andy Gross

Please disregard this mail.  Wrong patch sent

On 10/12/2012 10:56 AM, Andy Gross wrote:

During asynchronous refills, we don't wait for the refill to
finish.  However, we cannot release the engine back to the idle
list until it has actually completed the refill operation.  The
engine release will now be done in the IRQ handler, but only
for asynchronous refill operations.

Synchronous refills will continue to release the engine after they
unblock from waiting on the refill.

Signed-off-by: Andy Gross andy.gr...@ti.com
---
  drivers/staging/omapdrm/omap_dmm_priv.h  |5 ++-
  drivers/staging/omapdrm/omap_dmm_tiler.c |   77 -
  2 files changed, 57 insertions(+), 25 deletions(-)

diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h 
b/drivers/staging/omapdrm/omap_dmm_priv.h
index 09ebc50..5ea73305 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -141,6 +141,8 @@ struct refill_engine {
/* only one trans per engine for now */
struct dmm_txn txn;
  
+	unsigned int async;

+
wait_queue_head_t wait_for_refill;
  
  	struct list_head idle_node;

@@ -158,10 +160,11 @@ struct dmm {
dma_addr_t refill_pa;
  
  	/* refill engines */

-   struct semaphore engine_sem;
+   wait_queue_head_t engine_queue;
struct list_head idle_head;
struct refill_engine *engines;
int num_engines;
+   atomic_t engine_counter;
  
  	/* container information */

int container_width;
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c 
b/drivers/staging/omapdrm/omap_dmm_tiler.c
index fda9efc..eda2fce 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -29,7 +29,6 @@
  #include linux/mm.h
  #include linux/time.h
  #include linux/list.h
-#include linux/semaphore.h
  
  #include omap_dmm_tiler.h

  #include omap_dmm_priv.h
@@ -120,6 +119,18 @@ static int wait_status(struct refill_engine *engine, 
uint32_t wait_mask)
return 0;
  }
  
+static void release_engine(struct refill_engine *engine)

+{
+   unsigned long flags;
+
+   spin_lock_irqsave(list_lock, flags);
+   list_add(engine-idle_node, omap_dmm-idle_head);
+   spin_unlock_irqrestore(list_lock, flags);
+
+   atomic_inc(omap_dmm-engine_counter);
+   wake_up_interruptible(omap_dmm-engine_queue);
+}
+
  static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
  {
struct dmm *dmm = arg;
@@ -130,9 +141,13 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
writel(status, dmm-base + DMM_PAT_IRQSTATUS);
  
  	for (i = 0; i  dmm-num_engines; i++) {

-   if (status  DMM_IRQSTAT_LST)
+   if (status  DMM_IRQSTAT_LST) {
wake_up_interruptible(dmm-engines[i].wait_for_refill);
  
+			if (dmm-engines[i].async)

+   release_engine(dmm-engines[i]);
+   }
+
status = 8;
}
  
@@ -146,17 +161,24 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)

  {
struct dmm_txn *txn = NULL;
struct refill_engine *engine = NULL;
+   int ret;
+   unsigned long flags;
+
  
-	down(dmm-engine_sem);

+   /* wait until an engine is available */
+   ret = wait_event_interruptible(omap_dmm-engine_queue,
+   atomic_add_unless(omap_dmm-engine_counter, -1, 0));
+   if (ret)
+   return ERR_PTR(ret);
  
  	/* grab an idle engine */

-   spin_lock(list_lock);
+   spin_lock_irqsave(list_lock, flags);
if (!list_empty(dmm-idle_head)) {
engine = list_entry(dmm-idle_head.next, struct refill_engine,
idle_node);
list_del(engine-idle_node);
}
-   spin_unlock(list_lock);
+   spin_unlock_irqrestore(list_lock, flags);
  
  	BUG_ON(!engine);
  
@@ -174,7 +196,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)

   * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
   * corresponding slot is cleared (ie. dummy_pa is programmed)
   */
-static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
struct page **pages, uint32_t npages, uint32_t roll)
  {
dma_addr_t pat_pa = 0;
@@ -208,7 +230,7 @@ static int dmm_txn_append(struct dmm_txn *txn, struct 
pat_area *area,
  
 	txn->last_pat = pat;
 
-	return 0;
+	return;
 }
  
  /**

@@ -238,6 +260,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
goto cleanup;
}
  
+	/* mark whether it is async to denote list management in IRQ handler */
+	engine->async = wait ? 0 : 1;
+
	/* kick reload */
	writel(engine->refill_pa,
		dmm->base + reg[PAT_DESCR][engine->id]);
@@ -252,11 +277,10 @@ static int dmm_txn_commit(struct dmm_txn *txn, 

[PATCH] drm/omap: Fix release of refill engine

2012-10-11 Thread Andy Gross
During asynchronous refills, we don't wait for the refill to
finish.  However, we cannot release the engine back to the idle
list until it has actually completed the refill operation.  The
engine release will now be done in the IRQ handler, but only
for asynchronous refill operations.

Synchronous refills will continue to release the engine after they
unblock from waiting on the refill.

Signed-off-by: Andy Gross andy.gr...@ti.com
---
 drivers/staging/omapdrm/omap_dmm_priv.h  |5 ++-
 drivers/staging/omapdrm/omap_dmm_tiler.c |   77 -
 2 files changed, 57 insertions(+), 25 deletions(-)

diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h 
b/drivers/staging/omapdrm/omap_dmm_priv.h
index 09ebc50..5ea73305 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -141,6 +141,8 @@ struct refill_engine {
/* only one trans per engine for now */
struct dmm_txn txn;
 
+   unsigned int async;
+
wait_queue_head_t wait_for_refill;
 
struct list_head idle_node;
@@ -158,10 +160,11 @@ struct dmm {
dma_addr_t refill_pa;
 
/* refill engines */
-   struct semaphore engine_sem;
+   wait_queue_head_t engine_queue;
struct list_head idle_head;
struct refill_engine *engines;
int num_engines;
+   atomic_t engine_counter;
 
/* container information */
int container_width;
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c 
b/drivers/staging/omapdrm/omap_dmm_tiler.c
index 7c19c5c..2fc9218 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -29,7 +29,6 @@
 #include <linux/mm.h>
 #include <linux/time.h>
 #include <linux/list.h>
-#include <linux/semaphore.h>
 
 #include "omap_dmm_tiler.h"
 #include "omap_dmm_priv.h"
@@ -120,6 +119,18 @@ static int wait_status(struct refill_engine *engine, 
uint32_t wait_mask)
return 0;
 }
 
+static void release_engine(struct refill_engine *engine)
+{
+   unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_add(&engine->idle_node, &omap_dmm->idle_head);
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	atomic_inc(&omap_dmm->engine_counter);
+	wake_up_interruptible(&omap_dmm->engine_queue);
+}
+
 static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
 {
struct dmm *dmm = arg;
@@ -130,9 +141,13 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
	writel(status, dmm->base + DMM_PAT_IRQSTATUS);
 
	for (i = 0; i < dmm->num_engines; i++) {
-		if (status & DMM_IRQSTAT_LST)
+		if (status & DMM_IRQSTAT_LST) {
			wake_up_interruptible(&dmm->engines[i].wait_for_refill);
 
+			if (dmm->engines[i].async)
+				release_engine(&dmm->engines[i]);
+		}
+
		status >>= 8;
	}
}
 
@@ -146,17 +161,24 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, 
struct tcm *tcm)
 {
struct dmm_txn *txn = NULL;
struct refill_engine *engine = NULL;
+   int ret;
+   unsigned long flags;
+
 
-	down(&dmm->engine_sem);
+	/* wait until an engine is available */
+	ret = wait_event_interruptible(omap_dmm->engine_queue,
+			atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
+	if (ret)
+		return ERR_PTR(ret);
 
	/* grab an idle engine */
-	spin_lock(&list_lock);
+	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
				idle_node);
		list_del(&engine->idle_node);
	}
-	spin_unlock(&list_lock);
+	spin_unlock_irqrestore(&list_lock, flags);
 
BUG_ON(!engine);
 
@@ -174,7 +196,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct 
tcm *tcm)
  * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
  * corresponding slot is cleared (ie. dummy_pa is programmed)
  */
-static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
struct page **pages, uint32_t npages, uint32_t roll)
 {
dma_addr_t pat_pa = 0;
@@ -208,7 +230,7 @@ static int dmm_txn_append(struct dmm_txn *txn, struct 
pat_area *area,
 
	txn->last_pat = pat;
 
-	return 0;
+	return;
 }
 
 /**
@@ -238,6 +260,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
goto cleanup;
}
 
+	/* mark whether it is async to denote list management in IRQ handler */
+	engine->async = wait ? 0 : 1;
+
	/* kick reload */
	writel(engine->refill_pa,
		dmm->base + reg[PAT_DESCR][engine->id]);
@@ -252,11 +277,10 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
}
 
 cleanup:
-