[PATCH] staging: omapdrm: Fix error paths during dmm init
On Thu, May 24, 2012 at 10:43 AM, Andy Gross wrote:
> Failures during the dmm probe can cause the kernel to crash.  Moved
> the spinlock to a global and moved list initializations immediately
> after the allocation of the dmm private structure.
>
> Signed-off-by: Andy Gross

Reviewed-by: Rob Clark

> ---
>  drivers/staging/omapdrm/omap_dmm_priv.h  |    1 -
>  drivers/staging/omapdrm/omap_dmm_tiler.c |   44 -
>  2 files changed, 24 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/staging/omapdrm/omap_dmm_priv.h
> index 2f529ab..08b22e9 100644
> --- a/drivers/staging/omapdrm/omap_dmm_priv.h
> +++ b/drivers/staging/omapdrm/omap_dmm_priv.h
> @@ -181,7 +181,6 @@ struct dmm {
>
>         /* allocation list and lock */
>         struct list_head alloc_head;
> -       spinlock_t list_lock;
>  };
>
>  #endif
> diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
> index 1ecb6a7..3bc715d 100644
> --- a/drivers/staging/omapdrm/omap_dmm_tiler.c
> +++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
> @@ -40,6 +40,9 @@
>  static struct tcm *containers[TILFMT_NFORMATS];
>  static struct dmm *omap_dmm;
>
> +/* global spinlock for protecting lists */
> +static DEFINE_SPINLOCK(list_lock);
> +
>  /* Geometry table */
>  #define GEOM(xshift, yshift, bytes_per_pixel) { \
>                 .x_shft = (xshift), \
> @@ -147,13 +150,13 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
>         down(&dmm->engine_sem);
>
>         /* grab an idle engine */
> -       spin_lock(&dmm->list_lock);
> +       spin_lock(&list_lock);
>         if (!list_empty(&dmm->idle_head)) {
>                 engine = list_entry(dmm->idle_head.next, struct refill_engine,
>                                         idle_node);
>                 list_del(&engine->idle_node);
>         }
> -       spin_unlock(&dmm->list_lock);
> +       spin_unlock(&list_lock);
>
>         BUG_ON(!engine);
>
> @@ -256,9 +259,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
>         }
>
>  cleanup:
> -       spin_lock(&dmm->list_lock);
> +       spin_lock(&list_lock);
>         list_add(&engine->idle_node, &dmm->idle_head);
> -       spin_unlock(&dmm->list_lock);
> +       spin_unlock(&list_lock);
>
>         up(&omap_dmm->engine_sem);
>         return ret;
> @@ -351,9 +354,9 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
>         }
>
>         /* add to allocation list */
> -       spin_lock(&omap_dmm->list_lock);
> +       spin_lock(&list_lock);
>         list_add(&block->alloc_node, &omap_dmm->alloc_head);
> -       spin_unlock(&omap_dmm->list_lock);
> +       spin_unlock(&list_lock);
>
>         return block;
>  }
> @@ -374,9 +377,9 @@ struct tiler_block *tiler_reserve_1d(size_t size)
>                 return 0;
>         }
>
> -       spin_lock(&omap_dmm->list_lock);
> +       spin_lock(&list_lock);
>         list_add(&block->alloc_node, &omap_dmm->alloc_head);
> -       spin_unlock(&omap_dmm->list_lock);
> +       spin_unlock(&list_lock);
>
>         return block;
>  }
> @@ -389,9 +392,9 @@ int tiler_release(struct tiler_block *block)
>         if (block->area.tcm)
>                 dev_err(omap_dmm->dev, "failed to release block\n");
>
> -       spin_lock(&omap_dmm->list_lock);
> +       spin_lock(&list_lock);
>         list_del(&block->alloc_node);
> -       spin_unlock(&omap_dmm->list_lock);
> +       spin_unlock(&list_lock);
>
>         kfree(block);
>         return ret;
> @@ -479,13 +482,13 @@ static int omap_dmm_remove(struct platform_device *dev)
>
>         if (omap_dmm) {
>                 /* free all area regions */
> -               spin_lock(&omap_dmm->list_lock);
> +               spin_lock(&list_lock);
>                 list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
>                                         alloc_node) {
>                         list_del(&block->alloc_node);
>                         kfree(block);
>                 }
> -               spin_unlock(&omap_dmm->list_lock);
> +               spin_unlock(&list_lock);
>
>                 for (i = 0; i < omap_dmm->num_lut; i++)
>                         if (omap_dmm->tcm && omap_dmm->tcm[i])
> @@ -503,7 +506,7 @@ static int omap_dmm_remove(struct platform_device *dev)
>
>                 vfree(omap_dmm->lut);
>
> -               if (omap_dmm->irq != -1)
> +               if (omap_dmm->irq > 0)
>                         free_irq(omap_dmm->irq, omap_dmm);
>
>                 iounmap(omap_dmm->base);
> @@ -527,6 +530,10 @@ static int omap_dmm_probe(struct platform_device *dev)
>                 goto fail;
>         }
>
> +       /* initialize lists */
> +       INIT_LIST_HEAD(&omap_dmm->alloc_head);
> +       INIT_LIST_HEAD(&omap_dmm->idle_head);
> +
>         /* lookup hwmod data - base address and irq */
>         mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
>         if (!mem) {
> @@ -629,7 +636,6 @@ static int omap_dmm_probe(struct platform_device *dev)
>         }
>
>         sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
> -       INIT_LIST_HEAD(&omap_dmm->idle_head);
>         for (i = 0; i < omap_dmm->num_engines; i++) {
[PATCH] staging: omapdrm: Fix error paths during dmm init
Failures during the dmm probe can cause the kernel to crash.  Moved the
spinlock to a global and moved list initializations immediately after the
allocation of the dmm private structure.

Signed-off-by: Andy Gross
---
 drivers/staging/omapdrm/omap_dmm_priv.h  |    1 -
 drivers/staging/omapdrm/omap_dmm_tiler.c |   44 -
 2 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/staging/omapdrm/omap_dmm_priv.h
index 2f529ab..08b22e9 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -181,7 +181,6 @@ struct dmm {

        /* allocation list and lock */
        struct list_head alloc_head;
-       spinlock_t list_lock;
 };

 #endif
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
index 1ecb6a7..3bc715d 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -40,6 +40,9 @@
 static struct tcm *containers[TILFMT_NFORMATS];
 static struct dmm *omap_dmm;

+/* global spinlock for protecting lists */
+static DEFINE_SPINLOCK(list_lock);
+
 /* Geometry table */
 #define GEOM(xshift, yshift, bytes_per_pixel) { \
                .x_shft = (xshift), \
@@ -147,13 +150,13 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
        down(&dmm->engine_sem);

        /* grab an idle engine */
-       spin_lock(&dmm->list_lock);
+       spin_lock(&list_lock);
        if (!list_empty(&dmm->idle_head)) {
                engine = list_entry(dmm->idle_head.next, struct refill_engine,
                                        idle_node);
                list_del(&engine->idle_node);
        }
-       spin_unlock(&dmm->list_lock);
+       spin_unlock(&list_lock);

        BUG_ON(!engine);

@@ -256,9 +259,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
        }

 cleanup:
-       spin_lock(&dmm->list_lock);
+       spin_lock(&list_lock);
        list_add(&engine->idle_node, &dmm->idle_head);
-       spin_unlock(&dmm->list_lock);
+       spin_unlock(&list_lock);

        up(&omap_dmm->engine_sem);
        return ret;
@@ -351,9 +354,9 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
        }

        /* add to allocation list */
-       spin_lock(&omap_dmm->list_lock);
+       spin_lock(&list_lock);
        list_add(&block->alloc_node, &omap_dmm->alloc_head);
-       spin_unlock(&omap_dmm->list_lock);
+       spin_unlock(&list_lock);

        return block;
 }
@@ -374,9 +377,9 @@ struct tiler_block *tiler_reserve_1d(size_t size)
                return 0;
        }

-       spin_lock(&omap_dmm->list_lock);
+       spin_lock(&list_lock);
        list_add(&block->alloc_node, &omap_dmm->alloc_head);
-       spin_unlock(&omap_dmm->list_lock);
+       spin_unlock(&list_lock);

        return block;
 }
@@ -389,9 +392,9 @@ int tiler_release(struct tiler_block *block)
        if (block->area.tcm)
                dev_err(omap_dmm->dev, "failed to release block\n");

-       spin_lock(&omap_dmm->list_lock);
+       spin_lock(&list_lock);
        list_del(&block->alloc_node);
-       spin_unlock(&omap_dmm->list_lock);
+       spin_unlock(&list_lock);

        kfree(block);
        return ret;
@@ -479,13 +482,13 @@ static int omap_dmm_remove(struct platform_device *dev)

        if (omap_dmm) {
                /* free all area regions */
-               spin_lock(&omap_dmm->list_lock);
+               spin_lock(&list_lock);
                list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
                                        alloc_node) {
                        list_del(&block->alloc_node);
                        kfree(block);
                }
-               spin_unlock(&omap_dmm->list_lock);
+               spin_unlock(&list_lock);

                for (i = 0; i < omap_dmm->num_lut; i++)
                        if (omap_dmm->tcm && omap_dmm->tcm[i])
@@ -503,7 +506,7 @@ static int omap_dmm_remove(struct platform_device *dev)

                vfree(omap_dmm->lut);

-               if (omap_dmm->irq != -1)
+               if (omap_dmm->irq > 0)
                        free_irq(omap_dmm->irq, omap_dmm);

                iounmap(omap_dmm->base);
@@ -527,6 +530,10 @@ static int omap_dmm_probe(struct platform_device *dev)
                goto fail;
        }

+       /* initialize lists */
+       INIT_LIST_HEAD(&omap_dmm->alloc_head);
+       INIT_LIST_HEAD(&omap_dmm->idle_head);
+
        /* lookup hwmod data - base address and irq */
        mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
        if (!mem) {
@@ -629,7 +636,6 @@ static int omap_dmm_probe(struct platform_device *dev)
        }

        sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
-       INIT_LIST_HEAD(&omap_dmm->idle_head);
        for (i = 0; i < omap_dmm->num_engines; i++) {
                omap_dmm->engines[i].id = i;
                omap_dmm->engines[i].dmm = omap_dmm;
@@ -672,9 +678,6 @@ static int omap_dmm_probe(struct platform_device *dev)
        containers[TILFMT_32BIT] = omap_dmm->tcm[0];
        containers[TILFMT_PAGE] = omap_dmm->tcm[0];
-
[PATCH] staging: omapdrm: Fix error paths during dmm init
Failures during the dmm probe can cause the kernel to crash.  Moved the
spinlock to a global and moved list initializations immediately after the
allocation of the dmm private structure.

Signed-off-by: Andy Gross andy.gr...@ti.com
---
 drivers/staging/omapdrm/omap_dmm_priv.h  |    1 -
 drivers/staging/omapdrm/omap_dmm_tiler.c |   44 -
 2 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/staging/omapdrm/omap_dmm_priv.h
index 2f529ab..08b22e9 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -181,7 +181,6 @@ struct dmm {

        /* allocation list and lock */
        struct list_head alloc_head;
-       spinlock_t list_lock;
 };

 #endif
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
index 1ecb6a7..3bc715d 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -40,6 +40,9 @@
 static struct tcm *containers[TILFMT_NFORMATS];
 static struct dmm *omap_dmm;

+/* global spinlock for protecting lists */
+static DEFINE_SPINLOCK(list_lock);
+
 /* Geometry table */
 #define GEOM(xshift, yshift, bytes_per_pixel) { \
                .x_shft = (xshift), \
@@ -147,13 +150,13 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
        down(&dmm->engine_sem);

        /* grab an idle engine */
-       spin_lock(&dmm->list_lock);
+       spin_lock(&list_lock);
        if (!list_empty(&dmm->idle_head)) {
                engine = list_entry(dmm->idle_head.next, struct refill_engine,
                                        idle_node);
                list_del(&engine->idle_node);
        }
-       spin_unlock(&dmm->list_lock);
+       spin_unlock(&list_lock);

        BUG_ON(!engine);

@@ -256,9 +259,9 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
        }

 cleanup:
-       spin_lock(&dmm->list_lock);
+       spin_lock(&list_lock);
        list_add(&engine->idle_node, &dmm->idle_head);
-       spin_unlock(&dmm->list_lock);
+       spin_unlock(&list_lock);

        up(&omap_dmm->engine_sem);
        return ret;
@@ -351,9 +354,9 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
        }

        /* add to allocation list */
-       spin_lock(&omap_dmm->list_lock);
+       spin_lock(&list_lock);
        list_add(&block->alloc_node, &omap_dmm->alloc_head);
-       spin_unlock(&omap_dmm->list_lock);
+       spin_unlock(&list_lock);

        return block;
 }
@@ -374,9 +377,9 @@ struct tiler_block *tiler_reserve_1d(size_t size)
                return 0;
        }

-       spin_lock(&omap_dmm->list_lock);
+       spin_lock(&list_lock);
        list_add(&block->alloc_node, &omap_dmm->alloc_head);
-       spin_unlock(&omap_dmm->list_lock);
+       spin_unlock(&list_lock);

        return block;
 }
@@ -389,9 +392,9 @@ int tiler_release(struct tiler_block *block)
        if (block->area.tcm)
                dev_err(omap_dmm->dev, "failed to release block\n");

-       spin_lock(&omap_dmm->list_lock);
+       spin_lock(&list_lock);
        list_del(&block->alloc_node);
-       spin_unlock(&omap_dmm->list_lock);
+       spin_unlock(&list_lock);

        kfree(block);
        return ret;
@@ -479,13 +482,13 @@ static int omap_dmm_remove(struct platform_device *dev)

        if (omap_dmm) {
                /* free all area regions */
-               spin_lock(&omap_dmm->list_lock);
+               spin_lock(&list_lock);
                list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
                                        alloc_node) {
                        list_del(&block->alloc_node);
                        kfree(block);
                }
-               spin_unlock(&omap_dmm->list_lock);
+               spin_unlock(&list_lock);

                for (i = 0; i < omap_dmm->num_lut; i++)
                        if (omap_dmm->tcm && omap_dmm->tcm[i])
@@ -503,7 +506,7 @@ static int omap_dmm_remove(struct platform_device *dev)

                vfree(omap_dmm->lut);

-               if (omap_dmm->irq != -1)
+               if (omap_dmm->irq > 0)
                        free_irq(omap_dmm->irq, omap_dmm);

                iounmap(omap_dmm->base);
@@ -527,6 +530,10 @@ static int omap_dmm_probe(struct platform_device *dev)
                goto fail;
        }

+       /* initialize lists */
+       INIT_LIST_HEAD(&omap_dmm->alloc_head);
+       INIT_LIST_HEAD(&omap_dmm->idle_head);
+
        /* lookup hwmod data - base address and irq */
        mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
        if (!mem) {
@@ -629,7 +636,6 @@ static int omap_dmm_probe(struct platform_device *dev)
        }

        sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
-       INIT_LIST_HEAD(&omap_dmm->idle_head);
        for (i = 0; i < omap_dmm->num_engines; i++) {
                omap_dmm->engines[i].id = i;
                omap_dmm->engines[i].dmm = omap_dmm;
@@ -672,9 +678,6 @@ static int omap_dmm_probe(struct platform_device *dev)