Re: [Nouveau] [PATCH] drm/nouveau: gk20a: Turn instmem lock into mutex

2017-02-24 Thread Ben Skeggs
On 02/24/2017 05:25 PM, Alexandre Courbot wrote:
> On 02/24/2017 01:20 AM, Thierry Reding wrote:
>>
>> On Mon, Jan 30, 2017 at 09:03:07PM +0100, Thierry Reding wrote:
>>> From: Thierry Reding 
>>>
>>> The gk20a implementation of instance memory uses vmap()/vunmap() to map
>>> memory regions into the kernel's virtual address space. These functions
>>> may sleep, so protecting them by a spin lock is not safe. This triggers
>>> a warning if the DEBUG_ATOMIC_SLEEP Kconfig option is enabled. Fix this
>>> by using a mutex instead.
>>>
>>> Signed-off-by: Thierry Reding 
>>> ---
>>>  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 19 ++++++++-----------
>>>  1 file changed, 8 insertions(+), 11 deletions(-)
>>
>> Alex, could you take a look at this?
> 
> Sorry! Yes, using a mutex here should be safe since vmap() can sleep
> anyway. And I don't think this code can ever be reached in atomic
> context (Ben can confirm that last point). Tested this patch and it
> seems to work like a charm.
That should be true on this chipset.  Though, if we ever need to touch
the grctx or something in response to an interrupt (iirc nvgpu does
this, maybe?), this could change.  Granted, we probably should switch to
threaded interrupts at some point anyway.

As it currently stands, it should be fine.
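
As an aside, a rough sketch of what that threaded-interrupt split would
look like (the handler names and registration below are hypothetical,
not nouveau code): the hard handler runs in IRQ context and only defers
the work, while the thread handler runs in process context, where a
sleeping lock like this mutex is legal.

#include <linux/interrupt.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(instmem_lock);	/* hypothetical stand-in for imem->lock */

/* hard IRQ context: must not sleep, so just kick the thread */
static irqreturn_t hw_irq(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

/* kernel-thread (process) context: sleeping locks are allowed */
static irqreturn_t hw_irq_thread(int irq, void *data)
{
	mutex_lock(&instmem_lock);
	/* ... touch grctx/instmem safely here ... */
	mutex_unlock(&instmem_lock);
	return IRQ_HANDLED;
}

/* registration would be roughly:
 *	err = request_threaded_irq(irq, hw_irq, hw_irq_thread,
 *				   IRQF_ONESHOT, "nvkm", dev);
 */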

Ben.

> 
> Reviewed-by: Alexandre Courbot 
> Tested-by: Alexandre Courbot 
> 
>>
>> Thanks,
>> Thierry
>>
>>> diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
>>> index a6a7fa0d7679..7f5244d57d2f 100644
>>> --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
>>> +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
>>> @@ -94,7 +94,7 @@ struct gk20a_instmem {
>>>  	struct nvkm_instmem base;
>>>  
>>>  	/* protects vaddr_* and gk20a_instobj::vaddr* */
>>> -	spinlock_t lock;
>>> +	struct mutex lock;
>>>  
>>>  	/* CPU mappings LRU */
>>>  	unsigned int vaddr_use;
>>> @@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
>>>  	struct gk20a_instmem *imem = node->base.imem;
>>>  	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
>>>  	const u64 size = nvkm_memory_size(memory);
>>> -	unsigned long flags;
>>>  
>>>  	nvkm_ltc_flush(ltc);
>>>  
>>> -	spin_lock_irqsave(&imem->lock, flags);
>>> +	mutex_lock(&imem->lock);
>>>  
>>>  	if (node->base.vaddr) {
>>>  		if (!node->use_cpt) {
>>> @@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
>>>  
>>>  out:
>>>  	node->use_cpt++;
>>> -	spin_unlock_irqrestore(&imem->lock, flags);
>>> +	mutex_unlock(&imem->lock);
>>>  
>>>  	return node->base.vaddr;
>>>  }
>>> @@ -239,9 +238,8 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
>>>  	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
>>>  	struct gk20a_instmem *imem = node->base.imem;
>>>  	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
>>> -	unsigned long flags;
>>>  
>>> -	spin_lock_irqsave(&imem->lock, flags);
>>> +	mutex_lock(&imem->lock);
>>>  
>>>  	/* we should at least have one user to release... */
>>>  	if (WARN_ON(node->use_cpt == 0))
>>> @@ -252,7 +250,7 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
>>>  	list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
>>>  
>>>  out:
>>> -	spin_unlock_irqrestore(&imem->lock, flags);
>>> +	mutex_unlock(&imem->lock);
>>>  
>>>  	wmb();
>>>  	nvkm_ltc_invalidate(ltc);
>>> @@ -306,19 +304,18 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
>>>  	struct gk20a_instmem *imem = node->base.imem;
>>>  	struct device *dev = imem->base.subdev.device->dev;
>>>  	struct nvkm_mm_node *r;
>>> -	unsigned long flags;
>>>  	int i;
>>>  
>>>  	if (unlikely(list_empty(&node->base.mem.regions)))
>>>  		goto out;
>>>  
>>> -	spin_lock_irqsave(&imem->lock, flags);
>>> +	mutex_lock(&imem->lock);
>>>  
>>>  	/* vaddr has already been recycled */
>>>  	if (node->base.vaddr)
>>>  		gk20a_instobj_iommu_recycle_vaddr(node);
>>>  
>>> -	spin_unlock_irqrestore(&imem->lock, flags);
>>> +	mutex_unlock(&imem->lock);
>>>  
>>>  	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
>>>  			     rl_entry);
>>> @@ -580,7 +577,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
>>>  	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
>>>  		return -ENOMEM;
>>>  	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
>>> -	spin_lock_init(&imem->lock);
>>> +	mutex_init(&imem->lock);
>>>  	*pimem = &imem->base;
>>>  
>>>  	/* do not allow more than 1MB of CPU-mapped instmem */
>>> -- 
>>> 2.11.0
>>>
>>
> 



Re: [Nouveau] [PATCH] drm/nouveau: gk20a: Turn instmem lock into mutex

2017-02-23 Thread Alexandre Courbot

On 02/24/2017 01:20 AM, Thierry Reding wrote:

On Mon, Jan 30, 2017 at 09:03:07PM +0100, Thierry Reding wrote:

From: Thierry Reding 

The gk20a implementation of instance memory uses vmap()/vunmap() to map
memory regions into the kernel's virtual address space. These functions
may sleep, so protecting them by a spin lock is not safe. This triggers
a warning if the DEBUG_ATOMIC_SLEEP Kconfig option is enabled. Fix this
by using a mutex instead.

Signed-off-by: Thierry Reding 
---
 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)


Alex, could you take a look at this?


Sorry! Yes, using a mutex here should be safe since vmap() can sleep 
anyway. And I don't think this code can ever be reached in atomic 
context (Ben can confirm that last point). Tested this patch and it 
seems to work like a charm.
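
To make the failure mode concrete, here is a simplified sketch of the
pattern the patch removes (illustrative only, not the literal driver
code): vmap() allocates with GFP_KERNEL and can therefore sleep, so
calling it with a spinlock held is exactly what DEBUG_ATOMIC_SLEEP
flags.

/* before: sleeping call under a spinlock -> DEBUG_ATOMIC_SLEEP splat */
spin_lock_irqsave(&imem->lock, flags);
vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);	/* may sleep */
spin_unlock_irqrestore(&imem->lock, flags);

/* after: a mutex keeps the caller in sleepable process context */
mutex_lock(&imem->lock);
vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
mutex_unlock(&imem->lock);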


Reviewed-by: Alexandre Courbot 
Tested-by: Alexandre Courbot 



Thanks,
Thierry


diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index a6a7fa0d7679..7f5244d57d2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -94,7 +94,7 @@ struct gk20a_instmem {
 	struct nvkm_instmem base;
 
 	/* protects vaddr_* and gk20a_instobj::vaddr* */
-	spinlock_t lock;
+	struct mutex lock;
 
 	/* CPU mappings LRU */
 	unsigned int vaddr_use;
@@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
 	const u64 size = nvkm_memory_size(memory);
-	unsigned long flags;
 
 	nvkm_ltc_flush(ltc);
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	if (node->base.vaddr) {
 		if (!node->use_cpt) {
@@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 
 out:
 	node->use_cpt++;
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	return node->base.vaddr;
 }
@@ -239,9 +238,8 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
-	unsigned long flags;
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	/* we should at least have one user to release... */
 	if (WARN_ON(node->use_cpt == 0))
@@ -252,7 +250,7 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
 	list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
 
 out:
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	wmb();
 	nvkm_ltc_invalidate(ltc);
@@ -306,19 +304,18 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
 	struct nvkm_mm_node *r;
-	unsigned long flags;
 	int i;
 
 	if (unlikely(list_empty(&node->base.mem.regions)))
 		goto out;
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	/* vaddr has already been recycled */
 	if (node->base.vaddr)
 		gk20a_instobj_iommu_recycle_vaddr(node);
 
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
 			     rl_entry);
@@ -580,7 +577,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
 	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
-	spin_lock_init(&imem->lock);
+	mutex_init(&imem->lock);
 	*pimem = &imem->base;
 
 	/* do not allow more than 1MB of CPU-mapped instmem */
--
2.11.0





Re: [Nouveau] [PATCH] drm/nouveau: gk20a: Turn instmem lock into mutex

2017-02-23 Thread Thierry Reding
On Mon, Jan 30, 2017 at 09:03:07PM +0100, Thierry Reding wrote:
> From: Thierry Reding 
> 
> The gk20a implementation of instance memory uses vmap()/vunmap() to map
> memory regions into the kernel's virtual address space. These functions
> may sleep, so protecting them by a spin lock is not safe. This triggers
> a warning if the DEBUG_ATOMIC_SLEEP Kconfig option is enabled. Fix this
> by using a mutex instead.
> 
> Signed-off-by: Thierry Reding 
> ---
>  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 19 ++++++++-----------
>  1 file changed, 8 insertions(+), 11 deletions(-)

Alex, could you take a look at this?

Thanks,
Thierry

> diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
> index a6a7fa0d7679..7f5244d57d2f 100644
> --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
> +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
> @@ -94,7 +94,7 @@ struct gk20a_instmem {
>  	struct nvkm_instmem base;
>  
>  	/* protects vaddr_* and gk20a_instobj::vaddr* */
> -	spinlock_t lock;
> +	struct mutex lock;
>  
>  	/* CPU mappings LRU */
>  	unsigned int vaddr_use;
> @@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
>  	struct gk20a_instmem *imem = node->base.imem;
>  	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
>  	const u64 size = nvkm_memory_size(memory);
> -	unsigned long flags;
>  
>  	nvkm_ltc_flush(ltc);
>  
> -	spin_lock_irqsave(&imem->lock, flags);
> +	mutex_lock(&imem->lock);
>  
>  	if (node->base.vaddr) {
>  		if (!node->use_cpt) {
> @@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
>  
>  out:
>  	node->use_cpt++;
> -	spin_unlock_irqrestore(&imem->lock, flags);
> +	mutex_unlock(&imem->lock);
>  
>  	return node->base.vaddr;
>  }
> @@ -239,9 +238,8 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
>  	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
>  	struct gk20a_instmem *imem = node->base.imem;
>  	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
> -	unsigned long flags;
>  
> -	spin_lock_irqsave(&imem->lock, flags);
> +	mutex_lock(&imem->lock);
>  
>  	/* we should at least have one user to release... */
>  	if (WARN_ON(node->use_cpt == 0))
> @@ -252,7 +250,7 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
>  	list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
>  
>  out:
> -	spin_unlock_irqrestore(&imem->lock, flags);
> +	mutex_unlock(&imem->lock);
>  
>  	wmb();
>  	nvkm_ltc_invalidate(ltc);
> @@ -306,19 +304,18 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
>  	struct gk20a_instmem *imem = node->base.imem;
>  	struct device *dev = imem->base.subdev.device->dev;
>  	struct nvkm_mm_node *r;
> -	unsigned long flags;
>  	int i;
>  
>  	if (unlikely(list_empty(&node->base.mem.regions)))
>  		goto out;
>  
> -	spin_lock_irqsave(&imem->lock, flags);
> +	mutex_lock(&imem->lock);
>  
>  	/* vaddr has already been recycled */
>  	if (node->base.vaddr)
>  		gk20a_instobj_iommu_recycle_vaddr(node);
>  
> -	spin_unlock_irqrestore(&imem->lock, flags);
> +	mutex_unlock(&imem->lock);
>  
>  	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
>  			     rl_entry);
> @@ -580,7 +577,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
>  	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
>  		return -ENOMEM;
>  	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
> -	spin_lock_init(&imem->lock);
> +	mutex_init(&imem->lock);
>  	*pimem = &imem->base;
>  
>  	/* do not allow more than 1MB of CPU-mapped instmem */
> -- 
> 2.11.0
> 




[Nouveau] [PATCH] drm/nouveau: gk20a: Turn instmem lock into mutex

2017-01-30 Thread Thierry Reding
From: Thierry Reding 

The gk20a implementation of instance memory uses vmap()/vunmap() to map
memory regions into the kernel's virtual address space. These functions
may sleep, so protecting them by a spin lock is not safe. This triggers
a warning if the DEBUG_ATOMIC_SLEEP Kconfig option is enabled. Fix this
by using a mutex instead.

Signed-off-by: Thierry Reding 
---
 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index a6a7fa0d7679..7f5244d57d2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -94,7 +94,7 @@ struct gk20a_instmem {
 	struct nvkm_instmem base;
 
 	/* protects vaddr_* and gk20a_instobj::vaddr* */
-	spinlock_t lock;
+	struct mutex lock;
 
 	/* CPU mappings LRU */
 	unsigned int vaddr_use;
@@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
 	const u64 size = nvkm_memory_size(memory);
-	unsigned long flags;
 
 	nvkm_ltc_flush(ltc);
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	if (node->base.vaddr) {
 		if (!node->use_cpt) {
@@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
 
 out:
 	node->use_cpt++;
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	return node->base.vaddr;
 }
@@ -239,9 +238,8 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
-	unsigned long flags;
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	/* we should at least have one user to release... */
 	if (WARN_ON(node->use_cpt == 0))
@@ -252,7 +250,7 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
 	list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
 
 out:
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	wmb();
 	nvkm_ltc_invalidate(ltc);
@@ -306,19 +304,18 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
 	struct nvkm_mm_node *r;
-	unsigned long flags;
 	int i;
 
 	if (unlikely(list_empty(&node->base.mem.regions)))
 		goto out;
 
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 
 	/* vaddr has already been recycled */
 	if (node->base.vaddr)
 		gk20a_instobj_iommu_recycle_vaddr(node);
 
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 
 	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
 			     rl_entry);
@@ -580,7 +577,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
 	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
-	spin_lock_init(&imem->lock);
+	mutex_init(&imem->lock);
 	*pimem = &imem->base;
 
 	/* do not allow more than 1MB of CPU-mapped instmem */
-- 
2.11.0
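
For readers skimming the diff: acquire/release implement a small
refcounted cache of CPU mappings. A simplified sketch of that
discipline under the new mutex (field names mirror the driver, but the
bodies are abbreviated, not the literal code):

static void *instobj_acquire(struct gk20a_instobj_iommu *node)
{
	struct gk20a_instmem *imem = node->base.imem;

	mutex_lock(&imem->lock);
	if (node->base.vaddr) {
		/* idle mapping found on the LRU: take it back */
		if (!node->use_cpt)
			list_del(&node->vaddr_node);
	} else {
		/* ... otherwise vmap() the object's pages ... */
	}
	node->use_cpt++;
	mutex_unlock(&imem->lock);
	return node->base.vaddr;
}

static void instobj_release(struct gk20a_instobj_iommu *node)
{
	struct gk20a_instmem *imem = node->base.imem;

	mutex_lock(&imem->lock);
	/* last user gone: park the mapping on the LRU for recycling */
	if (!WARN_ON(node->use_cpt == 0) && --node->use_cpt == 0)
		list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
	mutex_unlock(&imem->lock);
}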
