Re: [PATCH 3/4] memremap: don't use a separate devm action for devmap_managed_enable_get

2019-08-19 Thread Dan Williams
On Sun, Aug 18, 2019 at 2:12 AM Christoph Hellwig  wrote:
>
> Just clean up for early failures and then piggy back on
> devm_memremap_pages_release.  This helps with a pending not device
> managed version of devm_memremap_pages.
>
> Signed-off-by: Christoph Hellwig 
> Reviewed-by: Ira Weiny 

Looks good,

Reviewed-by: Dan Williams 


[PATCH 3/4] memremap: don't use a separate devm action for devmap_managed_enable_get

2019-08-18 Thread Christoph Hellwig
Just clean up for early failures and then piggy back on
devm_memremap_pages_release.  This helps with a pending not device
managed version of devm_memremap_pages.

Signed-off-by: Christoph Hellwig 
Reviewed-by: Ira Weiny 
---
 kernel/memremap.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/kernel/memremap.c b/kernel/memremap.c
index 600a14cbe663..09a087ca30ff 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -21,13 +21,13 @@ DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 EXPORT_SYMBOL(devmap_managed_key);
 static atomic_t devmap_managed_enable;
 
-static void devmap_managed_enable_put(void *data)
+static void devmap_managed_enable_put(void)
 {
	if (atomic_dec_and_test(&devmap_managed_enable))
		static_branch_disable(&devmap_managed_key);
 }
 
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
if (!pgmap->ops || !pgmap->ops->page_free) {
WARN(1, "Missing page_free method\n");
@@ -36,13 +36,16 @@ static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
 
	if (atomic_inc_return(&devmap_managed_enable) == 1)
		static_branch_enable(&devmap_managed_key);
-   return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
+   return 0;
 }
 #else
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
return -EINVAL;
 }
+static void devmap_managed_enable_put(void)
+{
+}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static void pgmap_array_delete(struct resource *res)
@@ -123,6 +126,7 @@ static void devm_memremap_pages_release(void *data)
untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
pgmap_array_delete(res);
WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
+   devmap_managed_enable_put();
 }
 
 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
@@ -212,7 +216,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
}
 
if (need_devmap_managed) {
-   error = devmap_managed_enable_get(dev, pgmap);
+   error = devmap_managed_enable_get(pgmap);
if (error)
return ERR_PTR(error);
}
@@ -321,6 +325,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  err_array:
dev_pagemap_kill(pgmap);
dev_pagemap_cleanup(pgmap);
+   devmap_managed_enable_put();
return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
-- 
2.20.1
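
The pattern the patch keeps is a refcounted global enable: the first caller of
devmap_managed_enable_get() flips the static key on, the last
devmap_managed_enable_put() flips it off; the change is only about where the
put is issued (the release path and the error unwind) rather than the pattern
itself.  Below is a minimal user-space sketch of that refcounting pattern,
using C11 atomics in place of the kernel's atomic_t and static keys -- all
names are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's static key and the devmap_managed_enable counter. */
static atomic_bool managed_key_enabled;
static atomic_int managed_enable_count;

static void managed_enable_get(void)
{
	/* First user turns the global switch on. */
	if (atomic_fetch_add(&managed_enable_count, 1) == 0)
		atomic_store(&managed_key_enabled, true);
}

static void managed_enable_put(void)
{
	/* Last user turns it back off. */
	if (atomic_fetch_sub(&managed_enable_count, 1) == 1)
		atomic_store(&managed_key_enabled, false);
}

int main(void)
{
	managed_enable_get();
	printf("enabled: %d\n", atomic_load(&managed_key_enabled));	/* 1 */
	managed_enable_get();
	managed_enable_put();
	printf("enabled: %d\n", atomic_load(&managed_key_enabled));	/* still 1 */
	managed_enable_put();
	printf("enabled: %d\n", atomic_load(&managed_key_enabled));	/* 0 */
	return 0;
}

Running the sketch prints enabled: 1, 1, 0, matching the first-get / last-put
behaviour the kernel code relies on.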



[PATCH 3/4] memremap: don't use a separate devm action for devmap_managed_enable_get

2019-08-16 Thread Christoph Hellwig
Just clean up for early failures and then piggy back on
devm_memremap_pages_release.  This helps with a pending not device
managed version of devm_memremap_pages.

Signed-off-by: Christoph Hellwig 
---
 mm/memremap.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/mm/memremap.c b/mm/memremap.c
index 416b4129acbb..4e11da4ecab9 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -21,13 +21,13 @@ DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 EXPORT_SYMBOL(devmap_managed_key);
 static atomic_t devmap_managed_enable;
 
-static void devmap_managed_enable_put(void *data)
+static void devmap_managed_enable_put(void)
 {
	if (atomic_dec_and_test(&devmap_managed_enable))
		static_branch_disable(&devmap_managed_key);
 }
 
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
if (!pgmap->ops || !pgmap->ops->page_free) {
WARN(1, "Missing page_free method\n");
@@ -36,13 +36,16 @@ static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
 
	if (atomic_inc_return(&devmap_managed_enable) == 1)
		static_branch_enable(&devmap_managed_key);
-   return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
+   return 0;
 }
 #else
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
return -EINVAL;
 }
+static void devmap_managed_enable_put(void)
+{
+}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static void pgmap_array_delete(struct resource *res)
@@ -129,6 +132,7 @@ static void devm_memremap_pages_release(void *data)
untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
pgmap_array_delete(res);
WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
+   devmap_managed_enable_put();
 }
 
 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
@@ -218,7 +222,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
}
 
if (need_devmap_managed) {
-   error = devmap_managed_enable_get(dev, pgmap);
+   error = devmap_managed_enable_get(pgmap);
if (error)
return ERR_PTR(error);
}
@@ -327,6 +331,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  err_array:
dev_pagemap_kill(pgmap);
dev_pagemap_cleanup(pgmap);
+   devmap_managed_enable_put();
return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
-- 
2.20.1
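
Structurally, the change drops the separately registered devm cleanup action
and instead undoes the enable explicitly: once in the constructor's error
unwind and once in the single release function.  Here is a hedged,
self-contained C sketch of that structure; the mapping/enable names are made
up for illustration and are not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative resource with one extra "enable" step that must be undone. */
struct mapping {
	char *buf;
};

static int enable_get(void)  { puts("enable++"); return 0; }
static void enable_put(void) { puts("enable--"); }

/* Single release path: tears down the mapping and drops the enable ref. */
static void mapping_release(struct mapping *m)
{
	free(m->buf);
	enable_put();
	free(m);
}

static struct mapping *mapping_create(size_t size)
{
	struct mapping *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;
	if (enable_get())
		goto err_free;		/* early failure: nothing else to undo */

	m->buf = malloc(size);
	if (!m->buf)
		goto err_put;		/* undo the enable in the error unwind ... */
	return m;

err_put:
	enable_put();			/* ... instead of via a registered action */
err_free:
	free(m);
	return NULL;
}

int main(void)
{
	struct mapping *m = mapping_create(64);

	if (m)
		mapping_release(m);	/* normal teardown also drops the ref */
	return 0;
}

Keeping a single teardown ordering for both the failure and the normal release
paths is what makes a non-device-managed variant straightforward to layer on
top, as the commit message anticipates.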