Re: [PATCH v2 3/3] kmemleak: change some global variables to int

2014-03-21 Thread Catalin Marinas
On Mon, Mar 17, 2014 at 04:09:04AM +, Li Zefan wrote:
> They don't have to be atomic_t, because they are simple boolean
> toggles.
> 
> Signed-off-by: Li Zefan lize...@huawei.com

One reason I used atomic_t was to avoid compiler optimisations, but I
don't immediately see how it could go wrong here. Assuming that you
have tested it,

Acked-by: Catalin Marinas catalin.mari...@arm.com
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
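
For context on the compiler-optimisation concern above: a plain int gives the
compiler licence to cache the value in a register and elide re-reads, whereas
atomic_read()/atomic_set() boil down to volatile accesses on most architectures
and force a memory access every time. The fragment below is a minimal
user-space sketch of that difference (it is not part of the patch, and the flag
names are made up); kmemleak only ever sets these flags once and tests them at
the top of each entry point, so it never spins on them, which is why dropping
atomic_t is considered safe here.

/* Illustrative user-space sketch only -- not kernel code from this patch. */
#include <stdio.h>

static int plain_flag;              /* like the new plain-int kmemleak flags */
static volatile int volatile_flag;  /* roughly what atomic_read()/atomic_set()
                                       give you: a forced memory access       */

static void wait_plain(void)
{
	/*
	 * With optimisation enabled the compiler may load plain_flag once,
	 * keep it in a register and never re-read it, so this loop can spin
	 * forever even after another context sets the flag.
	 */
	while (!plain_flag)
		;
}

static void wait_volatile(void)
{
	/* Each iteration must re-read volatile_flag from memory. */
	while (!volatile_flag)
		;
}

int main(void)
{
	/*
	 * kmemleak never waits on its flags like the loops above; it sets
	 * them once and then only tests them at function entry, so the
	 * register-caching hazard does not apply.
	 */
	plain_flag = 1;
	volatile_flag = 1;
	wait_plain();
	wait_volatile();
	printf("both flags observed\n");
	return 0;
}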



[PATCH v2 3/3] kmemleak: change some global variables to int

2014-03-16 Thread Li Zefan
They don't have to be atomic_t, because they are simple boolean
toggles.

Signed-off-by: Li Zefan lize...@huawei.com
---
 mm/kmemleak.c | 78 +--
 1 file changed, 39 insertions(+), 39 deletions(-)

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 54270f2..c352c63 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -192,15 +192,15 @@ static struct kmem_cache *object_cache;
 static struct kmem_cache *scan_area_cache;
 
 /* set if tracing memory operations is enabled */
-static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
+static int kmemleak_enabled;
 /* set in the late_initcall if there were no errors */
-static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
+static int kmemleak_initialized;
 /* enables or disables early logging of the memory operations */
-static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
+static int kmemleak_early_log = 1;
 /* set if a kmemleak warning was issued */
-static atomic_t kmemleak_warning = ATOMIC_INIT(0);
+static int kmemleak_warning;
 /* set if a fatal kmemleak error has occurred */
-static atomic_t kmemleak_error = ATOMIC_INIT(0);
+static int kmemleak_error;
 
 /* minimum and maximum address that may be valid pointers */
 static unsigned long min_addr = ULONG_MAX;
@@ -267,7 +267,7 @@ static void kmemleak_disable(void);
 #define kmemleak_warn(x...)do {\
pr_warning(x);  \
dump_stack();   \
-   atomic_set(&kmemleak_warning, 1);   \
+   kmemleak_warning = 1;   \
 } while (0)
 
 /*
@@ -805,7 +805,7 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
unsigned long flags;
struct early_log *log;
 
-   if (atomic_read(&kmemleak_error)) {
+   if (kmemleak_error) {
/* kmemleak stopped recording, just count the requests */
crt_early_log++;
return;
@@ -840,7 +840,7 @@ static void early_alloc(struct early_log *log)
unsigned long flags;
int i;
 
-   if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
+   if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
return;
 
/*
@@ -893,9 +893,9 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 {
pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 
-   if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+   if (kmemleak_enabled && ptr && !IS_ERR(ptr))
create_object((unsigned long)ptr, size, min_count, gfp);
-   else if (atomic_read(&kmemleak_early_log))
+   else if (kmemleak_early_log)
log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
@@ -919,11 +919,11 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
 * Percpu allocations are only scanned and not reported as leaks
 * (min_count is set to 0).
 */
-   if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+   if (kmemleak_enabled && ptr && !IS_ERR(ptr))
for_each_possible_cpu(cpu)
create_object((unsigned long)per_cpu_ptr(ptr, cpu),
  size, 0, GFP_KERNEL);
-   else if (atomic_read(&kmemleak_early_log))
+   else if (kmemleak_early_log)
log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
@@ -939,9 +939,9 @@ void __ref kmemleak_free(const void *ptr)
 {
pr_debug("%s(0x%p)\n", __func__, ptr);
 
-   if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+   if (kmemleak_enabled && ptr && !IS_ERR(ptr))
delete_object_full((unsigned long)ptr);
-   else if (atomic_read(&kmemleak_early_log))
+   else if (kmemleak_early_log)
log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
@@ -959,9 +959,9 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 {
pr_debug("%s(0x%p)\n", __func__, ptr);
 
-   if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+   if (kmemleak_enabled && ptr && !IS_ERR(ptr))
delete_object_part((unsigned long)ptr, size);
-   else if (atomic_read(&kmemleak_early_log))
+   else if (kmemleak_early_log)
log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
@@ -979,11 +979,11 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
 
pr_debug("%s(0x%p)\n", __func__, ptr);
 
-   if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+   if (kmemleak_enabled && ptr && !IS_ERR(ptr))
for_each_possible_cpu(cpu)
delete_object_full((unsigned long)per_cpu_ptr(ptr,
  cpu));
-   else if (atomic_read(&kmemleak_early_log))
+   else if (kmemleak_early_log)
log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
@@ 
