[PATCH] kernel/futex.c: fix incorrect 'should_fail_futex' handling

2020-09-26 Thread mateusznosek0
From: Mateusz Nosek 

Previously, if 'should_fail_futex()' returned true, only the 'ret' variable
was set; 'ret' was then overwritten by the return value of
'cmpxchg_futex_value_locked()' without ever being read, so the injected
fault was silently dropped. Fix this by bailing out to the unlock path
when the fault injection triggers.

Signed-off-by: Mateusz Nosek 
---
 kernel/futex.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/kernel/futex.c b/kernel/futex.c
index a5876694a60e..39681bf8b06c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1502,8 +1502,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
 */
newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
-   if (unlikely(should_fail_futex(true)))
+   if (unlikely(should_fail_futex(true))) {
ret = -EFAULT;
+   goto out_unlock;
+   }
 
ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
if (!ret && (curval != uval)) {
-- 
2.20.1



[PATCH] mm/page_alloc.c: Clean code by removing unnecessary initialization

2020-09-04 Thread mateusznosek0
From: Mateusz Nosek 

Previously the variable 'tmp' was initialized, but it was never read
before being reassigned, so the initialization can be removed.

Signed-off-by: Mateusz Nosek 
---
 mm/page_alloc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3ae4f3651aec..77c3d2084004 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5637,7 +5637,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
int n, val;
int min_val = INT_MAX;
int best_node = NUMA_NO_NODE;
-   const struct cpumask *tmp = cpumask_of_node(0);
+   const struct cpumask *tmp;
 
/* Use the local node if we haven't already */
if (!node_isset(node, *used_node_mask)) {
-- 
2.20.1



[PATCH] mm/mmu_notifier.c: micro-optimization substitute kzalloc with kmalloc

2020-09-06 Thread mateusznosek0
From: Mateusz Nosek 

Most fields of the struct pointed to by 'subscriptions' are initialized
explicitly right after the allocation. By changing kzalloc() to kmalloc(),
the implicit memset() of the whole struct is avoided; the two fields that
previously relied on the zeroing are now initialized explicitly instead.
As the only new code consists of two simple stores, this is a small
performance win.
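
As a sketch of the trade-off, here is a userspace analogue (the struct is
a stand-in, not the real struct mmu_notifier_subscriptions):

#include <stdbool.h>
#include <stdlib.h>

struct subs {
	unsigned int active_invalidate_ranges;
	bool has_itree;
	/* ...the remaining fields are all assigned explicitly anyway... */
};

static struct subs *alloc_zeroed(void)		/* kzalloc() analogue */
{
	return calloc(1, sizeof(struct subs));
}

static struct subs *alloc_explicit(void)	/* kmalloc() analogue */
{
	struct subs *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	s->active_invalidate_ranges = 0;	/* the two new stores */
	s->has_itree = false;
	return s;
}

int main(void)
{
	free(alloc_zeroed());
	free(alloc_explicit());
	return 0;
}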

Signed-off-by: Mateusz Nosek 
---
 mm/mmu_notifier.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 4fc918163dd3..190e198dc5be 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -625,7 +625,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
 * know that mm->notifier_subscriptions can't change while we
 * hold the write side of the mmap_lock.
 */
-   subscriptions = kzalloc(
+   subscriptions = kmalloc(
sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
if (!subscriptions)
return -ENOMEM;
@@ -636,6 +636,8 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
subscriptions->itree = RB_ROOT_CACHED;
init_waitqueue_head(&subscriptions->wq);
INIT_HLIST_HEAD(&subscriptions->deferred_list);
+   subscriptions->active_invalidate_ranges = 0;
+   subscriptions->has_itree = false;
}
 
ret = mm_take_all_locks(mm);
-- 
2.20.1



[PATCH] mm/page_poison.c: replace bool variable with static key

2020-09-21 Thread mateusznosek0
From: Mateusz Nosek 

The variable 'want_page_poisoning' is a switch deciding whether page
poisoning is enabled. It is written only once, from an early-boot
parameter, and afterwards only read on allocation and free paths, which
makes it a good candidate for a static key. This patch converts it to one.
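
For reference, the static-key idiom looks as follows (a sketch against the
kernel-only APIs in <linux/jump_label.h>, not compilable standalone;
'my_feature' is an illustrative name). The point is that the disabled case
becomes a patched-in no-op at the branch site instead of a memory load
plus a conditional branch:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(my_feature);

static int __init my_feature_param(char *buf)
{
	static_branch_enable(&my_feature);	/* patches all branch sites */
	return 0;
}

bool my_feature_active(void)
{
	/* Compiles down to a nop while the key is disabled. */
	return static_branch_unlikely(&my_feature);
}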

Signed-off-by: Mateusz Nosek 
---
 mm/page_poison.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/mm/page_poison.c b/mm/page_poison.c
index 34b9181ee5d1..ae0482cded87 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -8,13 +8,23 @@
 #include <linux/ratelimit.h>
 #include <linux/kasan.h>
 
-static bool want_page_poisoning __read_mostly;
+static DEFINE_STATIC_KEY_FALSE_RO(want_page_poisoning);
 
 static int __init early_page_poison_param(char *buf)
 {
-   if (!buf)
-   return -EINVAL;
-   return strtobool(buf, &want_page_poisoning);
+   int ret;
+   bool tmp;
+
+   ret = strtobool(buf, &tmp);
+   if (ret)
+   return ret;
+
+   if (tmp)
+   static_branch_enable(&want_page_poisoning);
+   else
+   static_branch_disable(&want_page_poisoning);
+
+   return 0;
 }
 early_param("page_poison", early_page_poison_param);
 
@@ -31,7 +41,7 @@ bool page_poisoning_enabled(void)
 * Page poisoning is debug page alloc for some arches. If
 * either of those options are enabled, enable poisoning.
 */
-   return (want_page_poisoning ||
+   return (static_branch_unlikely(&want_page_poisoning) ||
(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
debug_pagealloc_enabled()));
 }
-- 
2.20.1



[PATCH] mm/page_alloc.c: micro-optimization remove unnecessary branch

2020-09-11 Thread mateusznosek0
From: Mateusz Nosek 

Previously the flags check was split into two separate checks with two
separate branches. Since the presence of either flag has the same effect
on the control flow, the checks can be merged and one branch avoided.
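
The identity relied on here can be sanity-checked in userspace C
(placeholder bits, not the real GFP values):

#include <assert.h>

#define FLAG_A 0x1u
#define FLAG_B 0x4u

int main(void)
{
	for (unsigned int m = 0; m < 16; m++)
		assert(((m & FLAG_A) || (m & FLAG_B)) ==
		       !!(m & (FLAG_A | FLAG_B)));
	return 0;
}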

Signed-off-by: Mateusz Nosek 
---
 mm/page_alloc.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cefbef32bf4a..b9bd75cacf02 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3972,8 +3972,10 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 * success so it is time to admit defeat. We will skip the OOM killer
 * because it is very likely that the caller has a more reasonable
 * fallback than shooting a random task.
+*
+* The OOM killer may not free memory on a specific node.
 */
-   if (gfp_mask & __GFP_RETRY_MAYFAIL)
+   if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
goto out;
/* The OOM killer does not needlessly kill tasks for lowmem */
if (ac->highest_zoneidx < ZONE_NORMAL)
@@ -3990,10 +3992,6 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 * failures more gracefully we should just bail out here.
 */
 
-   /* The OOM killer may not free memory on a specific node */
-   if (gfp_mask & __GFP_THISNODE)
-   goto out;
-
/* Exhausted what can be done so it's blame time */
if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
*did_some_progress = 1;
-- 
2.20.1



[PATCH] fs/aio.c: clean code by removing unnecessary assignment

2020-09-11 Thread mateusznosek0
From: Mateusz Nosek 

The variable 'ret' is reassigned before it is ever read, so the
assignment is unnecessary and can be removed.

Signed-off-by: Mateusz Nosek 
---
 fs/aio.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/fs/aio.c b/fs/aio.c
index 42154e7c44cb..0a0e5cefa1c4 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1528,7 +1528,6 @@ static int aio_read(struct kiocb *req, const struct iocb *iocb,
file = req->ki_filp;
if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF;
-   ret = -EINVAL;
if (unlikely(!file->f_op->read_iter))
return -EINVAL;
 
-- 
2.20.1



[PATCH] fs/pipe.c: clean code by removing unnecessary initialization

2020-09-12 Thread mateusznosek0
From: Mateusz Nosek 

Previously the variable 'buf' was initialized, but it was never read
before being reassigned, so the initialization can be removed.

Signed-off-by: Mateusz Nosek 
---
 fs/pipe.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/pipe.c b/fs/pipe.c
index 60dbee457143..a18ee5f6383b 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -495,7 +495,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
head = pipe->head;
if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
unsigned int mask = pipe->ring_size - 1;
-   struct pipe_buffer *buf = &pipe->bufs[head & mask];
+   struct pipe_buffer *buf;
struct page *page = pipe->tmp_page;
int copied;
 
-- 
2.20.1



[PATCH] mm/compaction.c: micro-optimization remove unnecessary branch

2020-09-13 Thread mateusznosek0
From: Mateusz Nosek 

The code behaves the same with 'zone->compact_considered > defer_limit'
as with 'zone->compact_considered >= defer_limit': in both cases the
counter ends up clamped at 'defer_limit' and compaction is not deferred.
With '>=', the clamp and the early return share a single condition, so
one branch is saved, which is slightly better for performance.
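
A quick userspace check that the merged form is behavior-equivalent (the
stand-ins model zone->compact_considered and defer_limit):

#include <assert.h>

static int old_form(int considered, int limit, int *out)
{
	if (++considered > limit)
		considered = limit;
	if (considered >= limit) {
		*out = considered;
		return 0;			/* not deferred */
	}
	*out = considered;
	return 1;				/* deferred */
}

static int new_form(int considered, int limit, int *out)
{
	if (++considered >= limit) {
		considered = limit;
		*out = considered;
		return 0;
	}
	*out = considered;
	return 1;
}

int main(void)
{
	for (int c = 0; c < 8; c++)
		for (int l = 1; l < 8; l++) {
			int o1, o2;

			assert(old_form(c, l, &o1) == new_form(c, l, &o2));
			assert(o1 == o2);
		}
	return 0;
}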

Signed-off-by: Mateusz Nosek 
---
 mm/compaction.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 176dcded298e..6c63844fc061 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -180,11 +180,10 @@ bool compaction_deferred(struct zone *zone, int order)
return false;
 
/* Avoid possible overflow */
-   if (++zone->compact_considered > defer_limit)
+   if (++zone->compact_considered >= defer_limit) {
zone->compact_considered = defer_limit;
-
-   if (zone->compact_considered >= defer_limit)
return false;
+   }
 
trace_mm_compaction_deferred(zone, order);
 
-- 
2.20.1



[RFC PATCH] mm/page_alloc.c: micro-optimization reduce oom critical section size

2020-09-14 Thread mateusznosek0
From: Mateusz Nosek 

Most operations in '__alloc_pages_may_oom()' do not require the oom_lock
to be held; the exception is 'out_of_memory()'. This patch refactors
'__alloc_pages_may_oom()' to reduce the size of the critical section and
improve overall system performance.
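
The shape of the refactoring, shown as a userspace analogue with pthreads
(all names illustrative): the lock is taken only around the one call that
needs it, and the cheap filter checks run first, outside the critical
section.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t oom_lock = PTHREAD_MUTEX_INITIALIZER;

static int cheap_checks(void) { return 1; }		/* no lock required */
static int out_of_memory_stub(void) { return 1; }	/* needs the lock */

int may_oom(void)
{
	int success;

	if (!cheap_checks())
		return 0;	/* bail out without ever touching the lock */

	if (pthread_mutex_trylock(&oom_lock))
		return 0;	/* somebody else is making progress for us */
	success = out_of_memory_stub();
	pthread_mutex_unlock(&oom_lock);

	return success;
}

int main(void)
{
	printf("may_oom() = %d\n", may_oom());
	return 0;
}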

Signed-off-by: Mateusz Nosek 
---
 mm/page_alloc.c | 45 ++++++++++++++++++++++++---------------------
 1 file changed, 24 insertions(+), 21 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b9bd75cacf02..b07f950a5825 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3935,18 +3935,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
.order = order,
};
struct page *page;
-
-   *did_some_progress = 0;
-
-   /*
-* Acquire the oom lock.  If that fails, somebody else is
-* making progress for us.
-*/
-   if (!mutex_trylock(&oom_lock)) {
-   *did_some_progress = 1;
-   schedule_timeout_uninterruptible(1);
-   return NULL;
-   }
+   bool success;
 
/*
 * Go through the zonelist yet one more time, keep very high watermark
@@ -3959,14 +3948,17 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
  ~__GFP_DIRECT_RECLAIM, order,
  ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
if (page)
-   goto out;
+   return page;
+
+   /* Check if somebody else is making progress for us. */
+   *did_some_progress = mutex_is_locked(&oom_lock);
 
/* Coredumps can quickly deplete all memory reserves */
if (current->flags & PF_DUMPCORE)
-   goto out;
+   return NULL;
/* The OOM killer will not help higher order allocs */
if (order > PAGE_ALLOC_COSTLY_ORDER)
-   goto out;
+   return NULL;
/*
 * We have already exhausted all our reclaim opportunities without any
 * success so it is time to admit defeat. We will skip the OOM killer
@@ -3976,12 +3968,12 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 * The OOM killer may not free memory on a specific node.
 */
if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
-   goto out;
+   return NULL;
/* The OOM killer does not needlessly kill tasks for lowmem */
if (ac->highest_zoneidx < ZONE_NORMAL)
-   goto out;
+   return NULL;
if (pm_suspended_storage())
-   goto out;
+   return NULL;
/*
 * XXX: GFP_NOFS allocations should rather fail than rely on
 * other request to make a forward progress.
@@ -3992,8 +3984,20 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 * failures more gracefully we should just bail out here.
 */
 
+   /*
+* Acquire the oom lock.  If that fails, somebody else is
+* making progress for us.
+*/
+   if (!mutex_trylock(&oom_lock)) {
+   *did_some_progress = 1;
+   schedule_timeout_uninterruptible(1);
+   return NULL;
+   }
+   success = out_of_memory(&oc);
+   mutex_unlock(&oom_lock);
+
/* Exhausted what can be done so it's blame time */
-   if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
+   if (success || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
*did_some_progress = 1;
 
/*
@@ -4004,8 +4008,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
page = __alloc_pages_cpuset_fallback(gfp_mask, order,
ALLOC_NO_WATERMARKS, ac);
}
-out:
-   mutex_unlock(&oom_lock);
+
return page;
 }
 
-- 
2.20.1



[RFC PATCH] fs: micro-optimization remove branches by adjusting flag values

2020-09-14 Thread mateusznosek0
From: Mateusz Nosek 

When flags A and B have equal values, the following code

if (flags1 & A)
	flags2 |= B;

is equivalent to

flags2 |= (flags1 & A);

The latter should generate fewer instructions and be faster, as one
branch is omitted.
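
A sanity check of the transformation in userspace C (0x1000 is used for
illustration; after the patch LOOKUP_EMPTY and AT_EMPTY_PATH share one
value):

#include <assert.h>

#define A 0x1000u
#define B 0x1000u	/* A == B is the precondition */

int main(void)
{
	unsigned int f1, f2;

	for (f1 = 0; f1 < 0x4000u; f1 += 0x800u)
		for (f2 = 0; f2 < 4; f2++) {
			unsigned int a = f2, b = f2;

			if (f1 & A)
				a |= B;
			b |= (f1 & A);
			assert(a == b);
		}
	return 0;
}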

This patch changes the value of 'LOOKUP_EMPTY' to be equal to the value
of 'AT_EMPTY_PATH'. Thanks to that, a few branches can be rewritten in
the way shown above, which improves both the performance and the size of
the code.

Signed-off-by: Mateusz Nosek 
---
 fs/exec.c             | 14 ++++++++++----
 fs/fhandle.c          |  4 ++--
 fs/namespace.c        |  4 ++--
 fs/open.c             |  8 ++++----
 fs/stat.c             |  4 ++--
 fs/utimes.c           |  6 +++---
 include/linux/namei.h |  4 ++--
 7 files changed, 25 insertions(+), 19 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index a91003e28eaa..39e1ada1ee6c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -904,8 +904,8 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
return ERR_PTR(-EINVAL);
if (flags & AT_SYMLINK_NOFOLLOW)
open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
-   if (flags & AT_EMPTY_PATH)
-   open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
+   BUILD_BUG_ON(AT_EMPTY_PATH != LOOKUP_EMPTY);
+   open_exec_flags.lookup_flags |= (flags & AT_EMPTY_PATH);
 
file = do_filp_open(fd, name, &open_exec_flags);
if (IS_ERR(file))
@@ -2176,7 +2176,10 @@ SYSCALL_DEFINE5(execveat,
const char __user *const __user *, envp,
int, flags)
 {
-   int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
+   int lookup_flags;
+
+   BUILD_BUG_ON(AT_EMPTY_PATH != LOOKUP_EMPTY);
+   lookup_flags = (flags & AT_EMPTY_PATH);
 
return do_execveat(fd,
   getname_flags(filename, lookup_flags, NULL),
@@ -2197,7 +2200,10 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
   const compat_uptr_t __user *, envp,
   int,  flags)
 {
-   int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
+   int lookup_flags;
+
+   BUILD_BUG_ON(AT_EMPTY_PATH != LOOKUP_EMPTY);
+   lookup_flags = (flags & AT_EMPTY_PATH);
 
return compat_do_execveat(fd,
  getname_flags(filename, lookup_flags, NULL),
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 01263ffbc4c0..579bf462bf89 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -102,8 +102,8 @@ SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name,
return -EINVAL;
 
lookup_flags = (flag & AT_SYMLINK_FOLLOW) ? LOOKUP_FOLLOW : 0;
-   if (flag & AT_EMPTY_PATH)
-   lookup_flags |= LOOKUP_EMPTY;
+   BUILD_BUG_ON(AT_EMPTY_PATH != LOOKUP_EMPTY);
+   lookup_flags |= (flag & AT_EMPTY_PATH);
err = user_path_at(dfd, name, lookup_flags, &path);
if (!err) {
err = do_sys_name_to_handle(&path, handle, mnt_id);
diff --git a/fs/namespace.c b/fs/namespace.c
index 098f981dce54..319f42d11236 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2456,8 +2456,8 @@ SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, fl
lookup_flags &= ~LOOKUP_AUTOMOUNT;
if (flags & AT_SYMLINK_NOFOLLOW)
lookup_flags &= ~LOOKUP_FOLLOW;
-   if (flags & AT_EMPTY_PATH)
-   lookup_flags |= LOOKUP_EMPTY;
+   BUILD_BUG_ON(AT_EMPTY_PATH != LOOKUP_EMPTY);
+   lookup_flags |= (flags & AT_EMPTY_PATH);
 
if (detached && !may_mount())
return -EPERM;
diff --git a/fs/open.c b/fs/open.c
index 9af548fb841b..8b6fe1e89811 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -410,8 +410,8 @@ static long do_faccessat(int dfd, const char __user *filename, int mode, int fla
 
if (flags & AT_SYMLINK_NOFOLLOW)
lookup_flags &= ~LOOKUP_FOLLOW;
-   if (flags & AT_EMPTY_PATH)
-   lookup_flags |= LOOKUP_EMPTY;
+   BUILD_BUG_ON(AT_EMPTY_PATH != LOOKUP_EMPTY);
+   lookup_flags |= (flags & AT_EMPTY_PATH);
 
if (!(flags & AT_EACCESS)) {
old_cred = access_override_creds();
@@ -692,8 +692,8 @@ int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
goto out;
 
lookup_flags = (flag & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW;
-   if (flag & AT_EMPTY_PATH)
-   lookup_flags |= LOOKUP_EMPTY;
+   BUILD_BUG_ON(AT_EMPTY_PATH != LOOKUP_EMPTY);
+   lookup_flags |= (flag & AT_EMPTY_PATH);
 retry:
error = user_path_at(dfd, filename, lookup_flags, &path);
if (error)
diff --git a/fs/stat.c b/fs/stat.c
index 44f8ad346db4..a9feb7a7e9ec 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -168,8 +168,8 @@ static inline unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags,
*lookup_flags,
*lookup_flags &= ~LOOKUP_FOLLOW;
if (flags & AT_NO_AUT

[PATCH] fs/open.c: micro-optimization by avoiding branch on common path

2020-09-18 Thread mateusznosek0
From: Mateusz Nosek 

If the file is a directory, it is certainly not a regular file. Therefore,
if the 'S_ISREG()' check fails, one can be sure that 'vfs_truncate()' must
return an error. This patch refactors the code to avoid one branch on the
'likely' control-flow path, and marks the combined check with the
'unlikely' macro to improve both branch prediction and readability. The
change was tested with gcc 8.3.0 on the x86 architecture, confirming that
slightly better assembly is generated.
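
The equivalence of the two forms can be checked in userspace (error
numbers hardcoded for illustration: -21 is EISDIR and -22 is EINVAL on
Linux):

#include <assert.h>
#include <sys/stat.h>

static int old_form(mode_t m)
{
	if (S_ISDIR(m))
		return -21;
	if (!S_ISREG(m))
		return -22;
	return 0;
}

static int new_form(mode_t m)
{
	if (!S_ISREG(m)) {
		if (S_ISDIR(m))
			return -21;
		return -22;
	}
	return 0;
}

int main(void)
{
	const mode_t modes[] = { S_IFREG, S_IFDIR, S_IFCHR, S_IFIFO, S_IFLNK };
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		assert(old_form(modes[i]) == new_form(modes[i]));
	return 0;
}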

Signed-off-by: Mateusz Nosek 
---
 fs/open.c | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/fs/open.c b/fs/open.c
index 9af548fb841b..69658ea27530 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -74,10 +74,12 @@ long vfs_truncate(const struct path *path, loff_t length)
inode = path->dentry->d_inode;
 
/* For directories it's -EISDIR, for other non-regulars - -EINVAL */
-   if (S_ISDIR(inode->i_mode))
-   return -EISDIR;
-   if (!S_ISREG(inode->i_mode))
-   return -EINVAL;
+   if (unlikely(!S_ISREG(inode->i_mode))) {
+   if (S_ISDIR(inode->i_mode))
+   return -EISDIR;
+   else
+   return -EINVAL;
+   }
 
error = mnt_want_write(path->mnt);
if (error)
-- 
2.20.1



[RFC PATCH] mm/page_alloc.c: clean code by merging two functions

2020-09-16 Thread mateusznosek0
From: Mateusz Nosek 

The 'finalise_ac()' function is just an epilogue for
'prepare_alloc_pages()', so there is no need to keep both: the contents of
'finalise_ac()' can be merged into 'prepare_alloc_pages()'. This makes
'__alloc_pages_nodemask()' more readable.

Signed-off-by: Mateusz Nosek 
---
 mm/page_alloc.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b9bd75cacf02..a094e297c88f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4824,12 +4824,6 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 
*alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
 
-   return true;
-}
-
-/* Determine whether to spread dirty pages and what the first usable zone */
-static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
-{
/* Dirty zone balancing only done in the fast path */
ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
@@ -4840,6 +4834,8 @@ static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
 */
ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
ac->highest_zoneidx, ac->nodemask);
+
+   return true;
 }
 
 /*
@@ -4868,8 +4864,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
return NULL;
 
-   finalise_ac(gfp_mask, &ac);
-
/*
 * Forbid the first pass from falling back to types that fragment
 * memory until all local zones are considered.
-- 
2.20.1



[PATCH] mm/slab.c: micro-optimization spare one branch in main flow

2020-09-16 Thread mateusznosek0
From: Mateusz Nosek 

With a small refactoring, the two 'unlikely' checks are combined so that,
if neither of them triggers, only one branch is taken on the 'likely'
path of the function. The change was verified in the assembly generated
by gcc 8.3.0.
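
What makes the single branch possible is that ZERO_OR_NULL_PTR() folds
the NULL test and the ZERO_SIZE_PTR test into one unsigned comparison. A
userspace model (the two defines mirror include/linux/slab.h):

#include <assert.h>

#define ZERO_SIZE_PTR ((void *)16)
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR)

int main(void)
{
	int obj;

	assert(ZERO_OR_NULL_PTR((void *)0));		/* NULL pointer */
	assert(ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));	/* zero-size alloc */
	assert(!ZERO_OR_NULL_PTR(&obj));		/* ordinary object */
	return 0;
}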

Signed-off-by: Mateusz Nosek 
---
 mm/slab.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 2cb494d98fe9..e04ea756bcbc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4186,9 +4186,10 @@ size_t __ksize(const void *objp)
struct kmem_cache *c;
size_t size;
 
-   BUG_ON(!objp);
-   if (unlikely(objp == ZERO_SIZE_PTR))
+   if (unlikely(ZERO_OR_NULL_PTR(objp))) {
+   BUG_ON(!objp);
return 0;
+   }
 
c = virt_to_cache(objp);
size = c ? c->object_size : 0;
-- 
2.20.1



[PATCH] mm/page_alloc.c: fix early params garbage value accesses

2020-09-16 Thread mateusznosek0
From: Mateusz Nosek 

Previously, '__init early_init_on_alloc()' and '__init
early_init_on_free()' did not handle the return value of 'kstrtobool()'
properly: on a parse failure 'bool_result' is left uninitialized and is
then read, which is a potential garbage-value access. This patch fixes
the error handling. The explicit '!buf' check can be dropped, since
'kstrtobool()' itself returns -EINVAL for a NULL input.
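
The fixed pattern, sketched in userspace C (strtobool_demo() stands in
for kstrtobool(); -22 models -EINVAL): the parsed value is consulted only
after the parser reports success.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static int strtobool_demo(const char *s, bool *res)
{
	if (!s)
		return -22;
	if (!strcmp(s, "1") || !strcmp(s, "y")) { *res = true;  return 0; }
	if (!strcmp(s, "0") || !strcmp(s, "n")) { *res = false; return 0; }
	return -22;
}

static int early_param_demo(const char *buf)
{
	bool bool_result;
	int ret = strtobool_demo(buf, &bool_result);

	if (ret)
		return ret;	/* bail out before reading bool_result */
	printf("feature %s\n", bool_result ? "enabled" : "disabled");
	return 0;
}

int main(void)
{
	early_param_demo("1");
	early_param_demo("bogus");	/* parse fails, value never read */
	return 0;
}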

Signed-off-by: Mateusz Nosek 
---
 mm/page_alloc.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6b699d273d6e..112e5a63f9ca 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -155,16 +155,16 @@ static int __init early_init_on_alloc(char *buf)
int ret;
bool bool_result;
 
-   if (!buf)
-   return -EINVAL;
ret = kstrtobool(buf, &bool_result);
+   if (ret)
+   return ret;
if (bool_result && page_poisoning_enabled())
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take 
precedence over init_on_alloc\n");
if (bool_result)
static_branch_enable(&init_on_alloc);
else
static_branch_disable(&init_on_alloc);
-   return ret;
+   return 0;
 }
 early_param("init_on_alloc", early_init_on_alloc);
 
@@ -173,16 +173,16 @@ static int __init early_init_on_free(char *buf)
int ret;
bool bool_result;
 
-   if (!buf)
-   return -EINVAL;
ret = kstrtobool(buf, &bool_result);
+   if (ret)
+   return ret;
if (bool_result && page_poisoning_enabled())
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take 
precedence over init_on_free\n");
if (bool_result)
static_branch_enable(&init_on_free);
else
static_branch_disable(&init_on_free);
-   return ret;
+   return 0;
 }
 early_param("init_on_free", early_init_on_free);
 
-- 
2.20.1



[PATCH] include/linux/compaction.h: clean code by removing unused enum value

2020-09-17 Thread mateusznosek0
From: Mateusz Nosek 

The enum value 'COMPACT_INACTIVE' is never used, so it can be removed.

Signed-off-by: Mateusz Nosek 
---
 include/linux/compaction.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 25a521d299c1..1de5a1151ee7 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -29,9 +29,6 @@ enum compact_result {
/* compaction didn't start as it was deferred due to past failures */
COMPACT_DEFERRED,
 
-   /* compaction not active last round */
-   COMPACT_INACTIVE = COMPACT_DEFERRED,
-
/* For more detailed tracepoint output - internal to compaction */
COMPACT_NO_SUITABLE_PAGE,
/* compaction should continue to another pageblock */
-- 
2.20.1



[PATCH] mmzone: clean code by removing unused macro parameter

2020-09-17 Thread mateusznosek0
From: Mateusz Nosek 

Previously the parameter 'zlist' of the
'for_next_zone_zonelist_nodemask()' macro was unused, so this patch
removes it.

Signed-off-by: Mateusz Nosek 
---
 include/linux/mmzone.h | 2 +-
 mm/page_alloc.c        | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 90721f3156bc..7e0ea3fe95ca 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1120,7 +1120,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
z = next_zones_zonelist(++z, highidx, nodemask),\
zone = zonelist_zone(z))
 
-#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
+#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
for (zone = z->zone;\
zone;   \
z = next_zones_zonelist(++z, highidx, nodemask),\
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 60a0e94645a6..6b1b4a331792 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3727,8 +3727,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 */
no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
z = ac->preferred_zoneref;
-   for_next_zone_zonelist_nodemask(zone, z, ac->zonelist,
-   ac->highest_zoneidx, ac->nodemask) {
+   for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
+   ac->nodemask) {
struct page *page;
unsigned long mark;
 
-- 
2.20.1



[PATCH] mm/slab.c: clean code by removing redundant if condition

2020-09-15 Thread mateusznosek0
From: Mateusz Nosek 

The removed code was unnecessary and changed nothing in the flow: if
'kmem_cache_alloc_node()' returns NULL, then 'freelist' is NULL, and
returning 'freelist' at the end of the function is the same as returning
NULL early.
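
A minimal userspace check of that equivalence (alloc_stub() models
kmem_cache_alloc_node()):

#include <assert.h>
#include <stddef.h>

static void *alloc_stub(int fail)
{
	return fail ? NULL : (void *)1;
}

static void *old_form(int off_slab, int fail)
{
	void *freelist = NULL;

	if (off_slab) {
		freelist = alloc_stub(fail);
		if (!freelist)
			return NULL;	/* the removed branch */
	}
	return freelist;
}

static void *new_form(int off_slab, int fail)
{
	void *freelist = NULL;

	if (off_slab)
		freelist = alloc_stub(fail);
	return freelist;
}

int main(void)
{
	int o, f;

	for (o = 0; o < 2; o++)
		for (f = 0; f < 2; f++)
			assert(old_form(o, f) == new_form(o, f));
	return 0;
}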

Signed-off-by: Mateusz Nosek 
---
 mm/slab.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 3160dff6fd76..8f2fd224c1f0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2301,8 +2301,6 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
/* Slab management obj is off-slab. */
freelist = kmem_cache_alloc_node(cachep->freelist_cache,
  local_flags, nodeid);
-   if (!freelist)
-   return NULL;
} else {
/* We will use last bytes at the slab for freelist */
freelist = addr + (PAGE_SIZE << cachep->gfporder) -
-- 
2.20.1