Currently, if z3fold cannot find an unbuddied page, it first tries to
pull a page off the stale list. The problem with this approach is that
we cannot guarantee that such a page is not being processed by the
workqueue thread at the same time, unless we run cancel_work_sync() on
it, which we cannot do in an atomic context. So limit the use of the
stale list to non-atomic contexts only.

Signed-off-by: Vitaly Vul <vitaly....@sony.com>
---
 mm/z3fold.c | 35 +++++++++++++++++++----------------
 1 file changed, 19 insertions(+), 16 deletions(-)
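
For context, below is a condensed sketch of the allocation path this
patch produces, pulled into a hypothetical helper
get_stale_or_new_page() (not a function in mm/z3fold.c). Deriving
can_sleep from gfpflags_allow_blocking(gfp) is an assumption about the
surrounding z3fold_alloc() code, which this hunk does not show; struct
z3fold_pool and struct z3fold_header are the internal types defined in
mm/z3fold.c.

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>		/* virt_to_page() */
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* struct z3fold_pool and struct z3fold_header as defined in mm/z3fold.c */

static struct page *get_stale_or_new_page(struct z3fold_pool *pool, gfp_t gfp)
{
	/* Assumed: the caller derives can_sleep from the gfp mask. */
	bool can_sleep = gfpflags_allow_blocking(gfp);
	struct z3fold_header *zhdr;
	struct page *page = NULL;

	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			/* May sleep; only reached when blocking is allowed. */
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	/* Atomic context, or the stale list was empty: allocate fresh. */
	if (!page)
		page = alloc_page(gfp);
	return page;
}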

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 39e1912..9b0d112 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -620,24 +620,27 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                bud = FIRST;
        }
 
-       spin_lock(&pool->stale_lock);
-       zhdr = list_first_entry_or_null(&pool->stale,
-                                       struct z3fold_header, buddy);
-       /*
-        * Before allocating a page, let's see if we can take one from the
-        * stale pages list. cancel_work_sync() can sleep so we must make
-        * sure it won't be called in case we're in atomic context.
-        */
-       if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
-               list_del(&zhdr->buddy);
-               spin_unlock(&pool->stale_lock);
-               if (can_sleep)
+       page = NULL;
+       if (can_sleep) {
+               spin_lock(&pool->stale_lock);
+               zhdr = list_first_entry_or_null(&pool->stale,
+                                               struct z3fold_header, buddy);
+               /*
+                * Before allocating a page, let's see if we can take one from
+                * the stale pages list. cancel_work_sync() can sleep so we
+                * limit this case to the contexts where we can sleep
+                */
+               if (zhdr) {
+                       list_del(&zhdr->buddy);
+                       spin_unlock(&pool->stale_lock);
                        cancel_work_sync(&zhdr->work);
-               page = virt_to_page(zhdr);
-       } else {
-               spin_unlock(&pool->stale_lock);
-               page = alloc_page(gfp);
+                       page = virt_to_page(zhdr);
+               } else {
+                       spin_unlock(&pool->stale_lock);
+               }
        }
+       if (!page)
+               page = alloc_page(gfp);
 
        if (!page)
                return -ENOMEM;
--
2.7.4

