Let's cc Uladzislau on vmalloc things?

> How about this?

Well, lol, that's a simple approach to avoiding the problem ;)

> unmap_kernel_range had been an atomic operation, and zsmalloc has used
> it in atomic context in zs_unmap_object.
> However, commit e47110e90584 ("mm/vunmap: add cond_resched() in
> vunmap_pmd_range") made it non-atomic by adding a cond_resched() call.
> This causes zram decompression failures by corrupting the compressed
> buffer in atomic context.
> 
> This patch introduces unmap_kernel_range_atomic, which works only for
> ranges less than PMD_SIZE, to prevent the cond_resched() call.
> 
> ...
>
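For context, the zsmalloc side looks roughly like this (a simplified
sketch of the map/unmap window, not the exact code):

	/* zs_map_object() */
	area = &get_cpu_var(zs_map_area);	/* disables preemption */
	...
	/* zs_unmap_object() */
	__zs_unmap_object(area, pages, off, class->size);
		/* -> unmap_kernel_range(addr, PAGE_SIZE * 2) */
	put_cpu_var(zs_map_area);		/* re-enables preemption */

Everything between get_cpu_var() and put_cpu_var() uses a per-CPU
mapping area and assumes it will not schedule, which is why a
cond_resched() underneath the unmap is a problem.
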
> --- a/include/linux/vmalloc.h
> +++ b/include/linux/vmalloc.h
> @@ -180,6 +180,7 @@ int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
>               struct page **pages);
>  extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
>  extern void unmap_kernel_range(unsigned long addr, unsigned long size);
> +extern void unmap_kernel_range_atomic(unsigned long addr, unsigned long size);
>  static inline void set_vm_flush_reset_perms(void *addr)
>  {
>       struct vm_struct *vm = find_vm_area(addr);
> @@ -200,6 +201,7 @@ unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
>  {
>  }
>  #define unmap_kernel_range unmap_kernel_range_noflush
> +#define unmap_kernel_range_atomic unmap_kernel_range_noflush
>  static inline void set_vm_flush_reset_perms(void *addr)
>  {
>  }
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index d7075ad340aa..714e5425dc45 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -88,6 +88,7 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
>       pmd_t *pmd;
>       unsigned long next;
>       int cleared;
> +     bool check_resched = (end - addr) > PMD_SIZE;
>  
>       pmd = pmd_offset(pud, addr);
>       do {
> @@ -102,8 +103,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
>               if (pmd_none_or_clear_bad(pmd))
>                       continue;
>               vunmap_pte_range(pmd, addr, next, mask);
> -
> -             cond_resched();
> +             if (check_resched)
> +                     cond_resched();
>       } while (pmd++, addr = next, addr != end);
>  }
>  
> @@ -2024,6 +2025,24 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
>       flush_tlb_kernel_range(addr, end);
>  }
>  
> +/**
> + * unmap_kernel_range_atomic - unmap kernel VM area and flush cache and TLB
> + * @addr: start of the VM area to unmap
> + * @size: size of the VM area to unmap
> + *
> + * Similar to unmap_kernel_range(), but safe to call from atomic context.
> + * @size must be less than PMD_SIZE.
> + */
> +void unmap_kernel_range_atomic(unsigned long addr, unsigned long size)
> +{
> +     unsigned long end = addr + size;
> +
> +     flush_cache_vunmap(addr, end);
> +     WARN_ON(size > PMD_SIZE);

WARN_ON_ONCE() would be better here - no point in creating a million
warnings where one would suffice.
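
I.e. (untested):

	WARN_ON_ONCE(size > PMD_SIZE);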

> +     unmap_kernel_range_noflush(addr, size);
> +     flush_tlb_kernel_range(addr, end);
> +}
> +
>  static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
>       struct vmap_area *va, unsigned long flags, const void *caller)
>  {
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 662ee420706f..9decc7634852 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -1154,7 +1154,7 @@ static inline void __zs_unmap_object(struct mapping_area *area,
>  {
>       unsigned long addr = (unsigned long)area->vm_addr;
>  
> -     unmap_kernel_range(addr, PAGE_SIZE * 2);
> +     unmap_kernel_range_atomic(addr, PAGE_SIZE * 2);
>  }

I suppose we could live with it if no better solutions are forthcoming.
