From: Yulei Zhang <yuleixzh...@tencent.com>

Currently dmemfs does not support readonly memory, so change_protection()
is disabled for dmemfs VMAs. Since vma->vm_flags could be changed to a
new value in mprotect_fixup(), we introduce a new vma flag VM_DMEM and
check this flag in mprotect_fixup() to avoid changing vma->vm_flags.

We also check it in vma_to_resize() to disable mremap() for dmemfs VMAs.

Signed-off-by: Chen Zhuo <sagazc...@tencent.com>
Signed-off-by: Yulei Zhang <yuleixzh...@tencent.com>
---
 fs/dmemfs/inode.c  | 2 +-
 include/linux/mm.h | 7 +++++++
 mm/gup.c           | 7 +++++--
 mm/mincore.c       | 8 ++++++--
 mm/mprotect.c      | 5 ++++-
 mm/mremap.c        | 3 +++
 6 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/fs/dmemfs/inode.c b/fs/dmemfs/inode.c
index ab6a492..b165bd3 100644
--- a/fs/dmemfs/inode.c
+++ b/fs/dmemfs/inode.c
@@ -507,7 +507,7 @@ int dmemfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_PFNMAP;
+       vma->vm_flags |= VM_PFNMAP | VM_DMEM | VM_IO;
 
        file_accessed(file);
        vma->vm_ops = &dmemfs_vm_ops;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index db6ae4d..2f3135fe 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -311,6 +311,8 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
 #define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
 #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
 
+#define VM_DMEM                BIT(38)         /* Dmem page VM */
+
 #ifdef CONFIG_ARCH_HAS_PKEYS
 # define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
 # define VM_PKEY_BIT0  VM_HIGH_ARCH_0  /* A protection key is a 4-bit value */
@@ -666,6 +668,11 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
        return vma->vm_flags & VM_ACCESS_FLAGS;
 }
 
+static inline bool vma_is_dmem(struct vm_area_struct *vma)
+{
+       return !!(vma->vm_flags & VM_DMEM);
+}
+
 #ifdef CONFIG_SHMEM
 /*
  * The vma_is_shmem is not inline because it is used only by slow
diff --git a/mm/gup.c b/mm/gup.c
index 47c8197..0ea9071 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -492,8 +492,11 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
                        goto no_page;
        } else if (unlikely(!page)) {
                if (flags & FOLL_DUMP) {
-                       /* Avoid special (like zero) pages in core dumps */
-                       page = ERR_PTR(-EFAULT);
+                       if (vma_is_dmem(vma))
+                               page = ERR_PTR(-EEXIST);
+                       else
+                               /* Avoid special (like zero) pages in core 
dumps */
+                               page = ERR_PTR(-EFAULT);
                        goto out;
                }
 
diff --git a/mm/mincore.c b/mm/mincore.c
index 02db1a8..f8d10e4 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -78,8 +78,12 @@ static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
                pgoff_t pgoff;
 
                pgoff = linear_page_index(vma, addr);
-               for (i = 0; i < nr; i++, pgoff++)
-                       vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
+               for (i = 0; i < nr; i++, pgoff++) {
+                       if (vma_is_dmem(vma))
+                               vec[i] = 1;
+                       else
+                               vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
+               }
        } else {
                for (i = 0; i < nr; i++)
                        vec[i] = 0;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 56c02be..b1650b5 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -236,7 +236,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                 * for all the checks.
                 */
                if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
-                    pmd_none_or_clear_bad_unless_trans_huge(pmd))
+                    pmd_none_or_clear_bad_unless_trans_huge(pmd) && !pmd_special(*pmd))
                        goto next;
 
                /* invoke the mmu notifier if the pmd is populated */
@@ -412,6 +412,9 @@ static int prot_none_test(unsigned long addr, unsigned long next,
                return 0;
        }
 
+       if (vma_is_dmem(vma))
+               return -EINVAL;
+
        /*
         * Do PROT_NONE PFN permission checks here when we can still
         * bail out without undoing a lot of state. This is a rather
diff --git a/mm/mremap.c b/mm/mremap.c
index 138abba..598e681 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -482,6 +482,9 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
        if (!vma || vma->vm_start > addr)
                return ERR_PTR(-EFAULT);
 
+       if (vma_is_dmem(vma))
+               return ERR_PTR(-EINVAL);
+
        /*
         * !old_len is a special case where an attempt is made to 'duplicate'
         * a mapping.  This makes no sense for private mappings as it will
-- 
1.8.3.1

Reply via email to