From: Konstantin Khlebnikov <[email protected]>

Each user gets a private copy of the code, so nobody will be able to exploit
pages shared through the page cache with attacks like Rowhammer [1]. This works
for statically linked binaries. Shared libraries are still vulnerable, but
setting the suid bit on them will protect them too.

[1] http://googleprojectzero.blogspot.com/2015/03/exploiting-dram-rowhammer-bug-to-gain.html
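
As an aside, here is the set-id test the patch applies in do_mmap_pgoff(),
extracted into a stand-alone userspace helper for illustration (a sketch only;
is_setid_binary() is a made-up name, not part of the patch). SGID without
group-execute marks mandatory locking rather than a set-gid executable, hence
the extra S_IXGRP check:

#include <stdbool.h>
#include <sys/stat.h>

/*
 * Mirrors the condition added to do_mmap_pgoff(): treat a file as a
 * set-id binary if it is SUID, or SGID with group-execute set
 * (SGID without group-execute means mandatory locking, not set-id).
 */
static bool is_setid_binary(mode_t mode)
{
	return (mode & S_ISUID) ||
	       ((mode & S_ISGID) && (mode & S_IXGRP));
}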

Signed-off-by: Konstantin Khlebnikov <[email protected]>
---
 include/linux/mm.h |    1 +
 mm/memory.c        |    4 ++--
 mm/mmap.c          |   11 +++++++++++
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 47a9392..25edb4a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -123,6 +123,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MAYSHARE    0x00000080
 
 #define VM_GROWSDOWN   0x00000100      /* general info on the segment */
+#define VM_COR         0x00000200      /* copy-on-read */
 #define VM_PFNMAP      0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE   0x00000800      /* ETXTBSY on write attempts.. */
 
diff --git a/mm/memory.c b/mm/memory.c
index 411144f..a3c1064 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2904,7 +2904,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                }
                goto uncharge_out;
        }
-       do_set_pte(vma, address, new_page, pte, true, true);
+       do_set_pte(vma, address, new_page, pte, vma->vm_flags & VM_WRITE, true);
        mem_cgroup_commit_charge(new_page, memcg, false);
        lru_cache_add_active_or_unevictable(new_page, vma);
        pte_unmap_unlock(pte, ptl);
@@ -3002,7 +3002,7 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
        pte_unmap(page_table);
-       if (!(flags & FAULT_FLAG_WRITE))
+       if (!(flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_COR))
                return do_read_fault(mm, vma, address, pmd, pgoff, flags,
                                orig_pte);
        if (!(vma->vm_flags & VM_SHARED))
diff --git a/mm/mmap.c b/mm/mmap.c
index da9990a..a91dd2b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1354,6 +1354,17 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                default:
                        return -EINVAL;
                }
+
+               /*
+                * Read-only SUID/SGID binaries are mapped as copy-on-read;
+                * this protects them against exploitation via Rowhammer.
+                */
+               if (!(file->f_mode & FMODE_WRITE) &&
+                   ((inode->i_mode & S_ISUID) || ((inode->i_mode & S_ISGID) &&
+                           (inode->i_mode & S_IXGRP)))) {
+                       vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
+                       vm_flags |= VM_COR;
+               }
        } else {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:

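A possible way to verify the behaviour from userspace (a hypothetical test,
not part of this patch): map the same set-uid binary twice, fault both
mappings in, and compare the physical frames reported by /proc/self/pagemap
(run as root so real PFNs are exposed). Without VM_COR both mappings resolve
to the same page-cache page; with it each read fault should populate a
private copy:

/* cor-test.c: hypothetical check, e.g. ./cor-test /bin/su (as root) */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static uint64_t pfn_of(int pagemap, volatile char *p)
{
	uint64_t entry;
	off_t off = (uintptr_t)p / sysconf(_SC_PAGESIZE) * sizeof(entry);

	(void)*p;	/* fault the page in */
	if (pread(pagemap, &entry, sizeof(entry), off) != sizeof(entry))
		return 0;
	return entry & ((1ULL << 55) - 1);	/* bits 0-54: PFN */
}

int main(int argc, char **argv)
{
	int fd, pagemap;
	char *a, *b;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	pagemap = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0 || pagemap < 0)
		return 1;
	/* map one page of the file twice (assumes 4K pages) */
	a = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	b = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	if (a == MAP_FAILED || b == MAP_FAILED)
		return 1;
	printf("pfn a=%llx pfn b=%llx -> %s\n",
	       (unsigned long long)pfn_of(pagemap, a),
	       (unsigned long long)pfn_of(pagemap, b),
	       pfn_of(pagemap, a) == pfn_of(pagemap, b) ?
			"shared page cache" : "private copies (VM_COR)");
	return 0;
}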