> 2.2 series of kernels, since they're apparently vulnerable too?
 You can find the patch on bugtraq/isec/etc.; attached is a peek at it.
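
 In short: on the move path sys_mremap() never checked do_munmap()'s
 return value, so a failed unmap could leave the old and the new mapping
 alive over the same page tables. The patch switches move_one_page() to
 copy semantics (copy_one_page() no longer clears the source pte) and
 tears the new vma down again when do_munmap() fails. Purely as an
 illustration, here is a minimal userspace sketch of that
 copy-then-unmap-with-rollback pattern (all names are made up, this is
 not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for a mapping. */
struct region {
        char *data;     /* backing store, NULL when unmapped */
        size_t len;
};

/* Pretend-unmap that can fail, the way do_munmap() can. */
static int unmap_region(struct region *r, int fail)
{
        if (fail)
                return -1;
        free(r->data);
        r->data = NULL;
        return 0;
}

/* Move src -> dst as copy + unmap; undo the copy if the unmap fails. */
static int move_region(struct region *src, struct region *dst, int fail)
{
        dst->data = malloc(src->len);
        if (!dst->data)
                return -1;
        memcpy(dst->data, src->data, src->len);
        dst->len = src->len;

        if (unmap_region(src, fail)) {
                /* Without this rollback - the unchecked-return bug -
                 * src and dst would both stay live over one object. */
                free(dst->data);
                dst->data = NULL;
                return -1;
        }
        return 0;
}

int main(void)
{
        struct region src = { malloc(8), 8 };
        struct region dst = { NULL, 0 };

        if (!src.data)
                return 1;
        memcpy(src.data, "payload", 8);
        if (move_region(&src, &dst, 1))
                fprintf(stderr, "move failed, src still intact: %s\n", src.data);
        free(src.data);
        return 0;
}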

-- 
Dariush Pietrzak,
Key fingerprint = 40D0 9FFB 9939 7320 8294  05E0 BCC7 02C4 75CC 50D9
--- linux/mm/mremap.c.security  Sun Mar 25 20:31:03 2001
+++ linux/mm/mremap.c   Thu Feb 19 05:10:34 2004
@@ -9,6 +9,7 @@
 #include <linux/shm.h>
 #include <linux/mman.h>
 #include <linux/swap.h>
+#include <linux/file.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -25,7 +26,7 @@
        if (pgd_none(*pgd))
                goto end;
        if (pgd_bad(*pgd)) {
-               printk("move_one_page: bad source pgd (%08lx)\n", pgd_val(*pgd));
+               printk("copy_one_page: bad source pgd (%08lx)\n", pgd_val(*pgd));
                pgd_clear(pgd);
                goto end;
        }
@@ -34,7 +35,7 @@
        if (pmd_none(*pmd))
                goto end;
        if (pmd_bad(*pmd)) {
-               printk("move_one_page: bad source pmd (%08lx)\n", pmd_val(*pmd));
+               printk("copy_one_page: bad source pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                goto end;
        }
@@ -57,34 +58,22 @@
        return pte;
 }
 
-static inline int copy_one_pte(pte_t * src, pte_t * dst)
+static int copy_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
 {
-       int error = 0;
-       pte_t pte = *src;
+       pte_t * src, * dst;
 
-       if (!pte_none(pte)) {
-               error++;
-               if (dst) {
-                       pte_clear(src);
-                       set_pte(dst, pte);
-                       error--;
+       src = get_one_pte(mm, old_addr);
+       if (src && !pte_none(*src)) {
+               if ((dst = alloc_one_pte(mm, new_addr))) {
+                       set_pte(dst, *src);
+                       return 0;
                }
+               return 1;
        }
-       return error;
-}
-
-static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
-{
-       int error = 0;
-       pte_t * src;
-
-       src = get_one_pte(mm, old_addr);
-       if (src)
-               error = copy_one_pte(src, alloc_one_pte(mm, new_addr));
-       return error;
+       return 0;
 }
 
-static int move_page_tables(struct mm_struct * mm,
+static int copy_page_tables(struct mm_struct * mm,
        unsigned long new_addr, unsigned long old_addr, unsigned long len)
 {
        unsigned long offset = len;
@@ -99,7 +88,7 @@
         */
        while (offset) {
                offset -= PAGE_SIZE;
-               if (move_one_page(mm, old_addr + offset, new_addr + offset))
+               if (copy_one_page(mm, old_addr + offset, new_addr + offset))
                        goto oops_we_failed;
        }
        return 0;
@@ -113,8 +102,6 @@
         */
 oops_we_failed:
        flush_cache_range(mm, new_addr, new_addr + len);
-       while ((offset += PAGE_SIZE) < len)
-               move_one_page(mm, new_addr + offset, old_addr + offset);
        zap_page_range(mm, new_addr, len);
        flush_tlb_range(mm, new_addr, new_addr + len);
        return -1;
@@ -129,7 +116,9 @@
        if (new_vma) {
                unsigned long new_addr = get_unmapped_area(addr, new_len);
 
-               if (new_addr && !move_page_tables(current->mm, new_addr, addr, old_len)) {
+               if (new_addr && !copy_page_tables(current->mm, new_addr, addr, old_len)) {
+                       unsigned long ret;
+
                        *new_vma = *vma;
                        new_vma->vm_start = new_addr;
                        new_vma->vm_end = new_addr+new_len;
@@ -138,9 +127,19 @@
                                new_vma->vm_file->f_count++;
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
+                       if ((ret = do_munmap(addr, old_len))) {
+                               if (new_vma->vm_ops && new_vma->vm_ops->close)
+                                       new_vma->vm_ops->close(new_vma);
+                               if (new_vma->vm_file)
+                                       fput(new_vma->vm_file);
+                               flush_cache_range(current->mm, new_addr, new_addr + old_len);
+                               zap_page_range(current->mm, new_addr, old_len);
+                               flush_tlb_range(current->mm, new_addr, new_addr + old_len);
+                               kmem_cache_free(vm_area_cachep, new_vma);
+                               return ret;
+                       }
                        insert_vm_struct(current->mm, new_vma);
                        merge_segments(current->mm, new_vma->vm_start, new_vma->vm_end);
-                       do_munmap(addr, old_len);
                        current->mm->total_vm += new_len >> PAGE_SHIFT;
                        if (new_vma->vm_flags & VM_LOCKED) {
                                current->mm->locked_vm += new_len >> PAGE_SHIFT;
@@ -176,9 +175,9 @@
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         */
-       ret = addr;
        if (old_len >= new_len) {
-               do_munmap(addr+new_len, old_len - new_len);
+               if (!(ret = do_munmap(addr+new_len, old_len - new_len)))
+                       ret = addr;
                goto out;
        }
 
