From: Paolo 'Blaisorblade' Giarrusso <[EMAIL PROTECTED]>

This adapts the changes made to the i386 page-fault handler to the UML one.
By itself it is not enough to make UML work, because UML has some
peculiarities of its own; subsequent patches fix those.
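
In short: as on i386, the fault path stops taking a bare is_write flag and
instead gets an explicit access mask built from VM_READ/VM_WRITE/VM_EXEC
bits, and it can now return VM_FAULT_SIGSEGV. A minimal sketch of the
interface change (illustration only; __handle_mm_fault(), VM_FAULT_SIGSEGV
and VM_NONUNIFORM are introduced by earlier patches in this series):

	/* Old call: a boolean write flag. */
	ret = handle_mm_fault(mm, vma, address, is_write);

	/* New call: an explicit access mask; __handle_mm_fault() may now
	 * also return VM_FAULT_SIGSEGV on a protection fault. */
	access_mask = is_write ? VM_WRITE : 0;
	ret = __handle_mm_fault(mm, vma, address, access_mask);

The VM_NONUNIFORM reclassification on the prot_bad path is sketched after
the patch.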

Signed-off-by: Paolo 'Blaisorblade' Giarrusso <[EMAIL PROTECTED]>
---

 linux-2.6.git-paolo/arch/um/kernel/trap_kern.c |   32 +++++++++++++++++++++----
 1 file changed, 27 insertions(+), 5 deletions(-)

diff -puN arch/um/kernel/trap_kern.c~rfp-fault-sigsegv-2-uml arch/um/kernel/trap_kern.c
--- linux-2.6.git/arch/um/kernel/trap_kern.c~rfp-fault-sigsegv-2-uml	2005-08-11 23:09:32.000000000 +0200
+++ linux-2.6.git-paolo/arch/um/kernel/trap_kern.c	2005-08-11 23:09:32.000000000 +0200
@@ -37,6 +37,7 @@ int handle_page_fault(unsigned long addr
        pmd_t *pmd;
        pte_t *pte;
        int err = -EFAULT;
+       int access_mask = 0;
 
        *code_out = SEGV_MAPERR;
        down_read(&mm->mmap_sem);
@@ -55,14 +56,15 @@ int handle_page_fault(unsigned long addr
 good_area:
        *code_out = SEGV_ACCERR;
        if(is_write && !(vma->vm_flags & VM_WRITE)) 
-               goto out;
+               goto prot_bad;
 
         if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
-                goto out;
+                goto prot_bad;
 
+       access_mask = is_write ? VM_WRITE : 0;
        do {
-survive:
-               switch (handle_mm_fault(mm, vma, address, is_write)){
+handle_fault:
+               switch (__handle_mm_fault(mm, vma, address, access_mask)) {
                case VM_FAULT_MINOR:
                        current->min_flt++;
                        break;
@@ -72,6 +74,9 @@ survive:
                case VM_FAULT_SIGBUS:
                        err = -EACCES;
                        goto out;
+               case VM_FAULT_SIGSEGV:
+                       err = -EFAULT;
+                       goto out;
                case VM_FAULT_OOM:
                        err = -ENOMEM;
                        goto out_of_memory;
@@ -87,10 +92,27 @@ survive:
        *pte = pte_mkyoung(*pte);
        if(pte_write(*pte)) *pte = pte_mkdirty(*pte);
        flush_tlb_page(vma, address);
+
+       /* If the PTE is not present, the vma protections are not accurate
+        * when VM_NONUNIFORM is set; present PTEs are correct for
+        * VM_NONUNIFORM and have already been handled above. */
 out:
        up_read(&mm->mmap_sem);
        return(err);
 
+prot_bad:
+       if (unlikely(vma->vm_flags & VM_NONUNIFORM)) {
+               access_mask = is_write ? VM_WRITE : 0;
+               /* Without this, a legitimate read fault on a page mapped
+                * exec-only would fail. It would be cleaner to lower the
+                * requirements and always test just pte_read/write/exec
+                * on vma->vm_page_prot; the current way is cumbersome,
+                * but for now it works for UML. */
+               access_mask |= vma->vm_flags & VM_EXEC ? VM_EXEC : VM_READ;
+               goto handle_fault;
+       }
+       goto out;
+
 /*
  * We ran out of memory, or some other thing happened to us that made
  * us unable to handle the page fault gracefully.
@@ -100,7 +122,7 @@ out_of_memory:
                up_read(&mm->mmap_sem);
                yield();
                down_read(&mm->mmap_sem);
-               goto survive;
+               goto handle_fault;
        }
        goto out;
 }
_
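
To make the prot_bad retry easier to follow: for a VM_NONUNIFORM vma the
vma-level protection bits are not authoritative, so instead of failing the
fault we rebuild the access mask and retry. Here is the reclassification
from the hunk above, pulled out as a helper purely for illustration (this
helper does not exist in the patch):

	static int nonuniform_access_mask(struct vm_area_struct *vma,
					  int is_write)
	{
		int access_mask = is_write ? VM_WRITE : 0;

		/* A read fault on an exec-only mapping must be retried as
		 * an exec access or it would be refused again; otherwise
		 * treat it as a plain read. */
		access_mask |= (vma->vm_flags & VM_EXEC) ? VM_EXEC : VM_READ;
		return access_mask;
	}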