Re: [patch 5/6] mm: merge nopfn into fault
On Wed, Feb 21, 2007 at 05:50:31AM +0100, Nick Piggin wrote: > Remove ->nopfn and reimplement the existing handlers with ->fault > > Signed-off-by: Nick Piggin <[EMAIL PROTECTED]> Dang, forgot to quilt refresh after fixing spufs compile. -- Remove ->nopfn and reimplement the existing handlers with ->fault Signed-off-by: Nick Piggin <[EMAIL PROTECTED]> arch/powerpc/platforms/cell/spufs/file.c | 90 --- drivers/char/mspec.c | 29 ++--- include/linux/mm.h |8 -- mm/memory.c | 58 +-- 4 files changed, 71 insertions(+), 114 deletions(-) Index: linux-2.6/drivers/char/mspec.c === --- linux-2.6.orig/drivers/char/mspec.c +++ linux-2.6/drivers/char/mspec.c @@ -182,24 +182,25 @@ mspec_close(struct vm_area_struct *vma) /* - * mspec_nopfn + * mspec_fault * * Creates a mspec page and maps it to user space. */ -static unsigned long -mspec_nopfn(struct vm_area_struct *vma, unsigned long address) +static struct page * +mspec_fault(struct fault_data *fdata) { unsigned long paddr, maddr; unsigned long pfn; - int index; - struct vma_data *vdata = vma->vm_private_data; + int index = fdata->pgoff; + struct vma_data *vdata = fdata->vma->vm_private_data; - index = (address - vma->vm_start) >> PAGE_SHIFT; maddr = (volatile unsigned long) vdata->maddr[index]; if (maddr == 0) { maddr = uncached_alloc_page(numa_node_id()); - if (maddr == 0) - return NOPFN_OOM; + if (maddr == 0) { + fdata->type = VM_FAULT_OOM; + return NULL; + } spin_lock(&vdata->lock); if (vdata->maddr[index] == 0) { @@ -219,13 +220,21 @@ mspec_nopfn(struct vm_area_struct *vma, pfn = paddr >> PAGE_SHIFT; - return pfn; + fdata->type = VM_FAULT_MINOR; + /* +* vm_insert_pfn can fail with -EBUSY, but in that case it will +* be because another thread has installed the pte first, so it +* is no problem.
+*/ + vm_insert_pfn(fdata->vma, fdata->address, pfn); + + return NULL; } static struct vm_operations_struct mspec_vm_ops = { .open = mspec_open, .close = mspec_close, - .nopfn = mspec_nopfn + .fault = mspec_fault, }; /* Index: linux-2.6/include/linux/mm.h === --- linux-2.6.orig/include/linux/mm.h +++ linux-2.6/include/linux/mm.h @@ -230,7 +230,6 @@ struct vm_operations_struct { void (*close)(struct vm_area_struct * area); struct page * (*fault)(struct vm_area_struct *vma, struct fault_data * fdata); struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type); - unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address); int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock); /* notification that a previously read-only page is about to become @@ -660,13 +659,6 @@ static inline int page_mapped(struct pag #define NOPAGE_OOM ((struct page *) (-1)) /* - * Error return values for the *_nopfn functions - */ -#define NOPFN_SIGBUS ((unsigned long) -1) -#define NOPFN_OOM ((unsigned long) -2) -#define NOPFN_REFAULT ((unsigned long) -3) - -/* * Different kinds of faults, as returned by handle_mm_fault(). * Used to decide whether a process gets delivered SIGBUS or * just gets major/minor fault counters bumped up. Index: linux-2.6/mm/memory.c === --- linux-2.6.orig/mm/memory.c +++ linux-2.6/mm/memory.c @@ -1288,6 +1288,11 @@ EXPORT_SYMBOL(vm_insert_page); * * This function should only be called from a vm_ops->fault handler, and * in that case the handler should return NULL. + * + * vma cannot be a COW mapping. + * + * As this is called only for pages that do not currently exist, we + * do not need to flush old virtual caches or the TLB. 
*/ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) @@ -2343,56 +2348,6 @@ static int do_nonlinear_fault(struct mm_ } /* - * do_no_pfn() tries to create a new page mapping for a page without - * a struct_page backing it - * - * As this is called only for pages that do not currently exist, we - * do not need to flush old virtual caches or the TLB. - * - * We enter with non-exclusive mmap_sem (to exclude vma changes, - * but allow concurrent faults), and pte mapped but not yet locked. - * We return with mmap_sem still held, but pte unmapped and unlocked. - * - * It is expected that the ->nopfn handler always returns the same pfn - * for a
[patch 5/6] mm: merge nopfn into fault
Remove ->nopfn and reimplement the existing handlers with ->fault Signed-off-by: Nick Piggin <[EMAIL PROTECTED]> arch/powerpc/platforms/cell/spufs/file.c | 90 --- drivers/char/mspec.c | 29 ++--- include/linux/mm.h |8 -- mm/memory.c | 58 +-- 4 files changed, 71 insertions(+), 114 deletions(-) Index: linux-2.6/drivers/char/mspec.c === --- linux-2.6.orig/drivers/char/mspec.c +++ linux-2.6/drivers/char/mspec.c @@ -182,24 +182,25 @@ mspec_close(struct vm_area_struct *vma) /* - * mspec_nopfn + * mspec_fault * * Creates a mspec page and maps it to user space. */ -static unsigned long -mspec_nopfn(struct vm_area_struct *vma, unsigned long address) +static struct page * +mspec_fault(struct fault_data *fdata) { unsigned long paddr, maddr; unsigned long pfn; - int index; - struct vma_data *vdata = vma->vm_private_data; + int index = fdata->pgoff; + struct vma_data *vdata = fdata->vma->vm_private_data; - index = (address - vma->vm_start) >> PAGE_SHIFT; maddr = (volatile unsigned long) vdata->maddr[index]; if (maddr == 0) { maddr = uncached_alloc_page(numa_node_id()); - if (maddr == 0) - return NOPFN_OOM; + if (maddr == 0) { + fdata->type = VM_FAULT_OOM; + return NULL; + } spin_lock(&vdata->lock); if (vdata->maddr[index] == 0) { @@ -219,13 +220,21 @@ mspec_nopfn(struct vm_area_struct *vma, pfn = paddr >> PAGE_SHIFT; - return pfn; + fdata->type = VM_FAULT_MINOR; + /* +* vm_insert_pfn can fail with -EBUSY, but in that case it will +* be because another thread has installed the pte first, so it +* is no problem.
+*/ + vm_insert_pfn(fdata->vma, fdata->address, pfn); + + return NULL; } static struct vm_operations_struct mspec_vm_ops = { .open = mspec_open, .close = mspec_close, - .nopfn = mspec_nopfn + .fault = mspec_fault, }; /* Index: linux-2.6/include/linux/mm.h === --- linux-2.6.orig/include/linux/mm.h +++ linux-2.6/include/linux/mm.h @@ -230,7 +230,6 @@ struct vm_operations_struct { void (*close)(struct vm_area_struct * area); struct page * (*fault)(struct vm_area_struct *vma, struct fault_data * fdata); struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type); - unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address); int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock); /* notification that a previously read-only page is about to become @@ -660,13 +659,6 @@ static inline int page_mapped(struct pag #define NOPAGE_OOM ((struct page *) (-1)) /* - * Error return values for the *_nopfn functions - */ -#define NOPFN_SIGBUS ((unsigned long) -1) -#define NOPFN_OOM ((unsigned long) -2) -#define NOPFN_REFAULT ((unsigned long) -3) - -/* * Different kinds of faults, as returned by handle_mm_fault(). * Used to decide whether a process gets delivered SIGBUS or * just gets major/minor fault counters bumped up. Index: linux-2.6/mm/memory.c === --- linux-2.6.orig/mm/memory.c +++ linux-2.6/mm/memory.c @@ -1288,6 +1288,11 @@ EXPORT_SYMBOL(vm_insert_page); * * This function should only be called from a vm_ops->fault handler, and * in that case the handler should return NULL. + * + * vma cannot be a COW mapping. + * + * As this is called only for pages that do not currently exist, we + * do not need to flush old virtual caches or the TLB. 
*/ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) @@ -2343,56 +2348,6 @@ static int do_nonlinear_fault(struct mm_ } /* - * do_no_pfn() tries to create a new page mapping for a page without - * a struct_page backing it - * - * As this is called only for pages that do not currently exist, we - * do not need to flush old virtual caches or the TLB. - * - * We enter with non-exclusive mmap_sem (to exclude vma changes, - * but allow concurrent faults), and pte mapped but not yet locked. - * We return with mmap_sem still held, but pte unmapped and unlocked. - * - * It is expected that the ->nopfn handler always returns the same pfn - * for a given virtual mapping. - * - * Mark this `noinline' to prevent it from bloating the main pagefault code. - */ -static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma, -unsigned long address, pte_t
[patch 5/6] mm: merge nopfn into fault
Remove ->nopfn and reimplement the existing handlers with ->fault Signed-off-by: Nick Piggin <[EMAIL PROTECTED]> arch/powerpc/platforms/cell/spufs/file.c | 90 --- drivers/char/mspec.c | 29 ++--- include/linux/mm.h |8 -- mm/memory.c | 58 +-- 4 files changed, 71 insertions(+), 114 deletions(-) Index: linux-2.6/drivers/char/mspec.c === --- linux-2.6.orig/drivers/char/mspec.c +++ linux-2.6/drivers/char/mspec.c @@ -182,24 +182,25 @@ mspec_close(struct vm_area_struct *vma) /* - * mspec_nopfn + * mspec_fault * * Creates a mspec page and maps it to user space. */ -static unsigned long -mspec_nopfn(struct vm_area_struct *vma, unsigned long address) +static struct page * +mspec_fault(struct fault_data *fdata) { unsigned long paddr, maddr; unsigned long pfn; - int index; - struct vma_data *vdata = vma->vm_private_data; + int index = fdata->pgoff; + struct vma_data *vdata = fdata->vma->vm_private_data; - index = (address - vma->vm_start) >> PAGE_SHIFT; maddr = (volatile unsigned long) vdata->maddr[index]; if (maddr == 0) { maddr = uncached_alloc_page(numa_node_id()); - if (maddr == 0) - return NOPFN_OOM; + if (maddr == 0) { + fdata->type = VM_FAULT_OOM; + return NULL; + } spin_lock(&vdata->lock); if (vdata->maddr[index] == 0) { @@ -219,13 +220,21 @@ mspec_nopfn(struct vm_area_struct *vma, pfn = paddr >> PAGE_SHIFT; - return pfn; + fdata->type = VM_FAULT_MINOR; + /* +* vm_insert_pfn can fail with -EBUSY, but in that case it will +* be because another thread has installed the pte first, so it +* is no problem.
+*/ + vm_insert_pfn(fdata-vma, fdata-address, pfn); + + return NULL; } static struct vm_operations_struct mspec_vm_ops = { .open = mspec_open, .close = mspec_close, - .nopfn = mspec_nopfn + .fault = mspec_fault, }; /* Index: linux-2.6/include/linux/mm.h === --- linux-2.6.orig/include/linux/mm.h +++ linux-2.6/include/linux/mm.h @@ -230,7 +230,6 @@ struct vm_operations_struct { void (*close)(struct vm_area_struct * area); struct page * (*fault)(struct vm_area_struct *vma, struct fault_data * fdata); struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type); - unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address); int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock); /* notification that a previously read-only page is about to become @@ -660,13 +659,6 @@ static inline int page_mapped(struct pag #define NOPAGE_OOM ((struct page *) (-1)) /* - * Error return values for the *_nopfn functions - */ -#define NOPFN_SIGBUS ((unsigned long) -1) -#define NOPFN_OOM ((unsigned long) -2) -#define NOPFN_REFAULT ((unsigned long) -3) - -/* * Different kinds of faults, as returned by handle_mm_fault(). * Used to decide whether a process gets delivered SIGBUS or * just gets major/minor fault counters bumped up. Index: linux-2.6/mm/memory.c === --- linux-2.6.orig/mm/memory.c +++ linux-2.6/mm/memory.c @@ -1288,6 +1288,11 @@ EXPORT_SYMBOL(vm_insert_page); * * This function should only be called from a vm_ops-fault handler, and * in that case the handler should return NULL. + * + * vma cannot be a COW mapping. + * + * As this is called only for pages that do not currently exist, we + * do not need to flush old virtual caches or the TLB. 
*/ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) @@ -2343,56 +2348,6 @@ static int do_nonlinear_fault(struct mm_ } /* - * do_no_pfn() tries to create a new page mapping for a page without - * a struct_page backing it - * - * As this is called only for pages that do not currently exist, we - * do not need to flush old virtual caches or the TLB. - * - * We enter with non-exclusive mmap_sem (to exclude vma changes, - * but allow concurrent faults), and pte mapped but not yet locked. - * We return with mmap_sem still held, but pte unmapped and unlocked. - * - * It is expected that the -nopfn handler always returns the same pfn - * for a given virtual mapping. - * - * Mark this `noinline' to prevent it from bloating the main pagefault code. - */ -static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma, -unsigned long address, pte_t *page_table, pmd_t
Re: [patch 5/6] mm: merge nopfn into fault
On Wed, Feb 21, 2007 at 05:50:31AM +0100, Nick Piggin wrote: > Remove ->nopfn and reimplement the existing handlers with ->fault > > Signed-off-by: Nick Piggin <[EMAIL PROTECTED]> Dang, forgot to quilt refresh after fixing spufs compile. -- Remove ->nopfn and reimplement the existing handlers with ->fault Signed-off-by: Nick Piggin <[EMAIL PROTECTED]> arch/powerpc/platforms/cell/spufs/file.c | 90 --- drivers/char/mspec.c | 29 ++--- include/linux/mm.h |8 -- mm/memory.c | 58 +-- 4 files changed, 71 insertions(+), 114 deletions(-) Index: linux-2.6/drivers/char/mspec.c === --- linux-2.6.orig/drivers/char/mspec.c +++ linux-2.6/drivers/char/mspec.c @@ -182,24 +182,25 @@ mspec_close(struct vm_area_struct *vma) /* - * mspec_nopfn + * mspec_fault * * Creates a mspec page and maps it to user space. */ -static unsigned long -mspec_nopfn(struct vm_area_struct *vma, unsigned long address) +static struct page * +mspec_fault(struct fault_data *fdata) { unsigned long paddr, maddr; unsigned long pfn; - int index; - struct vma_data *vdata = vma->vm_private_data; + int index = fdata->pgoff; + struct vma_data *vdata = fdata->vma->vm_private_data; - index = (address - vma->vm_start) >> PAGE_SHIFT; maddr = (volatile unsigned long) vdata->maddr[index]; if (maddr == 0) { maddr = uncached_alloc_page(numa_node_id()); - if (maddr == 0) - return NOPFN_OOM; + if (maddr == 0) { + fdata->type = VM_FAULT_OOM; + return NULL; + } spin_lock(&vdata->lock); if (vdata->maddr[index] == 0) { @@ -219,13 +220,21 @@ mspec_nopfn(struct vm_area_struct *vma, pfn = paddr >> PAGE_SHIFT; - return pfn; + fdata->type = VM_FAULT_MINOR; + /* +* vm_insert_pfn can fail with -EBUSY, but in that case it will +* be because another thread has installed the pte first, so it +* is no problem.
+*/ + vm_insert_pfn(fdata-vma, fdata-address, pfn); + + return NULL; } static struct vm_operations_struct mspec_vm_ops = { .open = mspec_open, .close = mspec_close, - .nopfn = mspec_nopfn + .fault = mspec_fault, }; /* Index: linux-2.6/include/linux/mm.h === --- linux-2.6.orig/include/linux/mm.h +++ linux-2.6/include/linux/mm.h @@ -230,7 +230,6 @@ struct vm_operations_struct { void (*close)(struct vm_area_struct * area); struct page * (*fault)(struct vm_area_struct *vma, struct fault_data * fdata); struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type); - unsigned long (*nopfn)(struct vm_area_struct * area, unsigned long address); int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock); /* notification that a previously read-only page is about to become @@ -660,13 +659,6 @@ static inline int page_mapped(struct pag #define NOPAGE_OOM ((struct page *) (-1)) /* - * Error return values for the *_nopfn functions - */ -#define NOPFN_SIGBUS ((unsigned long) -1) -#define NOPFN_OOM ((unsigned long) -2) -#define NOPFN_REFAULT ((unsigned long) -3) - -/* * Different kinds of faults, as returned by handle_mm_fault(). * Used to decide whether a process gets delivered SIGBUS or * just gets major/minor fault counters bumped up. Index: linux-2.6/mm/memory.c === --- linux-2.6.orig/mm/memory.c +++ linux-2.6/mm/memory.c @@ -1288,6 +1288,11 @@ EXPORT_SYMBOL(vm_insert_page); * * This function should only be called from a vm_ops-fault handler, and * in that case the handler should return NULL. + * + * vma cannot be a COW mapping. + * + * As this is called only for pages that do not currently exist, we + * do not need to flush old virtual caches or the TLB. 
*/ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) @@ -2343,56 +2348,6 @@ static int do_nonlinear_fault(struct mm_ } /* - * do_no_pfn() tries to create a new page mapping for a page without - * a struct_page backing it - * - * As this is called only for pages that do not currently exist, we - * do not need to flush old virtual caches or the TLB. - * - * We enter with non-exclusive mmap_sem (to exclude vma changes, - * but allow concurrent faults), and pte mapped but not yet locked. - * We return with mmap_sem still held, but pte unmapped and unlocked. - * - * It is expected that the -nopfn handler always returns the same pfn - * for a given virtual mapping. -