[patch 5/7] mm: add vm_insert_pfn

2007-01-12 Thread Nick Piggin
Add a vm_insert_pfn helper, so that ->fault handlers can have nopfn
functionality by installing their own pte and returning NULL.

Signed-off-by: Nick Piggin <[EMAIL PROTECTED]>

Index: linux-2.6/include/linux/mm.h
===
--- linux-2.6.orig/include/linux/mm.h
+++ linux-2.6/include/linux/mm.h
@@ -1151,6 +1151,7 @@ unsigned long vmalloc_to_pfn(void *addr)
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_pfn(struct vm_area_struct *, unsigned long addr, unsigned long pfn);
 
 struct page *follow_page(struct vm_area_struct *, unsigned long address,
unsigned int foll_flags);
Index: linux-2.6/mm/memory.c
===
--- linux-2.6.orig/mm/memory.c
+++ linux-2.6/mm/memory.c
@@ -1277,6 +1277,50 @@ int vm_insert_page(struct vm_area_struct
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/**
+ * vm_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return NULL.
+ */
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
+{
+   struct mm_struct *mm = vma->vm_mm;
+   int retval;
+   pte_t *pte, entry;
+   spinlock_t *ptl;
+
+   BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+   BUG_ON(is_cow_mapping(vma->vm_flags));
+
+   retval = -ENOMEM;
+   pte = get_locked_pte(mm, addr, &ptl);
+   if (!pte)
+   goto out;
+   retval = -EBUSY;
+   if (!pte_none(*pte))
+   goto out_unlock;
+
+   /* Ok, finally just insert the thing.. */
+   entry = pfn_pte(pfn, vma->vm_page_prot);
+   set_pte_at(mm, addr, pte, entry);
+   update_mmu_cache(vma, addr, entry);
+
+   retval = 0;
+out_unlock:
+   pte_unmap_unlock(pte, ptl);
+
+out:
+   return retval;
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[patch 5/7] mm: add vm_insert_pfn

2007-01-12 Thread Nick Piggin
Add a vm_insert_pfn helper, so that ->fault handlers can have nopfn
functionality by installing their own pte and returning NULL.

Signed-off-by: Nick Piggin <[EMAIL PROTECTED]>

Index: linux-2.6/include/linux/mm.h
===
--- linux-2.6.orig/include/linux/mm.h
+++ linux-2.6/include/linux/mm.h
@@ -1151,6 +1151,7 @@ unsigned long vmalloc_to_pfn(void *addr)
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_pfn(struct vm_area_struct *, unsigned long addr, unsigned long pfn);
 
 struct page *follow_page(struct vm_area_struct *, unsigned long address,
unsigned int foll_flags);
Index: linux-2.6/mm/memory.c
===
--- linux-2.6.orig/mm/memory.c
+++ linux-2.6/mm/memory.c
@@ -1277,6 +1277,50 @@ int vm_insert_page(struct vm_area_struct
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/**
+ * vm_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return NULL.
+ */
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
+{
+   struct mm_struct *mm = vma->vm_mm;
+   int retval;
+   pte_t *pte, entry;
+   spinlock_t *ptl;
+
+   BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+   BUG_ON(is_cow_mapping(vma->vm_flags));
+
+   retval = -ENOMEM;
+   pte = get_locked_pte(mm, addr, &ptl);
+   if (!pte)
+   goto out;
+   retval = -EBUSY;
+   if (!pte_none(*pte))
+   goto out_unlock;
+
+   /* Ok, finally just insert the thing.. */
+   entry = pfn_pte(pfn, vma->vm_page_prot);
+   set_pte_at(mm, addr, pte, entry);
+   update_mmu_cache(vma, addr, entry);
+
+   retval = 0;
+out_unlock:
+   pte_unmap_unlock(pte, ptl);
+
+out:
+   return retval;
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/