Signed-off-by: Wink Saville <[EMAIL PROTECTED]>
---
  include/asm-x86_64/kshmem.h |   17 +++
  include/linux/kshmem.h      |   47 +++++++
  include/linux/mm.h          |    1 +
  include/linux/vmalloc.h     |    3 +
  mm/Kconfig                  |    8 +
  mm/Makefile                 |    1 +
  mm/kshmem.c                 |  296 +++++++++++++++++++++++++++++++++++++++++++
  mm/vmalloc.c                |   37 +++++-
  8 files changed, 406 insertions(+), 4 deletions(-)
  create mode 100644 include/asm-x86_64/kshmem.h
  create mode 100644 include/linux/kshmem.h
  create mode 100644 mm/kshmem.c

Index: linux-2.6/include/asm-x86_64/kshmem.h
===================================================================
--- /dev/null   1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6/include/asm-x86_64/kshmem.h       2007-04-29 21:52:04.000000000 -0700
@@ -0,0 +1,17 @@
+/* Copyright (C) 2007 Saville Software, Inc.
+ *
+ * This code may be used for any purpose whatsoever, but
+ * no warranty of any kind is provided.
+ */
+
+#ifndef _ASM_KSHMEM_H
+#define _ASM_KSHMEM_H
+
+#define KSHMEM_AREA_ADDR       0x6f8000000000
+#define KSHMEM_AREA_SIZE       0x008000000000
+#define KSHMEM_AREA_MASK       (~(KSHMEM_AREA_SIZE-1))
+
+#define KSHMEM_USER_DISABLE(x) __pgd(pgd_val(x) & ~_PAGE_USER)
+#define KSHMEM_USER_ENABLE(x)  __pgd(pgd_val(x) | _PAGE_USER)
+
+#endif
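
If I read these constants right, the window is 512 GiB (1UL << 39) and starts on a
512 GiB boundary, so it spans exactly one x86_64 pgd entry; that is what lets
kshmem_prepare() later in this patch share the whole area by copying a single pgd.
A compile-time check along these lines would make the assumption explicit
(hypothetical, not part of the patch):

#include <linux/kernel.h>       /* BUILD_BUG_ON() */
#include <asm/pgtable.h>        /* PGDIR_SHIFT    */
#include <asm/kshmem.h>

/* Hypothetical helper: verify the kshmem window covers exactly one
 * pgd entry (PGDIR_SHIFT == 39 on x86_64) and is aligned to it. */
static inline void kshmem_check_layout(void)
{
        BUILD_BUG_ON(KSHMEM_AREA_SIZE != (1UL << PGDIR_SHIFT));
        BUILD_BUG_ON(KSHMEM_AREA_ADDR & (KSHMEM_AREA_SIZE - 1));
}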
Index: linux-2.6/include/linux/kshmem.h
===================================================================
--- /dev/null   1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6/include/linux/kshmem.h    2007-04-29 21:52:04.000000000 -0700
@@ -0,0 +1,47 @@
+/* Copyright (C) 2007 Saville Software, Inc.
+ *
+ * This code may be used for any purpose whatsoever, but
+ * no warranty of any kind is provided.
+ */
+
+#ifndef _KSHMEM_H
+#define _KSHMEM_H
+
+#ifdef CONFIG_KSHMEM
+
+#include <asm/kshmem.h>
+
+#ifdef __KERNEL__
+
+extern void            kshmem_init(void);
+extern char *          kshmem_alloc_at(unsigned long location, unsigned long size, pgprot_t page_flags);
+extern void *          kshmem_alloc(unsigned long size, pgprot_t page_flags);
+extern void            kshmem_user_disable(void);
+extern void            kshmem_user_enable(void);
+extern int             kshmem_prepare(struct mm_struct *pNew_mm);
+extern unsigned long   kshmem_addr_to_kvaddr(unsigned long kshmem_addr);
+
+#endif
+
+#else
+
+#ifdef __KERNEL__
+
+static inline void     kshmem_init(void)
+                               {}
+static inline char *   kshmem_alloc_at(unsigned long location, unsigned long size, pgprot_t page_flags)
+                               { BUG(); return NULL; }
+static inline void *   kshmem_alloc(unsigned long size, pgprot_t page_flags)
+                               { BUG(); return NULL; }
+static inline void     kshmem_free(void *pAddr)
+                               { BUG(); }
+static inline int      kshmem_prepare(struct mm_struct *pNew_mm)
+                               { return 0; }
+static inline unsigned long kshmem_addr_to_kvaddr(unsigned long kshmem_addr)
+                               { BUG(); return 0; }
+
+#endif
+
+#endif /* CONFIG_KSHMEM */
+
+#endif /* _KSHMEM_H */
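
To show how the interface above is intended to be used, here is a minimal, purely
hypothetical module; the module name, the single-page size and the printk are
invented, only the kshmem_* calls and PAGE_SHARED come from this patch:

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kshmem.h>

static void *kshmem_demo_page;

static int __init kshmem_demo_init(void)
{
        /* One page shared between the kernel and user space;
         * PAGE_SHARED as suggested by the comments in mm/kshmem.c. */
        kshmem_demo_page = kshmem_alloc(PAGE_SIZE, PAGE_SHARED);
        if (!kshmem_demo_page)
                return -ENOMEM;

        /* Kernel code (including interrupt handlers) can also reach the
         * same memory through its linear-mapping alias. */
        printk(KERN_INFO "kshmem demo: vaddr=%p kvaddr=%lx\n",
               kshmem_demo_page,
               kshmem_addr_to_kvaddr((unsigned long)kshmem_demo_page));
        return 0;
}
module_init(kshmem_demo_init);

MODULE_LICENSE("GPL");

Note that the CONFIG_KSHMEM=y interface has no kshmem_free() (only the stub for
the disabled case does), so an allocation like this is effectively permanent.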
Index: linux-2.6/include/linux/mm.h
===================================================================
--- linux-2.6.orig/include/linux/mm.h   2007-04-29 21:51:39.000000000 -0700
+++ linux-2.6/include/linux/mm.h        2007-04-29 21:52:04.000000000 -0700
@@ -169,6 +169,7 @@
  #define VM_MAPPED_COPY        0x01000000      /* T if mapped copy of data (nommu mmap) */
  #define VM_INSERTPAGE 0x02000000      /* The vma has had "vm_insert_page()" done on it */
  #define VM_ALWAYSDUMP 0x04000000      /* Always include in core dumps */
+#define VM_KSHMEM      0x08000000      /* Kernel shared memory */

  #ifndef VM_STACK_DEFAULT_FLAGS                /* arch can override this */
  #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
Index: linux-2.6/include/linux/vmalloc.h
===================================================================
--- linux-2.6.orig/include/linux/vmalloc.h      2007-04-29 21:51:39.000000000 -0700
+++ linux-2.6/include/linux/vmalloc.h   2007-04-29 21:52:04.000000000 -0700
@@ -42,6 +42,9 @@
  extern void *vmalloc_exec(unsigned long size);
  extern void *vmalloc_32(unsigned long size);
  extern void *vmalloc_32_user(unsigned long size);
+extern void *__vmalloc_at(unsigned long size, unsigned long flags,
+               unsigned long start, unsigned long end,
+               gfp_t gfp_mask, pgprot_t prot);
  extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
  extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
                                pgprot_t prot);
Index: linux-2.6/mm/Kconfig
===================================================================
--- linux-2.6.orig/mm/Kconfig   2007-04-29 21:51:39.000000000 -0700
+++ linux-2.6/mm/Kconfig        2007-04-29 21:56:30.000000000 -0700
@@ -163,3 +163,11 @@
        default "0" if !ZONE_DMA
        default "1"

+config KSHMEM
+       bool "Kernel shared memory"
+       default n
+       help
+         Allows an area of memory to be shared between the kernel and
+         user space programs. For instance, interrupt service routines
+         and user space programs may share the same memory.
+
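
To make the "user space programs" part of the help text concrete: the window lives
at a fixed virtual address, and kshmem_user_enable() below sets _PAGE_USER on its
pgd entry, so a process could in principle just dereference that address once the
kernel has published something there. A purely illustrative user-space reader (no
such program is part of this patch):

/* Hypothetical user-space reader; assumes the kernel has allocated the
 * first kshmem page and enabled user access for this process' mm. */
#include <stdio.h>

#define KSHMEM_AREA_ADDR 0x6f8000000000UL   /* from asm-x86_64/kshmem.h */

int main(void)
{
        volatile unsigned long *shared =
                (volatile unsigned long *)KSHMEM_AREA_ADDR;

        printf("first word of the kshmem window: %#lx\n", *shared);
        return 0;
}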
Index: linux-2.6/mm/Makefile
===================================================================
--- linux-2.6.orig/mm/Makefile  2007-04-29 21:51:39.000000000 -0700
+++ linux-2.6/mm/Makefile       2007-04-29 21:52:04.000000000 -0700
@@ -29,3 +29,4 @@
  obj-$(CONFIG_FS_XIP) += filemap_xip.o
  obj-$(CONFIG_MIGRATION) += migrate.o
  obj-$(CONFIG_SMP) += allocpercpu.o
+obj-$(CONFIG_KSHMEM) += kshmem.o
Index: linux-2.6/mm/kshmem.c
===================================================================
--- /dev/null   1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6/mm/kshmem.c       2007-04-29 21:52:04.000000000 -0700
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2007 Saville Software, Inc.
+ *
+ * This code may be used for any purpose whatsoever, but
+ * no warranty of any kind is provided.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/kshmem.h>
+
+#include <asm/io.h>
+
+
+#define KSHMEM_DEBUG
+#ifdef KSHMEM_DEBUG
+#define DPK(fmt, args...) printk(KERN_ERR "kshmem " fmt, ## args)
+#else
+#define DPK(fmt, args...)
+#endif
+
+MODULE_AUTHOR("Wink Saville");
+MODULE_LICENSE("Dual BSD/GPL");
+
+#undef KSHMEM_FIX_PTL
+#ifdef KSHMEM_FIX_PTL
+
+/*
+ * get the pte pointer for the address
+ */
+static void _get_pti(struct mm_struct *mm,
+                   unsigned long addr,
+                   int *pageSize,
+                   pgd_t **pgd_ptr,
+                   pud_t **pud_ptr,
+                   pmd_t **pmd_ptr,
+                   pte_t **pte_ptr)
+{
+       int   page_size = 0;
+       pgd_t *pgd = NULL;
+       pud_t *pud = NULL;
+       pmd_t *pmd = NULL;
+       pte_t *pte = NULL;
+
+       if (mm == NULL) {
+               goto done;
+       }
+       pgd = pgd_offset(mm, addr);
+       if (pgd_none(*pgd)) goto done;
+       pud = pud_offset(pgd, addr);
+       if (pud_none(*pud)) goto done;
+       pmd = pmd_offset(pud, addr);
+       if (pmd_none(*pmd)) goto done;
+
+       if (pmd_large(*pmd))
+       {
+               pte = (pte_t *)pmd;
+               page_size = LARGE_PAGE_SIZE;
+       }
+       else
+       {
+               pte = pte_offset_map(pmd, addr);
+               page_size = PAGE_SIZE;
+       }
+done:
+       if (pageSize) *pageSize = page_size;
+       if (pgd_ptr) *pgd_ptr = pgd;
+       if (pud_ptr) *pud_ptr = pud;
+       if (pmd_ptr) *pmd_ptr = pmd;
+       if (pte_ptr) *pte_ptr = pte;
+}
+
+/*
+ * get the page table lock for the addr
+ */
+static spinlock_t *_get_ptl(struct mm_struct *mm, unsigned long addr)
+{
+               pmd_t *pmd = NULL;
+
+               _get_pti(mm, addr, NULL, NULL, NULL, &pmd, NULL);
+       if (pmd) {
+               return pte_lockptr(mm, pmd);
+       } else {
+               return NULL;
+       }
+}
+
+/*
+ * Fix the page table lock
+ */
+static void _fix_ptl(void *addr)
+{
+       spinlock_t              *pLock;
+       struct mm_struct        *cur_mm = current->active_mm;
+
+       /*
+        * For some reason we need to initialize the page table lock (ptl)
+        */
+       pLock = _get_ptl(cur_mm, (unsigned long)addr);
+       if (pLock) {
+               spin_lock_init(pLock);
+       } else {
+               DPK("_fix_ptl: src_mm=%p addr=%p NO PAGE!!! *********************\n",
+                               cur_mm, addr);
+       }
+}
+#endif
+
+/*
+ * For all of the pages:
+ *     - make sure they are present
+ *     - take a reference so they won't be deallocated
+ *     - lock them down so they won't be swapped
+ */
+static void _fixate_range(char *pStart, char *pEnd)
+{
+       char *pAddr;
+
+       make_pages_present((unsigned long)pStart, (unsigned long)pEnd);
+
+       for (pAddr = pStart; pAddr < pEnd; pAddr += PAGE_SIZE) {
+               struct page *pPage = vmalloc_to_page(pAddr);
+
+               get_page(pPage);
+               SetPageLocked(pPage);
+#ifdef KSHMEM_FIX_PTL
+               _fix_ptl(pAddr);
+#endif
+       }
+}
+
+/*
+ * Allocate size bytes of kshmem at a specific location;
+ * page_flags should be PAGE_SHARED or PAGE_SHARED_EXEC.
+ *
+ * The resulting pointer will be page aligned and consume at least
+ * one page of memory.
+ */
+char *kshmem_alloc_at(unsigned long location, unsigned long size, pgprot_t page_flags)
+{
+       char *pStart;
+       struct mm_struct *cur_mm;
+
+       DPK("kshmem_alloc_at: current->mm=%p current->active_mm=%p &init_mm=%p\n", current->mm, current->active_mm, &init_mm);
+       /*
+        * Save the current mm and switch to init_mm where we allocate
+        * kshmem memory and then share with all other mm's.
+        */
+       cur_mm  = current->mm;
+       current->mm = &init_mm;
+
+       /*
+        * cur_mm is NULL when we start, maybe we should use
+        * active_mm?
+        */
+
+       /* Do we need to hold the semaphore? */
+       down_write(&init_mm.mmap_sem);
+
+       size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+       pStart = __vmalloc_at(size,
+                                 VM_ALLOC | VM_USERMAP | VM_KSHMEM | VM_LOCKED | VM_SHARED,
+                                 location, location + size,
+                                 GFP_KERNEL | GFP_DMA, page_flags);
+       BUG_ON(pStart != (char *)location);
+       _fixate_range(pStart, pStart + size);
+
+       up_write(&init_mm.mmap_sem);
+
+       /*
+        * Restore mm
+        */
+       current->mm = cur_mm;
+
+       return pStart;
+}
+EXPORT_SYMBOL(kshmem_alloc_at);
+
+/*
+ * Allocate size bytes of kshmem; page_flags
+ * should be PAGE_SHARED or PAGE_SHARED_EXEC.
+ *
+ * The resulting pointer will be page aligned and consume at least
+ * one page of memory.
+ */
+void *kshmem_alloc(unsigned long size, pgprot_t page_flags)
+{
+       char *pStart;
+       struct mm_struct *cur_mm;
+
+       /*
+        * Save the current mm and switch to init_mm where we allocate
+        * kshmem memory and then share with all other mm's.
+        */
+       cur_mm  = current->mm;
+       current->mm = &init_mm;
+
+       DPK("kshmem_alloc: cur_mm=%p current->mm=%p\n", cur_mm, current->mm);
+
+       /* Do we need to hold the semaphore? */
+       down_write(&init_mm.mmap_sem);
+
+       size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+       pStart = __vmalloc_at(size,
+                                 VM_ALLOC | VM_USERMAP | VM_KSHMEM | VM_LOCKED | VM_SHARED,
+                                 KSHMEM_AREA_ADDR, KSHMEM_AREA_ADDR + KSHMEM_AREA_SIZE,
+                                 GFP_KERNEL | GFP_DMA, page_flags);
+       BUG_ON(NULL == pStart);
+       _fixate_range(pStart, pStart + size);
+
+       up_write(&init_mm.mmap_sem);
+
+       /*
+        * Restore mm
+        */
+       current->mm = cur_mm;
+
+       return pStart;
+}
+EXPORT_SYMBOL(kshmem_alloc);
+
+/*
+ * Prepare the kernel shared memory mapping for new_mm
+ */
+int kshmem_prepare(struct mm_struct *new_mm)
+{
+       /**
+        * Set the pgd for the KSHMEM_AREA_ADDR and
+        * point it at the pgd from init_mm.
+        */
+       down_read(&init_mm.mmap_sem);
+       set_pgd(pgd_offset(new_mm, KSHMEM_AREA_ADDR),
+                       KSHMEM_USER_DISABLE(*pgd_offset(&init_mm, KSHMEM_AREA_ADDR)));
+                       //__pgd(pgd_val(*pgd_offset(&init_mm, KSHMEM_AREA_ADDR)) & ~_PAGE_USER));
+       up_read(&init_mm.mmap_sem);
+
+       return 0;
+}
+EXPORT_SYMBOL(kshmem_prepare);
+
+/*
+ * Disable user-space access to the kshmem area for the current mm
+ */
+void kshmem_user_disable(void)
+{
+       down_read(&current->active_mm->mmap_sem);
+       set_pgd(pgd_offset(current->active_mm, KSHMEM_AREA_ADDR),
+                       KSHMEM_USER_DISABLE(*pgd_offset(&init_mm, KSHMEM_AREA_ADDR)));
+       up_read(&current->active_mm->mmap_sem);
+}
+
+/*
+ * Enable user-space access to the kshmem area for the current mm
+ */
+void kshmem_user_enable(void)
+{
+       down_read(&current->active_mm->mmap_sem);
+       set_pgd(pgd_offset(current->active_mm, KSHMEM_AREA_ADDR),
+                       KSHMEM_USER_ENABLE(*pgd_offset(&init_mm, KSHMEM_AREA_ADDR)));
+       up_read(&current->active_mm->mmap_sem);
+}
+
+/*
+ * Convert a vmalloced address to a "kernel" virtual address
+ */
+unsigned long kshmem_addr_to_kvaddr(unsigned long kshmem_addr)
+{
+       struct page *p;
+       unsigned long phys;
+       unsigned long addr;
+
+       p = vmalloc_to_page((unsigned char *)kshmem_addr);
+       phys = page_to_phys(p);
+       addr = (unsigned long)phys_to_virt(phys) + offset_in_page(kshmem_addr);
+
+       return addr;
+}
+EXPORT_SYMBOL(kshmem_addr_to_kvaddr);
+
+/*
+ * Initialize
+ */
+void kshmem_init(void)
+{
+       char *p;
+
+       DPK("kshmem_init E:\n");
+
+       p = kshmem_alloc_at(KSHMEM_AREA_ADDR, PAGE_SIZE, PAGE_SHARED);
+       BUG_ON((unsigned long)p != KSHMEM_AREA_ADDR);
+
+       DPK("kshmem_init X: p=%p\n", p);
+}
+
+
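
Nothing in this patch actually calls kshmem_prepare(), kshmem_user_enable() or
kshmem_user_disable(); presumably the call sites come in a follow-up. Just to
sketch how a driver might toggle user access for the calling process, with
invented names and ioctl numbers:

/* Hypothetical driver glue, not part of this patch. */
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/kshmem.h>

#define KSHMEM_DEMO_IOC_ENABLE  1       /* made-up ioctl numbers */
#define KSHMEM_DEMO_IOC_DISABLE 2

static long kshmem_demo_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg)
{
        switch (cmd) {
        case KSHMEM_DEMO_IOC_ENABLE:
                kshmem_user_enable();   /* sets _PAGE_USER on the kshmem pgd */
                return 0;
        case KSHMEM_DEMO_IOC_DISABLE:
                kshmem_user_disable();
                return 0;
        }
        return -ENOTTY;
}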
Index: linux-2.6/mm/vmalloc.c
===================================================================
--- linux-2.6.orig/mm/vmalloc.c 2007-04-29 21:51:39.000000000 -0700
+++ linux-2.6/mm/vmalloc.c      2007-04-29 21:52:04.000000000 -0700
@@ -469,8 +469,11 @@
  }

  /**
- *     __vmalloc_node  -  allocate virtually contiguous memory
+ *     __vmalloc_node_at  -  allocate virtually contiguous memory
   *    @size:          allocation size
+ *     @flags:         flags for vm area
+ *     @start:         desired starting location
+ *     @end:           desired ending location
   *    @gfp_mask:      flags for the page level allocator
   *    @prot:          protection mask for the allocated pages
   *    @node:          node to use for allocation or -1
@@ -479,8 +482,9 @@
   *    allocator with @gfp_mask flags.  Map them into contiguous
   *    kernel virtual space, using a pagetable protection of @prot.
   */
-static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-                           int node)
+static void *__vmalloc_node_at(unsigned long size, unsigned long flags,
+                               unsigned long start, unsigned long end,
+                               gfp_t gfp_mask, pgprot_t prot, int node)
  {
        struct vm_struct *area;

@@ -488,13 +492,38 @@
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

-       area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
+       area = __get_vm_area_node(size, flags, start, end, node, gfp_mask);
        if (!area)
                return NULL;

        return __vmalloc_area_node(area, gfp_mask, prot, node);
  }

+void *__vmalloc_at(unsigned long size, unsigned long flags,
+               unsigned long start, unsigned long end,
+               gfp_t gfp_mask, pgprot_t prot)
+{
+       return __vmalloc_node_at(size, flags, start, end, gfp_mask, prot, -1);
+}
+EXPORT_SYMBOL(__vmalloc_at);
+
+/**
+ *     __vmalloc_node  -  allocate virtually contiguous memory
+ *     @size:          allocation size
+ *     @gfp_mask:      flags for the page level allocator
+ *     @prot:          protection mask for the allocated pages
+ *     @node:          node to use for allocation or -1
+ *
+ *     Allocate enough pages to cover @size from the page level
+ *     allocator with @gfp_mask flags.  Map them into contiguous
+ *     kernel virtual space, using a pagetable protection of @prot.
+ */
+static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+                           int node)
+{
+       return __vmalloc_node_at(size, VM_ALLOC, VMALLOC_START, VMALLOC_END, gfp_mask, prot, node);
+}
+
  void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
  {
        return __vmalloc_node(size, gfp_mask, prot, -1);

