Hello,
I guess the box uses PAE?
There's a bootstrap issue: Xen only provides 512KiB of spare bootstrap
memory, which is not enough to build a pagetable covering hundreds of MiB.
Attached is a patch I've found in my xen checkout, I can't remember
whether it works.
Samuel
Index: i386/intel/pmap.c
===================================================================
RCS file: /cvsroot/hurd/gnumach/i386/intel/pmap.c,v
retrieving revision 1.4.2.16.2.31
diff -u -p -r1.4.2.16.2.31 pmap.c
--- i386/intel/pmap.c 24 Mar 2009 01:48:03 -0000 1.4.2.16.2.31
+++ i386/intel/pmap.c 1 Oct 2009 14:20:09 -0000
@@ -619,6 +619,40 @@ void pmap_bootstrap()
|| kernel_virtual_end > VM_MAX_KERNEL_ADDRESS)
kernel_virtual_end = VM_MAX_KERNEL_ADDRESS;
+#ifdef MACH_XEN
+ /* Xen only provides 512KB extra bootstrap linear memory, which is far
+ * from enough to map all available memory, so we need to map more
+ * bootstrap linear memory. */
+ {
+ pt_entry_t *l1_map = (pt_entry_t*) phystokv(pmap_grab_page());
+ pt_entry_t *base = (pt_entry_t*) boot_info.pt_base;
+ int i;
+#ifdef PAE
+ pt_entry_t *l2_map = (pt_entry_t*) phystokv(pte_to_pa(base[0]));
+#else /* PAE */
+ pt_entry_t *l2_map = base;
+#endif /* PAE */
+ for (i = 0; i < NPTES; i++) {
+ if (!(l2_map[i] & INTEL_PTE_VALID)) {
+ struct mmu_update update;
+ int j, n;
+
+ for (j = 0; j < NPTES; j++)
+ l1_map[j] = intel_ptob(pfn_to_mfn(i * NPTES + j)) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
+ pmap_set_page_readonly_init(l1_map);
+ if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (l1_map)))
+ panic("couldn't pin page %p(%p)", l1_map, kv_to_ma (l1_map));
+ update.ptr = kv_to_ma(&l2_map[i]);
+ update.val = pa_to_ma(l1_map) | INTEL_PTE_VALID;
+ hyp_mmu_update(kvtolin(&update), 1, kvtolin(&n), DOMID_SELF);
+ if (n != 1)
+ panic("couldn't complete bootstrap map");
+ break;
+ }
+ }
+ }
+#endif /* MACH_XEN */
+
/*
* Allocate and clear a kernel page directory.
*/