Author: kib
Date: Mon Aug 10 17:18:21 2015
New Revision: 286584
URL: https://svnweb.freebsd.org/changeset/base/286584

Log:
  Make kstack_pages a tunable on arm, x86, and powerpc.  On i386, the
  initial thread stack is not adjusted by the tunable; the stack is
  allocated too early to have access to the kernel environment.  See
  TD0_KSTACK_PAGES for the thread0 stack sizing on i386.
  
  The tunable was tested on x86 only.  From visual inspection, it
  seems that it might work on arm and powerpc.  The arm
  USPACE_SVC_STACK_TOP and powerpc USPACE macros seem to already be
  incorrect for threads with a non-default kstack size.  I only
  changed the macros to use the variable instead of the constant,
  since I cannot test those platforms.
  
  On arm64, mips and sparc64, some static data structures are sized by
  KSTACK_PAGES, so the tunable is disabled.
  
  Sponsored by: The FreeBSD Foundation
  MFC after:    2 weeks
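
The tunable is fetched in init_param1() via
TUNABLE_INT_FETCH("kern.kstack_pages", &kstack_pages) (see the
sys/kern/subr_param.c hunk below), so it can be set from the loader.
A minimal, illustrative /boot/loader.conf entry; the value 4 matches
the usual amd64 default and is only an example:

    kern.kstack_pages="4"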

Modified:
  head/sys/amd64/amd64/genassym.c
  head/sys/amd64/amd64/machdep.c
  head/sys/amd64/amd64/mp_machdep.c
  head/sys/arm/arm/machdep.c
  head/sys/arm/at91/at91_machdep.c
  head/sys/arm/cavium/cns11xx/econa_machdep.c
  head/sys/arm/include/param.h
  head/sys/arm/samsung/s3c2xx0/s3c24x0_machdep.c
  head/sys/arm/xscale/i80321/ep80219_machdep.c
  head/sys/arm/xscale/i80321/iq31244_machdep.c
  head/sys/arm/xscale/i8134x/crb_machdep.c
  head/sys/arm/xscale/ixp425/avila_machdep.c
  head/sys/arm/xscale/pxa/pxa_machdep.c
  head/sys/ddb/db_ps.c
  head/sys/i386/i386/genassym.c
  head/sys/i386/i386/mp_machdep.c
  head/sys/i386/i386/sys_machdep.c
  head/sys/kern/kern_fork.c
  head/sys/kern/subr_param.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/include/param.h
  head/sys/vm/vm_glue.c
  head/sys/x86/xen/pv.c

Modified: head/sys/amd64/amd64/genassym.c
==============================================================================
--- head/sys/amd64/amd64/genassym.c     Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/amd64/amd64/genassym.c     Mon Aug 10 17:18:21 2015        (r286584)
@@ -93,7 +93,6 @@ ASSYM(TDP_KTHREAD, TDP_KTHREAD);
 ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap));
 ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall));
 ASSYM(V_INTR, offsetof(struct vmmeter, v_intr));
-ASSYM(KSTACK_PAGES, KSTACK_PAGES);
 ASSYM(PAGE_SIZE, PAGE_SIZE);
 ASSYM(NPTEPG, NPTEPG);
 ASSYM(NPDEPG, NPDEPG);

Modified: head/sys/amd64/amd64/machdep.c
==============================================================================
--- head/sys/amd64/amd64/machdep.c      Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/amd64/amd64/machdep.c      Mon Aug 10 17:18:21 2015        (r286584)
@@ -1516,12 +1516,6 @@ hammer_time(u_int64_t modulep, u_int64_t
        char *env;
        size_t kstack0_sz;
 
-       thread0.td_kstack = physfree + KERNBASE;
-       thread0.td_kstack_pages = KSTACK_PAGES;
-       kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
-       bzero((void *)thread0.td_kstack, kstack0_sz);
-       physfree += kstack0_sz;
-
        /*
         * This may be done better later if it gets more high level
         * components in it. If so just link td->td_proc here.
@@ -1533,6 +1527,12 @@ hammer_time(u_int64_t modulep, u_int64_t
        /* Init basic tunables, hz etc */
        init_param1();
 
+       thread0.td_kstack = physfree + KERNBASE;
+       thread0.td_kstack_pages = kstack_pages;
+       kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
+       bzero((void *)thread0.td_kstack, kstack0_sz);
+       physfree += kstack0_sz;
+
        /*
         * make gdt memory segments
         */

Modified: head/sys/amd64/amd64/mp_machdep.c
==============================================================================
--- head/sys/amd64/amd64/mp_machdep.c   Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/amd64/amd64/mp_machdep.c   Mon Aug 10 17:18:21 2015        (r286584)
@@ -348,7 +348,7 @@ native_start_all_aps(void)
 
                /* allocate and set up an idle stack data page */
                bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
-                   KSTACK_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
+                   kstack_pages * PAGE_SIZE, M_WAITOK | M_ZERO);
                doublefault_stack = (char *)kmem_malloc(kernel_arena,
                    PAGE_SIZE, M_WAITOK | M_ZERO);
                nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
@@ -356,7 +356,7 @@ native_start_all_aps(void)
                dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
                    M_WAITOK | M_ZERO);
 
-               bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
+               bootSTK = (char *)bootstacks[cpu] + kstack_pages * PAGE_SIZE - 8;
                bootAP = cpu;
 
                /* attempt to start the Application Processor */

Modified: head/sys/arm/arm/machdep.c
==============================================================================
--- head/sys/arm/arm/machdep.c  Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/arm/arm/machdep.c  Mon Aug 10 17:18:21 2015        (r286584)
@@ -1066,7 +1066,7 @@ init_proc0(vm_offset_t kstack)
        proc_linkup0(&proc0, &thread0);
        thread0.td_kstack = kstack;
        thread0.td_pcb = (struct pcb *)
-               (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
+               (thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1;
        thread0.td_pcb->pcb_flags = 0;
        thread0.td_pcb->pcb_vfpcpu = -1;
        thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN | VFPSCR_FZ;
@@ -1360,7 +1360,7 @@ initarm(struct arm_boot_params *abp)
        valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
        valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
        valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
-       valloc_pages(kernelstack, KSTACK_PAGES * MAXCPU);
+       valloc_pages(kernelstack, kstack_pages * MAXCPU);
        valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 
        /*
@@ -1614,7 +1614,7 @@ initarm(struct arm_boot_params *abp)
        irqstack    = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
        abtstack    = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
        undstack    = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU );
-       kernelstack = pmap_preboot_get_vpages(KSTACK_PAGES * MAXCPU);
+       kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU);
 
        /* Allocate message buffer. */
        msgbufp = (void *)pmap_preboot_get_vpages(

Modified: head/sys/arm/at91/at91_machdep.c
==============================================================================
--- head/sys/arm/at91/at91_machdep.c    Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/arm/at91/at91_machdep.c    Mon Aug 10 17:18:21 2015        (r286584)
@@ -512,7 +512,7 @@ initarm(struct arm_boot_params *abp)
        valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
        valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
        valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
-       valloc_pages(kernelstack, KSTACK_PAGES * MAXCPU);
+       valloc_pages(kernelstack, kstack_pages * MAXCPU);
        valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 
        /*
@@ -553,7 +553,7 @@ initarm(struct arm_boot_params *abp)
        pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
            UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
        pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-           KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+           kstack_pages * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 
        pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
            L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

Modified: head/sys/arm/cavium/cns11xx/econa_machdep.c
==============================================================================
--- head/sys/arm/cavium/cns11xx/econa_machdep.c Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/arm/cavium/cns11xx/econa_machdep.c Mon Aug 10 17:18:21 2015        (r286584)
@@ -222,7 +222,7 @@ initarm(struct arm_boot_params *abp)
        valloc_pages(irqstack, IRQ_STACK_SIZE);
        valloc_pages(abtstack, ABT_STACK_SIZE);
        valloc_pages(undstack, UND_STACK_SIZE);
-       valloc_pages(kernelstack, KSTACK_PAGES);
+       valloc_pages(kernelstack, kstack_pages);
        valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 
        /*
@@ -260,7 +260,7 @@ initarm(struct arm_boot_params *abp)
        pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
            UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
        pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-           KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+           kstack_pages * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 
        pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
            L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

Modified: head/sys/arm/include/param.h
==============================================================================
--- head/sys/arm/include/param.h        Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/arm/include/param.h        Mon Aug 10 17:18:21 2015        (r286584)
@@ -131,7 +131,7 @@
 #define KSTACK_GUARD_PAGES     1
 #endif /* !KSTACK_GUARD_PAGES */
 
-#define USPACE_SVC_STACK_TOP           (KSTACK_PAGES * PAGE_SIZE)
+#define USPACE_SVC_STACK_TOP           (kstack_pages * PAGE_SIZE)
 
 /*
  * Mach derived conversion macros

Modified: head/sys/arm/samsung/s3c2xx0/s3c24x0_machdep.c
==============================================================================
--- head/sys/arm/samsung/s3c2xx0/s3c24x0_machdep.c      Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/arm/samsung/s3c2xx0/s3c24x0_machdep.c      Mon Aug 10 17:18:21 2015        (r286584)
@@ -271,7 +271,7 @@ initarm(struct arm_boot_params *abp)
        valloc_pages(irqstack, IRQ_STACK_SIZE);
        valloc_pages(abtstack, ABT_STACK_SIZE);
        valloc_pages(undstack, UND_STACK_SIZE);
-       valloc_pages(kernelstack, KSTACK_PAGES);
+       valloc_pages(kernelstack, kstack_pages);
        valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
        /*
         * Now we start construction of the L1 page table
@@ -307,7 +307,7 @@ initarm(struct arm_boot_params *abp)
        pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
            UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
        pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-           KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+           kstack_pages * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 
        pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
            L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

Modified: head/sys/arm/xscale/i80321/ep80219_machdep.c
==============================================================================
--- head/sys/arm/xscale/i80321/ep80219_machdep.c        Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/arm/xscale/i80321/ep80219_machdep.c        Mon Aug 10 17:18:21 2015        (r286584)
@@ -225,7 +225,7 @@ initarm(struct arm_boot_params *abp)
        valloc_pages(irqstack, IRQ_STACK_SIZE);
        valloc_pages(abtstack, ABT_STACK_SIZE);
        valloc_pages(undstack, UND_STACK_SIZE);
-       valloc_pages(kernelstack, KSTACK_PAGES);
+       valloc_pages(kernelstack, kstack_pages);
        alloc_pages(minidataclean.pv_pa, 1);
        valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
        /*

Modified: head/sys/arm/xscale/i80321/iq31244_machdep.c
==============================================================================
--- head/sys/arm/xscale/i80321/iq31244_machdep.c        Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/arm/xscale/i80321/iq31244_machdep.c        Mon Aug 10 17:18:21 2015        (r286584)
@@ -226,7 +226,7 @@ initarm(struct arm_boot_params *abp)
        valloc_pages(irqstack, IRQ_STACK_SIZE);
        valloc_pages(abtstack, ABT_STACK_SIZE);
        valloc_pages(undstack, UND_STACK_SIZE);
-       valloc_pages(kernelstack, KSTACK_PAGES);
+       valloc_pages(kernelstack, kstack_pages);
        alloc_pages(minidataclean.pv_pa, 1);
        valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
        /*

Modified: head/sys/arm/xscale/i8134x/crb_machdep.c
==============================================================================
--- head/sys/arm/xscale/i8134x/crb_machdep.c    Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/arm/xscale/i8134x/crb_machdep.c    Mon Aug 10 17:18:21 2015        (r286584)
@@ -225,7 +225,7 @@ initarm(struct arm_boot_params *abp)
        valloc_pages(irqstack, IRQ_STACK_SIZE);
        valloc_pages(abtstack, ABT_STACK_SIZE);
        valloc_pages(undstack, UND_STACK_SIZE);
-       valloc_pages(kernelstack, KSTACK_PAGES);
+       valloc_pages(kernelstack, kstack_pages);
        valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
        /*
         * Now we start construction of the L1 page table

Modified: head/sys/arm/xscale/ixp425/avila_machdep.c
==============================================================================
--- head/sys/arm/xscale/ixp425/avila_machdep.c  Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/arm/xscale/ixp425/avila_machdep.c  Mon Aug 10 17:18:21 2015        (r286584)
@@ -295,7 +295,7 @@ initarm(struct arm_boot_params *abp)
        valloc_pages(irqstack, IRQ_STACK_SIZE);
        valloc_pages(abtstack, ABT_STACK_SIZE);
        valloc_pages(undstack, UND_STACK_SIZE);
-       valloc_pages(kernelstack, KSTACK_PAGES);
+       valloc_pages(kernelstack, kstack_pages);
        alloc_pages(minidataclean.pv_pa, 1);
        valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 

Modified: head/sys/arm/xscale/pxa/pxa_machdep.c
==============================================================================
--- head/sys/arm/xscale/pxa/pxa_machdep.c       Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/arm/xscale/pxa/pxa_machdep.c       Mon Aug 10 17:18:21 2015        (r286584)
@@ -206,7 +206,7 @@ initarm(struct arm_boot_params *abp)
        valloc_pages(irqstack, IRQ_STACK_SIZE);
        valloc_pages(abtstack, ABT_STACK_SIZE);
        valloc_pages(undstack, UND_STACK_SIZE);
-       valloc_pages(kernelstack, KSTACK_PAGES);
+       valloc_pages(kernelstack, kstack_pages);
        alloc_pages(minidataclean.pv_pa, 1);
        valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
        /*

Modified: head/sys/ddb/db_ps.c
==============================================================================
--- head/sys/ddb/db_ps.c        Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/ddb/db_ps.c        Mon Aug 10 17:18:21 2015        (r286584)
@@ -462,7 +462,7 @@ db_findstack_cmd(db_expr_t addr, bool ha
        for (ks_ce = kstack_cache; ks_ce != NULL;
             ks_ce = ks_ce->next_ks_entry) {
                if ((vm_offset_t)ks_ce <= saddr && saddr < (vm_offset_t)ks_ce +
-                   PAGE_SIZE * KSTACK_PAGES) {
+                   PAGE_SIZE * kstack_pages) {
                        db_printf("Cached stack %p\n", ks_ce);
                        return;
                }

Modified: head/sys/i386/i386/genassym.c
==============================================================================
--- head/sys/i386/i386/genassym.c       Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/i386/i386/genassym.c       Mon Aug 10 17:18:21 2015        (r286584)
@@ -101,8 +101,6 @@ ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
 ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap));
 ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall));
 ASSYM(V_INTR, offsetof(struct vmmeter, v_intr));
-/* ASSYM(UPAGES, UPAGES);*/
-ASSYM(KSTACK_PAGES, KSTACK_PAGES);
 ASSYM(TD0_KSTACK_PAGES, TD0_KSTACK_PAGES);
 ASSYM(PAGE_SIZE, PAGE_SIZE);
 ASSYM(NPTEPG, NPTEPG);

Modified: head/sys/i386/i386/mp_machdep.c
==============================================================================
--- head/sys/i386/i386/mp_machdep.c     Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/i386/i386/mp_machdep.c     Mon Aug 10 17:18:21 2015        (r286584)
@@ -348,7 +348,7 @@ start_all_aps(void)
 
                /* allocate and set up a boot stack data page */
                bootstacks[cpu] =
-                   (char *)kmem_malloc(kernel_arena, KSTACK_PAGES * PAGE_SIZE,
+                   (char *)kmem_malloc(kernel_arena, kstack_pages * PAGE_SIZE,
                    M_WAITOK | M_ZERO);
                dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
                    M_WAITOK | M_ZERO);
@@ -360,7 +360,8 @@ start_all_aps(void)
                outb(CMOS_DATA, BIOS_WARM);     /* 'warm-start' */
 #endif
 
-               bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 4;
+               bootSTK = (char *)bootstacks[cpu] + kstack_pages *
+                   PAGE_SIZE - 4;
                bootAP = cpu;
 
                /* attempt to start the Application Processor */

Modified: head/sys/i386/i386/sys_machdep.c
==============================================================================
--- head/sys/i386/i386/sys_machdep.c    Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/i386/i386/sys_machdep.c    Mon Aug 10 17:18:21 2015        (r286584)
@@ -275,7 +275,7 @@ i386_extend_pcb(struct thread *td)
        ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES+1),
            M_WAITOK | M_ZERO);
        /* -16 is so we can convert a trapframe into vm86trapframe inplace */
-       ext->ext_tss.tss_esp0 = td->td_kstack + ctob(KSTACK_PAGES) -
+       ext->ext_tss.tss_esp0 = td->td_kstack + ctob(td->td_kstack_pages) -
            sizeof(struct pcb) - 16;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        /*

Modified: head/sys/kern/kern_fork.c
==============================================================================
--- head/sys/kern/kern_fork.c   Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/kern/kern_fork.c   Mon Aug 10 17:18:21 2015        (r286584)
@@ -832,7 +832,7 @@ fork1(struct thread *td, int flags, int 
        mem_charged = 0;
        vm2 = NULL;
        if (pages == 0)
-               pages = KSTACK_PAGES;
+               pages = kstack_pages;
        /* Allocate new proc. */
        newproc = uma_zalloc(proc_zone, M_WAITOK);
        td2 = FIRST_THREAD_IN_PROC(newproc);

Modified: head/sys/kern/subr_param.c
==============================================================================
--- head/sys/kern/subr_param.c  Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/kern/subr_param.c  Mon Aug 10 17:18:21 2015        (r286584)
@@ -159,6 +159,9 @@ void
 init_param1(void)
 {
 
+#if !defined(__mips__) && !defined(__arm64__) && !defined(__sparc64__)
+       TUNABLE_INT_FETCH("kern.kstack_pages", &kstack_pages);
+#endif
        hz = -1;
        TUNABLE_INT_FETCH("kern.hz", &hz);
        if (hz == -1)

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c      Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/powerpc/aim/mmu_oea.c      Mon Aug 10 17:18:21 2015        (r286584)
@@ -932,13 +932,13 @@ moea_bootstrap(mmu_t mmup, vm_offset_t k
         * Allocate a kernel stack with a guard page for thread0 and map it
         * into the kernel page map.
         */
-       pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
+       pa = moea_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
        va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
-       virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
+       virtual_avail = va + kstack_pages * PAGE_SIZE;
        CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
        thread0.td_kstack = va;
-       thread0.td_kstack_pages = KSTACK_PAGES;
-       for (i = 0; i < KSTACK_PAGES; i++) {
+       thread0.td_kstack_pages = kstack_pages;
+       for (i = 0; i < kstack_pages; i++) {
                moea_kenter(mmup, va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/powerpc/aim/mmu_oea64.c    Mon Aug 10 17:18:21 2015        (r286584)
@@ -917,13 +917,13 @@ moea64_late_bootstrap(mmu_t mmup, vm_off
         * Allocate a kernel stack with a guard page for thread0 and map it
         * into the kernel page map.
         */
-       pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
+       pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
        va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
-       virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
+       virtual_avail = va + kstack_pages * PAGE_SIZE;
        CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
        thread0.td_kstack = va;
-       thread0.td_kstack_pages = KSTACK_PAGES;
-       for (i = 0; i < KSTACK_PAGES; i++) {
+       thread0.td_kstack_pages = kstack_pages;
+       for (i = 0; i < kstack_pages; i++) {
                moea64_kenter(mmup, va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c       Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/powerpc/booke/pmap.c       Mon Aug 10 17:18:21 2015        (r286584)
@@ -1207,7 +1207,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset
        /* Steal physical memory for kernel stack from the end */
        /* of the first avail region                           */
        /*******************************************************/
-       kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
+       kstack0_sz = kstack_pages * PAGE_SIZE;
        kstack0_phys = availmem_regions[0].mr_start +
            availmem_regions[0].mr_size;
        kstack0_phys -= kstack0_sz;
@@ -1312,7 +1312,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset
        /* Enter kstack0 into kernel map, provide guard page */
        kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
        thread0.td_kstack = kstack0;
-       thread0.td_kstack_pages = KSTACK_PAGES;
+       thread0.td_kstack_pages = kstack_pages;
 
        debugf("kstack_sz = 0x%08x\n", kstack0_sz);
        debugf("kstack0_phys at 0x%08x - 0x%08x\n",
@@ -1320,7 +1320,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset
        debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);
        
        virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
-       for (i = 0; i < KSTACK_PAGES; i++) {
+       for (i = 0; i < kstack_pages; i++) {
                mmu_booke_kenter(mmu, kstack0, kstack0_phys);
                kstack0 += PAGE_SIZE;
                kstack0_phys += PAGE_SIZE;

Modified: head/sys/powerpc/include/param.h
==============================================================================
--- head/sys/powerpc/include/param.h    Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/powerpc/include/param.h    Mon Aug 10 17:18:21 2015        (r286584)
@@ -111,7 +111,7 @@
 #endif
 #endif
 #define        KSTACK_GUARD_PAGES      1       /* pages of kstack guard; 0 disables */
-#define        USPACE          (KSTACK_PAGES * PAGE_SIZE)      /* total size of pcb */
+#define        USPACE          (kstack_pages * PAGE_SIZE)      /* total size of pcb */
 
 /*
  * Mach derived conversion macros

Modified: head/sys/vm/vm_glue.c
==============================================================================
--- head/sys/vm/vm_glue.c       Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/vm/vm_glue.c       Mon Aug 10 17:18:21 2015        (r286584)
@@ -327,11 +327,11 @@ vm_thread_new(struct thread *td, int pag
 
        /* Bounds check */
        if (pages <= 1)
-               pages = KSTACK_PAGES;
+               pages = kstack_pages;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;
 
-       if (pages == KSTACK_PAGES) {
+       if (pages == kstack_pages) {
                mtx_lock(&kstack_cache_mtx);
                if (kstack_cache != NULL) {
                        ks_ce = kstack_cache;
@@ -340,7 +340,7 @@ vm_thread_new(struct thread *td, int pag
 
                        td->td_kstack_obj = ks_ce->ksobj;
                        td->td_kstack = (vm_offset_t)ks_ce;
-                       td->td_kstack_pages = KSTACK_PAGES;
+                       td->td_kstack_pages = kstack_pages;
                        return (1);
                }
                mtx_unlock(&kstack_cache_mtx);
@@ -444,7 +444,7 @@ vm_thread_dispose(struct thread *td)
        ks = td->td_kstack;
        td->td_kstack = 0;
        td->td_kstack_pages = 0;
-       if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
+       if (pages == kstack_pages && kstacks <= kstack_cache_size) {
                ks_ce = (struct kstack_cache_entry *)ks;
                ks_ce->ksobj = ksobj;
                mtx_lock(&kstack_cache_mtx);
@@ -471,7 +471,7 @@ vm_thread_stack_lowmem(void *nulll)
                ks_ce = ks_ce->next_ks_entry;
 
                vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
-                   KSTACK_PAGES);
+                   kstack_pages);
        }
 }
 

Modified: head/sys/x86/xen/pv.c
==============================================================================
--- head/sys/x86/xen/pv.c       Mon Aug 10 17:16:49 2015        (r286583)
+++ head/sys/x86/xen/pv.c       Mon Aug 10 17:18:21 2015        (r286584)
@@ -215,7 +215,7 @@ start_xen_ap(int cpu)
 {
        struct vcpu_guest_context *ctxt;
        int ms, cpus = mp_naps;
-       const size_t stacksize = KSTACK_PAGES * PAGE_SIZE;
+       const size_t stacksize = kstack_pages * PAGE_SIZE;
 
        /* allocate and set up an idle stack data page */
        bootstacks[cpu] =
@@ -227,7 +227,7 @@ start_xen_ap(int cpu)
        dpcpu =
            (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO);
 
-       bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
+       bootSTK = (char *)bootstacks[cpu] + kstack_pages * PAGE_SIZE - 8;
        bootAP = cpu;
 
        ctxt = malloc(sizeof(*ctxt), M_TEMP, M_WAITOK | M_ZERO);