Author: nwhitehorn
Date: Wed Nov  3 15:15:48 2010
New Revision: 214739
URL: http://svn.freebsd.org/changeset/base/214739

Log:
  Clean up the user segment handling code a little more. Now that
  set_user_sr() itself caches the user segment VSID, there is no need for
  cpu_switch() to do it again. This change also unifies the 32 and 64-bit
  code paths for kernel faults on user pages and remaps the user SLB slot
  on 64-bit systems when taking a syscall to avoid some unnecessary segment
  exception traps.
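
  To make the pattern in the log concrete, here is a small stand-alone C
  sketch (not kernel code) of the caching scheme described above: a
  per-thread structure remembers the last user segment and VSID that were
  programmed into the hardware, the segment reload is skipped when the
  cached VSID already matches, and the fault path reads the segment number
  back from the cache rather than from the hardware.  Every name in the
  sketch (demo_pcb, program_user_segment, demo_set_user_sr) is invented
  for illustration and is not a kernel API; only usr_segm and usr_vsid
  mirror the pcb_cpu.aim fields touched in the diff below.

/*
 * Illustrative sketch only, not FreeBSD kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_pcb {
	uintptr_t usr_segm;	/* cached user segment number */
	uint64_t  usr_vsid;	/* cached VSID for that segment */
};

/* Stand-in for the slbie/slbmte (64-bit) or mtsr (32-bit) sequence. */
static void
program_user_segment(uint64_t vsid)
{
	printf("reloading user segment mapping, vsid=%#llx\n",
	    (unsigned long long)vsid);
}

/*
 * Shape of set_user_sr() after this change: update the cache, then the
 * hardware, and do nothing when the mapping is already in place.
 */
static void
demo_set_user_sr(struct demo_pcb *pcb, uintptr_t addr, uint64_t vsid)
{
	if (pcb->usr_vsid == vsid)		/* already mapped */
		return;
	pcb->usr_segm = addr >> 28;		/* 256 MB segments */
	pcb->usr_vsid = vsid;
	program_user_segment(vsid);
}

int
main(void)
{
	struct demo_pcb pcb = { 0, 0 };

	demo_set_user_sr(&pcb, 0x10001000, 0x1234);	/* reloads */
	demo_set_user_sr(&pcb, 0x10002000, 0x1234);	/* cache hit, skipped */

	/*
	 * A fault handler can consult the cache directly, the way the
	 * unified trap_pfault() path now reads usr_segm.
	 */
	printf("faulting user segment = %#lx\n", (unsigned long)pcb.usr_segm);
	return (0);
}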

Modified:
  head/sys/powerpc/aim/copyinout.c
  head/sys/powerpc/aim/swtch32.S
  head/sys/powerpc/aim/swtch64.S
  head/sys/powerpc/aim/trap.c

Modified: head/sys/powerpc/aim/copyinout.c
==============================================================================
--- head/sys/powerpc/aim/copyinout.c    Wed Nov  3 13:42:59 2010        (r214738)
+++ head/sys/powerpc/aim/copyinout.c    Wed Nov  3 15:15:48 2010        (r214739)
@@ -102,11 +102,12 @@ set_user_sr(pmap_t pm, const void *addr)
        if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv) 
                return;
 
-       __asm __volatile ("isync; slbie %0; slbmte %1, %2; isync" ::
-           "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
+       __asm __volatile("isync");
        curthread->td_pcb->pcb_cpu.aim.usr_segm =
            (uintptr_t)addr >> ADDR_SR_SHFT;
        curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv;
+       __asm __volatile ("slbie %0; slbmte %1, %2; isync" ::
+           "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
 }
 #else
 static __inline void
@@ -124,6 +125,8 @@ set_user_sr(pmap_t pm, const void *addr)
        vsid |= SR_N;
 
        __asm __volatile("isync");
+       curthread->td_pcb->pcb_cpu.aim.usr_segm =
+           (uintptr_t)addr >> ADDR_SR_SHFT;
        curthread->td_pcb->pcb_cpu.aim.usr_vsid = vsid;
        __asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(vsid));
 }

Modified: head/sys/powerpc/aim/swtch32.S
==============================================================================
--- head/sys/powerpc/aim/swtch32.S      Wed Nov  3 13:42:59 2010        (r214738)
+++ head/sys/powerpc/aim/swtch32.S      Wed Nov  3 15:15:48 2010        (r214739)
@@ -88,8 +88,6 @@ ENTRY(cpu_switch)
        stw     %r16,PCB_CR(%r6)
        mflr    %r16                    /* Save the link register */
        stw     %r16,PCB_LR(%r6)
-       mfsr    %r16,USER_SR            /* Save USER_SR for copyin/out */
-       stw     %r16,PCB_AIM_USR_VSID(%r6)
        stw     %r1,PCB_SP(%r6)         /* Save the stack pointer */
        stw     %r2,PCB_TOC(%r6)        /* Save the TOC pointer */
 

Modified: head/sys/powerpc/aim/swtch64.S
==============================================================================
--- head/sys/powerpc/aim/swtch64.S      Wed Nov  3 13:42:59 2010        (r214738)
+++ head/sys/powerpc/aim/swtch64.S      Wed Nov  3 15:15:48 2010        (r214739)
@@ -110,11 +110,6 @@ ENTRY(cpu_switch)
        std     %r1,PCB_SP(%r6)         /* Save the stack pointer */
        std     %r2,PCB_TOC(%r6)        /* Save the TOC pointer */
        
-       li      %r15,0                  /* Save user segment for copyin/out */
-       li      %r16,USER_SLB_SLOT
-       slbmfev %r15, %r16
-       std     %r15,PCB_AIM_USR_VSID(%r6)
-
        mr      %r14,%r3                /* Copy the old thread ptr... */
        mr      %r15,%r4                /* and the new thread ptr in scratch */
        mr      %r16,%r5                /* and the new lock */

Modified: head/sys/powerpc/aim/trap.c
==============================================================================
--- head/sys/powerpc/aim/trap.c Wed Nov  3 13:42:59 2010        (r214738)
+++ head/sys/powerpc/aim/trap.c Wed Nov  3 15:15:48 2010        (r214739)
@@ -455,6 +455,13 @@ syscall(struct trapframe *frame)
        td = PCPU_GET(curthread);
        td->td_frame = frame;
 
+       /*
+        * Speculatively restore last user SLB segment, which we know is
+        * invalid already, since we are likely to do copyin()/copyout().
+        */
+       __asm __volatile ("slbmte %0, %1; isync" ::
+            "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
+
        error = syscallenter(td, &sa);
        syscallret(td, error, &sa);
 }
@@ -532,13 +539,7 @@ trap_pfault(struct trapframe *frame, int
 
                        map = &p->p_vmspace->vm_map;
 
-                       #ifdef __powerpc64__
                        user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
-                       #else
-                       __asm ("mfsr %0, %1"
-                           : "=r"(user_sr)
-                           : "K"(USER_SR));
-                       #endif
                        eva &= ADDR_PIDX | ADDR_POFF;
                        eva |= user_sr << ADDR_SR_SHFT;
                } else {
