On Mon, Jul 14, 2008 at 10:22:19PM +1000, Simon Horman wrote:

> > diff -r e8056a7091a7 xen/arch/ia64/linux-xen/mca_asm.S
> > --- a/xen/arch/ia64/linux-xen/mca_asm.S     Mon Jul 14 19:31:54 2008 +0900
> > +++ b/xen/arch/ia64/linux-xen/mca_asm.S     Mon Jul 14 19:33:20 2008 +0900
> > @@ -473,6 +473,7 @@
> >     ;;
> >     srlz.d
> >     ;;
> > +#ifndef XEN
> >     // 3. Reload ITR for PAL code.
> >     GET_THIS_PADDR(r2, ia64_mca_pal_pte)
> >     ;;
> > @@ -491,6 +492,8 @@
> >     ;;
> >     srlz.i
> >     ;;
> > +#endif
> > +   
> >     // 4. Reload DTR for stack.
> >  #ifdef XEN
> >     // Kernel registers are saved in a per_cpu cpu_kr_ia64_t
> > diff -r e8056a7091a7 xen/arch/ia64/vmx/vmx_entry.S
> > --- a/xen/arch/ia64/vmx/vmx_entry.S Mon Jul 14 19:31:54 2008 +0900
> > +++ b/xen/arch/ia64/vmx/vmx_entry.S Mon Jul 14 19:33:20 2008 +0900
> > @@ -598,7 +598,7 @@
> >  /*
> >   * in0: new rr7
> >   * in1: virtual address of guest_vhpt
> > - * in2: virtual address of pal code segment
> > + * in2: virtual address of guest shared_info
> >   * r8: will contain old rid value
> >   */
> >  
> > @@ -611,7 +611,7 @@
> >  GLOBAL_ENTRY(__vmx_switch_rr7)
> >         // not sure this unwind statement is correct...
> >         .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
> > -   alloc loc1 = ar.pfs, 4, 8, 0, 0
> > +   alloc loc1 = ar.pfs, 4, 7, 0, 0
> >  1:{
> >     mov r28  = in0                  // copy procedure index
> >     mov r8   = ip                   // save ip to compute branch
> > @@ -623,12 +623,9 @@
> >     tpa loc2 = loc2                 // get physical address of per cpu date
> >     tpa r3 = r8                     // get physical address of ip
> >     dep loc5 = 0,in1,60,4           // get physical address of guest_vhpt
> > -   dep loc6 = 0,in2,60,4           // get physical address of pal code
> > -   dep loc7 = 0,in3,60,4           // get physical address of privregs
> > +   dep loc6 = 0,in2,60,4           // get physical address of privregs
> >     ;;
> >     dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
> > -                                        // mask granule shift
> > -   dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
> >                                          // mask granule shift
> >     mov loc4 = psr                  // save psr
> >     ;;
> > @@ -725,46 +722,31 @@
> >     ;;
> >  .vhpt_overlaps:
> >  
> > -   // re-pin mappings for PAL code section
> > -   mov r24=IA64_TR_PALCODE
> > -   or loc6 = r25,loc6              // construct PA | page properties
> > -   mov r23 = IA64_GRANULE_SHIFT<<2
> > -   ;;
> > -   ptr.i   in2,r23
> > -   ;;
> > -   mov cr.itir=r23
> > -   mov cr.ifa=in2
> > -   ;;
> > -   itr.i itr[r24]=loc6             // wire in new mapping...
> > -   ;;
> > -
> >     // r16, r19, r20 are used by
> >     //  ia64_switch_mode_phys()/ia64_switch_mode_virt()
> >     // re-pin mappings for privregs
> >     // r21  = (current physical addr) & (IA64_GRANULE_SIZE - 1)
> >     // r17  = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
> > -   // loc6 = (((pal phys addr) & (IA64_GRANULE_SIZE - 1) << 2)) | PAGE_KERNEL
> > -   // loc7 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
> > -   cmp.ne.unc p7,p0=r21,loc7       // check overlap with current stack
> > +   // loc6 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
> > +   cmp.ne.unc p7,p0=r21,loc6       // check overlap with current stack
> >     ;;
> > -(p7)       cmp.ne.unc p8,p0=r17,loc7       // check overlap with guest_vhpt
> > +(p7)       cmp.ne.unc p8,p0=r17,loc6       // check overlap with guest_vhpt
> >     ;;
> > -   // loc7 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
> > -   or loc7 = r25,loc7          // construct PA | page properties
> > +   // loc6 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
> > +   or loc6 = r25,loc6          // construct PA | page properties
> >     ;;
> > -   cmp.ne p9,p0=loc6,loc7
> >     mov r22=IA64_TR_VPD
> >     mov r24=IA64_TR_MAPPED_REGS
> >     mov r23=IA64_GRANULE_SHIFT<<2
> >     ;;
> > -(p9)       ptr.i   in3,r23 
> > -(p8)       ptr.d   in3,r23
> > +   ptr.i   in2,r23
> > +(p8)       ptr.d   in2,r23
> >     mov cr.itir=r23
> > -   mov cr.ifa=in3
> > +   mov cr.ifa=in2
> >     ;;
> > -(p9)       itr.i itr[r22]=loc7         // wire in new mapping...
> > +   itr.i itr[r22]=loc6         // wire in new mapping...
> >     ;;
> > -(p8)       itr.d dtr[r24]=loc7         // wire in new mapping...
> > +(p8)       itr.d dtr[r24]=loc6         // wire in new mapping...
> >     ;;
> >  
> >     // done, switch back to virtual and return
> > diff -r e8056a7091a7 xen/arch/ia64/vmx/vmx_phy_mode.c
> > --- a/xen/arch/ia64/vmx/vmx_phy_mode.c      Mon Jul 14 19:31:54 2008 +0900
> > +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c      Mon Jul 14 19:33:20 2008 +0900
> > @@ -172,7 +172,7 @@
> >     ia64_dv_serialize_data();
> >     vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
> >                        (void *)vcpu->arch.vhpt.hash,
> > -                  pal_vaddr, vcpu->arch.privregs);
> > +                  vcpu->arch.privregs);
> >     ia64_set_pta(VMX(vcpu, mpta));
> >     vmx_ia64_set_dcr(vcpu);
> >  
> > diff -r e8056a7091a7 xen/arch/ia64/vmx/vmx_vcpu.c
> > --- a/xen/arch/ia64/vmx/vmx_vcpu.c  Mon Jul 14 19:31:54 2008 +0900
> > +++ b/xen/arch/ia64/vmx/vmx_vcpu.c  Mon Jul 14 19:33:20 2008 +0900
> > @@ -197,12 +197,12 @@
> >  }
> >  
> >  void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
> > -                    void *pal_vaddr, void *shared_arch_info)
> > +                    void *shared_arch_info)
> >  {
> >      __get_cpu_var(inserted_vhpt) = guest_vhpt;
> >      __get_cpu_var(inserted_vpd) = shared_arch_info;
> >      __get_cpu_var(inserted_mapped_regs) = shared_arch_info;
> > -    __vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
> > +    __vmx_switch_rr7(rid, guest_vhpt, shared_arch_info);
> >  }
> >  
> >  IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
> > @@ -219,7 +219,7 @@
> >      case VRN7:
> >          if (likely(vcpu == current))
> >              vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
> > -                           pal_vaddr, vcpu->arch.privregs);
> > +                           vcpu->arch.privregs);
> >         break;
> >      case VRN4:
> >          rrval = vrrtomrr(vcpu,val);
> > diff -r e8056a7091a7 xen/arch/ia64/xen/xenasm.S
> > --- a/xen/arch/ia64/xen/xenasm.S    Mon Jul 14 19:31:54 2008 +0900
> > +++ b/xen/arch/ia64/xen/xenasm.S    Mon Jul 14 19:33:20 2008 +0900
> > @@ -34,12 +34,13 @@
> >  //                         unsigned long va_vhpt)           /* in4 */
> >  //Local usage:
> >  //  loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
> > -//  loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
> > +//  loc5=shared_archinfo_paddr, loc6=xen_paddr, 
> >  //  r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
> > +// loc5 is unused.
> >  GLOBAL_ENTRY(ia64_new_rr7)
> >     // FIXME? not sure this unwind statement is correct...
> >     .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
> > -   alloc loc1 = ar.pfs, 5, 8, 0, 0
> > +   alloc loc1 = ar.pfs, 5, 7, 0, 0
> >     movl loc2=PERCPU_ADDR
> >  1: {
> >       mov loc3 = psr                // save psr     
> > @@ -51,7 +52,7 @@
> >     tpa in1=in1                     // grab shared_info BEFORE changing rr7
> >     adds r8 = 1f-1b,r8              // calculate return address for call
> >     ;;
> > -   tpa loc7=in2                    // grab arch_vcpu_info BEFORE chg rr7
> > +   tpa loc5=in2                    // grab arch_vcpu_info BEFORE chg rr7
> >     movl r17=PSR_BITS_TO_SET
> >     mov loc4=ar.rsc                 // save RSE configuration
> >     movl r16=PSR_BITS_TO_CLEAR
> > @@ -60,10 +61,7 @@
> >     mov ar.rsc=0                    // put RSE in enforced lazy, LE mode
> >     or loc3=loc3,r17                // add in psr the bits to set
> >     ;;
> > -   movl loc5=pal_vaddr             // get pal_vaddr
> > -   ;;
> > -   ld8 loc5=[loc5]                 // read pal_vaddr
> > -   ;;
> > +
> >     andcm r16=loc3,r16              // removes bits to clear from psr
> >     dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
> >     br.call.sptk.many rp=ia64_switch_mode_phys
> > @@ -163,24 +161,12 @@
> >     add r22=r22,in3
> >     ;;
> >     ptr.d   r22,r24
> > -   or r23=loc7,r25                 // construct PA | page properties
> > +   or r23=loc5,r25                 // construct PA | page properties
> >     mov cr.itir=r24
> >     mov cr.ifa=r22
> >     mov r21=IA64_TR_MAPPED_REGS
> >     ;;
> >     itr.d dtr[r21]=r23              // wire in new mapping...
> > -
> > -   // Purge/insert PAL TR
> > -   mov r24=IA64_TR_PALCODE
> > -   mov r23=IA64_GRANULE_SHIFT<<2
> > -   dep r25=0,loc5,60,4             // convert pal vaddr to paddr
> > -   ;;
> > -   ptr.i   loc5,r23
> > -   or r25=r25,r26                  // construct PA | page properties
> > -   mov cr.itir=r23
> > -   mov cr.ifa=loc5
> > -   ;;
> > -   itr.i itr[r24]=r25
> >  
> >     // done, switch back to virtual and return
> >     mov r16=loc3                    // r16= original psr
> > @@ -361,6 +347,8 @@
> >     mov cr.ifa=loc5
> >     ;;
> >     itr.i itr[r24]=r25
> > +   ;;
> > +   srlz.i
> 
> This srlz.i is probably good to have, but it's not really related?

You're right. It isn't necessary.


> >     // done, switch back to virtual and return
> >     mov r16=loc3                    // r16= original psr
> > diff -r e8056a7091a7 xen/include/asm-ia64/linux-xen/linux/efi.h
> > --- a/xen/include/asm-ia64/linux-xen/linux/efi.h    Mon Jul 14 19:31:54 2008 +0900
> > +++ b/xen/include/asm-ia64/linux-xen/linux/efi.h    Mon Jul 14 19:33:20 2008 +0900
> > @@ -25,6 +25,7 @@
> >  #include <asm/system.h>
> >  
> >  #ifdef XEN
> > +#include <asm/meminit.h>     /* GRANULEROUNDDOWN */
> >  extern void * pal_vaddr;
> >  #endif
> >  
> > @@ -474,6 +475,10 @@
> >  } while (0)
> >  
> >  #define XEN_EFI_RR_RESTORE(rr6, rr7) do {          \
> > +   ia64_ptr(0x1 /*I*/,                             \
> > +            GRANULEROUNDDOWN(                      \
> > +                    (unsigned long)pal_vaddr),     \
> > +            IA64_GRANULE_SHIFT);                   \
> >     set_one_rr_efi(6UL << 61, rr6);                 \
> >     set_one_rr_efi(7UL << 61, rr7);                 \
> 
> I don't think this is quite right because ia64_new_rr7_efi
> (via set_one_rr_efi()) will just reinsert the pal_vaddr.
> 
> I think it might be better to make things a bit more symmetrical.
> Pin pal_vaddr in XEN_EFI_RR_SAVE after calling set_one_rr_efi(),
> unpin it in XEN_EFI_RR_RESTORE (as above) and not touch it at all in
> set_one_rr_efi().
> 

Agreed. I also came to the conclusion that it should be more symmetrical:
ia64_new_rr7_efi() shouldn't pin down the PAL code, and the macros should
be something like

#define XEN_EFI_RR_SAVE
        set_one_rr_efi(rr6)
        set_one_rr_efi(rr7)
        pin down pal code
          ia64_ptr()
          ia64_itr()
          Probably efi_map_pal_code() can be reused here,
          maybe as an inline function.
        
#define XEN_EFI_RR_RESTORE
        purge pinning pal code
          ia64_ptr(...)
        set_one_rr_efi(rr6)
        set_one_rr_efi(rr7)
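
Roughly, in C the whole thing could look like the sketch below. This is
only a sketch: xen_efi_pin_pal_code() is a made-up name (its body just
follows Linux's efi_map_pal_code()), the saving of the old rr6/rr7
values is elided, and it assumes set_one_rr_efi() no longer touches the
PAL TR at all.

static inline void xen_efi_pin_pal_code(void)
{
	/* PSR.ic must be off while writing a translation register. */
	u64 psr = ia64_clear_ic();

	/* purge any stale PAL mapping, then re-insert it */
	ia64_ptr(0x1 /* I */,
		 GRANULEROUNDDOWN((unsigned long)pal_vaddr),
		 IA64_GRANULE_SHIFT);
	ia64_srlz_i();
	ia64_itr(0x1 /* I */, IA64_TR_PALCODE,
		 GRANULEROUNDDOWN((unsigned long)pal_vaddr),
		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
		 IA64_GRANULE_SHIFT);
	ia64_set_psr(psr);
}

#define XEN_EFI_RR_SAVE(rr6, rr7) do {				\
	set_one_rr_efi(6UL << 61, rr6);				\
	set_one_rr_efi(7UL << 61, rr7);				\
	xen_efi_pin_pal_code();		/* pin down PAL code */	\
} while (0)

#define XEN_EFI_RR_RESTORE(rr6, rr7) do {			\
	/* unpin PAL code before switching rr6/rr7 back */	\
	ia64_ptr(0x1 /* I */,					\
		 GRANULEROUNDDOWN((unsigned long)pal_vaddr),	\
		 IA64_GRANULE_SHIFT);				\
	set_one_rr_efi(6UL << 61, rr6);				\
	set_one_rr_efi(7UL << 61, rr7);				\
} while (0)

That way the pin/unpin pair is visible in one place instead of being
hidden inside set_one_rr_efi().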


> It would probably also be good to give XEN_EFI_RR_RESTORE and
> XEN_EFI_RR_SAVE different names. Perhaps XEN_EFI_ENTER and
> XEN_EFI_LEAVE.

Agreed.

-- 
yamahata

_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@lists.xensource.com
http://lists.xensource.com/xen-ia64-devel
