On Wed, Jul 16, 2008 at 12:20:54PM +1000, Simon Horman wrote:
> Move PAL code from the Xen identity mapped region to the
> EFI identity mapped region, which overlaps with guest virtual space.
>
> Make sure that PAL memory is only pinned into the TLB when making
> EFI, PAL or SAL calls.
>
> This seems to be nice as it provides a symmetrical approach to
> mapping and unmapping PAL code.
>
> However, it would be just as safe, and arguably simpler, to map
> the PAL code (once?) while the EFI RR is active - for instance very
> early on in boot, or when calling XEN_EFI_RR_ENTER. In other words,
> unpinning in XEN_EFI_RR_LEAVE shouldn't be necessary, as the EFI RR
> should protect the memory from unwanted accesses by guests (or
> the hypervisor for that matter).
>
> This patch is mostly the work of Yamahata-san.
>
> Cc: Isaku Yamahata <[EMAIL PROTECTED]>
> Signed-off-by: Simon Horman <[EMAIL PROTECTED]>
>
> Tue, 15 Jul 2008 13:06:42 +1000
>   * Remove spurious change of ptr.i to ptr.d
>     just above .vpd_not_mapped in ia64_do_tlb_purge.
>
> Wed, 16 Jul 2008 12:16:51 +1000
>   * Unpin PAL in efi_map_pal_code() to ensure that it isn't already
>     present when pinned. Thanks to Yamahata-san.
>
> Index: xen-unstable.hg/xen/arch/ia64/linux-xen/efi.c
> ===================================================================
> --- xen-unstable.hg.orig/xen/arch/ia64/linux-xen/efi.c  2008-07-16 11:53:25.000000000 +1000
> +++ xen-unstable.hg/xen/arch/ia64/linux-xen/efi.c  2008-07-16 12:15:17.000000000 +1000
> @@ -424,7 +424,7 @@ efi_get_pal_addr (void)
>                  md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
>                  vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
>  #endif
> -        return __va(md->phys_addr);
> +        return __va_efi(md->phys_addr);
>      }
>      printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
>             __FUNCTION__);
> @@ -432,7 +432,7 @@ efi_get_pal_addr (void)
>  }
>
>  #ifdef XEN
> -void *pal_vaddr = 0;
> +static void *pal_vaddr = 0;
>
>  void *
>  efi_get_pal_addr(void)
> @@ -443,24 +443,53 @@ efi_get_pal_addr(void)
>  }
>  #endif
>
> +#ifdef XEN
> +static void
> +__efi_unmap_pal_code (void *pal_vaddr)
> +{
> +        ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned long)pal_vaddr),
> +                 IA64_GRANULE_SHIFT);
> +}
> +
>  void
> -efi_map_pal_code (void)
> +efi_unmap_pal_code (void)
>  {
> -#ifdef XEN
> -        u64 psr;
> -        (void)efi_get_pal_addr();
> -#else
>          void *pal_vaddr = efi_get_pal_addr ();
>          u64 psr;
>
>          if (!pal_vaddr)
>                  return;
> +
> +        /*
> +         * Cannot write to CRx with PSR.ic=1
> +         */
> +        psr = ia64_clear_ic();
> +        __efi_unmap_pal_code(pal_vaddr);
> +        ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned long)pal_vaddr),
> +                 IA64_GRANULE_SHIFT);
> +        ia64_set_psr(psr);      /* restore psr */
> +        ia64_srlz_i();
> +}

Forgot to delete ia64_ptr()?
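For reference, with that duplicated purge dropped the unmap routine would
reduce to something like the sketch below (untested, not part of the patch;
it only uses helpers already shown in the hunk above):

/* Sketch only: __efi_unmap_pal_code() already issues the ia64_ptr()
 * purge of the PAL granule, so the explicit ia64_ptr() call in the
 * hunk above appears redundant. */
void
efi_unmap_pal_code (void)
{
        void *pal_vaddr = efi_get_pal_addr ();
        u64 psr;

        if (!pal_vaddr)
                return;

        /*
         * Cannot write to CRx with PSR.ic=1
         */
        psr = ia64_clear_ic();
        __efi_unmap_pal_code(pal_vaddr);        /* purges IA64_TR_PALCODE */
        ia64_set_psr(psr);                      /* restore psr */
        ia64_srlz_i();
}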
>  #endif
>
> +void
> +efi_map_pal_code (void)
> +{
> +        void *pal_vaddr = efi_get_pal_addr ();
> +        u64 psr;
> +
> +        if (!pal_vaddr)
> +                return;
> +
>          /*
>           * Cannot write to CRx with PSR.ic=1
>           */
>          psr = ia64_clear_ic();
> +#ifdef XEN
> +        /* pal_vaddr must be unpinned before pinning
> +         * This is needed in the case of a nested EFI, PAL or SAL call */
> +        __efi_unmap_pal_code(pal_vaddr);
> +#endif
>          ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
>                   pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
>                   IA64_GRANULE_SHIFT);
> @@ -594,7 +623,9 @@ efi_init (void)
>      }
>  #endif
>
> +#ifndef XEN
>      efi_map_pal_code();
> +#endif
>      efi_enter_virtual_mode();
>  }
>
> Index: xen-unstable.hg/xen/arch/ia64/linux-xen/mca_asm.S
> ===================================================================
> --- xen-unstable.hg.orig/xen/arch/ia64/linux-xen/mca_asm.S  2008-07-16 11:53:25.000000000 +1000
> +++ xen-unstable.hg/xen/arch/ia64/linux-xen/mca_asm.S  2008-07-16 11:53:26.000000000 +1000
> @@ -473,6 +473,7 @@ ia64_reload_tr:
>      ;;
>      srlz.d
>      ;;
> +#ifndef XEN
>      // 3. Reload ITR for PAL code.
>      GET_THIS_PADDR(r2, ia64_mca_pal_pte)
>      ;;
> @@ -491,6 +492,8 @@ ia64_reload_tr:
>      ;;
>      srlz.i
>      ;;
> +#endif
> +
>      // 4. Reload DTR for stack.
>  #ifdef XEN
>      // Kernel registers are saved in a per_cpu cpu_kr_ia64_t
> Index: xen-unstable.hg/xen/arch/ia64/vmx/vmx_entry.S
> ===================================================================
> --- xen-unstable.hg.orig/xen/arch/ia64/vmx/vmx_entry.S  2008-07-16 11:53:25.000000000 +1000
> +++ xen-unstable.hg/xen/arch/ia64/vmx/vmx_entry.S  2008-07-16 11:53:26.000000000 +1000
> @@ -598,7 +598,7 @@ END(ia64_leave_hypercall)
>  /*
>   * in0: new rr7
>   * in1: virtual address of guest_vhpt
> - * in2: virtual address of pal code segment
> + * in2: virtual addres of guest shared_info
>   * r8: will contain old rid value
>   */
>
> @@ -611,7 +611,7 @@ END(ia64_leave_hypercall)
>  GLOBAL_ENTRY(__vmx_switch_rr7)
>      // not sure this unwind statement is correct...
>      .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
> -    alloc loc1 = ar.pfs, 4, 8, 0, 0
> +    alloc loc1 = ar.pfs, 4, 7, 0, 0
>  1:{
>      mov r28 = in0               // copy procedure index
>      mov r8 = ip                 // save ip to compute branch
> @@ -623,13 +623,10 @@ GLOBAL_ENTRY(__vmx_switch_rr7)
>      tpa loc2 = loc2             // get physical address of per cpu date
>      tpa r3 = r8                 // get physical address of ip
>      dep loc5 = 0,in1,60,4       // get physical address of guest_vhpt
> -    dep loc6 = 0,in2,60,4       // get physical address of pal code
> -    dep loc7 = 0,in3,60,4       // get physical address of privregs
> +    dep loc6 = 0,in2,60,4       // get physical address of privregs
>      ;;
>      dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
>                                  // mask granule shift
> -    dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
> -                                // mask granule shift
>      mov loc4 = psr              // save psr
>      ;;
>      mov loc3 = ar.rsc           // save RSE configuration
> @@ -725,46 +722,31 @@ GLOBAL_ENTRY(__vmx_switch_rr7)
>      ;;
>  .vhpt_overlaps:
>
> -    // re-pin mappings for PAL code section
> -    mov r24=IA64_TR_PALCODE
> -    or loc6 = r25,loc6          // construct PA | page properties
> -    mov r23 = IA64_GRANULE_SHIFT<<2
> -    ;;
> -    ptr.i in2,r23
> -    ;;
> -    mov cr.itir=r23
> -    mov cr.ifa=in2
> -    ;;
> -    itr.i itr[r24]=loc6         // wire in new mapping...
> -    ;;
> -
>      // r16, r19, r20 are used by
>      //  ia64_switch_mode_phys()/ia64_switch_mode_virt()
>      // re-pin mappings for privregs
>      // r21 = (current physical addr) & (IA64_GRANULE_SIZE - 1)
>      // r17 = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
> -    // loc6 = (((pal phys addr) & (IA64_GRANULE_SIZE - 1) << 2)) | PAGE_KERNEL
> -    // loc7 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
> -    cmp.ne.unc p7,p0=r21,loc7   // check overlap with current stack
> +    // loc6 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
> +    cmp.ne.unc p7,p0=r21,loc6   // check overlap with current stack
>      ;;
> -(p7) cmp.ne.unc p8,p0=r17,loc7  // check overlap with guest_vhpt
> +(p7) cmp.ne.unc p8,p0=r17,loc6  // check overlap with guest_vhpt
>      ;;
> -    // loc7 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
> -    or loc7 = r25,loc7          // construct PA | page properties
> +    // loc6 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
> +    or loc6 = r25,loc6          // construct PA | page properties
>      ;;
> -    cmp.ne p9,p0=loc6,loc7
>      mov r22=IA64_TR_VPD
>      mov r24=IA64_TR_MAPPED_REGS
>      mov r23=IA64_GRANULE_SHIFT<<2
>      ;;
> -(p9) ptr.i in3,r23
> -(p8) ptr.d in3,r23
> +    ptr.i in2,r23
> +(p8) ptr.d in2,r23
>      mov cr.itir=r23
> -    mov cr.ifa=in3
> +    mov cr.ifa=in2
>      ;;
> -(p9) itr.i itr[r22]=loc7        // wire in new mapping...
> +    itr.i itr[r22]=loc6         // wire in new mapping...
>      ;;
> -(p8) itr.d dtr[r24]=loc7        // wire in new mapping...
> +(p8) itr.d dtr[r24]=loc6        // wire in new mapping...
>      ;;
>
>      // done, switch back to virtual and return
> Index: xen-unstable.hg/xen/arch/ia64/vmx/vmx_phy_mode.c
> ===================================================================
> --- xen-unstable.hg.orig/xen/arch/ia64/vmx/vmx_phy_mode.c  2008-07-16 08:29:58.000000000 +1000
> +++ xen-unstable.hg/xen/arch/ia64/vmx/vmx_phy_mode.c  2008-07-16 11:53:26.000000000 +1000
> @@ -133,8 +133,6 @@ vmx_init_all_rr(VCPU *vcpu)
>      VMX(vcpu, vrr[VRN7]) = 0x738;
>  }
>
> -extern void * pal_vaddr;
> -
>  void
>  vmx_load_all_rr(VCPU *vcpu)
>  {
> @@ -173,7 +171,7 @@ vmx_load_all_rr(VCPU *vcpu)
>      ia64_dv_serialize_data();
>      vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
>                     (void *)vcpu->arch.vhpt.hash,
> -                   pal_vaddr, vcpu->arch.privregs);
> +                   vcpu->arch.privregs);
>      ia64_set_pta(VMX(vcpu, mpta));
>      vmx_ia64_set_dcr(vcpu);
>
> Index: xen-unstable.hg/xen/arch/ia64/vmx/vmx_vcpu.c
> ===================================================================
> --- xen-unstable.hg.orig/xen/arch/ia64/vmx/vmx_vcpu.c  2008-07-16 11:53:25.000000000 +1000
> +++ xen-unstable.hg/xen/arch/ia64/vmx/vmx_vcpu.c  2008-07-16 11:53:26.000000000 +1000
> @@ -197,12 +197,12 @@ void vmx_vcpu_set_rr_fast(VCPU *vcpu, u6
>  }
>
>  void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
> -                    void *pal_vaddr, void *shared_arch_info)
> +                    void *shared_arch_info)
>  {
>      __get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
>      __get_cpu_var(inserted_vpd) = (unsigned long)shared_arch_info;
>      __get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
> -    __vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
> +    __vmx_switch_rr7(rid, guest_vhpt, shared_arch_info);
>  }
>
>  IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
> @@ -219,7 +219,7 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
>      case VRN7:
>          if (likely(vcpu == current))
>              vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
> -                           pal_vaddr, vcpu->arch.privregs);
> +                           vcpu->arch.privregs);
>          break;
>      case VRN4:
>          rrval = vrrtomrr(vcpu,val);
> Index: xen-unstable.hg/xen/arch/ia64/xen/xenasm.S
> ===================================================================
> --- xen-unstable.hg.orig/xen/arch/ia64/xen/xenasm.S  2008-07-16 11:53:25.000000000 +1000
> +++ xen-unstable.hg/xen/arch/ia64/xen/xenasm.S  2008-07-16 11:53:26.000000000 +1000
> @@ -34,12 +34,13 @@
>  //                     unsigned long va_vhpt)  /* in4 */
>  //Local usage:
>  //  loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
> -//  loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
> +//  loc5=shared_archinfo_paddr, loc6=xen_paddr,
>  //  r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
> +//  loc5 is unused.
>  GLOBAL_ENTRY(ia64_new_rr7)
>      // FIXME? not sure this unwind statement is correct...
>      .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
> -    alloc loc1 = ar.pfs, 5, 8, 0, 0
> +    alloc loc1 = ar.pfs, 5, 7, 0, 0
>      movl loc2=PERCPU_ADDR
>  1:  {
>        mov loc3 = psr            // save psr
> @@ -51,7 +52,7 @@ GLOBAL_ENTRY(ia64_new_rr7)
>      tpa in1=in1                 // grab shared_info BEFORE changing rr7
>      adds r8 = 1f-1b,r8          // calculate return address for call
>      ;;
> -    tpa loc7=in2                // grab arch_vcpu_info BEFORE chg rr7
> +    tpa loc5=in2                // grab arch_vcpu_info BEFORE chg rr7
>      movl r17=PSR_BITS_TO_SET
>      mov loc4=ar.rsc             // save RSE configuration
>      movl r16=PSR_BITS_TO_CLEAR
> @@ -60,10 +61,7 @@ GLOBAL_ENTRY(ia64_new_rr7)
>      mov ar.rsc=0                // put RSE in enforced lazy, LE mode
>      or loc3=loc3,r17            // add in psr the bits to set
>      ;;
> -    movl loc5=pal_vaddr         // get pal_vaddr
> -    ;;
> -    ld8 loc5=[loc5]             // read pal_vaddr
> -    ;;
> +
>      andcm r16=loc3,r16          // removes bits to clear from psr
>      dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT    // Xen code paddr
>      br.call.sptk.many rp=ia64_switch_mode_phys
> @@ -163,25 +161,13 @@ GLOBAL_ENTRY(ia64_new_rr7)
>      add r22=r22,in3
>      ;;
>      ptr.d r22,r24
> -    or r23=loc7,r25             // construct PA | page properties
> +    or r23=loc5,r25             // construct PA | page properties
>      mov cr.itir=r24
>      mov cr.ifa=r22
>      mov r21=IA64_TR_MAPPED_REGS
>      ;;
>      itr.d dtr[r21]=r23          // wire in new mapping...
>
> -    // Purge/insert PAL TR
> -    mov r24=IA64_TR_PALCODE
> -    mov r23=IA64_GRANULE_SHIFT<<2
> -    dep r25=0,loc5,60,4         // convert pal vaddr to paddr
> -    ;;
> -    ptr.i loc5,r23
> -    or r25=r25,r26              // construct PA | page properties
> -    mov cr.itir=r23
> -    mov cr.ifa=loc5
> -    ;;
> -    itr.i itr[r24]=r25
> -
>      // done, switch back to virtual and return
>      mov r16=loc3                // r16= original psr
>      br.call.sptk.many rp=ia64_switch_mode_virt  // return to virtual mode
> @@ -216,7 +202,7 @@ END(ia64_new_rr7)
>  GLOBAL_ENTRY(ia64_new_rr7_efi)
>      // FIXME? not sure this unwind statement is correct...
>      .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
> -    alloc loc1 = ar.pfs, 3, 8, 0, 0
> +    alloc loc1 = ar.pfs, 3, 7, 0, 0
>      movl loc2=PERCPU_ADDR
>  1:  {
>        mov loc3 = psr            // save psr
> @@ -235,17 +221,13 @@ GLOBAL_ENTRY(ia64_new_rr7_efi)
>      mov ar.rsc=0                // put RSE in enforced lazy, LE mode
>      or loc3=loc3,r17            // add in psr the bits to set
>      ;;
> -    movl loc5=pal_vaddr         // get pal_vaddr
> -    ;;
> -    ld8 loc5=[loc5]             // read pal_vaddr
> +    dep loc6 = 0,in2,60,4       // get physical address of VPD
>      ;;
> -    dep loc7 = 0,in2,60,4       // get physical address of VPD
> -    ;;
> -    dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
> +    dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
>                                  // mask granule shift
>      ;;
>      andcm r16=loc3,r16          // removes bits to clear from psr
> -    dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT    // Xen code paddr
> +    dep loc5=0,r8,0,KERNEL_TR_PAGE_SHIFT    // Xen code paddr
>      br.call.sptk.many rp=ia64_switch_mode_phys
>  1:
>      movl r26=PAGE_KERNEL
> @@ -271,7 +253,7 @@ GLOBAL_ENTRY(ia64_new_rr7_efi)
>      mov r16=IA64_TR_KERNEL
>      mov cr.itir=r24
>      mov cr.ifa=r17
> -    or r18=loc6,r26
> +    or r18=loc5,r26
>      ;;
>      itr.i itr[r16]=r18
>      ;;
> @@ -324,7 +306,7 @@ ia64_new_rr7_efi_percpu_not_mapped:
>      // VPD
>      cmp.eq p7,p0=r0,in2
> (p7) br.cond.sptk ia64_new_rr7_efi_vpd_not_mapped
> -    or loc7 = r26,loc7          // construct PA | page properties
> +    or loc6 = r26,loc6          // construct PA | page properties
>      mov r22=IA64_TR_VPD
>      mov r24=IA64_TR_MAPPED_REGS
>      mov r23=IA64_GRANULE_SHIFT<<2
> @@ -340,9 +322,9 @@ ia64_new_rr7_efi_percpu_not_mapped:
>      mov cr.itir=r23
>      mov cr.ifa=in2
>      ;;
> -    itr.i itr[r22]=loc7
> +    itr.i itr[r22]=loc6
>      ;;
> -    itr.d dtr[r24]=loc7
> +    itr.d dtr[r24]=loc6
>      ;;
>      srlz.i
>      ;;
> @@ -350,18 +332,6 @@ ia64_new_rr7_efi_percpu_not_mapped:
>      ;;
>  ia64_new_rr7_efi_vpd_not_mapped:
>
> -    // Purge/insert PAL TR
> -    mov r24=IA64_TR_PALCODE
> -    mov r23=IA64_GRANULE_SHIFT<<2
> -    dep r25=0,loc5,60,4         // convert pal vaddr to paddr
> -    ;;
> -    ptr.i loc5,r23
> -    or r25=r25,r26              // construct PA | page properties
> -    mov cr.itir=r23
> -    mov cr.ifa=loc5
> -    ;;
> -    itr.i itr[r24]=r25
> -
>      // done, switch back to virtual and return
>      mov r16=loc3                // r16= original psr
>      br.call.sptk.many rp=ia64_switch_mode_virt  // return to virtual mode
> Index: xen-unstable.hg/xen/include/asm-ia64/linux-xen/linux/efi.h
> ===================================================================
> --- xen-unstable.hg.orig/xen/include/asm-ia64/linux-xen/linux/efi.h  2008-07-16 11:53:24.000000000 +1000
> +++ xen-unstable.hg/xen/include/asm-ia64/linux-xen/linux/efi.h  2008-07-16 11:54:29.000000000 +1000
> @@ -24,10 +24,6 @@
>  #include <asm/page.h>
>  #include <asm/system.h>
>
> -#ifdef XEN
> -extern void * pal_vaddr;
> -#endif
> -
>  #define EFI_SUCCESS             0
>  #define EFI_LOAD_ERROR          ( 1 | (1UL << (BITS_PER_LONG-1)))
>  #define EFI_INVALID_PARAMETER   ( 2 | (1UL << (BITS_PER_LONG-1)))
> @@ -302,6 +298,9 @@ efi_guid_unparse(efi_guid_t *guid, char
>  extern void efi_init (void);
>  extern void *efi_get_pal_addr (void);
>  extern void efi_map_pal_code (void);
> +#ifdef XEN
> +extern void efi_unmap_pal_code (void);
> +#endif
>  extern void efi_map_memmap(void);
>  extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
>  extern void efi_gettimeofday (struct timespec *ts);
> @@ -471,9 +470,11 @@ struct efi_generic_dev_path {
>      rr7 = ia64_get_rr(7UL << 61);           \
>      set_one_rr_efi(6UL << 61, XEN_EFI_RR);  \
>      set_one_rr_efi(7UL << 61, XEN_EFI_RR);  \
> +    efi_map_pal_code();                     \
>  } while (0)
>
>  #define XEN_EFI_RR_LEAVE(rr6, rr7) do {     \
> +    efi_unmap_pal_code();                   \
>      set_one_rr_efi(6UL << 61, rr6);         \
>      set_one_rr_efi(7UL << 61, rr7);         \
>  } while (0)
> Index: xen-unstable.hg/xen/include/asm-ia64/vmx_vcpu.h

I think XEN_EFI_RR_LEAVE() should call efi_unmap_pal_code()
conditionally, based on an rr7 check, because in the nested
firmware-call case itr[IA64_TR_PALCODE] should not be purged.
Otherwise, when control returns to the firmware, undesirable
things would happen.
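For illustration, the conditional unpin could look something like this
(untested sketch, not part of the patch; it assumes the saved rr7 value
being restored can simply be compared against XEN_EFI_RR to detect the
nested case):

/* Sketch only: purge IA64_TR_PALCODE only when really leaving the
 * EFI RR context; a nested EFI/PAL/SAL call restores rr7 == XEN_EFI_RR
 * and must keep the PAL translation pinned. */
#define XEN_EFI_RR_LEAVE(rr6, rr7) do {     \
    if ((rr7) != XEN_EFI_RR)                \
        efi_unmap_pal_code();               \
    set_one_rr_efi(6UL << 61, rr6);         \
    set_one_rr_efi(7UL << 61, rr7);         \
} while (0)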
> ===================================================================
> --- xen-unstable.hg.orig/xen/include/asm-ia64/vmx_vcpu.h  2008-07-16 11:53:25.000000000 +1000
> +++ xen-unstable.hg/xen/include/asm-ia64/vmx_vcpu.h  2008-07-16 11:53:26.000000000 +1000
> @@ -104,9 +104,9 @@ extern uint64_t guest_read_vivr(VCPU * v
>  extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
>  extern void vcpu_load_kernel_regs(VCPU * vcpu);
>  extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
> -                             void *pal_vaddr, void *shared_arch_info);
> +                             void *shared_arch_info);
>  extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
> -                           void *pal_vaddr, void *shared_arch_info);
> +                           void *shared_arch_info);
>  extern void vmx_ia64_set_dcr(VCPU * v);
>  extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
>  extern void vmx_asm_bsw0(void);
> Index: xen-unstable.hg/xen/arch/ia64/linux-xen/smpboot.c
> ===================================================================
> --- xen-unstable.hg.orig/xen/arch/ia64/linux-xen/smpboot.c  2008-07-16 08:29:58.000000000 +1000
> +++ xen-unstable.hg/xen/arch/ia64/linux-xen/smpboot.c  2008-07-16 11:53:26.000000000 +1000
> @@ -438,7 +438,9 @@ start_secondary (void *unused)
>      /* Early console may use I/O ports */
>      ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
>      Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
> +#ifndef XEN
>      efi_map_pal_code();
> +#endif
>      cpu_init();
>      smp_callin();
>

-- 
yamahata

_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@lists.xensource.com
http://lists.xensource.com/xen-ia64-devel