# HG changeset patch # User [EMAIL PROTECTED] # Date 1193036196 -32400 # Node ID cd7ee3e5470185e37314d2088df8a499b3352278 # Parent 98ac6d05aed27ea7f9f4137baa0452f24e2569ee clean up of rr handling to avoid reserved register/field fault. PATCHNAME: fix_rr_handling
Signed-off-by: Isaku Yamahata <[EMAIL PROTECTED]> diff -r 98ac6d05aed2 -r cd7ee3e54701 xen/arch/ia64/vmx/vmx_utility.c --- a/xen/arch/ia64/vmx/vmx_utility.c Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/arch/ia64/vmx/vmx_utility.c Mon Oct 22 15:56:36 2007 +0900 @@ -637,10 +637,9 @@ int is_reserved_itir_field(VCPU* vcpu, u return 0; } -int is_reserved_rr_field(VCPU* vcpu, u64 reg_value) -{ - ia64_rr rr; - rr.rrval = reg_value; +static int __is_reserved_rr_field(u64 reg_value) +{ + ia64_rr rr = { .rrval = reg_value }; if(rr.reserved0 != 0 || rr.reserved1 != 0){ return 1; @@ -656,3 +655,20 @@ int is_reserved_rr_field(VCPU* vcpu, u64 return 0; } +int is_reserved_rr_rid(VCPU* vcpu, u64 reg_value) +{ + ia64_rr rr = { .rrval = reg_value }; + + if (rr.rid >= (1UL << vcpu->domain->arch.rid_bits)) + return 1; + + return 0; +} + +int is_reserved_rr_field(VCPU* vcpu, u64 reg_value) +{ + if (__is_reserved_rr_field(reg_value)) + return 1; + + return is_reserved_rr_rid(vcpu, reg_value); +} diff -r 98ac6d05aed2 -r cd7ee3e54701 xen/arch/ia64/vmx/vmx_vcpu.c --- a/xen/arch/ia64/vmx/vmx_vcpu.c Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c Mon Oct 22 15:56:36 2007 +0900 @@ -161,12 +161,12 @@ IA64FAULT vmx_vcpu_cover(VCPU *vcpu) IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val) { - ia64_rr newrr; u64 rrval; - newrr.rrval=val; - if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits)) - panic_domain (NULL, "use of invalid rid %x\n", newrr.rid); + if (unlikely(is_reserved_rr_rid(vcpu, val))) { + gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val); + return IA64_RSVDREG_FAULT; + } VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val; switch((u64)(reg>>VRN_SHIFT)) { diff -r 98ac6d05aed2 -r cd7ee3e54701 xen/arch/ia64/xen/domain.c --- a/xen/arch/ia64/xen/domain.c Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/arch/ia64/xen/domain.c Mon Oct 22 15:56:36 2007 +0900 @@ -1583,6 +1583,7 @@ domain_set_shared_info_va (unsigned long { struct vcpu *v = current; struct domain *d = v->domain; + int rc; 
/* Check virtual address: must belong to region 7, @@ -1604,9 +1605,10 @@ domain_set_shared_info_va (unsigned long __ia64_per_cpu_var(current_psr_ic_addr) = (int *)(va + XSI_PSR_IC_OFS); /* Remap the shared pages. */ - set_one_rr (7UL << 61, PSCB(v,rrs[7])); - - return 0; + rc = !set_one_rr(7UL << 61, PSCB(v,rrs[7])); + BUG_ON(rc); + + return rc; } /* Transfer and clear the shadow bitmap in 1kB chunks for L1 cache. */ diff -r 98ac6d05aed2 -r cd7ee3e54701 xen/arch/ia64/xen/regionreg.c --- a/xen/arch/ia64/xen/regionreg.c Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/arch/ia64/xen/regionreg.c Mon Oct 22 15:56:36 2007 +0900 @@ -238,14 +238,12 @@ int set_one_rr(unsigned long rr, unsigne ia64_rr rrv, newrrv, memrrv; unsigned long newrid; - if (val == -1) - return 1; - rrv.rrval = val; newrrv.rrval = 0; newrid = v->arch.starting_rid + rrv.rid; - if (newrid > v->arch.ending_rid) { + // avoid reserved register/field fault + if (unlikely(is_reserved_rr_field(v, val))) { printk("can't set rr%d to %lx, starting_rid=%x," "ending_rid=%x, val=%lx\n", (int) rreg, newrid, v->arch.starting_rid,v->arch.ending_rid,val); @@ -295,12 +293,11 @@ void init_all_rr(struct vcpu *v) ia64_rr rrv; rrv.rrval = 0; - //rrv.rrval = v->domain->arch.metaphysical_rr0; rrv.ps = v->arch.vhpt_pg_shift; rrv.ve = 1; if (!v->vcpu_info) panic("Stopping in init_all_rr\n"); - VCPU(v,rrs[0]) = -1; + VCPU(v,rrs[0]) = rrv.rrval; VCPU(v,rrs[1]) = rrv.rrval; VCPU(v,rrs[2]) = rrv.rrval; VCPU(v,rrs[3]) = rrv.rrval; @@ -308,7 +305,7 @@ void init_all_rr(struct vcpu *v) VCPU(v,rrs[5]) = rrv.rrval; rrv.ve = 0; VCPU(v,rrs[6]) = rrv.rrval; -// v->shared_info->arch.rrs[7] = rrv.rrval; + VCPU(v,rrs[7]) = rrv.rrval; } diff -r 98ac6d05aed2 -r cd7ee3e54701 xen/arch/ia64/xen/vcpu.c --- a/xen/arch/ia64/xen/vcpu.c Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/arch/ia64/xen/vcpu.c Mon Oct 22 15:56:36 2007 +0900 @@ -287,7 +287,7 @@ static void vcpu_set_metaphysical_mode(V PSCB(vcpu, metaphysical_mode) = newmode; if (newmode) 
set_metaphysical_rr0(); - else if (PSCB(vcpu, rrs[0]) != -1) + else set_virtual_rr0(); } } @@ -2095,9 +2095,16 @@ unsigned long vcpu_get_rr_ve(VCPU * vcpu IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val) { + if (unlikely(is_reserved_rr_field(vcpu, val))) { + gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val); + return IA64_RSVDREG_FAULT; + } + PSCB(vcpu, rrs)[reg >> 61] = val; - if (vcpu == current) - set_one_rr(reg, val); + if (likely(vcpu == current)) { + int rc = set_one_rr(reg, val); + BUG_ON(rc == 0); + } return IA64_NO_FAULT; } @@ -2120,17 +2127,30 @@ IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcp u64 reg3 = 0x6000000000000000UL; u64 reg4 = 0x8000000000000000UL; + if (unlikely(is_reserved_rr_field(vcpu, val0) || + is_reserved_rr_field(vcpu, val1) || + is_reserved_rr_field(vcpu, val2) || + is_reserved_rr_field(vcpu, val3) || + is_reserved_rr_field(vcpu, val4))) { + gdprintk(XENLOG_DEBUG, + "use of invalid rrval %lx %lx %lx %lx %lx\n", + val0, val1, val2, val3, val4); + return IA64_RSVDREG_FAULT; + } + PSCB(vcpu, rrs)[reg0 >> 61] = val0; PSCB(vcpu, rrs)[reg1 >> 61] = val1; PSCB(vcpu, rrs)[reg2 >> 61] = val2; PSCB(vcpu, rrs)[reg3 >> 61] = val3; PSCB(vcpu, rrs)[reg4 >> 61] = val4; - if (vcpu == current) { - set_one_rr(reg0, val0); - set_one_rr(reg1, val1); - set_one_rr(reg2, val2); - set_one_rr(reg3, val3); - set_one_rr(reg4, val4); + if (likely(vcpu == current)) { + int rc; + rc = !set_one_rr(reg0, val0); + rc |= !set_one_rr(reg1, val1); + rc |= !set_one_rr(reg2, val2); + rc |= !set_one_rr(reg3, val3); + rc |= !set_one_rr(reg4, val4); + BUG_ON(rc != 0); } return IA64_NO_FAULT; } diff -r 98ac6d05aed2 -r cd7ee3e54701 xen/include/asm-ia64/regionreg.h --- a/xen/include/asm-ia64/regionreg.h Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/include/asm-ia64/regionreg.h Mon Oct 22 15:56:36 2007 +0900 @@ -1,3 +1,4 @@ + #ifndef _REGIONREG_H_ #define _REGIONREG_H_ @@ -85,6 +86,9 @@ extern void set_metaphysical_rr0(void); extern void load_region_regs(struct vcpu *v); +extern 
int is_reserved_rr_rid(struct vcpu *vcpu, u64 reg_value); +extern int is_reserved_rr_field(struct vcpu *vcpu, u64 reg_value); + #endif /* !_REGIONREG_H_ */ /*
# HG changeset patch # User [EMAIL PROTECTED] # Date 1193036196 -32400 # Node ID cd7ee3e5470185e37314d2088df8a499b3352278 # Parent 98ac6d05aed27ea7f9f4137baa0452f24e2569ee clean up of rr handling to avoid reserved register/field fault. PATCHNAME: fix_rr_handling Signed-off-by: Isaku Yamahata <[EMAIL PROTECTED]> diff -r 98ac6d05aed2 -r cd7ee3e54701 xen/arch/ia64/vmx/vmx_utility.c --- a/xen/arch/ia64/vmx/vmx_utility.c Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/arch/ia64/vmx/vmx_utility.c Mon Oct 22 15:56:36 2007 +0900 @@ -637,10 +637,9 @@ int is_reserved_itir_field(VCPU* vcpu, u return 0; } -int is_reserved_rr_field(VCPU* vcpu, u64 reg_value) -{ - ia64_rr rr; - rr.rrval = reg_value; +static int __is_reserved_rr_field(u64 reg_value) +{ + ia64_rr rr = { .rrval = reg_value }; if(rr.reserved0 != 0 || rr.reserved1 != 0){ return 1; @@ -656,3 +655,20 @@ int is_reserved_rr_field(VCPU* vcpu, u64 return 0; } +int is_reserved_rr_rid(VCPU* vcpu, u64 reg_value) +{ + ia64_rr rr = { .rrval = reg_value }; + + if (rr.rid >= (1UL << vcpu->domain->arch.rid_bits)) + return 1; + + return 0; +} + +int is_reserved_rr_field(VCPU* vcpu, u64 reg_value) +{ + if (__is_reserved_rr_field(reg_value)) + return 1; + + return is_reserved_rr_rid(vcpu, reg_value); +} diff -r 98ac6d05aed2 -r cd7ee3e54701 xen/arch/ia64/vmx/vmx_vcpu.c --- a/xen/arch/ia64/vmx/vmx_vcpu.c Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c Mon Oct 22 15:56:36 2007 +0900 @@ -161,12 +161,12 @@ IA64FAULT vmx_vcpu_cover(VCPU *vcpu) IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val) { - ia64_rr newrr; u64 rrval; - newrr.rrval=val; - if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits)) - panic_domain (NULL, "use of invalid rid %x\n", newrr.rid); + if (unlikely(is_reserved_rr_rid(vcpu, val))) { + gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val); + return IA64_RSVDREG_FAULT; + } VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val; switch((u64)(reg>>VRN_SHIFT)) { diff -r 98ac6d05aed2 -r cd7ee3e54701
xen/arch/ia64/xen/domain.c --- a/xen/arch/ia64/xen/domain.c Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/arch/ia64/xen/domain.c Mon Oct 22 15:56:36 2007 +0900 @@ -1583,6 +1583,7 @@ domain_set_shared_info_va (unsigned long { struct vcpu *v = current; struct domain *d = v->domain; + int rc; /* Check virtual address: must belong to region 7, @@ -1604,9 +1605,10 @@ domain_set_shared_info_va (unsigned long __ia64_per_cpu_var(current_psr_ic_addr) = (int *)(va + XSI_PSR_IC_OFS); /* Remap the shared pages. */ - set_one_rr (7UL << 61, PSCB(v,rrs[7])); - - return 0; + rc = !set_one_rr(7UL << 61, PSCB(v,rrs[7])); + BUG_ON(rc); + + return rc; } /* Transfer and clear the shadow bitmap in 1kB chunks for L1 cache. */ diff -r 98ac6d05aed2 -r cd7ee3e54701 xen/arch/ia64/xen/regionreg.c --- a/xen/arch/ia64/xen/regionreg.c Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/arch/ia64/xen/regionreg.c Mon Oct 22 15:56:36 2007 +0900 @@ -238,14 +238,12 @@ int set_one_rr(unsigned long rr, unsigne ia64_rr rrv, newrrv, memrrv; unsigned long newrid; - if (val == -1) - return 1; - rrv.rrval = val; newrrv.rrval = 0; newrid = v->arch.starting_rid + rrv.rid; - if (newrid > v->arch.ending_rid) { + // avoid reserved register/field fault + if (unlikely(is_reserved_rr_field(v, val))) { printk("can't set rr%d to %lx, starting_rid=%x," "ending_rid=%x, val=%lx\n", (int) rreg, newrid, v->arch.starting_rid,v->arch.ending_rid,val); @@ -295,12 +293,11 @@ void init_all_rr(struct vcpu *v) ia64_rr rrv; rrv.rrval = 0; - //rrv.rrval = v->domain->arch.metaphysical_rr0; rrv.ps = v->arch.vhpt_pg_shift; rrv.ve = 1; if (!v->vcpu_info) panic("Stopping in init_all_rr\n"); - VCPU(v,rrs[0]) = -1; + VCPU(v,rrs[0]) = rrv.rrval; VCPU(v,rrs[1]) = rrv.rrval; VCPU(v,rrs[2]) = rrv.rrval; VCPU(v,rrs[3]) = rrv.rrval; @@ -308,7 +305,7 @@ void init_all_rr(struct vcpu *v) VCPU(v,rrs[5]) = rrv.rrval; rrv.ve = 0; VCPU(v,rrs[6]) = rrv.rrval; -// v->shared_info->arch.rrs[7] = rrv.rrval; + VCPU(v,rrs[7]) = rrv.rrval; } diff -r 98ac6d05aed2 -r 
cd7ee3e54701 xen/arch/ia64/xen/vcpu.c --- a/xen/arch/ia64/xen/vcpu.c Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/arch/ia64/xen/vcpu.c Mon Oct 22 15:56:36 2007 +0900 @@ -287,7 +287,7 @@ static void vcpu_set_metaphysical_mode(V PSCB(vcpu, metaphysical_mode) = newmode; if (newmode) set_metaphysical_rr0(); - else if (PSCB(vcpu, rrs[0]) != -1) + else set_virtual_rr0(); } } @@ -2095,9 +2095,16 @@ unsigned long vcpu_get_rr_ve(VCPU * vcpu IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val) { + if (unlikely(is_reserved_rr_field(vcpu, val))) { + gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val); + return IA64_RSVDREG_FAULT; + } + PSCB(vcpu, rrs)[reg >> 61] = val; - if (vcpu == current) - set_one_rr(reg, val); + if (likely(vcpu == current)) { + int rc = set_one_rr(reg, val); + BUG_ON(rc == 0); + } return IA64_NO_FAULT; } @@ -2120,17 +2127,30 @@ IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcp u64 reg3 = 0x6000000000000000UL; u64 reg4 = 0x8000000000000000UL; + if (unlikely(is_reserved_rr_field(vcpu, val0) || + is_reserved_rr_field(vcpu, val1) || + is_reserved_rr_field(vcpu, val2) || + is_reserved_rr_field(vcpu, val3) || + is_reserved_rr_field(vcpu, val4))) { + gdprintk(XENLOG_DEBUG, + "use of invalid rrval %lx %lx %lx %lx %lx\n", + val0, val1, val2, val3, val4); + return IA64_RSVDREG_FAULT; + } + PSCB(vcpu, rrs)[reg0 >> 61] = val0; PSCB(vcpu, rrs)[reg1 >> 61] = val1; PSCB(vcpu, rrs)[reg2 >> 61] = val2; PSCB(vcpu, rrs)[reg3 >> 61] = val3; PSCB(vcpu, rrs)[reg4 >> 61] = val4; - if (vcpu == current) { - set_one_rr(reg0, val0); - set_one_rr(reg1, val1); - set_one_rr(reg2, val2); - set_one_rr(reg3, val3); - set_one_rr(reg4, val4); + if (likely(vcpu == current)) { + int rc; + rc = !set_one_rr(reg0, val0); + rc |= !set_one_rr(reg1, val1); + rc |= !set_one_rr(reg2, val2); + rc |= !set_one_rr(reg3, val3); + rc |= !set_one_rr(reg4, val4); + BUG_ON(rc != 0); } return IA64_NO_FAULT; } diff -r 98ac6d05aed2 -r cd7ee3e54701 xen/include/asm-ia64/regionreg.h --- a/xen/include/asm-ia64/regionreg.h 
Sun Oct 21 15:58:00 2007 -0600 +++ b/xen/include/asm-ia64/regionreg.h Mon Oct 22 15:56:36 2007 +0900 @@ -1,3 +1,4 @@ + #ifndef _REGIONREG_H_ #define _REGIONREG_H_ @@ -85,6 +86,9 @@ extern void set_metaphysical_rr0(void); extern void load_region_regs(struct vcpu *v); +extern int is_reserved_rr_rid(struct vcpu *vcpu, u64 reg_value); +extern int is_reserved_rr_field(struct vcpu *vcpu, u64 reg_value); + #endif /* !_REGIONREG_H_ */ /*
_______________________________________________ Xen-ia64-devel mailing list Xen-ia64-devel@lists.xensource.com http://lists.xensource.com/xen-ia64-devel