Re: [Xen-devel] [PATCH 2/3] x86: drop is_pv_32on64_domain()
On Tue, 2015-06-23 at 16:19 +0100, Jan Beulich wrote: ... as being identical to is_pv_32bit_domain() after the x86-32 removal. In a few cases this includes no longer open-coding is_pv_32bit_vcpu(). Signed-off-by: Jan Beulich jbeul...@suse.com Given that the common impact is effectively x86-only due to the ifdefs (CONFIG_COMPAT is effectively x86 IMHO) I don't think you really need it but: Acked-by: Ian Campbell ian.campb...@citrix.com --- a/xen/common/kexec.c +++ b/xen/common/kexec.c @@ -872,7 +872,7 @@ static int kexec_load_slot(struct kexec_ static uint16_t kexec_load_v1_arch(void) { #ifdef CONFIG_X86 -return is_pv_32on64_domain(hardware_domain) ? EM_386 : EM_X86_64; +return is_pv_32bit_domain(hardware_domain) ? EM_386 : EM_X86_64; #else return EM_NONE; #endif --- a/xen/common/xenoprof.c +++ b/xen/common/xenoprof.c @@ -219,7 +219,7 @@ static int alloc_xenoprof_struct( bufsize = sizeof(struct xenoprof_buf); i = sizeof(struct event_log); #ifdef CONFIG_COMPAT -d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? hardware_domain : d); +d->xenoprof->is_compat = is_pv_32bit_domain(is_passive ? hardware_domain : d); if ( XENOPROF_COMPAT(d->xenoprof) ) { bufsize = sizeof(struct compat_oprof_buf); ___ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
[Xen-devel] [PATCH 2/3] x86: drop is_pv_32on64_domain()
... as being identical to is_pv_32bit_domain() after the x86-32 removal. In a few cases this includes no longer open-coding is_pv_32bit_vcpu(). Signed-off-by: Jan Beulich jbeul...@suse.com --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -367,7 +367,7 @@ int switch_native(struct domain *d) if ( !may_switch_mode(d) ) return -EACCES; -if ( !is_pv_32on64_domain(d) ) +if ( !is_pv_32bit_domain(d) ) return 0; d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0; @@ -392,7 +392,7 @@ int switch_compat(struct domain *d) if ( !may_switch_mode(d) ) return -EACCES; -if ( is_pv_32on64_domain(d) ) +if ( is_pv_32bit_domain(d) ) return 0; d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1; @@ -481,7 +481,7 @@ int vcpu_initialise(struct vcpu *v) v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features); -rc = is_pv_32on64_domain(d) ? setup_compat_l4(v) : 0; +rc = is_pv_32bit_domain(d) ? setup_compat_l4(v) : 0; done: if ( rc ) { @@ -689,7 +689,7 @@ unsigned long pv_guest_cr4_fixup(const s hv_cr4_mask &= ~X86_CR4_TSD; if ( cpu_has_de ) hv_cr4_mask &= ~X86_CR4_DE; -if ( cpu_has_fsgsbase && !is_pv_32bit_domain(v->domain) ) +if ( cpu_has_fsgsbase && !is_pv_32bit_vcpu(v) ) hv_cr4_mask &= ~X86_CR4_FSGSBASE; if ( cpu_has_xsave ) hv_cr4_mask &= ~X86_CR4_OSXSAVE; @@ -721,7 +721,7 @@ int arch_set_info_guest( /* The context is a compat-mode one if the target domain is compat-mode; * we expect the tools to DTRT even in compat-mode callers. */ -compat = is_pv_32on64_domain(d); +compat = is_pv_32bit_domain(d); #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld)) flags = c(flags); @@ -1195,7 +1195,7 @@ static void load_segments(struct vcpu *n all_segs_okay &= loadsegment(gs, uregs->gs); } -if ( !is_pv_32on64_domain(n->domain) ) +if ( !is_pv_32bit_vcpu(n) ) { /* This can only be non-zero if selector is NULL. 
*/ if ( n->arch.pv_vcpu.fs_base ) @@ -1224,7 +1224,7 @@ static void load_segments(struct vcpu *n (unsigned long *)pv->kernel_sp; unsigned long cs_and_mask, rflags; -if ( is_pv_32on64_domain(n->domain) ) +if ( is_pv_32bit_vcpu(n) ) { unsigned int *esp = ring_1(regs) ? (unsigned int *)regs->rsp : @@ -1340,7 +1340,7 @@ static void save_segments(struct vcpu *v if ( regs->es ) dirty_segment_mask |= DIRTY_ES; -if ( regs->fs || is_pv_32on64_domain(v->domain) ) +if ( regs->fs || is_pv_32bit_vcpu(v) ) { dirty_segment_mask |= DIRTY_FS; v->arch.pv_vcpu.fs_base = 0; /* != 0 selector kills fs_base */ @@ -1350,7 +1350,7 @@ static void save_segments(struct vcpu *v dirty_segment_mask |= DIRTY_FS_BASE; } -if ( regs->gs || is_pv_32on64_domain(v->domain) ) +if ( regs->gs || is_pv_32bit_vcpu(v) ) { dirty_segment_mask |= DIRTY_GS; v->arch.pv_vcpu.gs_base_user = 0; /* != 0 selector kills gs_base_user */ @@ -1483,8 +1483,8 @@ static void __context_switch(void) psr_ctxt_switch_to(nd); -gdt = !is_pv_32on64_domain(nd) ? per_cpu(gdt_table, cpu) : - per_cpu(compat_gdt_table, cpu); +gdt = !is_pv_32bit_domain(nd) ? per_cpu(gdt_table, cpu) : +per_cpu(compat_gdt_table, cpu); if ( need_full_gdt(nd) ) { unsigned long mfn = virt_to_mfn(gdt); @@ -1568,7 +1568,7 @@ void context_switch(struct vcpu *prev, s if ( is_pv_domain(nextd) && (is_idle_domain(prevd) || has_hvm_container_domain(prevd) || - is_pv_32on64_domain(prevd) != is_pv_32on64_domain(nextd)) ) + is_pv_32bit_domain(prevd) != is_pv_32bit_domain(nextd)) ) { uint64_t efer = read_efer(); if ( !(efer & EFER_SCE) ) --- a/xen/arch/x86/domain_build.c +++ b/xen/arch/x86/domain_build.c @@ -293,7 +293,7 @@ static unsigned long __init compute_dom0 avail -= (d->max_vcpus - 1UL) << get_order_from_bytes(sizeof(struct vcpu)); /* ...and compat_l4's, if needed. */ -if ( is_pv_32on64_domain(d) ) +if ( is_pv_32bit_domain(d) ) avail -= d->max_vcpus - 1; /* Reserve memory for iommu_dom0_init() (rough estimate). 
*/ @@ -608,7 +608,7 @@ static __init void dom0_update_physmap(s BUG_ON(rc); return; } -if ( !is_pv_32on64_domain(d) ) +if ( !is_pv_32bit_domain(d) ) ((unsigned long *)vphysmap_s)[pfn] = mfn; else ((unsigned int *)vphysmap_s)[pfn] = mfn; @@ -718,7 +718,7 @@ static __init void mark_pv_pt_pages_rdon /* Top-level p.t. is pinned. */ if ( (page->u.inuse.type_info & PGT_type_mask) == - (!is_pv_32on64_domain(d) ? + (!is_pv_32bit_domain(d) ? PGT_l4_page_table :
Re: [Xen-devel] [PATCH 2/3] x86: drop is_pv_32on64_domain()
On 23/06/15 16:19, Jan Beulich wrote: ... as being identical to is_pv_32bit_domain() after the x86-32 removal. In a few cases this includes no longer open-coding is_pv_32bit_vcpu(). Signed-off-by: Jan Beulich jbeul...@suse.com Reviewed-by: Andrew Cooper andrew.coop...@citrix.com ___ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
Re: [Xen-devel] [PATCH 2/3] x86: drop is_pv_32on64_domain()
On 06/23/2015 04:19 PM, Jan Beulich wrote: ... as being identical to is_pv_32bit_domain() after the x86-32 removal. In a few cases this includes no longer open-coding is_pv_32bit_vcpu(). Signed-off-by: Jan Beulich jbeul...@suse.com Reviewed-by: George Dunlap george.dun...@eu.citrix.com --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -367,7 +367,7 @@ int switch_native(struct domain *d) if ( !may_switch_mode(d) ) return -EACCES; -if ( !is_pv_32on64_domain(d) ) +if ( !is_pv_32bit_domain(d) ) return 0; d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0; @@ -392,7 +392,7 @@ int switch_compat(struct domain *d) if ( !may_switch_mode(d) ) return -EACCES; -if ( is_pv_32on64_domain(d) ) +if ( is_pv_32bit_domain(d) ) return 0; d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1; @@ -481,7 +481,7 @@ int vcpu_initialise(struct vcpu *v) v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features); -rc = is_pv_32on64_domain(d) ? setup_compat_l4(v) : 0; +rc = is_pv_32bit_domain(d) ? setup_compat_l4(v) : 0; done: if ( rc ) { @@ -689,7 +689,7 @@ unsigned long pv_guest_cr4_fixup(const s hv_cr4_mask &= ~X86_CR4_TSD; if ( cpu_has_de ) hv_cr4_mask &= ~X86_CR4_DE; -if ( cpu_has_fsgsbase && !is_pv_32bit_domain(v->domain) ) +if ( cpu_has_fsgsbase && !is_pv_32bit_vcpu(v) ) hv_cr4_mask &= ~X86_CR4_FSGSBASE; if ( cpu_has_xsave ) hv_cr4_mask &= ~X86_CR4_OSXSAVE; @@ -721,7 +721,7 @@ int arch_set_info_guest( /* The context is a compat-mode one if the target domain is compat-mode; * we expect the tools to DTRT even in compat-mode callers. */ -compat = is_pv_32on64_domain(d); +compat = is_pv_32bit_domain(d); #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld)) flags = c(flags); @@ -1195,7 +1195,7 @@ static void load_segments(struct vcpu *n all_segs_okay &= loadsegment(gs, uregs->gs); } -if ( !is_pv_32on64_domain(n->domain) ) +if ( !is_pv_32bit_vcpu(n) ) { /* This can only be non-zero if selector is NULL. 
*/ if ( n->arch.pv_vcpu.fs_base ) @@ -1224,7 +1224,7 @@ static void load_segments(struct vcpu *n (unsigned long *)pv->kernel_sp; unsigned long cs_and_mask, rflags; -if ( is_pv_32on64_domain(n->domain) ) +if ( is_pv_32bit_vcpu(n) ) { unsigned int *esp = ring_1(regs) ? (unsigned int *)regs->rsp : @@ -1340,7 +1340,7 @@ static void save_segments(struct vcpu *v if ( regs->es ) dirty_segment_mask |= DIRTY_ES; -if ( regs->fs || is_pv_32on64_domain(v->domain) ) +if ( regs->fs || is_pv_32bit_vcpu(v) ) { dirty_segment_mask |= DIRTY_FS; v->arch.pv_vcpu.fs_base = 0; /* != 0 selector kills fs_base */ @@ -1350,7 +1350,7 @@ static void save_segments(struct vcpu *v dirty_segment_mask |= DIRTY_FS_BASE; } -if ( regs->gs || is_pv_32on64_domain(v->domain) ) +if ( regs->gs || is_pv_32bit_vcpu(v) ) { dirty_segment_mask |= DIRTY_GS; v->arch.pv_vcpu.gs_base_user = 0; /* != 0 selector kills gs_base_user */ @@ -1483,8 +1483,8 @@ static void __context_switch(void) psr_ctxt_switch_to(nd); -gdt = !is_pv_32on64_domain(nd) ? per_cpu(gdt_table, cpu) : - per_cpu(compat_gdt_table, cpu); +gdt = !is_pv_32bit_domain(nd) ? per_cpu(gdt_table, cpu) : +per_cpu(compat_gdt_table, cpu); if ( need_full_gdt(nd) ) { unsigned long mfn = virt_to_mfn(gdt); @@ -1568,7 +1568,7 @@ void context_switch(struct vcpu *prev, s if ( is_pv_domain(nextd) && (is_idle_domain(prevd) || has_hvm_container_domain(prevd) || - is_pv_32on64_domain(prevd) != is_pv_32on64_domain(nextd)) ) + is_pv_32bit_domain(prevd) != is_pv_32bit_domain(nextd)) ) { uint64_t efer = read_efer(); if ( !(efer & EFER_SCE) ) --- a/xen/arch/x86/domain_build.c +++ b/xen/arch/x86/domain_build.c @@ -293,7 +293,7 @@ static unsigned long __init compute_dom0 avail -= (d->max_vcpus - 1UL) << get_order_from_bytes(sizeof(struct vcpu)); /* ...and compat_l4's, if needed. */ -if ( is_pv_32on64_domain(d) ) +if ( is_pv_32bit_domain(d) ) avail -= d->max_vcpus - 1; /* Reserve memory for iommu_dom0_init() (rough estimate). 
*/ @@ -608,7 +608,7 @@ static __init void dom0_update_physmap(s BUG_ON(rc); return; } -if ( !is_pv_32on64_domain(d) ) +if ( !is_pv_32bit_domain(d) ) ((unsigned long *)vphysmap_s)[pfn] = mfn; else ((unsigned int *)vphysmap_s)[pfn] = mfn; @@ -718,7 +718,7 @@ static __init void