Am Dienstag 17 März 2015, 10:54:02 schrieb Boris Ostrovsky: > Add runtime interface for setting PMU mode and flags. Three main modes are > provided: > * XENPMU_MODE_OFF: PMU is not virtualized > * XENPMU_MODE_SELF: Guests can access PMU MSRs and receive PMU interrupts. > * XENPMU_MODE_HV: Same as XENPMU_MODE_SELF for non-privileged guests, dom0 > can profile itself and the hypervisor. > > Note that PMU modes are different from what can be provided at Xen's boot line > with 'vpmu' argument. An 'off' (or '0') value is equivalent to > XENPMU_MODE_OFF. > Any other value, on the other hand, will cause VPMU mode to be set to > XENPMU_MODE_SELF during boot. > > For feature flags only Intel's BTS is currently supported. > > Mode and flags are set via HYPERVISOR_xenpmu_op hypercall. > > Signed-off-by: Boris Ostrovsky <boris.ostrov...@oracle.com> > Acked-by: Daniel De Graaf <dgde...@tycho.nsa.gov>
Reviewed-by: Dietmar Hahn <dietmar.h...@ts.fujitsu.com> Only two minor comments below. > --- > > Changes in v19: > * Keep track of active vpmu count and allow certain mode changes only when > the count > is zero > * Drop vpmu_unload routines > * Revert to to using opt_vpmu_enabled > * Changes to oprofile code are no longer needed > * Changes to vmcs.c are no longer needed > * Simplified vpmu_switch_from/to inlines > > tools/flask/policy/policy/modules/xen/xen.te | 3 + > xen/arch/x86/domain.c | 4 +- > xen/arch/x86/hvm/svm/vpmu.c | 4 +- > xen/arch/x86/hvm/vmx/vpmu_core2.c | 10 +- > xen/arch/x86/hvm/vpmu.c | 155 > +++++++++++++++++++++++++-- > xen/arch/x86/x86_64/compat/entry.S | 4 + > xen/arch/x86/x86_64/entry.S | 4 + > xen/include/asm-x86/hvm/vpmu.h | 27 +++-- > xen/include/public/pmu.h | 45 ++++++++ > xen/include/public/xen.h | 1 + > xen/include/xen/hypercall.h | 4 + > xen/include/xlat.lst | 1 + > xen/include/xsm/dummy.h | 15 +++ > xen/include/xsm/xsm.h | 6 ++ > xen/xsm/dummy.c | 1 + > xen/xsm/flask/hooks.c | 18 ++++ > xen/xsm/flask/policy/access_vectors | 2 + > 17 files changed, 279 insertions(+), 25 deletions(-) > > diff --git a/tools/flask/policy/policy/modules/xen/xen.te > b/tools/flask/policy/policy/modules/xen/xen.te > index c0128aa..870ff81 100644 > --- a/tools/flask/policy/policy/modules/xen/xen.te > +++ b/tools/flask/policy/policy/modules/xen/xen.te > @@ -68,6 +68,9 @@ allow dom0_t xen_t:xen2 { > resource_op > psr_cmt_op > }; > +allow dom0_t xen_t:xen2 { > + pmu_ctrl > +}; > allow dom0_t xen_t:mmu memorymap; > > # Allow dom0 to use these domctls on itself. 
For domctls acting on other > diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c > index 21f0766..60d9a80 100644 > --- a/xen/arch/x86/domain.c > +++ b/xen/arch/x86/domain.c > @@ -1536,7 +1536,7 @@ void context_switch(struct vcpu *prev, struct vcpu > *next) > if ( is_hvm_vcpu(prev) ) > { > if (prev != next) > - vpmu_save(prev); > + vpmu_switch_from(prev); > > if ( !list_empty(&prev->arch.hvm_vcpu.tm_list) ) > pt_save_timer(prev); > @@ -1581,7 +1581,7 @@ void context_switch(struct vcpu *prev, struct vcpu > *next) > > if (is_hvm_vcpu(next) && (prev != next) ) > /* Must be done with interrupts enabled */ > - vpmu_load(next); > + vpmu_switch_to(next); > > context_saved(prev); > > diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c > index a8b79df..481ea7b 100644 > --- a/xen/arch/x86/hvm/svm/vpmu.c > +++ b/xen/arch/x86/hvm/svm/vpmu.c > @@ -472,14 +472,14 @@ struct arch_vpmu_ops amd_vpmu_ops = { > .arch_vpmu_dump = amd_vpmu_dump > }; > > -int svm_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags) > +int svm_vpmu_initialise(struct vcpu *v) > { > struct vpmu_struct *vpmu = vcpu_vpmu(v); > uint8_t family = current_cpu_data.x86; > int ret = 0; > > /* vpmu enabled? 
*/ > - if ( !vpmu_flags ) > + if ( vpmu_mode == XENPMU_MODE_OFF ) > return 0; > > switch ( family ) > diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c > b/xen/arch/x86/hvm/vmx/vpmu_core2.c > index c2405bf..6280644 100644 > --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c > +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c > @@ -708,13 +708,13 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs > *regs) > return 1; > } > > -static int core2_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags) > +static int core2_vpmu_initialise(struct vcpu *v) > { > struct vpmu_struct *vpmu = vcpu_vpmu(v); > u64 msr_content; > static bool_t ds_warned; > > - if ( !(vpmu_flags & VPMU_BOOT_BTS) ) > + if ( !(vpmu_features & XENPMU_FEATURE_INTEL_BTS) ) > goto func_out; > /* Check the 'Debug Store' feature in the CPUID.EAX[1]:EDX[21] */ > while ( boot_cpu_has(X86_FEATURE_DS) ) > @@ -826,7 +826,7 @@ struct arch_vpmu_ops core2_no_vpmu_ops = { > .do_cpuid = core2_no_vpmu_do_cpuid, > }; > > -int vmx_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags) > +int vmx_vpmu_initialise(struct vcpu *v) > { > struct vpmu_struct *vpmu = vcpu_vpmu(v); > uint8_t family = current_cpu_data.x86; > @@ -834,7 +834,7 @@ int vmx_vpmu_initialise(struct vcpu *v, unsigned int > vpmu_flags) > int ret = 0; > > vpmu->arch_vpmu_ops = &core2_no_vpmu_ops; > - if ( !vpmu_flags ) > + if ( vpmu_mode == XENPMU_MODE_OFF ) > return 0; > > if ( family == 6 ) > @@ -877,7 +877,7 @@ int vmx_vpmu_initialise(struct vcpu *v, unsigned int > vpmu_flags) > /* future: */ > case 0x3d: > case 0x4e: > - ret = core2_vpmu_initialise(v, vpmu_flags); > + ret = core2_vpmu_initialise(v); > if ( !ret ) > vpmu->arch_vpmu_ops = &core2_vpmu_ops; > return ret; > diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c > index ef88528..164accf 100644 > --- a/xen/arch/x86/hvm/vpmu.c > +++ b/xen/arch/x86/hvm/vpmu.c > @@ -21,6 +21,8 @@ > #include <xen/config.h> > #include <xen/sched.h> > #include <xen/xenoprof.h> > +#include <xen/event.h> > +#include 
<xen/guest_access.h> > #include <asm/regs.h> > #include <asm/types.h> > #include <asm/msr.h> > @@ -33,8 +35,10 @@ > #include <asm/hvm/svm/vmcb.h> > #include <asm/apic.h> > #include <public/pmu.h> > +#include <xsm/xsm.h> > > #include <compat/pmu.h> > +CHECK_pmu_params; > CHECK_pmu_intel_ctxt; > CHECK_pmu_amd_ctxt; > CHECK_pmu_cntr_pair; > @@ -46,9 +50,14 @@ CHECK_pmu_regs; > * "vpmu=bts" : vpmu enabled and Intel BTS feature switched on. > */ > static unsigned int __read_mostly opt_vpmu_enabled; > +unsigned int __read_mostly vpmu_mode = XENPMU_MODE_OFF; > +unsigned int __read_mostly vpmu_features = 0; > static void parse_vpmu_param(char *s); > custom_param("vpmu", parse_vpmu_param); > > +static DEFINE_SPINLOCK(vpmu_lock); > +static unsigned vpmu_count; > + > static DEFINE_PER_CPU(struct vcpu *, last_vcpu); > > static void __init parse_vpmu_param(char *s) > @@ -59,7 +68,7 @@ static void __init parse_vpmu_param(char *s) > break; > default: > if ( !strcmp(s, "bts") ) > - opt_vpmu_enabled |= VPMU_BOOT_BTS; > + vpmu_features |= XENPMU_FEATURE_INTEL_BTS; > else if ( *s ) > { > printk("VPMU: unknown flag: %s - vpmu disabled!\n", s); > @@ -67,7 +76,9 @@ static void __init parse_vpmu_param(char *s) > } > /* fall through */ > case 1: > - opt_vpmu_enabled |= VPMU_BOOT_ENABLED; > + /* Default VPMU mode */ > + vpmu_mode = XENPMU_MODE_SELF; > + opt_vpmu_enabled = 1; > break; > } > } > @@ -76,7 +87,7 @@ void vpmu_lvtpc_update(uint32_t val) > { > struct vpmu_struct *vpmu; > > - if ( !opt_vpmu_enabled ) > + if ( vpmu_mode == XENPMU_MODE_OFF ) > return; > > vpmu = vcpu_vpmu(current); > @@ -89,6 +100,9 @@ int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, > uint64_t supported) > { > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > + if ( vpmu_mode == XENPMU_MODE_OFF ) > + return 0; > + > if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr ) > return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content, supported); > return 0; > @@ -98,6 +112,12 @@ int vpmu_do_rdmsr(unsigned 
int msr, uint64_t *msr_content) > { > struct vpmu_struct *vpmu = vcpu_vpmu(current); > > + if ( vpmu_mode == XENPMU_MODE_OFF ) > + { > + *msr_content = 0; > + return 0; > + } > + > if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr ) > return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content); > return 0; > @@ -248,28 +268,41 @@ void vpmu_initialise(struct vcpu *v) > > ASSERT(!vpmu->flags && !vpmu->context); > > + spin_lock(&vpmu_lock); > + vpmu_count++; /* Prevent vpmu_mode from changing until we are done */ > + spin_unlock(&vpmu_lock); > + > switch ( vendor ) > { > case X86_VENDOR_AMD: > - ret = svm_vpmu_initialise(v, opt_vpmu_enabled); > + ret = svm_vpmu_initialise(v); > break; > > case X86_VENDOR_INTEL: > - ret = vmx_vpmu_initialise(v, opt_vpmu_enabled); > + ret = vmx_vpmu_initialise(v); > break; > > default: > - if ( opt_vpmu_enabled ) > + if ( vpmu_mode != XENPMU_MODE_OFF ) > { > printk(XENLOG_G_WARNING "VPMU: Unknown CPU vendor %d. " > "Disabling VPMU\n", vendor); > opt_vpmu_enabled = 0; > + vpmu_mode = XENPMU_MODE_OFF; > } > - return; > + return; /* Don't bother restoring vpmu_count, VPMU is off forever */ > } > > if ( ret ) > printk(XENLOG_G_WARNING "VPMU: Initialization failed for %pv\n", v); > + > + /* Intel needs to initialize VPMU ops even if VPMU is not in use */ > + if ( ret || (vpmu_mode == XENPMU_MODE_OFF) ) > + { > + spin_lock(&vpmu_lock); > + vpmu_count--; > + spin_unlock(&vpmu_lock); > + } > } > > static void vpmu_clear_last(void *arg) > @@ -298,6 +331,10 @@ void vpmu_destroy(struct vcpu *v) > > if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy ) > vpmu->arch_vpmu_ops->arch_vpmu_destroy(v); > + > + spin_lock(&vpmu_lock); > + vpmu_count--; > + spin_unlock(&vpmu_lock); > } > > /* Dump some vpmu informations on console. Used in keyhandler > dump_domains(). 
*/ > @@ -309,6 +346,109 @@ void vpmu_dump(struct vcpu *v) > vpmu->arch_vpmu_ops->arch_vpmu_dump(v); > } > > +long do_xenpmu_op(unsigned int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) > arg) > +{ > + int ret; > + struct xen_pmu_params pmu_params = {.val = 0}; > + > + if ( !opt_vpmu_enabled ) > + return -EOPNOTSUPP; > + > + ret = xsm_pmu_op(XSM_OTHER, current->domain, op); > + if ( ret ) > + return ret; > + > + /* Check major version when parameters are specified */ > + switch ( op ) > + { > + case XENPMU_mode_set: > + case XENPMU_feature_set: > + if ( copy_from_guest(&pmu_params, arg, 1) ) > + return -EFAULT; > + > + if ( pmu_params.version.maj != XENPMU_VER_MAJ ) > + return -EINVAL; > + } > + > + switch ( op ) > + { > + case XENPMU_mode_set: > + { > + if ( (pmu_params.val & ~(XENPMU_MODE_SELF | XENPMU_MODE_HV)) || > + (hweight64(pmu_params.val) > 1) ) > + return -EINVAL; > + > + /* 32-bit dom0 can only sample itself. */ > + if ( is_pv_32bit_vcpu(current) && (pmu_params.val & XENPMU_MODE_HV) ) > + return -EINVAL; > + > + spin_lock(&vpmu_lock); > + > + /* > + * We can always safely switch between XENPMU_MODE_SELF and > + * XENPMU_MODE_HV while other VPMUs are active. 
> + */ > + if ( (vpmu_count == 0) || (vpmu_mode == pmu_params.val) || > + ((vpmu_mode ^ pmu_params.val) == > + (XENPMU_MODE_SELF | XENPMU_MODE_HV)) ) > + vpmu_mode = pmu_params.val; > + else > + { > + printk(XENLOG_WARNING "VPMU: Cannot change mode while" > + " active VPMUs exist\n"); > + ret = -EBUSY; > + } > + > + spin_unlock(&vpmu_lock); > + > + break; > + } > + > + case XENPMU_mode_get: > + memset(&pmu_params, 0, sizeof(pmu_params)); > + pmu_params.val = vpmu_mode; > + > + pmu_params.version.maj = XENPMU_VER_MAJ; > + pmu_params.version.min = XENPMU_VER_MIN; > + > + if ( copy_to_guest(arg, &pmu_params, 1) ) > + return -EFAULT; > + > + break; > + > + case XENPMU_feature_set: > + if ( pmu_params.val & ~XENPMU_FEATURE_INTEL_BTS ) > + return -EINVAL; > + > + spin_lock(&vpmu_lock); > + > + if ( vpmu_count == 0 ) > + vpmu_features = pmu_params.val; > + else > + { > + printk(XENLOG_WARNING "VPMU: Cannot change features while" > + " active VPMUs exist\n"); > + ret = -EBUSY; > + } > + > + spin_unlock(&vpmu_lock); > + > + break; > + > + case XENPMU_feature_get: memset(&pmu_params, 0, sizeof(pmu_params)); ? > + pmu_params.val = vpmu_features; > + if ( copy_field_to_guest(arg, &pmu_params, val) ) > + return -EFAULT; > + > + break; > + > + default: > + ret = -EINVAL; > + } > + > + return ret; > +} > + > static int __init vpmu_init(void) > { > /* NMI watchdog uses LVTPC and HW counter */ > @@ -316,6 +456,7 @@ static int __init vpmu_init(void) > { > printk(XENLOG_WARNING "NMI watchdog is enabled. Turning VPMU > off.\n"); > opt_vpmu_enabled = 0; > + vpmu_mode = XENPMU_MODE_OFF; Maybe not needed because of the static initializing? Dietmar. 
> } > > return 0; > diff --git a/xen/arch/x86/x86_64/compat/entry.S > b/xen/arch/x86/x86_64/compat/entry.S > index 5b0af61..7691a79 100644 > --- a/xen/arch/x86/x86_64/compat/entry.S > +++ b/xen/arch/x86/x86_64/compat/entry.S > @@ -417,6 +417,8 @@ ENTRY(compat_hypercall_table) > .quad do_domctl > .quad compat_kexec_op > .quad do_tmem_op > + .quad do_ni_hypercall /* reserved for XenClient */ > + .quad do_xenpmu_op /* 40 */ > .rept __HYPERVISOR_arch_0-((.-compat_hypercall_table)/8) > .quad compat_ni_hypercall > .endr > @@ -466,6 +468,8 @@ ENTRY(compat_hypercall_args_table) > .byte 1 /* do_domctl */ > .byte 2 /* compat_kexec_op */ > .byte 1 /* do_tmem_op */ > + .byte 0 /* reserved for XenClient */ > + .byte 2 /* do_xenpmu_op */ /* 40 */ > .rept __HYPERVISOR_arch_0-(.-compat_hypercall_args_table) > .byte 0 /* compat_ni_hypercall */ > .endr > diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S > index 2d25d57..403e97c 100644 > --- a/xen/arch/x86/x86_64/entry.S > +++ b/xen/arch/x86/x86_64/entry.S > @@ -764,6 +764,8 @@ ENTRY(hypercall_table) > .quad do_domctl > .quad do_kexec_op > .quad do_tmem_op > + .quad do_ni_hypercall /* reserved for XenClient */ > + .quad do_xenpmu_op /* 40 */ > .rept __HYPERVISOR_arch_0-((.-hypercall_table)/8) > .quad do_ni_hypercall > .endr > @@ -813,6 +815,8 @@ ENTRY(hypercall_args_table) > .byte 1 /* do_domctl */ > .byte 2 /* do_kexec */ > .byte 1 /* do_tmem_op */ > + .byte 0 /* reserved for XenClient */ > + .byte 2 /* do_xenpmu_op */ /* 40 */ > .rept __HYPERVISOR_arch_0-(.-hypercall_args_table) > .byte 0 /* do_ni_hypercall */ > .endr > diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h > index 82bfa0e..88ffc19 100644 > --- a/xen/include/asm-x86/hvm/vpmu.h > +++ b/xen/include/asm-x86/hvm/vpmu.h > @@ -24,13 +24,6 @@ > > #include <public/pmu.h> > > -/* > - * Flag bits given as a string on the hypervisor boot parameter 'vpmu'. > - * See arch/x86/hvm/vpmu.c. 
> - */ > -#define VPMU_BOOT_ENABLED 0x1 /* vpmu generally enabled. */ > -#define VPMU_BOOT_BTS 0x2 /* Intel BTS feature wanted. */ > - > #define vcpu_vpmu(vcpu) (&(vcpu)->arch.vpmu) > #define vpmu_vcpu(vpmu) container_of((vpmu), struct vcpu, arch.vpmu) > > @@ -59,8 +52,8 @@ struct arch_vpmu_ops { > void (*arch_vpmu_dump)(const struct vcpu *); > }; > > -int vmx_vpmu_initialise(struct vcpu *, unsigned int flags); > -int svm_vpmu_initialise(struct vcpu *, unsigned int flags); > +int vmx_vpmu_initialise(struct vcpu *); > +int svm_vpmu_initialise(struct vcpu *); > > struct vpmu_struct { > u32 flags; > @@ -116,5 +109,21 @@ void vpmu_dump(struct vcpu *v); > extern int acquire_pmu_ownership(int pmu_ownership); > extern void release_pmu_ownership(int pmu_ownership); > > +extern unsigned int vpmu_mode; > +extern unsigned int vpmu_features; > + > +/* Context switch */ > +static inline void vpmu_switch_from(struct vcpu *prev) > +{ > + if ( vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV) ) > + vpmu_save(prev); > +} > + > +static inline void vpmu_switch_to(struct vcpu *next) > +{ > + if ( vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV) ) > + vpmu_load(next); > +} > + > #endif /* __ASM_X86_HVM_VPMU_H_*/ > > diff --git a/xen/include/public/pmu.h b/xen/include/public/pmu.h > index f97106d..66cc494 100644 > --- a/xen/include/public/pmu.h > +++ b/xen/include/public/pmu.h > @@ -13,6 +13,51 @@ > #define XENPMU_VER_MAJ 0 > #define XENPMU_VER_MIN 1 > > +/* > + * ` enum neg_errnoval > + * ` HYPERVISOR_xenpmu_op(enum xenpmu_op cmd, struct xenpmu_params *args); > + * > + * @cmd == XENPMU_* (PMU operation) > + * @args == struct xenpmu_params > + */ > +/* ` enum xenpmu_op { */ > +#define XENPMU_mode_get 0 /* Also used for getting PMU version */ > +#define XENPMU_mode_set 1 > +#define XENPMU_feature_get 2 > +#define XENPMU_feature_set 3 > +/* ` } */ > + > +/* Parameters structure for HYPERVISOR_xenpmu_op call */ > +struct xen_pmu_params { > + /* IN/OUT parameters */ > + struct { > + uint32_t 
maj; > + uint32_t min; > + } version; > + uint64_t val; > + > + /* IN parameters */ > + uint32_t vcpu; > + uint32_t pad; > +}; > +typedef struct xen_pmu_params xen_pmu_params_t; > +DEFINE_XEN_GUEST_HANDLE(xen_pmu_params_t); > + > +/* PMU modes: > + * - XENPMU_MODE_OFF: No PMU virtualization > + * - XENPMU_MODE_SELF: Guests can profile themselves > + * - XENPMU_MODE_HV: Guests can profile themselves, dom0 profiles > + * itself and Xen > + */ > +#define XENPMU_MODE_OFF 0 > +#define XENPMU_MODE_SELF (1<<0) > +#define XENPMU_MODE_HV (1<<1) > + > +/* > + * PMU features: > + * - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD) > + */ > +#define XENPMU_FEATURE_INTEL_BTS 1 > > /* Shared between hypervisor and PV domain */ > struct xen_pmu_data { > diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h > index 3703c39..0dd3c97 100644 > --- a/xen/include/public/xen.h > +++ b/xen/include/public/xen.h > @@ -101,6 +101,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_ulong_t); > #define __HYPERVISOR_kexec_op 37 > #define __HYPERVISOR_tmem_op 38 > #define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */ > +#define __HYPERVISOR_xenpmu_op 40 > > /* Architecture-specific hypercall definitions. 
*/ > #define __HYPERVISOR_arch_0 48 > diff --git a/xen/include/xen/hypercall.h b/xen/include/xen/hypercall.h > index eda8a36..ef665db 100644 > --- a/xen/include/xen/hypercall.h > +++ b/xen/include/xen/hypercall.h > @@ -14,6 +14,7 @@ > #include <public/event_channel.h> > #include <public/tmem.h> > #include <public/version.h> > +#include <public/pmu.h> > #include <asm/hypercall.h> > #include <xsm/xsm.h> > > @@ -144,6 +145,9 @@ do_tmem_op( > extern long > do_xenoprof_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg); > > +extern long > +do_xenpmu_op(unsigned int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg); > + > #ifdef CONFIG_COMPAT > > extern int > diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst > index 94eedad..19ab5f7 100644 > --- a/xen/include/xlat.lst > +++ b/xen/include/xlat.lst > @@ -97,6 +97,7 @@ > ? xenpf_pcpuinfo platform.h > ? xenpf_pcpu_version platform.h > ? xenpf_resource_entry platform.h > +? pmu_params pmu.h > ! sched_poll sched.h > ? sched_remote_shutdown sched.h > ? 
sched_shutdown sched.h > diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h > index f20e89c..c637454 100644 > --- a/xen/include/xsm/dummy.h > +++ b/xen/include/xsm/dummy.h > @@ -655,4 +655,19 @@ static XSM_INLINE int xsm_ioport_mapping(XSM_DEFAULT_ARG > struct domain *d, uint3 > return xsm_default_action(action, current->domain, d); > } > > +static XSM_INLINE int xsm_pmu_op (XSM_DEFAULT_ARG struct domain *d, int op) > +{ > + XSM_ASSERT_ACTION(XSM_OTHER); > + switch ( op ) > + { > + case XENPMU_mode_set: > + case XENPMU_mode_get: > + case XENPMU_feature_set: > + case XENPMU_feature_get: > + return xsm_default_action(XSM_PRIV, d, current->domain); > + default: > + return -EPERM; > + } > +} > + > #endif /* CONFIG_X86 */ > diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h > index 4ce089f..90edbb1 100644 > --- a/xen/include/xsm/xsm.h > +++ b/xen/include/xsm/xsm.h > @@ -173,6 +173,7 @@ struct xsm_operations { > int (*unbind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq > *bind); > int (*ioport_permission) (struct domain *d, uint32_t s, uint32_t e, > uint8_t allow); > int (*ioport_mapping) (struct domain *d, uint32_t s, uint32_t e, uint8_t > allow); > + int (*pmu_op) (struct domain *d, unsigned int op); > #endif > }; > > @@ -665,6 +666,11 @@ static inline int xsm_ioport_mapping (xsm_default_t def, > struct domain *d, uint3 > return xsm_ops->ioport_mapping(d, s, e, allow); > } > > +static inline int xsm_pmu_op (xsm_default_t def, struct domain *d, int op) > +{ > + return xsm_ops->pmu_op(d, op); > +} > + > #endif /* CONFIG_X86 */ > > #endif /* XSM_NO_WRAPPERS */ > diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c > index 8eb3050..94f1cf0 100644 > --- a/xen/xsm/dummy.c > +++ b/xen/xsm/dummy.c > @@ -144,5 +144,6 @@ void xsm_fixup_ops (struct xsm_operations *ops) > set_to_dummy_if_null(ops, unbind_pt_irq); > set_to_dummy_if_null(ops, ioport_permission); > set_to_dummy_if_null(ops, ioport_mapping); > + set_to_dummy_if_null(ops, pmu_op); > #endif > } 
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c > index c6431b5..982e879 100644 > --- a/xen/xsm/flask/hooks.c > +++ b/xen/xsm/flask/hooks.c > @@ -1505,6 +1505,23 @@ static int flask_unbind_pt_irq (struct domain *d, > struct xen_domctl_bind_pt_irq > { > return current_has_perm(d, SECCLASS_RESOURCE, RESOURCE__REMOVE); > } > + > +static int flask_pmu_op (struct domain *d, unsigned int op) > +{ > + u32 dsid = domain_sid(d); > + > + switch ( op ) > + { > + case XENPMU_mode_set: > + case XENPMU_mode_get: > + case XENPMU_feature_set: > + case XENPMU_feature_get: > + return avc_has_perm(dsid, SECINITSID_XEN, SECCLASS_XEN2, > + XEN2__PMU_CTRL, NULL); > + default: > + return -EPERM; > + } > +} > #endif /* CONFIG_X86 */ > > long do_flask_op(XEN_GUEST_HANDLE_PARAM(xsm_op_t) u_flask_op); > @@ -1627,6 +1644,7 @@ static struct xsm_operations flask_ops = { > .unbind_pt_irq = flask_unbind_pt_irq, > .ioport_permission = flask_ioport_permission, > .ioport_mapping = flask_ioport_mapping, > + .pmu_op = flask_pmu_op, > #endif > }; > > diff --git a/xen/xsm/flask/policy/access_vectors > b/xen/xsm/flask/policy/access_vectors > index 3a97577..626850d 100644 > --- a/xen/xsm/flask/policy/access_vectors > +++ b/xen/xsm/flask/policy/access_vectors > @@ -86,6 +86,8 @@ class xen2 > psr_cmt_op > # XENPF_get_symbol > get_symbol > +# PMU control > + pmu_ctrl > } > > # Classes domain and domain2 consist of operations that a domain performs on > -- Company details: http://ts.fujitsu.com/imprint.html _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel