On Sun, Jun 13, 2010 at 03:26:09PM +0300, Nadav Har'El wrote:
> This patch includes a couple of utility functions for extracting pointer
> operands of VMX instructions issued by L1 (a guest hypervisor), and
> translating guest-given vmcs12 virtual addresses to guest-physical addresses.
> 
> Signed-off-by: Nadav Har'El <n...@il.ibm.com>
> ---
> --- .before/arch/x86/kvm/x86.c        2010-06-13 15:01:29.000000000 +0300
> +++ .after/arch/x86/kvm/x86.c 2010-06-13 15:01:29.000000000 +0300
> @@ -3286,13 +3286,14 @@ static int kvm_fetch_guest_virt(gva_t ad
>                                         access | PFERR_FETCH_MASK, error);
>  }
>  
> -static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
> +int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
>                              struct kvm_vcpu *vcpu, u32 *error)
>  {
>       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
>       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
>                                         error);
>  }
> +EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
>  
>  static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
>                              struct kvm_vcpu *vcpu, u32 *error)
> --- .before/arch/x86/kvm/x86.h        2010-06-13 15:01:29.000000000 +0300
> +++ .after/arch/x86/kvm/x86.h 2010-06-13 15:01:29.000000000 +0300
> @@ -75,6 +75,9 @@ static inline struct kvm_mem_aliases *kv
>  void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
>  void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
>  
> +int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
> +                     struct kvm_vcpu *vcpu, u32 *error);
> +
>  extern int nested;
>  
>  #endif
> --- .before/arch/x86/kvm/vmx.c        2010-06-13 15:01:29.000000000 +0300
> +++ .after/arch/x86/kvm/vmx.c 2010-06-13 15:01:29.000000000 +0300
> @@ -3654,6 +3654,86 @@ static int handle_vmoff(struct kvm_vcpu 
>       return 1;
>  }
>  
> +/*
> + * Decode the memory-address operand of a vmx instruction, according to the
> + * Intel spec.
> + */
> +#define VMX_OPERAND_SCALING(vii)     ((vii) & 3)
> +#define VMX_OPERAND_ADDR_SIZE(vii)   (((vii) >> 7) & 7)
> +#define VMX_OPERAND_IS_REG(vii)              ((vii) & (1u << 10))
> +#define VMX_OPERAND_SEG_REG(vii)     (((vii) >> 15) & 7)
> +#define VMX_OPERAND_INDEX_REG(vii)   (((vii) >> 18) & 0xf)
> +#define VMX_OPERAND_INDEX_INVALID(vii)       ((vii) & (1u << 22))
> +#define VMX_OPERAND_BASE_REG(vii)    (((vii) >> 23) & 0xf)
> +#define VMX_OPERAND_BASE_INVALID(vii)        ((vii) & (1u << 27))
> +#define VMX_OPERAND_REG(vii)         (((vii) >> 3) & 0xf)
> +#define VMX_OPERAND_REG2(vii)                (((vii) >> 28) & 0xf)
> +static gva_t get_vmx_mem_address(struct kvm_vcpu *vcpu,
> +                              unsigned long exit_qualification,
> +                              u32 vmx_instruction_info)
> +{
> +     int  scaling = VMX_OPERAND_SCALING(vmx_instruction_info);
> +     int  addr_size = VMX_OPERAND_ADDR_SIZE(vmx_instruction_info);
> +     bool is_reg = VMX_OPERAND_IS_REG(vmx_instruction_info);
> +     int  seg_reg = VMX_OPERAND_SEG_REG(vmx_instruction_info);
> +     int  index_reg = VMX_OPERAND_INDEX_REG(vmx_instruction_info);
> +     bool index_is_valid = !VMX_OPERAND_INDEX_INVALID(vmx_instruction_info);
> +     int  base_reg       = VMX_OPERAND_BASE_REG(vmx_instruction_info);
> +     bool base_is_valid  = !VMX_OPERAND_BASE_INVALID(vmx_instruction_info);
> +     gva_t addr;
> +
> +     if (is_reg) {
> +             kvm_queue_exception(vcpu, UD_VECTOR);
> +             return 0;
Isn't zero a legitimate address for a vmx memory operand? The 16 bit case
below also returns 0, so the caller can't tell an error from a real address
of zero; see the sketch right after this function.

> +     }
> +
> +     switch (addr_size) {
> +     case 1: /* 32 bit. High bits are undefined according to the spec. */
> +             exit_qualification &= 0xffffffff;
> +             break;
> +     case 2: /* 64 bit */
> +             break;
> +     default: /* addr_size=0 means 16 bit */
> +             return 0;
> +     }
> +
> +     /* Addr = segment_base + offset */
> +     /* offset = Base + [Index * Scale] + Displacement */
> +     addr = vmx_get_segment_base(vcpu, seg_reg);
> +     if (base_is_valid)
> +             addr += kvm_register_read(vcpu, base_reg);
> +     if (index_is_valid)
> +             addr += kvm_register_read(vcpu, index_reg) << scaling;
> +     addr += exit_qualification; /* holds the displacement */
> +
> +     return addr;
> +}
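To make the point above concrete, something along these lines (an untested
sketch against this patch, not a demand for this exact code) would keep 0
usable as an address: return a status and hand the decoded address back
through an out parameter. The -EINVAL values are placeholders; whatever
error convention fits the callers is fine.

static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
			       unsigned long exit_qualification,
			       u32 vmx_instruction_info, gva_t *ret)
{
	int  scaling   = VMX_OPERAND_SCALING(vmx_instruction_info);
	int  addr_size = VMX_OPERAND_ADDR_SIZE(vmx_instruction_info);
	int  seg_reg   = VMX_OPERAND_SEG_REG(vmx_instruction_info);
	int  index_reg = VMX_OPERAND_INDEX_REG(vmx_instruction_info);
	int  base_reg  = VMX_OPERAND_BASE_REG(vmx_instruction_info);
	gva_t addr;

	if (VMX_OPERAND_IS_REG(vmx_instruction_info)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return -EINVAL;
	}

	switch (addr_size) {
	case 1: /* 32 bit: high bits of the displacement are undefined */
		exit_qualification &= 0xffffffff;
		break;
	case 2: /* 64 bit */
		break;
	default: /* 16 bit (addr_size == 0): an explicit error, not "0" */
		return -EINVAL;
	}

	/* addr = segment base + base + index * 2^scaling + displacement */
	addr = vmx_get_segment_base(vcpu, seg_reg);
	if (!VMX_OPERAND_BASE_INVALID(vmx_instruction_info))
		addr += kvm_register_read(vcpu, base_reg);
	if (!VMX_OPERAND_INDEX_INVALID(vmx_instruction_info))
		addr += kvm_register_read(vcpu, index_reg) << scaling;
	addr += exit_qualification;	/* displacement */

	*ret = addr;
	return 0;
}

Callers would then do "if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, &gva)) return 1;" and never have to guess whether a
returned 0 means failure.
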
> +
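While reading the decoding, I found it handy to check the bit layout with a
throwaway userspace program; posting it here only as an illustration of how
the VMX_OPERAND_* fields above come apart. The value 0xd8102 is made up and
is meant to encode a 64-bit DS:[rax + rbx*4] operand:

#include <stdio.h>

#define VMX_OPERAND_SCALING(vii)	((vii) & 3)
#define VMX_OPERAND_ADDR_SIZE(vii)	(((vii) >> 7) & 7)
#define VMX_OPERAND_IS_REG(vii)		((vii) & (1u << 10))
#define VMX_OPERAND_SEG_REG(vii)	(((vii) >> 15) & 7)
#define VMX_OPERAND_INDEX_REG(vii)	(((vii) >> 18) & 0xf)
#define VMX_OPERAND_INDEX_INVALID(vii)	((vii) & (1u << 22))
#define VMX_OPERAND_BASE_REG(vii)	(((vii) >> 23) & 0xf)
#define VMX_OPERAND_BASE_INVALID(vii)	((vii) & (1u << 27))

int main(void)
{
	unsigned int vii = 0xd8102;	/* hypothetical VMX_INSTRUCTION_INFO */

	/* prints: scaling=2 addr_size=2 is_reg=0 seg=3 index=3 index_valid=1
	 * base=0 base_valid=1, i.e. a 64-bit DS:[rax + rbx*4] operand */
	printf("scaling=%u addr_size=%u is_reg=%d seg=%u index=%u "
	       "index_valid=%d base=%u base_valid=%d\n",
	       VMX_OPERAND_SCALING(vii), VMX_OPERAND_ADDR_SIZE(vii),
	       !!VMX_OPERAND_IS_REG(vii), VMX_OPERAND_SEG_REG(vii),
	       VMX_OPERAND_INDEX_REG(vii), !VMX_OPERAND_INDEX_INVALID(vii),
	       VMX_OPERAND_BASE_REG(vii), !VMX_OPERAND_BASE_INVALID(vii));
	return 0;
}

The effective address for that example is then segment base + rax +
(rbx << 2) + the displacement taken from the exit qualification, exactly as
the function above computes it.
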
> +static int read_guest_vmcs_gpa(struct kvm_vcpu *vcpu, gpa_t *gpap)
> +{
> +     int r;
> +     gva_t gva = get_vmx_mem_address(vcpu,
> +             vmcs_readl(EXIT_QUALIFICATION),
> +             vmcs_read32(VMX_INSTRUCTION_INFO));
> +     if (gva == 0)
> +             return 1;
> +     *gpap = 0;
> +     r = kvm_read_guest_virt(gva, gpap, sizeof(*gpap), vcpu, NULL);
> +     if (r) {
> +             printk(KERN_ERR "%s cannot read guest vmcs addr %lx : %d\n",
> +                    __func__, gva, r);
> +             return r;
> +     }
> +     /* According to the spec, VMCS addresses must be 4K aligned */
> +     if (!IS_ALIGNED(*gpap, PAGE_SIZE)) {
> +             printk(KERN_DEBUG "%s addr %llx not aligned\n",
> +                    __func__, *gpap);
> +             return 1;
> +     }
> +
> +     return 0;
> +}
> +
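I assume later patches add the actual users; for readers following along,
this is roughly the caller shape I'd expect. handle_vmptrld() here is
hypothetical, not part of this patch, and only a sketch following the usual
"return 1" / skip_emulated_instruction() pattern of the other exit handlers
in vmx.c:

static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
	gpa_t vmcs12_addr;

	/* Decode the operand, read the 64-bit VMCS pointer from L1's
	 * memory and check its 4K alignment, all via the helper above. */
	if (read_guest_vmcs_gpa(vcpu, &vmcs12_addr))
		return 1;	/* read_guest_vmcs_gpa() already logged it */

	/*
	 * A real implementation would now map the vmcs12 page at
	 * vmcs12_addr and make it the current VMCS for this vcpu.
	 */

	skip_emulated_instruction(vcpu);
	return 1;
}
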
>  static int handle_invlpg(struct kvm_vcpu *vcpu)
>  {
>       unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

--
                        Gleb.