Please consider creating a ReadyKernel live patch for this issue.

--
Best regards,

Konstantin Khorenko,
Virtuozzo Linux Kernel Team

On 01/11/2017 05:56 PM, Konstantin Khorenko wrote:
The commit has been pushed to "branch-rh7-3.10.0-514.vz7.27.x-ovz" and will appear at 
https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-514.vz7.27.9
------>
commit 7efd73d3548321581182145c349fadcce13ad00b
Author: Jim Mattson <jmatt...@google.com>
Date:   Wed Jan 11 18:56:16 2017 +0400

    ms/kvm: nVMX: Allow L1 to intercept software exceptions (#BP and #OF)

    When L2 exits to L0 due to "exception or NMI", software exceptions
    (#BP and #OF) for which L1 has requested an intercept should be
    handled by L1 rather than L0. Previously, only hardware exceptions
    were forwarded to L1.

    Signed-off-by: Jim Mattson <jmatt...@google.com>
    Cc: sta...@vger.kernel.org
    Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>

    Backport of ms commit ef85b67385436ddc1998f45f1d6a210f935b3388
    Fixes CVE-2016-9588
    https://vulners.com/cve/CVE-2016-9588

    https://jira.sw.ru/browse/PSBM-58194

    Signed-off-by: Evgeny Yakovlev <eyakov...@virtuozzo.com>
---
 arch/x86/kvm/vmx.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3d39923..8dea43b9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1243,10 +1243,10 @@ static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
        return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
 }

-static inline bool is_exception(u32 intr_info)
+static inline bool is_nmi(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-               == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+               == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
 }

 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
@@ -5185,7 +5185,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
        if (is_machine_check(intr_info))
                return handle_machine_check(vcpu);

-       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
+       if (is_nmi(intr_info))
                return 1;  /* already handled by vmx_vcpu_run() */

        if (is_no_device(intr_info)) {
@@ -7629,7 +7629,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)

        switch (exit_reason) {
        case EXIT_REASON_EXCEPTION_NMI:
-               if (!is_exception(intr_info))
+               if (is_nmi(intr_info))
                        return false;
                else if (is_page_fault(intr_info))
                        return enable_ept;
@@ -8226,8 +8226,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
                kvm_machine_check();

        /* We need to handle NMIs before interrupts are enabled */
-       if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-           (exit_intr_info & INTR_INFO_VALID_MASK)) {
+       if (is_nmi(exit_intr_info)) {
                kvm_before_handle_nmi(&vmx->vcpu);
                asm("int $2");
                kvm_after_handle_nmi(&vmx->vcpu);
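
For readers following the change: after this patch, the EXIT_REASON_EXCEPTION_NMI case in nested_vmx_exit_handled() effectively routes exception exits as in the simplified sketch below. This is an illustrative paraphrase, not a verbatim copy of the backported function (the #NM/CR0.TS special case and the rest of the switch are elided):

    case EXIT_REASON_EXCEPTION_NMI:
            /* NMIs stay with L0 (already handled in vmx_vcpu_run()). */
            if (is_nmi(intr_info))
                    return false;
            /* Page faults go to L1 when EPT is on; under shadow paging L0 handles them. */
            else if (is_page_fault(intr_info))
                    return enable_ept;
            /*
             * Everything else -- now including software exceptions such as
             * #BP and #OF -- is reflected to L1 if and only if L1 requested
             * that vector in its exception bitmap.
             */
            return vmcs12->exception_bitmap &
                    (1u << (intr_info & INTR_INFO_VECTOR_MASK));

The key point is that the old is_exception() helper matched only hardware exceptions, so software exceptions never reached the exception_bitmap check and were always consumed by L0; with the test inverted to is_nmi(), they now fall through to it.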
