[Devel] [PATCH] scsi: make scsi error loud

2016-11-11 Thread Dmitry Monakhov
This patch is not for release; it is for testing purposes only.
We need it in order to investigate #PSBM-54665

Signed-off-by: Dmitry Monakhov 

diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 287045b..7364d86 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -141,12 +141,13 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum 
scsi_host_state state)
return 0;
 
  illegal:
-   SCSI_LOG_ERROR_RECOVERY(1,
-   shost_printk(KERN_ERR, shost,
-"Illegal host state transition"
-"%s->%s\n",
-scsi_host_state_name(oldstate),
-scsi_host_state_name(state)));
+   shost_printk(KERN_ERR, shost,
+"Illegal host state transition"
+"%s->%s\n",
+scsi_host_state_name(oldstate),
+scsi_host_state_name(state));
+   dump_stack();
+
return -EINVAL;
 }
 EXPORT_SYMBOL(scsi_host_set_state);
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 573574b..c2e3307 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -61,6 +61,13 @@ struct virtio_scsi_vq {
struct virtqueue *vq;
 };
 
+#define __check_ret(val) do {  \
+   if (val == FAILED) {\
+   printk("virtscsi_failure"); \
+   dump_stack();   \
+   }   \
+   } while(0)
+
 /*
  * Per-target queue state.
  *
@@ -489,6 +496,7 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
 }
 
+
 static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
 struct virtio_scsi_cmd *cmd,
 size_t req_size, size_t resp_size)
@@ -633,6 +641,7 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct 
virtio_scsi_cmd *cmd)
virtscsi_poll_requests(vscsi);
 
 out:
+   __check_ret(ret);
mempool_free(cmd, virtscsi_cmd_pool);
return ret;
 }
@@ -644,8 +653,10 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
 
sdev_printk(KERN_INFO, sc->device, "device reset\n");
cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
-   if (!cmd)
+   if (!cmd) {
+   __check_ret(FAILED);
return FAILED;
+   }
 
memset(cmd, 0, sizeof(*cmd));
cmd->sc = sc;
@@ -666,11 +677,12 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
struct virtio_scsi *vscsi = shost_priv(sc->device->host);
struct virtio_scsi_cmd *cmd;
 
-   scmd_printk(KERN_INFO, sc, "abort\n");
+   scmd_printk(KERN_INFO, sc, "%s abort\n", __FUNCTION__);
cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
-   if (!cmd)
+   if (!cmd) {
+   __check_ret(FAILED);
return FAILED;
-
+   }
memset(cmd, 0, sizeof(*cmd));
cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH] scsi-DBG: make scsi error loud

2016-11-11 Thread Dmitry Monakhov
This patch is not for release; it is for testing purposes only.
We need it in order to investigate #PSBM-54665

Signed-off-by: Dmitry Monakhov 

diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 287045b..7364d86 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -141,12 +141,13 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum 
scsi_host_state state)
return 0;
 
  illegal:
-   SCSI_LOG_ERROR_RECOVERY(1,
-   shost_printk(KERN_ERR, shost,
-"Illegal host state transition"
-"%s->%s\n",
-scsi_host_state_name(oldstate),
-scsi_host_state_name(state)));
+   shost_printk(KERN_ERR, shost,
+"Illegal host state transition"
+"%s->%s\n",
+scsi_host_state_name(oldstate),
+scsi_host_state_name(state));
+   dump_stack();
+
return -EINVAL;
 }
 EXPORT_SYMBOL(scsi_host_set_state);
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 00/16] add tsc_scale and fix VM pause/resume

2016-11-11 Thread Roman Kagan
From: Denis Plotnikov 

The set of patches:
1. adds tsc_scaling functionality for VMX
2. generalizes a number of functions responsible for tsc writing and
   reading
3. fixes VM pause/resume issue #PSBM-54338 by switching to using
   the cached tsc_offset

Haozhong Zhang (12):
  KVM: x86: Collect information for setting TSC scaling ratio
  KVM: x86: Add a common TSC scaling ratio field in kvm_vcpu_arch
  KVM: x86: Add a common TSC scaling function
  KVM: x86: Replace call-back set_tsc_khz() with a common function
  KVM: x86: Replace call-back compute_tsc_offset() with a common
function
  KVM: x86: Move TSC scaling logic out of call-back adjust_tsc_offset()
  KVM: x86: Move TSC scaling logic out of call-back read_l1_tsc()
  KVM: x86: Use the correct vcpu's TSC rate to compute time scale
  KVM: VMX: Enable and initialize VMX TSC scaling
  KVM: VMX: Setup TSC scaling ratio when a vcpu is loaded
  KVM: VMX: Use a scaled host TSC for guest readings of MSR_IA32_TSC
  KVM: VMX: Dump TSC multiplier in dump_vmcs()

Luiz Capitulino (2):
  kvm: x86: add tsc_offset field to struct kvm_vcpu_arch
  kvm: x86: drop read_tsc_offset()

Paolo Bonzini (2):
  KVM: x86: declare a few variables as __read_mostly
  KVM: x86: drop TSC offsetting kvm_x86_ops to fix KVM_GET/SET_CLOCK

 arch/x86/include/asm/kvm_host.h |  28 ++-
 arch/x86/include/asm/vmx.h  |   4 +-
 arch/x86/kvm/hyperv.c   |   2 +-
 arch/x86/kvm/lapic.c|   4 +-
 arch/x86/kvm/svm.c  | 144 +++
 arch/x86/kvm/vmx.c  | 103 ---
 arch/x86/kvm/x86.c  | 182 ++--
 7 files changed, 214 insertions(+), 253 deletions(-)

-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


Re: [Devel] [PATCH 00/16] add tsc_scale and fix VM pause/resume

2016-11-11 Thread Denis V. Lunev
On 11/11/2016 11:47 AM, Roman Kagan wrote:
> From: Denis Plotnikov 
>
> The set of patches:
> 1. adds tsc_scaling functionality for VMX
> 2. generalizes a number of functions responsible for tsc writing and
>reading
> 3. fix VM pause/resume issue #PSBM-54338 by switching to using
>of cached tsc_offset
>
> Haozhong Zhang (12):
>   KVM: x86: Collect information for setting TSC scaling ratio
>   KVM: x86: Add a common TSC scaling ratio field in kvm_vcpu_arch
>   KVM: x86: Add a common TSC scaling function
>   KVM: x86: Replace call-back set_tsc_khz() with a common function
>   KVM: x86: Replace call-back compute_tsc_offset() with a common
> function
>   KVM: x86: Move TSC scaling logic out of call-back adjust_tsc_offset()
>   KVM: x86: Move TSC scaling logic out of call-back read_l1_tsc()
>   KVM: x86: Use the correct vcpu's TSC rate to compute time scale
>   KVM: VMX: Enable and initialize VMX TSC scaling
>   KVM: VMX: Setup TSC scaling ratio when a vcpu is loaded
>   KVM: VMX: Use a scaled host TSC for guest readings of MSR_IA32_TSC
>   KVM: VMX: Dump TSC multiplier in dump_vmcs()
>
> Luiz Capitulino (2):
>   kvm: x86: add tsc_offset field to struct kvm_vcpu_arch
>   kvm: x86: drop read_tsc_offset()
>
> Paolo Bonzini (2):
>   KVM: x86: declare a few variables as __read_mostly
>   KVM: x86: drop TSC offsetting kvm_x86_ops to fix KVM_GET/SET_CLOCK
>
>  arch/x86/include/asm/kvm_host.h |  28 ++-
>  arch/x86/include/asm/vmx.h  |   4 +-
>  arch/x86/kvm/hyperv.c   |   2 +-
>  arch/x86/kvm/lapic.c|   4 +-
>  arch/x86/kvm/svm.c  | 144 +++
>  arch/x86/kvm/vmx.c  | 103 ---
>  arch/x86/kvm/x86.c  | 182 
> ++--
>  7 files changed, 214 insertions(+), 253 deletions(-)
>
guys, pls also drop separate series for 7.3
___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH RHEL7 COMMIT] xfs: compilation fix in xfs_buftarg_wait_rele()

2016-11-11 Thread Konstantin Khorenko
The commit is pushed to "branch-rh7-3.10.0-493.vz7.25.x-ovz" and will appear at 
https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-493.el7
-->
commit a7df44fbebed6954ea001ce4bfdc023454ec1fc1
Author: Dmitry Monakhov 
Date:   Fri Nov 11 13:15:22 2016 +0400

xfs: compilation fix in xfs_buftarg_wait_rele()

To be merged into 661c0b9b33ce924344a100a6580165c265547b4c
("ms/xfs: convert buftarg LRU to generic code")

Signed-off-by: Dmitry Monakhov 
---
 fs/xfs/xfs_buf.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index e379876..28ad0bf 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1582,7 +1582,7 @@ xfs_buftarg_wait_rele(
 
 {
struct xfs_buf  *bp = container_of(item, struct xfs_buf, b_lru);
-
+   struct xfs_buftarg  *btp = bp->b_target;
/*
 * First wait on the buftarg I/O count for all in-flight buffers to be
 * released. This is critical as new buffers do not make the LRU until
___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH RHEL7 COMMIT] ms/xfs: convert dquot cache lru to list_lru part2

2016-11-11 Thread Konstantin Khorenko
The commit is pushed to "branch-rh7-3.10.0-493.vz7.25.x-ovz" and will appear at 
https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-493.el7
-->
commit 17b05cfc89e9a34006abd7267d22548811f447dd
Author: Dmitry Monakhov 
Date:   Fri Nov 11 13:15:20 2016 +0400

ms/xfs: convert dquot cache lru to list_lru part2

After ms commit ff6d6af2351 ("xfs: per-filesystem stats counter
implementation") XFS_STATS_XXX() requires two arguments, so fix it.

To be merged into Initial patchset.

Signed-off-by: Dmitry Monakhov 
---
 fs/xfs/xfs_qm.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 1b383f5..a0518a8 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -478,11 +478,11 @@ xfs_qm_dquot_isolate(
 */
if (dqp->q_nrefs) {
xfs_dqunlock(dqp);
-   XFS_STATS_INC(xs_qm_dqwants);
+   XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
 
trace_xfs_dqreclaim_want(dqp);
list_lru_isolate(lru, >q_lru);
-   XFS_STATS_DEC(xs_qm_dquot_unused);
+   XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
return LRU_REMOVED;
}
 
@@ -526,19 +526,19 @@ xfs_qm_dquot_isolate(
 
ASSERT(dqp->q_nrefs == 0);
list_lru_isolate_move(lru, >q_lru, >dispose);
-   XFS_STATS_DEC(xs_qm_dquot_unused);
+   XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
trace_xfs_dqreclaim_done(dqp);
-   XFS_STATS_INC(xs_qm_dqreclaims);
+   XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
return LRU_REMOVED;
 
 out_miss_busy:
trace_xfs_dqreclaim_busy(dqp);
-   XFS_STATS_INC(xs_qm_dqreclaim_misses);
+   XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
return LRU_SKIP;
 
 out_unlock_dirty:
trace_xfs_dqreclaim_busy(dqp);
-   XFS_STATS_INC(xs_qm_dqreclaim_misses);
+   XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
xfs_dqunlock(dqp);
spin_lock(lru_lock);
return LRU_RETRY;
___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 04/16] KVM: x86: Add a common TSC scaling function

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

VMX and SVM calculate the TSC scaling ratio in a similar logic, so this
patch generalizes it to a common TSC scaling function.

Signed-off-by: Haozhong Zhang 
[Inline the multiplication and shift steps into mul_u64_u64_shr.  Remove
 BUG_ON.  - Paolo]
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit 35181e86df97e4223f4a28fb33e2bcf3b73de141)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/svm.c  | 48 -
 arch/x86/kvm/x86.c  | 40 +-
 3 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 44ff862..4e35f42 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1201,6 +1201,8 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm 
*kvm,
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c613f6e..1efd912 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -211,7 +211,6 @@ static int nested_svm_intercept(struct vcpu_svm *svm);
 static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
  bool has_error_code, u32 error_code);
-static u64 __scale_tsc(u64 ratio, u64 tsc);
 
 enum {
VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
@@ -891,21 +890,7 @@ static __init int svm_hardware_setup(void)
kvm_enable_efer_bits(EFER_FFXSR);
 
if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
-   u64 max;
-
kvm_has_tsc_control = true;
-
-   /*
-* Make sure the user can only configure tsc_khz values that
-* fit into a signed integer.
-* A min value is not calculated needed because it will always
-* be 1 on all machines and a value of 0 is used to disable
-* tsc-scaling for the vcpu.
-*/
-   max = min(0x7fffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));
-
-   kvm_max_guest_tsc_khz = max;
-
kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
kvm_tsc_scaling_ratio_frac_bits = 32;
}
@@ -971,31 +956,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t 
type)
seg->base = 0;
 }
 
-static u64 __scale_tsc(u64 ratio, u64 tsc)
-{
-   u64 mult, frac, _tsc;
-
-   mult  = ratio >> 32;
-   frac  = ratio & ((1ULL << 32) - 1);
-
-   _tsc  = tsc;
-   _tsc *= mult;
-   _tsc += (tsc >> 32) * frac;
-   _tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
-
-   return _tsc;
-}
-
-static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
-{
-   u64 _tsc = tsc;
-
-   if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
-   _tsc = __scale_tsc(vcpu->arch.tsc_scaling_ratio, tsc);
-
-   return _tsc;
-}
-
 static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool 
scale)
 {
u64 ratio;
@@ -1064,7 +1024,7 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, 
s64 adjustment, bool ho
if (host) {
if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
WARN_ON(adjustment < 0);
-   adjustment = svm_scale_tsc(vcpu, (u64)adjustment);
+   adjustment = kvm_scale_tsc(vcpu, (u64)adjustment);
}
 
svm->vmcb->control.tsc_offset += adjustment;
@@ -1082,7 +1042,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, 
u64 target_tsc)
 {
u64 tsc;
 
-   tsc = svm_scale_tsc(vcpu, native_read_tsc());
+   tsc = kvm_scale_tsc(vcpu, native_read_tsc());
 
return target_tsc - tsc;
 }
@@ -3062,7 +3022,7 @@ static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 
host_tsc)
 {
struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
return vmcb->control.tsc_offset +
-   svm_scale_tsc(vcpu, host_tsc);
+   kvm_scale_tsc(vcpu, host_tsc);
 }
 
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -3072,7 +3032,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct 
msr_data *msr_info)
switch (msr_info->index) {
case MSR_IA32_TSC: {
msr_info->data = svm->vmcb->control.tsc_offset +
-   svm_scale_tsc(vcpu, native_read_tsc());
+   kvm_scale_tsc(vcpu, native_read_tsc());
 
break;
}

[Devel] [PATCH 15/16] kvm: x86: drop read_tsc_offset()

2016-11-11 Thread Roman Kagan
From: Luiz Capitulino 

The TSC offset can now be read directly from struct kvm_arch_vcpu.

Signed-off-by: Luiz Capitulino 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit 3e3f50262eb441d0fd1de4dce06739e9c0fe7c61)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/kvm_host.h | 1 -
 arch/x86/kvm/svm.c  | 8 
 arch/x86/kvm/vmx.c  | 6 --
 arch/x86/kvm/x86.c  | 2 +-
 4 files changed, 1 insertion(+), 16 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1166dc5..aa86551 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -829,7 +829,6 @@ struct kvm_x86_ops {
 
bool (*has_wbinvd_exit)(void);
 
-   u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6636074..9021e79 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -956,13 +956,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t 
type)
seg->base = 0;
 }
 
-static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
-{
-   struct vcpu_svm *svm = to_svm(vcpu);
-
-   return svm->vmcb->control.tsc_offset;
-}
-
 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
struct vcpu_svm *svm = to_svm(vcpu);
@@ -4347,7 +4340,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 
.has_wbinvd_exit = svm_has_wbinvd_exit,
 
-   .read_tsc_offset = svm_read_tsc_offset,
.write_tsc_offset = svm_write_tsc_offset,
.adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
.read_l1_tsc = svm_read_l1_tsc,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a03e7d9..45007f1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2202,11 +2202,6 @@ static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 
host_tsc)
return host_tsc + tsc_offset;
 }
 
-static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
-{
-   return vmcs_read64(TSC_OFFSET);
-}
-
 /*
  * writes 'offset' into guest's timestamp counter offset register
  */
@@ -9855,7 +9850,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
-   .read_tsc_offset = vmx_read_tsc_offset,
.write_tsc_offset = vmx_write_tsc_offset,
.adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
.read_l1_tsc = vmx_read_l1_tsc,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd8e822..5ec55b1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1339,7 +1339,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 
 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
 {
-   u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
+   u64 curr_offset = vcpu->arch.tsc_offset;
vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
 }
 
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 16/16] KVM: x86: drop TSC offsetting kvm_x86_ops to fix KVM_GET/SET_CLOCK

2016-11-11 Thread Roman Kagan
From: Paolo Bonzini 

Since commit a545ab6a0085 ("kvm: x86: add tsc_offset field to struct
kvm_vcpu_arch", 2016-09-07) the offset between host and L1 TSC is
cached and need not be fished out of the VMCS or VMCB.  This means
that we can implement adjust_tsc_offset_guest and read_l1_tsc
entirely in generic code.  The simplification is particularly
significant for VMX code, where vmx->nested.vmcs01_tsc_offset
was duplicating what is now in vcpu->arch.tsc_offset.  Therefore
the vmcs01_tsc_offset can be dropped completely.

More importantly, this fixes KVM_GET_CLOCK/KVM_SET_CLOCK
which, after commit 108b249c453d ("KVM: x86: introduce get_kvmclock_ns",
2016-09-01) called read_l1_tsc while the VMCS was not loaded.
It thus returned bogus values on Intel CPUs.

Fixes: 108b249c453dd7132599ab6dc7e435a7036c193f
Reported-by: Roman Kagan 
Reviewed-by: Radim Krčmář 
Signed-off-by: Paolo Bonzini 

(cherry-picked from commit ea26e4ec08d4727e3a9e48a6b74695861effcbd9)
modifications to hyperv.c added
fix #PSBM-54338
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/kvm_host.h |  3 ---
 arch/x86/kvm/hyperv.c   |  2 +-
 arch/x86/kvm/svm.c  | 23 ---
 arch/x86/kvm/vmx.c  | 39 +++
 arch/x86/kvm/x86.c  |  8 
 5 files changed, 8 insertions(+), 67 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index aa86551..2348a55 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -821,7 +821,6 @@ struct kvm_x86_ops {
int (*get_lpage_level)(void);
bool (*rdtscp_supported)(void);
bool (*invpcid_supported)(void);
-   void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
 
void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -831,8 +830,6 @@ struct kvm_x86_ops {
 
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
-   u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
-
void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
int (*check_intercept)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index b5ebcaf..9a5df5c 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -398,7 +398,7 @@ static u64 get_time_ref_counter(struct kvm *kvm)
return div_u64(get_kvmclock_ns(kvm), 100);
 
vcpu = kvm_get_vcpu(kvm, 0);
-   tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+   tsc = kvm_read_l1_tsc(vcpu, native_read_tsc());
return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
+ hv->tsc_ref.tsc_offset;
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9021e79..3e51a1f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -975,21 +975,6 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, 
u64 offset)
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
-{
-   struct vcpu_svm *svm = to_svm(vcpu);
-
-   svm->vmcb->control.tsc_offset += adjustment;
-   if (is_guest_mode(vcpu))
-   svm->nested.hsave->control.tsc_offset += adjustment;
-   else
-   trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-svm->vmcb->control.tsc_offset - adjustment,
-svm->vmcb->control.tsc_offset);
-
-   mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
-}
-
 static void init_vmcb(struct vcpu_svm *svm)
 {
struct vmcb_control_area *control = >vmcb->control;
@@ -2961,12 +2946,6 @@ static int cr8_write_interception(struct vcpu_svm *svm)
return 0;
 }
 
-static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
-{
-   struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
-   return vmcb->control.tsc_offset + host_tsc;
-}
-
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
struct vcpu_svm *svm = to_svm(vcpu);
@@ -4341,8 +4320,6 @@ static struct kvm_x86_ops svm_x86_ops = {
.has_wbinvd_exit = svm_has_wbinvd_exit,
 
.write_tsc_offset = svm_write_tsc_offset,
-   .adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
-   .read_l1_tsc = svm_read_l1_tsc,
 
.set_tdp_cr3 = set_tdp_cr3,
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 45007f1..6497232 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -396,7 +396,6 @@ struct nested_vmx {
/* vmcs02_list cache of VMCSs recently used to run L2 guests */
struct list_head vmcs02_pool;
int vmcs02_num;
-   u64 vmcs01_tsc_offset;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
/*
@@ 

[Devel] [PATCH 14/16] kvm: x86: add tsc_offset field to struct kvm_vcpu_arch

2016-11-11 Thread Roman Kagan
From: Luiz Capitulino 

A future commit will want to easily read a vCPU's TSC offset,
so we store it in struct kvm_arch_vcpu_arch for easy access.

Signed-off-by: Luiz Capitulino 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit a545ab6a0085e6df9c7b6e9734b40ba4d2aca8c9)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/x86.c  | 10 --
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 830d1cd..1166dc5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -484,6 +484,7 @@ struct kvm_vcpu_arch {
struct kvm_steal_time steal;
} st;
 
+   u64 tsc_offset;
u64 last_guest_tsc;
u64 last_host_tsc;
u64 tsc_offset_adjustment;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7fcb155..bd8e822 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1385,6 +1385,12 @@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 }
 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
+static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+   kvm_x86_ops->write_tsc_offset(vcpu, offset);
+   vcpu->arch.tsc_offset = offset;
+}
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
struct kvm *kvm = vcpu->kvm;
@@ -1494,7 +1500,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data 
*msr)
 
if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
update_ia32_tsc_adjust_msr(vcpu, offset);
-   kvm_x86_ops->write_tsc_offset(vcpu, offset);
+   kvm_vcpu_write_tsc_offset(vcpu, offset);
raw_spin_unlock_irqrestore(>arch.tsc_write_lock, flags);
 
spin_lock(>arch.pvclock_gtod_sync_lock);
@@ -2959,7 +2965,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (check_tsc_unstable()) {
u64 offset = kvm_compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
-   kvm_x86_ops->write_tsc_offset(vcpu, offset);
+   kvm_vcpu_write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
}
/*
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 03/16] KVM: x86: Add a common TSC scaling ratio field in kvm_vcpu_arch

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

This patch moves the field of TSC scaling ratio from the architecture
struct vcpu_svm to the common struct kvm_vcpu_arch.

Signed-off-by: Haozhong Zhang 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit ad721883e9c5f46cc5fa9496bc12c097c6238b4a)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/svm.c  | 27 +--
 arch/x86/kvm/x86.c  | 18 --
 3 files changed, 30 insertions(+), 16 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e4cff52..44ff862 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -496,6 +496,7 @@ struct kvm_vcpu_arch {
u32 virtual_tsc_mult;
u32 virtual_tsc_khz;
s64 ia32_tsc_adjust_msr;
+   u64 tsc_scaling_ratio;
 
atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
unsigned nmi_pending; /* NMI queued after currently running handler */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 71b7c57..c613f6e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -158,7 +158,8 @@ struct vcpu_svm {
unsigned long int3_rip;
u32 apf_reason;
 
-   u64  tsc_ratio;
+   /* cached guest cpuid flags for faster access */
+   bool nrips_enabled  : 1;
 };
 
 static DEFINE_PER_CPU(u64, current_tsc_ratio);
@@ -987,24 +988,22 @@ static u64 __scale_tsc(u64 ratio, u64 tsc)
 
 static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
 {
-   struct vcpu_svm *svm = to_svm(vcpu);
u64 _tsc = tsc;
 
-   if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
-   _tsc = __scale_tsc(svm->tsc_ratio, tsc);
+   if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
+   _tsc = __scale_tsc(vcpu->arch.tsc_scaling_ratio, tsc);
 
return _tsc;
 }
 
 static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool 
scale)
 {
-   struct vcpu_svm *svm = to_svm(vcpu);
u64 ratio;
u64 khz;
 
/* Guest TSC same frequency as host TSC? */
if (!scale) {
-   svm->tsc_ratio = TSC_RATIO_DEFAULT;
+   vcpu->arch.tsc_scaling_ratio = TSC_RATIO_DEFAULT;
return;
}
 
@@ -1029,7 +1028,7 @@ static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 
user_tsc_khz, bool scale)
user_tsc_khz);
return;
}
-   svm->tsc_ratio = ratio;
+   vcpu->arch.tsc_scaling_ratio = ratio;
 }
 
 static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
@@ -1063,7 +1062,7 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, 
s64 adjustment, bool ho
struct vcpu_svm *svm = to_svm(vcpu);
 
if (host) {
-   if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
+   if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
WARN_ON(adjustment < 0);
adjustment = svm_scale_tsc(vcpu, (u64)adjustment);
}
@@ -1231,8 +1230,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, 
unsigned int id)
goto out;
}
 
-   svm->tsc_ratio = TSC_RATIO_DEFAULT;
-
err = kvm_vcpu_init(>vcpu, kvm, id);
if (err)
goto free_svm;
@@ -1323,10 +1320,12 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int 
cpu)
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
-   if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
-   svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
-   __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
-   wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
+   if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
+   u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
+   if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
+   __this_cpu_write(current_tsc_ratio, tsc_ratio);
+   wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
+   }
}
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e911037..e348b4f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -112,6 +112,7 @@ u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;
 EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
 u64  __read_mostly kvm_max_tsc_scaling_ratio;
 EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
+static u64 __read_mostly kvm_default_tsc_scaling_ratio;
 
 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
 static u32 __read_mostly tsc_tolerance_ppm = 250;
@@ -1236,8 +1237,11 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 
this_tsc_khz)
int use_scaling = 0;
 
/* tsc_khz can be zero if TSC 

[Devel] [PATCH 12/16] KVM: VMX: Use a scaled host TSC for guest readings of MSR_IA32_TSC

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

This patch makes kvm-intel to return a scaled host TSC plus the TSC
offset when handling guest readings to MSR_IA32_TSC.

Signed-off-by: Haozhong Zhang 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit be7b263ea925324e54e48c3558d4719be5374053)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/kvm/vmx.c | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 265bc0e..9f3c26a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2176,15 +2176,16 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 
 /*
  * reads and returns guest's timestamp counter "register"
- * guest_tsc = host_tsc + tsc_offset-- 21.3
+ * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
+ * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
  */
-static u64 guest_read_tsc(void)
+static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
 {
u64 host_tsc, tsc_offset;
 
rdtscll(host_tsc);
tsc_offset = vmcs_read64(TSC_OFFSET);
-   return host_tsc + tsc_offset;
+   return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
 }
 
 /*
@@ -2539,7 +2540,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct 
msr_data *msr_info)
case MSR_EFER:
return kvm_get_msr_common(vcpu, msr_info);
case MSR_IA32_TSC:
-   msr_info->data = guest_read_tsc();
+   msr_info->data = guest_read_tsc(vcpu);
break;
case MSR_IA32_SYSENTER_CS:
msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 07/16] KVM: x86: Move TSC scaling logic out of call-back adjust_tsc_offset()

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

For both VMX and SVM, if the 2nd argument of call-back
adjust_tsc_offset() is the host TSC, then adjust_tsc_offset() will scale
it first. This patch moves this common TSC scaling logic to its caller
adjust_tsc_offset_host() and rename the call-back adjust_tsc_offset() to
adjust_tsc_offset_guest().

Signed-off-by: Haozhong Zhang 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit 58ea6767874e791a6c4f5c96c7d9155de4b1af28)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/kvm_host.h | 13 +
 arch/x86/kvm/svm.c  | 10 ++
 arch/x86/kvm/vmx.c  |  4 ++--
 arch/x86/kvm/x86.c  | 16 +++-
 4 files changed, 20 insertions(+), 23 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fec5255..ca305fc 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -820,7 +820,7 @@ struct kvm_x86_ops {
int (*get_lpage_level)(void);
bool (*rdtscp_supported)(void);
bool (*invpcid_supported)(void);
-   void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool 
host);
+   void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
 
void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -882,17 +882,6 @@ struct kvm_arch_async_pf {
 
 extern struct kvm_x86_ops *kvm_x86_ops;
 
-static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
-  s64 adjustment)
-{
-   kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
-}
-
-static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 
adjustment)
-{
-   kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
-}
-
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index fd0b58e..b7c2e94 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -982,16 +982,10 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, 
u64 offset)
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool 
host)
+static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
 {
struct vcpu_svm *svm = to_svm(vcpu);
 
-   if (host) {
-   if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
-   WARN_ON(adjustment < 0);
-   adjustment = kvm_scale_tsc(vcpu, (u64)adjustment);
-   }
-
svm->vmcb->control.tsc_offset += adjustment;
if (is_guest_mode(vcpu))
svm->nested.hsave->control.tsc_offset += adjustment;
@@ -4356,7 +4350,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 
.read_tsc_offset = svm_read_tsc_offset,
.write_tsc_offset = svm_write_tsc_offset,
-   .adjust_tsc_offset = svm_adjust_tsc_offset,
+   .adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
.read_l1_tsc = svm_read_l1_tsc,
 
.set_tdp_cr3 = set_tdp_cr3,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 986aa95..f61433a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2218,7 +2218,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, 
u64 offset)
}
 }
 
-static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool 
host)
+static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
 {
u64 offset = vmcs_read64(TSC_OFFSET);
 
@@ -9832,7 +9832,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
.read_tsc_offset = vmx_read_tsc_offset,
.write_tsc_offset = vmx_write_tsc_offset,
-   .adjust_tsc_offset = vmx_adjust_tsc_offset,
+   .adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
.read_l1_tsc = vmx_read_l1_tsc,
 
.set_tdp_cr3 = vmx_set_cr3,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c2c2a60..5a7d050 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1504,6 +1504,20 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct 
msr_data *msr)
 
 EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+  s64 adjustment)
+{
+   kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 
adjustment)
+{
+   if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
+   WARN_ON(adjustment < 0);
+   adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
+   kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+}
+
 #ifdef CONFIG_X86_64
 
 static cycle_t read_tsc(void)
@@ -2263,7 +2277,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct 
msr_data *msr_info)
if 

[Devel] [PATCH 09/16] KVM: x86: Use the correct vcpu's TSC rate to compute time scale

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

This patch makes KVM use virtual_tsc_khz rather than the host TSC rate
as vcpu's TSC rate to compute the time scale if TSC scaling is enabled.

Signed-off-by: Haozhong Zhang 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit 27cca94e032c1749825fdd9b6b379e4235cd52e1)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/kvm/x86.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3913a62..7fcb155 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1780,7 +1780,7 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
-   unsigned long flags, this_tsc_khz;
+   unsigned long flags, this_tsc_khz, tgt_tsc_khz;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	struct kvm_arch *ka = &v->kvm->arch;
s64 kernel_ns;
@@ -1841,7 +1841,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
/* With all the info we got, fill in the values */
 
if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
-   kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
+   tgt_tsc_khz = kvm_has_tsc_control ?
+   vcpu->virtual_tsc_khz : this_tsc_khz;
+   kvm_get_time_scale(NSEC_PER_SEC / 1000, tgt_tsc_khz,
			   &vcpu->hv_clock.tsc_shift,
			   &vcpu->hv_clock.tsc_to_system_mul);
vcpu->hw_tsc_khz = this_tsc_khz;
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 13/16] KVM: VMX: Dump TSC multiplier in dump_vmcs()

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

This patch enhances dump_vmcs() to dump the value of TSC multiplier
field in VMCS.

Signed-off-by: Haozhong Zhang 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit 8cfe9866960581303f244780945c5d12ecc4e5bc)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/kvm/vmx.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9f3c26a..a03e7d9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7521,6 +7521,9 @@ static void dump_vmcs(void)
   vmcs_read32(IDT_VECTORING_INFO_FIELD),
   vmcs_read32(IDT_VECTORING_ERROR_CODE));
pr_err("TSC Offset = 0x%016lx\n", vmcs_readl(TSC_OFFSET));
+   if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
+   pr_err("TSC Multiplier = 0x%016lx\n",
+  vmcs_readl(TSC_MULTIPLIER));
if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 11/16] KVM: VMX: Setup TSC scaling ratio when a vcpu is loaded

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

This patch makes kvm-intel module to load TSC scaling ratio into TSC
multiplier field of VMCS when a vcpu is loaded, so that TSC scaling
ratio can take effect if VMX TSC scaling is enabled.

Signed-off-by: Haozhong Zhang 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit ff2c3a1803775cc72dc6f624b59554956396b0ee)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/kvm/vmx.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ab8a678..265bc0e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1883,6 +1883,12 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
+   /* Setup TSC multiplier */
+   if (cpu_has_vmx_tsc_scaling())
+   vmcs_write64(TSC_MULTIPLIER,
+vcpu->arch.tsc_scaling_ratio);
+
vmx->loaded_vmcs->cpu = cpu;
}
 }
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 10/16] KVM: VMX: Enable and initialize VMX TSC scaling

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

This patch enhances kvm-intel module to enable VMX TSC scaling and
collects information of TSC scaling ratio during initialization.

Signed-off-by: Haozhong Zhang 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit 64903d6195cbfb051ce339d30848cc64babdba12)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/vmx.h |  4 +++-
 arch/x86/kvm/vmx.c | 17 -
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 8530b93..ebe7cf9d 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -70,7 +70,7 @@
 #define SECONDARY_EXEC_ENABLE_INVPCID  0x1000
 #define SECONDARY_EXEC_SHADOW_VMCS  0x4000
 #define SECONDARY_EXEC_ENABLE_PML   0x00020000
-
+#define SECONDARY_EXEC_TSC_SCALING  0x02000000
 
 #define PIN_BASED_EXT_INTR_MASK 0x0001
 #define PIN_BASED_NMI_EXITING   0x0008
@@ -163,6 +163,8 @@ enum vmcs_field {
EOI_EXIT_BITMAP3_HIGH   = 0x2023,
VMREAD_BITMAP   = 0x2026,
VMWRITE_BITMAP  = 0x2028,
+   TSC_MULTIPLIER  = 0x2032,
+   TSC_MULTIPLIER_HIGH = 0x2033,
GUEST_PHYSICAL_ADDRESS  = 0x2400,
GUEST_PHYSICAL_ADDRESS_HIGH = 0x2401,
VMCS_LINK_POINTER   = 0x2800,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f61433a..ab8a678 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -104,6 +104,8 @@ module_param(nested, bool, S_IRUGO);
 static bool __read_mostly enable_pml = 1;
 module_param_named(pml, enable_pml, bool, S_IRUGO);
 
+#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
+
 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
 #define KVM_VM_CR0_ALWAYS_ON   \
@@ -1071,6 +1073,12 @@ static inline bool cpu_has_vmx_pml(void)
return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
 }
 
+static inline bool cpu_has_vmx_tsc_scaling(void)
+{
+   return vmcs_config.cpu_based_2nd_exec_ctrl &
+   SECONDARY_EXEC_TSC_SCALING;
+}
+
 static inline bool report_flexpriority(void)
 {
return flexpriority_enabled;
@@ -2877,7 +2885,8 @@ static __init int setup_vmcs_config(struct vmcs_config 
*vmcs_conf)
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_SHADOW_VMCS |
-   SECONDARY_EXEC_ENABLE_PML;
+   SECONDARY_EXEC_ENABLE_PML |
+   SECONDARY_EXEC_TSC_SCALING;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -5840,6 +5849,12 @@ static __init int hardware_setup(void)
if (nested)
nested_vmx_setup_ctls_msrs();
 
+   if (cpu_has_vmx_tsc_scaling()) {
+   kvm_has_tsc_control = true;
+   kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
+   kvm_tsc_scaling_ratio_frac_bits = 48;
+   }
+
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 08/16] KVM: x86: Move TSC scaling logic out of call-back read_l1_tsc()

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

Both VMX and SVM scales the host TSC in the same way in call-back
read_l1_tsc(), so this patch moves the scaling logic from call-back
read_l1_tsc() to a common function kvm_read_l1_tsc().

Signed-off-by: Haozhong Zhang 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit 4ba76538dd52dd9b18b464e509cb8f3ed4ed993f)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/lapic.c|  4 ++--
 arch/x86/kvm/svm.c  |  3 +--
 arch/x86/kvm/x86.c  | 11 ---
 4 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ca305fc..830d1cd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1189,6 +1189,7 @@ void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
 
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index d6d6b52..3041749 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1125,7 +1125,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
tsc_deadline = apic->lapic_timer.expired_tscdeadline;
apic->lapic_timer.expired_tscdeadline = 0;
-   guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+   guest_tsc = kvm_read_l1_tsc(vcpu, native_read_tsc());
trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
 
/* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
@@ -1223,7 +1223,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
local_irq_save(flags);
 
now = apic->lapic_timer.timer.base->get_time();
-   guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+   guest_tsc = kvm_read_l1_tsc(vcpu, native_read_tsc());
if (likely(tscdeadline > guest_tsc)) {
		ns = (tscdeadline - guest_tsc) * 1000000ULL;
do_div(ns, this_tsc_khz);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b7c2e94..6636074 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2971,8 +2971,7 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
-   return vmcb->control.tsc_offset +
-   kvm_scale_tsc(vcpu, host_tsc);
+   return vmcb->control.tsc_offset + host_tsc;
 }
 
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5a7d050..3913a62 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1379,6 +1379,12 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, 
u64 target_tsc)
return target_tsc - tsc;
 }
 
+u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
+{
+   return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc));
+}
+EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
struct kvm *kvm = vcpu->kvm;
@@ -1810,7 +1816,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
kernel_ns = get_kernel_ns();
}
 
-   tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
+   tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
 
/*
 * We may have to catch up the TSC to match elapsed wall clock
@@ -6658,8 +6664,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (hw_breakpoint_active())
hw_breakpoint_restore();
 
-   vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-  native_read_tsc());
+   vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, native_read_tsc());
 
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 02/16] KVM: x86: Collect information for setting TSC scaling ratio

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

The number of bits of the fractional part of the 64-bit TSC scaling
ratio in VMX and SVM is different. This patch makes the architecture
code to collect the number of fractional bits and other related
information into variables that can be accessed in the common code.

Signed-off-by: Haozhong Zhang 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit bc9b961b357ea8129d75613b7af4fdf57ced9b9f)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/kvm_host.h | 4 
 arch/x86/kvm/svm.c  | 3 +++
 arch/x86/kvm/x86.c  | 4 
 3 files changed, 11 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 51ad191..e4cff52 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -947,6 +947,10 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
 extern bool kvm_has_tsc_control;
 /* maximum supported tsc_khz for guests */
 extern u32  kvm_max_guest_tsc_khz;
+/* number of bits of the fractional part of the TSC scaling ratio */
+extern u8   kvm_tsc_scaling_ratio_frac_bits;
+/* maximum allowed value of TSC scaling ratio */
+extern u64  kvm_max_tsc_scaling_ratio;
 
 enum emulation_result {
EMULATE_DONE, /* no further processing */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 12850b2..71b7c57 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -904,6 +904,9 @@ static __init int svm_hardware_setup(void)
	max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));
 
kvm_max_guest_tsc_khz = max;
+
+   kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
+   kvm_tsc_scaling_ratio_frac_bits = 32;
}
 
if (nested) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 992f20c..e911037 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -108,6 +108,10 @@ bool __read_mostly kvm_has_tsc_control;
 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
 u32  __read_mostly kvm_max_guest_tsc_khz;
 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
+u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;
+EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
+u64  __read_mostly kvm_max_tsc_scaling_ratio;
+EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
 
 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
 static u32 __read_mostly tsc_tolerance_ppm = 250;
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 06/16] KVM: x86: Replace call-back compute_tsc_offset() with a common function

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

Both VMX and SVM calculate the tsc-offset in the same way, so this
patch removes the call-back compute_tsc_offset() and replaces it with a
common function kvm_compute_tsc_offset().

Signed-off-by: Haozhong Zhang 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit 07c1419a32bbba08cf1efb6d1ecaf24f174fa4c3)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/svm.c  | 10 --
 arch/x86/kvm/vmx.c  |  6 --
 arch/x86/kvm/x86.c  | 15 ---
 4 files changed, 12 insertions(+), 20 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 62cba87..fec5255 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -831,7 +831,6 @@ struct kvm_x86_ops {
u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
-   u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
 
void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5dd5ecf..fd0b58e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1003,15 +1003,6 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, 
s64 adjustment, bool ho
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
-{
-   u64 tsc;
-
-   tsc = kvm_scale_tsc(vcpu, native_read_tsc());
-
-   return target_tsc - tsc;
-}
-
 static void init_vmcb(struct vcpu_svm *svm)
 {
	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -4366,7 +4357,6 @@ static struct kvm_x86_ops svm_x86_ops = {
.read_tsc_offset = svm_read_tsc_offset,
.write_tsc_offset = svm_write_tsc_offset,
.adjust_tsc_offset = svm_adjust_tsc_offset,
-   .compute_tsc_offset = svm_compute_tsc_offset,
.read_l1_tsc = svm_read_l1_tsc,
 
.set_tdp_cr3 = set_tdp_cr3,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 69a85cd..986aa95 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2231,11 +2231,6 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, 
s64 adjustment, bool ho
   offset + adjustment);
 }
 
-static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
-{
-   return target_tsc - native_read_tsc();
-}
-
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
 {
struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
@@ -9838,7 +9833,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
.read_tsc_offset = vmx_read_tsc_offset,
.write_tsc_offset = vmx_write_tsc_offset,
.adjust_tsc_offset = vmx_adjust_tsc_offset,
-   .compute_tsc_offset = vmx_compute_tsc_offset,
.read_l1_tsc = vmx_read_l1_tsc,
 
.set_tdp_cr3 = vmx_set_cr3,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 868d22c..c2c2a60 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1370,6 +1370,15 @@ u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
 }
 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
 
+static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+   u64 tsc;
+
+   tsc = kvm_scale_tsc(vcpu, native_read_tsc());
+
+   return target_tsc - tsc;
+}
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
struct kvm *kvm = vcpu->kvm;
@@ -1381,7 +1390,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data 
*msr)
u64 data = msr->data;
 
	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-   offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+   offset = kvm_compute_tsc_offset(vcpu, data);
ns = get_kernel_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
 
@@ -1438,7 +1447,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data 
*msr)
} else {
u64 delta = nsec_to_cycles(vcpu, elapsed);
data += delta;
-   offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+   offset = kvm_compute_tsc_offset(vcpu, data);
pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
}
matched = true;
@@ -2926,7 +2935,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
if (check_tsc_unstable()) {
-   u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
+   u64 offset = kvm_compute_tsc_offset(vcpu,
  

[Devel] [PATCH 01/16] KVM: x86: declare a few variables as __read_mostly

2016-11-11 Thread Roman Kagan
From: Paolo Bonzini 

These include module parameters and variables that are set by
kvm_x86_ops->hardware_setup.

Signed-off-by: Paolo Bonzini 
(cherry-picked from commit 893590c73426585dfd9f33358b19f18d9395fb2f)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/kvm_host.h |  2 --
 arch/x86/kvm/x86.c  | 14 +++---
 2 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index eee8034..51ad191 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -945,8 +945,6 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
 
 /* control of guest tsc rate supported? */
 extern bool kvm_has_tsc_control;
-/* minimum supported tsc_khz for guests */
-extern u32  kvm_min_guest_tsc_khz;
 /* maximum supported tsc_khz for guests */
 extern u32  kvm_max_guest_tsc_khz;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1a3bea2..992f20c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -92,10 +92,10 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
-struct kvm_x86_ops *kvm_x86_ops;
+struct kvm_x86_ops *kvm_x86_ops __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
 
-static bool ignore_msrs = 0;
+static bool __read_mostly ignore_msrs = 0;
 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
 unsigned int min_timer_period_us = 500;
@@ -104,20 +104,20 @@ module_param(min_timer_period_us, uint, S_IRUGO | 
S_IWUSR);
 static bool __read_mostly kvmclock_periodic_sync = true;
 module_param(kvmclock_periodic_sync, bool, S_IRUGO);
 
-bool kvm_has_tsc_control;
+bool __read_mostly kvm_has_tsc_control;
 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
-u32  kvm_max_guest_tsc_khz;
+u32  __read_mostly kvm_max_guest_tsc_khz;
 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
 
 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
-static u32 tsc_tolerance_ppm = 250;
+static u32 __read_mostly tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
 /* lapic timer advance (tscdeadline mode only) in nanoseconds */
-unsigned int lapic_timer_advance_ns = 0;
+unsigned int __read_mostly lapic_timer_advance_ns = 0;
 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
 
-static bool backwards_tsc_observed = false;
+static bool __read_mostly backwards_tsc_observed = false;
 
 #define KVM_NR_SHARED_MSRS 16
 
-- 
2.7.4

___
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel


[Devel] [PATCH 05/16] KVM: x86: Replace call-back set_tsc_khz() with a common function

2016-11-11 Thread Roman Kagan
From: Haozhong Zhang 

Both VMX and SVM propagate virtual_tsc_khz in the same way, so this
patch removes the call-back set_tsc_khz() and replaces it with a common
function.

Signed-off-by: Haozhong Zhang 
Signed-off-by: Paolo Bonzini 
(cherry-picked from commit 381d585c80e34988269bd7901ad910981e900be1)
Signed-off-by: Denis Plotnikov 
Signed-off-by: Roman Kagan 
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/svm.c  | 36 
 arch/x86/kvm/vmx.c  | 17 ---
 arch/x86/kvm/x86.c  | 46 -
 4 files changed, 41 insertions(+), 59 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4e35f42..62cba87 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -828,7 +828,6 @@ struct kvm_x86_ops {
 
bool (*has_wbinvd_exit)(void);
 
-   void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool 
scale);
u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1efd912..5dd5ecf 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -956,41 +956,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t 
type)
seg->base = 0;
 }
 
-static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool 
scale)
-{
-   u64 ratio;
-   u64 khz;
-
-   /* Guest TSC same frequency as host TSC? */
-   if (!scale) {
-   vcpu->arch.tsc_scaling_ratio = TSC_RATIO_DEFAULT;
-   return;
-   }
-
-   /* TSC scaling supported? */
-   if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
-   if (user_tsc_khz > tsc_khz) {
-   vcpu->arch.tsc_catchup = 1;
-   vcpu->arch.tsc_always_catchup = 1;
-   } else
-   WARN(1, "user requested TSC rate below hardware 
speed\n");
-   return;
-   }
-
-   khz = user_tsc_khz;
-
-   /* TSC scaling required  - calculate ratio */
-   ratio = khz << 32;
-   do_div(ratio, tsc_khz);
-
-   if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
-   WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
-   user_tsc_khz);
-   return;
-   }
-   vcpu->arch.tsc_scaling_ratio = ratio;
-}
-
 static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
 {
struct vcpu_svm *svm = to_svm(vcpu);
@@ -4398,7 +4363,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 
.has_wbinvd_exit = svm_has_wbinvd_exit,
 
-   .set_tsc_khz = svm_set_tsc_khz,
.read_tsc_offset = svm_read_tsc_offset,
.write_tsc_offset = svm_write_tsc_offset,
.adjust_tsc_offset = svm_adjust_tsc_offset,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index aecf056..69a85cd 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2187,22 +2187,6 @@ static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 
host_tsc)
return host_tsc + tsc_offset;
 }
 
-/*
- * Engage any workarounds for mis-matched TSC rates.  Currently limited to
- * software catchup for faster rates on slower CPUs.
- */
-static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool 
scale)
-{
-   if (!scale)
-   return;
-
-   if (user_tsc_khz > tsc_khz) {
-   vcpu->arch.tsc_catchup = 1;
-   vcpu->arch.tsc_always_catchup = 1;
-   } else
-   WARN(1, "user requested TSC rate below hardware speed\n");
-}
-
 static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
 {
return vmcs_read64(TSC_OFFSET);
@@ -9851,7 +9835,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
-   .set_tsc_khz = vmx_set_tsc_khz,
.read_tsc_offset = vmx_read_tsc_offset,
.write_tsc_offset = vmx_write_tsc_offset,
.adjust_tsc_offset = vmx_adjust_tsc_offset,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4e81751..868d22c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1231,7 +1231,43 @@ static u32 adjust_tsc_khz(u32 khz, s32 ppm)
return v;
 }
 
-static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
+static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
+{
+   u64 ratio;
+
+   /* Guest TSC same frequency as host TSC? */
+   if (!scale) {
+   vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+   return 0;
+   }
+
+   /* TSC scaling supported? */
+   if (!kvm_has_tsc_control) {
+   if (user_tsc_khz > tsc_khz) {
+   vcpu->arch.tsc_catchup = 1;
+   vcpu->arch.tsc_always_catchup = 1;
+