Re: [PATCH 5/5] KVM: arm64: Support the vcpu preemption check

2019-12-26 Thread yezengruan
Hi Steve,

On 2019/12/17 22:40, Steven Price wrote:
> On Tue, Dec 17, 2019 at 01:55:49PM +, yezengr...@huawei.com wrote:
>> From: Zengruan Ye 
>>
>> Support the vcpu_is_preempted() functionality under KVM/arm64. This
>> enhances lock performance on overcommitted hosts (more runnable vcpus
>> than physical cpus in the system), since busy-waiting on a preempted
>> vcpu hurts system performance far more than yielding early.
>>
>> unix benchmark result:
>>   host:  kernel 5.5.0-rc1, HiSilicon Kunpeng920, 8 cpus
>>   guest: kernel 5.5.0-rc1, 16 vcpus
>>
>>             test-case                   |    after-patch    |   before-patch
>> ----------------------------------------+-------------------+------------------
>>  Dhrystone 2 using register variables   | 334600751.0 lps   | 335319028.3 lps
>>  Double-Precision Whetstone             |     32856.1 MWIPS |     32849.6 MWIPS
>>  Execl Throughput                       |      3662.1 lps   |      2718.0 lps
>>  File Copy 1024 bufsize 2000 maxblocks  |    432906.4 KBps  |    158011.8 KBps
>>  File Copy 256 bufsize 500 maxblocks    |    116023.0 KBps  |     37664.0 KBps
>>  File Copy 4096 bufsize 8000 maxblocks  |   1432769.8 KBps  |    441108.8 KBps
>>  Pipe Throughput                        |   6405029.6 lps   |   6021457.6 lps
>>  Pipe-based Context Switching           |    185872.7 lps   |    184255.3 lps
>>  Process Creation                       |      4025.7 lps   |      3706.6 lps
>>  Shell Scripts (1 concurrent)           |      6745.6 lpm   |      6436.1 lpm
>>  Shell Scripts (8 concurrent)           |       998.7 lpm   |       931.1 lpm
>>  System Call Overhead                   |   3913363.1 lps   |   3883287.8 lps
>> ----------------------------------------+-------------------+------------------
>>  System Benchmarks Index Score          |      1835.1       |      1327.6
>>
>> Signed-off-by: Zengruan Ye 
>> ---
>>  arch/arm64/include/asm/paravirt.h |  3 +
>>  arch/arm64/kernel/paravirt.c  | 91 +++
>>  arch/arm64/kernel/setup.c |  2 +
>>  include/linux/cpuhotplug.h|  1 +
>>  4 files changed, 97 insertions(+)
>>
>> diff --git a/arch/arm64/include/asm/paravirt.h 
>> b/arch/arm64/include/asm/paravirt.h
>> index 7b1c81b544bb..a2cd0183bbef 100644
>> --- a/arch/arm64/include/asm/paravirt.h
>> +++ b/arch/arm64/include/asm/paravirt.h
>> @@ -29,6 +29,8 @@ static inline u64 paravirt_steal_clock(int cpu)
>>  
>>  int __init pv_time_init(void);
>>  
>> +int __init kvm_guest_init(void);
>> +
> 
> This is a *very* generic name - I suggest something like pv_lock_init()
> so it's clear what the function actually does.
> 
>>  __visible bool __native_vcpu_is_preempted(int cpu);
>>  
>>  static inline bool pv_vcpu_is_preempted(int cpu)
>> @@ -39,6 +41,7 @@ static inline bool pv_vcpu_is_preempted(int cpu)
>>  #else
>>  
>>  #define pv_time_init() do {} while (0)
>> +#define kvm_guest_init() do {} while (0)
>>  
>>  #endif // CONFIG_PARAVIRT
>>  
>> diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
>> index d8f1ba8c22ce..a86dead40473 100644
>> --- a/arch/arm64/kernel/paravirt.c
>> +++ b/arch/arm64/kernel/paravirt.c
>> @@ -22,6 +22,7 @@
>>  #include <asm/paravirt.h>
>>  #include <asm/pvclock-abi.h>
>>  #include <asm/smp_plat.h>
>> +#include <asm/pvlock-abi.h>
>>  
>>  struct static_key paravirt_steal_enabled;
>>  struct static_key paravirt_steal_rq_enabled;
>> @@ -158,3 +159,93 @@ int __init pv_time_init(void)
>>  
>>  return 0;
>>  }
>> +
>> +DEFINE_PER_CPU(struct pvlock_vcpu_state, pvlock_vcpu_region) __aligned(64);
>> +EXPORT_PER_CPU_SYMBOL(pvlock_vcpu_region);
>> +
>> +static int pvlock_vcpu_state_dying_cpu(unsigned int cpu)
>> +{
>> +struct pvlock_vcpu_state *reg;
>> +
>> +reg = this_cpu_ptr(&pvlock_vcpu_region);
>> +if (!reg)
>> +return -EFAULT;
>> +
>> +memset(reg, 0, sizeof(*reg));
> 
> I might be missing something obvious here - but I don't see the point of
> this. The hypervisor might immediately overwrite the structure again.
> Indeed you should consider a mechanism for the guest to "unregister" the
> region - otherwise you will face issues with the likes of kexec.
> 
> For pv_time the memory is allocated by the hypervisor, not the guest, to
> avoid lifetime issues across kexec.


Thanks for pointing it out! I'll update the memory allocation mechanism
of the PV lock structure to avoid the lifetime issues around kexec.
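
Roughly what I have in mind, modelled on the stolen-time (pv_time) flow:
the host allocates the per-vcpu structure and the guest only maps it.
This is just a sketch -- the ARM_SMCCC_HV_PV_LOCK_ST call and the switch
of pvlock_vcpu_region to a per-cpu pointer below are illustrative, not
part of this series:

  /* Sketch: assumes DEFINE_PER_CPU(struct pvlock_vcpu_state *, pvlock_vcpu_region) */
  static int init_pvlock_vcpu_state(unsigned int cpu)
  {
          struct pvlock_vcpu_state *kaddr;
          struct arm_smccc_res res;

          /* Hypothetical call: ask the host for the IPA of its per-vcpu structure */
          arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_LOCK_ST, &res);
          if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
                  return -EINVAL;

          /* Map the host-owned memory instead of registering a guest page */
          kaddr = memremap(res.a0, sizeof(*kaddr), MEMREMAP_WB);
          if (!kaddr)
                  return -ENOMEM;

          per_cpu(pvlock_vcpu_region, cpu) = kaddr;
          return 0;
  }

The dying-cpu callback would then just memunmap() the mapping (plus an
explicit "unregister" hypercall if needed for kexec), instead of
memset()ing memory that the host may immediately rewrite.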

> 
>> +
>> +return 0;
>> +}
>> +
>> +static int init_pvlock_vcpu_state(unsigned int cpu)
>> +{
>> +struct pvlock_vcpu_state *reg;
>> +struct arm_smccc_res res;
>> +
>> +reg = this_cpu_ptr(&pvlock_vcpu_region);
>> +if (!reg)
>> +return -EFAULT;
>> +
>> +/* Pass the memory address to host via hypercall */
>> +arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_LOCK_PREEMPTED,
>> + virt_to_phys(reg), &res);
>> +
>> +return 0;
>> +}
>> +
>> +static bool kvm_vcpu_is_preempted(int cpu)
>> +{
>> +struct pvlock_vcpu_state *reg = &per_cpu(pvlock_vcpu_region, cpu);

[PATCH 5/5] KVM: arm64: Support the vcpu preemption check

2019-12-17 Thread yezengruan
From: Zengruan Ye 

Support the vcpu_is_preempted() functionality under KVM/arm64. This
enhances lock performance on overcommitted hosts (more runnable vcpus
than physical cpus in the system), since busy-waiting on a preempted
vcpu hurts system performance far more than yielding early.
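
For reference, the generic locking code can poll this hook from its spin
loops. The snippet below is only an illustration (the lock fields and the
surrounding function are made up for the example, not taken from this
patch): a waiter stops burning cycles once it notices that the holder's
vcpu has been scheduled out by the host.

  /* Illustrative spin loop, not part of this series */
  while (!READ_ONCE(lock->released)) {
          /*
           * Stop spinning if we ought to reschedule, or if the
           * lock holder's vcpu has been preempted by the host.
           */
          if (need_resched() || vcpu_is_preempted(lock->owner_cpu))
                  return false;   /* fall back to the slow path */
          cpu_relax();
  }
  return true;

Without the paravirt hook, __native_vcpu_is_preempted() simply reports
false and such loops spin exactly as they do today.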

unix benchmark result:
  host:  kernel 5.5.0-rc1, HiSilicon Kunpeng920, 8 cpus
  guest: kernel 5.5.0-rc1, 16 vcpus

   test-case|after-patch|   before-patch
+---+--
 Dhrystone 2 using register variables   | 334600751.0 lps   | 335319028.3 lps
 Double-Precision Whetstone | 32856.1 MWIPS | 32849.6 MWIPS
 Execl Throughput   |  3662.1 lps   |  2718.0 lps
 File Copy 1024 bufsize 2000 maxblocks  |432906.4 KBps  |158011.8 KBps
 File Copy 256 bufsize 500 maxblocks|116023.0 KBps  | 37664.0 KBps
 File Copy 4096 bufsize 8000 maxblocks  |   1432769.8 KBps  |441108.8 KBps
 Pipe Throughput|   6405029.6 lps   |   6021457.6 lps
 Pipe-based Context Switching   |185872.7 lps   |184255.3 lps
 Process Creation   |  4025.7 lps   |  3706.6 lps
 Shell Scripts (1 concurrent)   |  6745.6 lpm   |  6436.1 lpm
 Shell Scripts (8 concurrent)   |   998.7 lpm   |   931.1 lpm
 System Call Overhead   |   3913363.1 lps   |   3883287.8 lps
+---+--
 System Benchmarks Index Score  |  1835.1   |  1327.6

Signed-off-by: Zengruan Ye 
---
 arch/arm64/include/asm/paravirt.h |  3 +
 arch/arm64/kernel/paravirt.c  | 91 +++
 arch/arm64/kernel/setup.c |  2 +
 include/linux/cpuhotplug.h|  1 +
 4 files changed, 97 insertions(+)

diff --git a/arch/arm64/include/asm/paravirt.h 
b/arch/arm64/include/asm/paravirt.h
index 7b1c81b544bb..a2cd0183bbef 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -29,6 +29,8 @@ static inline u64 paravirt_steal_clock(int cpu)
 
 int __init pv_time_init(void);
 
+int __init kvm_guest_init(void);
+
 __visible bool __native_vcpu_is_preempted(int cpu);
 
 static inline bool pv_vcpu_is_preempted(int cpu)
@@ -39,6 +41,7 @@ static inline bool pv_vcpu_is_preempted(int cpu)
 #else
 
 #define pv_time_init() do {} while (0)
+#define kvm_guest_init() do {} while (0)
 
 #endif // CONFIG_PARAVIRT
 
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index d8f1ba8c22ce..a86dead40473 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -22,6 +22,7 @@
 #include <asm/paravirt.h>
 #include <asm/pvclock-abi.h>
 #include <asm/smp_plat.h>
+#include <asm/pvlock-abi.h>
 
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
@@ -158,3 +159,93 @@ int __init pv_time_init(void)
 
return 0;
 }
+
+DEFINE_PER_CPU(struct pvlock_vcpu_state, pvlock_vcpu_region) __aligned(64);
+EXPORT_PER_CPU_SYMBOL(pvlock_vcpu_region);
+
+static int pvlock_vcpu_state_dying_cpu(unsigned int cpu)
+{
+   struct pvlock_vcpu_state *reg;
+
+   reg = this_cpu_ptr(&pvlock_vcpu_region);
+   if (!reg)
+   return -EFAULT;
+
+   memset(reg, 0, sizeof(*reg));
+
+   return 0;
+}
+
+static int init_pvlock_vcpu_state(unsigned int cpu)
+{
+   struct pvlock_vcpu_state *reg;
+   struct arm_smccc_res res;
+
+   reg = this_cpu_ptr(&pvlock_vcpu_region);
+   if (!reg)
+   return -EFAULT;
+
+   /* Pass the memory address to host via hypercall */
+   arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_LOCK_PREEMPTED,
+                        virt_to_phys(reg), &res);
+
+   return 0;
+}
+
+static bool kvm_vcpu_is_preempted(int cpu)
+{
+   struct pvlock_vcpu_state *reg = &per_cpu(pvlock_vcpu_region, cpu);
+
+   if (reg)
+   return !!(reg->preempted & 1);
+
+   return false;
+}
+
+static int kvm_arm_init_pvlock(void)
+{
+   int ret;
+
+   ret = cpuhp_setup_state(CPUHP_AP_ARM_KVM_PVLOCK_STARTING,
+   "hypervisor/arm/pvlock:starting",
+   init_pvlock_vcpu_state,
+   pvlock_vcpu_state_dying_cpu);
+   if (ret < 0)
+   return ret;
+
+   pv_ops.lock.vcpu_is_preempted = kvm_vcpu_is_preempted;
+
+   pr_info("using PV-lock preempted\n");
+
+   return 0;
+}
+
+static bool has_kvm_pvlock(void)
+{
+   struct arm_smccc_res res;
+
+   /* To detect the presence of PV lock support we require SMCCC 1.1+ */
+   if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
+   return false;
+
+   arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                        ARM_SMCCC_HV_PV_LOCK_FEATURES, &res);
+
+   if (res.a0 != SMCCC_RET_SUCCESS)
+   return false;
+
+   return true;
+}
+
+int __init kvm_guest_init(void)
+{
+