Re: [PATCH 3/5] KVM: arm64: Support pvlock preempted via shared structure

2019-12-26 Thread yezengruan
Hi Steve,

On 2019/12/17 22:33, Steven Price wrote:
> On Tue, Dec 17, 2019 at 01:55:47PM +0000, yezengr...@huawei.com wrote:
>> From: Zengruan Ye 
>>
>> Implement the service call for configuring a shared structure between a
>> vcpu and the hypervisor in which the hypervisor can tell whether the vcpu
>> is running or not.
>>
>> The preempted field is zero if 1) some old KVM does not support this field,
>> or 2) the vcpu is not preempted. Other values mean the vcpu has been preempted.
>>
>> Signed-off-by: Zengruan Ye 
>> ---
>>  arch/arm/include/asm/kvm_host.h   | 13 +
>>  arch/arm64/include/asm/kvm_host.h | 17 +
>>  arch/arm64/kvm/Makefile   |  1 +
>>  virt/kvm/arm/arm.c|  8 
>>  virt/kvm/arm/hypercalls.c |  4 
>>  virt/kvm/arm/pvlock.c | 21 +
>>  6 files changed, 64 insertions(+)
>>  create mode 100644 virt/kvm/arm/pvlock.c
>>
>> diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
>> index 556cd818eccf..098375f1c89e 100644
>> --- a/arch/arm/include/asm/kvm_host.h
>> +++ b/arch/arm/include/asm/kvm_host.h
>> @@ -356,6 +356,19 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
>>  return false;
>>  }
>>  
>> +static inline void kvm_arm_pvlock_preempted_init(struct kvm_vcpu_arch *vcpu_arch)
>> +{
>> +}
>> +
>> +static inline bool kvm_arm_is_pvlock_preempted_ready(struct kvm_vcpu_arch *vcpu_arch)
>> +{
>> +return false;
>> +}
>> +
>> +static inline void kvm_update_pvlock_preempted(struct kvm_vcpu *vcpu, u64 preempted)
>> +{
>> +}
>> +
>>  void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
>>  
>>  struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
>> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
>> index c61260cf63c5..d9b2a21a87ac 100644
>> --- a/arch/arm64/include/asm/kvm_host.h
>> +++ b/arch/arm64/include/asm/kvm_host.h
>> @@ -354,6 +354,11 @@ struct kvm_vcpu_arch {
>>  u64 last_steal;
>>  gpa_t base;
>>  } steal;
>> +
>> +/* Guest PV lock state */
>> +struct {
>> +gpa_t base;
>> +} pv;
>>  };
>>  
>>  /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
>> @@ -515,6 +520,18 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
>>  return (vcpu_arch->steal.base != GPA_INVALID);
>>  }
>>  
>> +static inline void kvm_arm_pvlock_preempted_init(struct kvm_vcpu_arch *vcpu_arch)
>> +{
>> +vcpu_arch->pv.base = GPA_INVALID;
>> +}
>> +
>> +static inline bool kvm_arm_is_pvlock_preempted_ready(struct kvm_vcpu_arch *vcpu_arch)
>> +{
>> +return (vcpu_arch->pv.base != GPA_INVALID);
>> +}
>> +
>> +void kvm_update_pvlock_preempted(struct kvm_vcpu *vcpu, u64 preempted);
>> +
>>  void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
>>  
>>  struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
>> diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
>> index 5ffbdc39e780..e4591f56d5f1 100644
>> --- a/arch/arm64/kvm/Makefile
>> +++ b/arch/arm64/kvm/Makefile
>> @@ -15,6 +15,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.
>>  kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
>>  kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hypercalls.o
>>  kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/pvtime.o
>> +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/pvlock.o
>>  
>>  kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
>>  kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
>> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
>> index 12e0280291ce..c562f62fdd45 100644
>> --- a/virt/kvm/arm/arm.c
>> +++ b/virt/kvm/arm/arm.c
>> @@ -383,6 +383,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
>>  
>>  kvm_arm_pvtime_vcpu_init(&vcpu->arch);
>>  
>> +kvm_arm_pvlock_preempted_init(&vcpu->arch);
>> +
>>  return kvm_vgic_vcpu_init(vcpu);
>>  }
>>  
>> @@ -421,6 +423,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>  vcpu_set_wfx_traps(vcpu);
>>  
>>  vcpu_ptrauth_setup_lazy(vcpu);
>> +
>> +if (kvm_arm_is_pvlock_preempted_ready(&vcpu->arch))
>> +kvm_update_pvlock_preempted(vcpu, 0);
>>  }
>>  
>>  void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
>> @@ -434,6 +439,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
>>  vcpu->cpu = -1;
>>  
>>  kvm_arm_set_running_vcpu(NULL);
>> +
>> +if (kvm_arm_is_pvlock_preempted_ready(&vcpu->arch))
>> +kvm_update_pvlock_preempted(vcpu, 1);
>>  }
>>  
>>  static void vcpu_power_off(struct kvm_vcpu *vcpu)
>> diff --git a/virt/kvm/arm/hypercalls.c b/virt/kvm/arm/hypercalls.c
>> index ff13871fd85a..5964982ccd05 100644
>> --- a/virt/kvm/arm/hypercalls.c
>> +++ b/virt/kvm/arm/hypercalls.c
>> @@ -65,6 +65,10 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
>>  if (g

Re: [PATCH 5/5] KVM: arm64: Support the vcpu preemption check

2019-12-26 Thread yezengruan
Hi Steve,

On 2019/12/17 22:40, Steven Price wrote:
> On Tue, Dec 17, 2019 at 01:55:49PM +0000, yezengr...@huawei.com wrote:
>> From: Zengruan Ye 
>>
>> Support the vcpu_is_preempted() functionality under KVM/arm64. This will
>> enhance lock performance on overcommitted hosts (more runnable vcpus
>> than physical cpus in the system) as doing busy waits for preempted
>> vcpus will hurt system performance far worse than early yielding.
>>
>> unix benchmark result:
>>   host:  kernel 5.5.0-rc1, HiSilicon Kunpeng920, 8 cpus
>>   guest: kernel 5.5.0-rc1, 16 vcpus
>>
>>     test-case                            |    after-patch    |   before-patch
>> +---------------------------------------+-------------------+------------------
>>  Dhrystone 2 using register variables   | 334600751.0 lps   | 335319028.3 lps
>>  Double-Precision Whetstone             |     32856.1 MWIPS |     32849.6 MWIPS
>>  Execl Throughput                       |      3662.1 lps   |      2718.0 lps
>>  File Copy 1024 bufsize 2000 maxblocks  |    432906.4 KBps  |    158011.8 KBps
>>  File Copy 256 bufsize 500 maxblocks    |    116023.0 KBps  |     37664.0 KBps
>>  File Copy 4096 bufsize 8000 maxblocks  |   1432769.8 KBps  |    441108.8 KBps
>>  Pipe Throughput                        |   6405029.6 lps   |   6021457.6 lps
>>  Pipe-based Context Switching           |    185872.7 lps   |    184255.3 lps
>>  Process Creation                       |      4025.7 lps   |      3706.6 lps
>>  Shell Scripts (1 concurrent)           |      6745.6 lpm   |      6436.1 lpm
>>  Shell Scripts (8 concurrent)           |       998.7 lpm   |       931.1 lpm
>>  System Call Overhead                   |   3913363.1 lps   |   3883287.8 lps
>> +---------------------------------------+-------------------+------------------
>>  System Benchmarks Index Score          |      1835.1       |      1327.6
>>
>> Signed-off-by: Zengruan Ye 
>> ---
>>  arch/arm64/include/asm/paravirt.h |  3 +
>>  arch/arm64/kernel/paravirt.c  | 91 +++
>>  arch/arm64/kernel/setup.c |  2 +
>>  include/linux/cpuhotplug.h|  1 +
>>  4 files changed, 97 insertions(+)
>>
>> diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
>> index 7b1c81b544bb..a2cd0183bbef 100644
>> --- a/arch/arm64/include/asm/paravirt.h
>> +++ b/arch/arm64/include/asm/paravirt.h
>> @@ -29,6 +29,8 @@ static inline u64 paravirt_steal_clock(int cpu)
>>  
>>  int __init pv_time_init(void);
>>  
>> +int __init kvm_guest_init(void);
>> +
> 
> This is a *very* generic name - I suggest something like pv_lock_init()
> so it's clear what the function actually does.
> 
>>  __visible bool __native_vcpu_is_preempted(int cpu);
>>  
>>  static inline bool pv_vcpu_is_preempted(int cpu)
>> @@ -39,6 +41,7 @@ static inline bool pv_vcpu_is_preempted(int cpu)
>>  #else
>>  
>>  #define pv_time_init() do {} while (0)
>> +#define kvm_guest_init() do {} while (0)
>>  
>>  #endif // CONFIG_PARAVIRT
>>  
>> diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
>> index d8f1ba8c22ce..a86dead40473 100644
>> --- a/arch/arm64/kernel/paravirt.c
>> +++ b/arch/arm64/kernel/paravirt.c
>> @@ -22,6 +22,7 @@
>>  #include 
>>  #include 
>>  #include 
>> +#include <asm/pvlock-abi.h>
>>  
>>  struct static_key paravirt_steal_enabled;
>>  struct static_key paravirt_steal_rq_enabled;
>> @@ -158,3 +159,93 @@ int __init pv_time_init(void)
>>  
>>  return 0;
>>  }
>> +
>> +DEFINE_PER_CPU(struct pvlock_vcpu_state, pvlock_vcpu_region) __aligned(64);
>> +EXPORT_PER_CPU_SYMBOL(pvlock_vcpu_region);
>> +
>> +static int pvlock_vcpu_state_dying_cpu(unsigned int cpu)
>> +{
>> +struct pvlock_vcpu_state *reg;
>> +
>> +reg = this_cpu_ptr(&pvlock_vcpu_region);
>> +if (!reg)
>> +return -EFAULT;
>> +
>> +memset(reg, 0, sizeof(*reg));
> 
> I might be missing something obvious here - but I don't see the point of
> this. The hypervisor might immediately overwrite the structure again.
> Indeed you should consider a mechanism for the guest to "unregister" the
> region - otherwise you will face issues with the likes of kexec.
> 
> For pv_time the memory is allocated by the hypervisor not the guest to
> avoid lifetime issues about kexec.


Thanks for pointing it out to me! I'll update the memory allocation
mechanism of the PV lock structure to avoid lifetime issues about
kexec.

> 
>> +
>> +return 0;
>> +}
>> +
>> +static int init_pvlock_vcpu_state(unsigned int cpu)
>> +{
>> +struct pvlock_vcpu_state *reg;
>> +struct arm_smccc_res res;
>> +
>> +reg = this_cpu_ptr(&pvlock_vcpu_region);
>> +if (!reg)
>> +return -EFAULT;
>> +
>> +/* Pass the memory address to host via hypercall */
>> +arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_LOCK_PREEMPTED,
>> + virt_to_phys(reg), &res);
>> +
>> +return 0;
>> +}
>> +
>> +static bool kvm_vcpu_is_preempted(int cpu)
>> +{
>> +struct pvlock_vcpu_state *reg = &per_cpu(pvlock

[PATCH v2 0/6] KVM: arm64: VCPU preempted check support

2019-12-26 Thread Zengruan Ye
This patch set aims to support the vcpu_is_preempted() functionality
under KVM/arm64, which allows the guest to check whether its VCPU is
currently running or not. This will enhance lock performance on
overcommitted hosts (more runnable VCPUs than physical CPUs in the
system) as doing busy waits for preempted VCPUs will hurt system
performance far worse than early yielding.
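
To make the early-yield idea concrete, here is a minimal user-space
sketch (the owner-CPU lock and the stubbed vcpu_is_preempted() are
stand-ins, not this series' implementation):

#define _GNU_SOURCE
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Stand-in for the kernel hook added by this series; the real one
 * reads the per-VCPU "preempted" field shared with the hypervisor. */
static bool vcpu_is_preempted(int cpu)
{
        (void)cpu;
        return false;           /* bare metal: a CPU is never preempted */
}

/* Spin on a simple owner-CPU lock (-1 means free), but yield instead
 * of spinning while the current holder's VCPU is not running. */
static void lock_acquire(atomic_int *owner)
{
        int free = -1;

        while (!atomic_compare_exchange_weak(owner, &free, sched_getcpu())) {
                if (vcpu_is_preempted(free))    /* "free" now holds the owner */
                        sched_yield();          /* early yield, don't burn the pCPU */
                free = -1;                      /* reset expected value and retry */
        }
}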

We have observed some performance improvements in unix benchmark tests.

unix benchmark result:
  host:  kernel 5.5.0-rc1, HiSilicon Kunpeng920, 8 CPUs
  guest: kernel 5.5.0-rc1, 16 VCPUs

    test-case                            |    after-patch    |   before-patch
+---------------------------------------+-------------------+------------------
 Dhrystone 2 using register variables   | 334600751.0 lps   | 335319028.3 lps
 Double-Precision Whetstone | 32856.1 MWIPS | 32849.6 MWIPS
 Execl Throughput   |  3662.1 lps   |  2718.0 lps
 File Copy 1024 bufsize 2000 maxblocks  |432906.4 KBps  |158011.8 KBps
 File Copy 256 bufsize 500 maxblocks|116023.0 KBps  | 37664.0 KBps
 File Copy 4096 bufsize 8000 maxblocks  |   1432769.8 KBps  |441108.8 KBps
 Pipe Throughput|   6405029.6 lps   |   6021457.6 lps
 Pipe-based Context Switching   |185872.7 lps   |184255.3 lps
 Process Creation   |  4025.7 lps   |  3706.6 lps
 Shell Scripts (1 concurrent)   |  6745.6 lpm   |  6436.1 lpm
 Shell Scripts (8 concurrent)   |   998.7 lpm   |   931.1 lpm
 System Call Overhead   |   3913363.1 lps   |   3883287.8 lps
+---------------------------------------+-------------------+------------------
 System Benchmarks Index Score  |  1835.1   |  1327.6

Changes from v1:
https://lore.kernel.org/lkml/20191217135549.3240-1-yezengr...@huawei.com/
 * Guest kernel no longer allocates the PV lock structure, instead it
   is allocated by user space to avoid lifetime issues about kexec.
 * Provide VCPU attributes for PV lock.
 * Update SMC number of PV lock features.
 * Report some basic validation when PV lock init.
 * Document preempted field.
 * Bunch of typo fixes.

Zengruan Ye (6):
  KVM: arm64: Document PV-lock interface
  KVM: arm64: Add SMCCC paravirtualised lock calls
  KVM: arm64: Support pvlock preempted via shared structure
  KVM: arm64: Provide VCPU attributes for PV lock
  KVM: arm64: Add interface to support VCPU preempted check
  KVM: arm64: Support the VCPU preemption check

 Documentation/virt/kvm/arm/pvlock.rst   |  63 
 Documentation/virt/kvm/devices/vcpu.txt |  14 +++
 arch/arm/include/asm/kvm_host.h |  18 
 arch/arm64/include/asm/kvm_host.h   |  28 ++
 arch/arm64/include/asm/paravirt.h   |  15 +++
 arch/arm64/include/asm/pvlock-abi.h |  16 
 arch/arm64/include/asm/spinlock.h   |   7 ++
 arch/arm64/include/uapi/asm/kvm.h   |   2 +
 arch/arm64/kernel/Makefile  |   2 +-
 arch/arm64/kernel/paravirt-spinlocks.c  |  13 +++
 arch/arm64/kernel/paravirt.c| 121 +++-
 arch/arm64/kernel/setup.c   |   2 +
 arch/arm64/kvm/Makefile |   1 +
 arch/arm64/kvm/guest.c  |   9 ++
 include/linux/arm-smccc.h   |  14 +++
 include/linux/cpuhotplug.h  |   1 +
 include/uapi/linux/kvm.h|   2 +
 virt/kvm/arm/arm.c  |   8 ++
 virt/kvm/arm/hypercalls.c   |   8 ++
 virt/kvm/arm/pvlock.c   | 103 
 20 files changed, 445 insertions(+), 2 deletions(-)
 create mode 100644 Documentation/virt/kvm/arm/pvlock.rst
 create mode 100644 arch/arm64/include/asm/pvlock-abi.h
 create mode 100644 arch/arm64/kernel/paravirt-spinlocks.c
 create mode 100644 virt/kvm/arm/pvlock.c

-- 
2.19.1




[PATCH v2 6/6] KVM: arm64: Support the VCPU preemption check

2019-12-26 Thread Zengruan Ye
Support the vcpu_is_preempted() functionality under KVM/arm64. This will
enhance lock performance on overcommitted hosts (more runnable VCPUs
than physical CPUs in the system) as doing busy waits for preempted
VCPUs will hurt system performance far worse than early yielding.

unix benchmark result:
  host:  kernel 5.5.0-rc1, HiSilicon Kunpeng920, 8 CPUs
  guest: kernel 5.5.0-rc1, 16 VCPUs

    test-case                            |    after-patch    |   before-patch
+---------------------------------------+-------------------+------------------
 Dhrystone 2 using register variables   | 334600751.0 lps   | 335319028.3 lps
 Double-Precision Whetstone | 32856.1 MWIPS | 32849.6 MWIPS
 Execl Throughput   |  3662.1 lps   |  2718.0 lps
 File Copy 1024 bufsize 2000 maxblocks  |432906.4 KBps  |158011.8 KBps
 File Copy 256 bufsize 500 maxblocks|116023.0 KBps  | 37664.0 KBps
 File Copy 4096 bufsize 8000 maxblocks  |   1432769.8 KBps  |441108.8 KBps
 Pipe Throughput|   6405029.6 lps   |   6021457.6 lps
 Pipe-based Context Switching   |185872.7 lps   |184255.3 lps
 Process Creation   |  4025.7 lps   |  3706.6 lps
 Shell Scripts (1 concurrent)   |  6745.6 lpm   |  6436.1 lpm
 Shell Scripts (8 concurrent)   |   998.7 lpm   |   931.1 lpm
 System Call Overhead   |   3913363.1 lps   |   3883287.8 lps
+---------------------------------------+-------------------+------------------
 System Benchmarks Index Score  |  1835.1   |  1327.6

Signed-off-by: Zengruan Ye 
---
 arch/arm64/include/asm/paravirt.h |   3 +
 arch/arm64/kernel/paravirt.c  | 117 ++
 arch/arm64/kernel/setup.c |   2 +
 include/linux/cpuhotplug.h|   1 +
 4 files changed, 123 insertions(+)

diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index 7b1c81b544bb..ca3a2c7881f3 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -29,6 +29,8 @@ static inline u64 paravirt_steal_clock(int cpu)
 
 int __init pv_time_init(void);
 
+int __init pv_lock_init(void);
+
 __visible bool __native_vcpu_is_preempted(int cpu);
 
 static inline bool pv_vcpu_is_preempted(int cpu)
@@ -39,6 +41,7 @@ static inline bool pv_vcpu_is_preempted(int cpu)
 #else
 
 #define pv_time_init() do {} while (0)
+#define pv_lock_init() do {} while (0)
 
 #endif // CONFIG_PARAVIRT
 
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index d8f1ba8c22ce..bd2ad6a17a26 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -22,6 +22,7 @@
 #include 
 #include 
 #include 
+#include <asm/pvlock-abi.h>
 
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
@@ -35,6 +36,10 @@ struct pv_time_stolen_time_region {
struct pvclock_vcpu_stolen_time *kaddr;
 };
 
+struct pv_lock_state_region {
+   struct pvlock_vcpu_state *kaddr;
+};
+
 static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
 
 static bool steal_acc = true;
@@ -158,3 +163,115 @@ int __init pv_time_init(void)
 
return 0;
 }
+
+static DEFINE_PER_CPU(struct pv_lock_state_region, lock_state_region);
+
+static bool kvm_vcpu_is_preempted(int cpu)
+{
+   struct pv_lock_state_region *reg;
+   u64 preempted;
+
+   reg = per_cpu_ptr(&lock_state_region, cpu);
+   if (!reg->kaddr) {
+   pr_warn_once("PV lock enabled but not configured for cpu %d\n",
+cpu);
+   return false;
+   }
+
+   preempted = le64_to_cpu(READ_ONCE(reg->kaddr->preempted));
+
+   return !!(preempted & 1);
+}
+
+static int pvlock_vcpu_state_dying_cpu(unsigned int cpu)
+{
+   struct pv_lock_state_region *reg;
+
+   reg = this_cpu_ptr(&lock_state_region);
+   if (!reg->kaddr)
+   return 0;
+
+   memunmap(reg->kaddr);
+   memset(reg, 0, sizeof(*reg));
+
+   return 0;
+}
+
+static int init_pvlock_vcpu_state(unsigned int cpu)
+{
+   struct pv_lock_state_region *reg;
+   struct arm_smccc_res res;
+
+   reg = this_cpu_ptr(&lock_state_region);
+
+   arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_LOCK_PREEMPTED, &res);
+
+   if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+   pr_warn("Failed to init PV lock data structure\n");
+   return -EINVAL;
+   }
+
+   reg->kaddr = memremap(res.a0,
+ sizeof(struct pvlock_vcpu_state),
+ MEMREMAP_WB);
+
+   if (!reg->kaddr) {
+   pr_warn("Failed to map PV lock data structure\n");
+   return -ENOMEM;
+   }
+
+   return 0;
+}
+
+static int kvm_arm_init_pvlock(void)
+{
+   int ret;
+
+   ret = cpuhp_setup_state(CPUHP_AP_ARM_KVM_PVLOCK_STARTING,
+   "hypervisor/ar

[PATCH v2 3/6] KVM: arm64: Support pvlock preempted via shared structure

2019-12-26 Thread Zengruan Ye
Implement the service call for configuring a shared structure between a
VCPU and the hypervisor in which the hypervisor can tell whether the VCPU
is running or not.

The preempted field is zero if 1) some old KVM does not support this field,
or 2) the VCPU is not preempted. Other values mean the VCPU has been preempted.

Signed-off-by: Zengruan Ye 
---
 arch/arm/include/asm/kvm_host.h   | 18 
 arch/arm64/include/asm/kvm_host.h | 19 +
 arch/arm64/kvm/Makefile   |  1 +
 virt/kvm/arm/arm.c|  8 ++
 virt/kvm/arm/hypercalls.c |  8 ++
 virt/kvm/arm/pvlock.c | 46 +++
 6 files changed, 100 insertions(+)
 create mode 100644 virt/kvm/arm/pvlock.c

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 556cd818eccf..dfeaf9204875 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -356,6 +356,24 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
return false;
 }
 
+static inline void kvm_arm_pvlock_preempted_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+}
+
+static inline bool kvm_arm_is_pvlock_preempted_ready(struct kvm_vcpu_arch *vcpu_arch)
+{
+   return false;
+}
+
+static inline gpa_t kvm_init_pvlock(struct kvm_vcpu *vcpu)
+{
+   return GPA_INVALID;
+}
+
+static inline void kvm_update_pvlock_preempted(struct kvm_vcpu *vcpu, u64 preempted)
+{
+}
+
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c61260cf63c5..2818a2330f92 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -354,6 +354,12 @@ struct kvm_vcpu_arch {
u64 last_steal;
gpa_t base;
} steal;
+
+   /* Guest PV lock state */
+   struct {
+   u64 preempted;
+   gpa_t base;
+   } pv;
 };
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -515,6 +521,19 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
return (vcpu_arch->steal.base != GPA_INVALID);
 }
 
+static inline void kvm_arm_pvlock_preempted_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+   vcpu_arch->pv.base = GPA_INVALID;
+}
+
+static inline bool kvm_arm_is_pvlock_preempted_ready(struct kvm_vcpu_arch *vcpu_arch)
+{
+   return (vcpu_arch->pv.base != GPA_INVALID);
+}
+
+gpa_t kvm_init_pvlock(struct kvm_vcpu *vcpu);
+void kvm_update_pvlock_preempted(struct kvm_vcpu *vcpu, u64 preempted);
+
 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 5ffbdc39e780..e4591f56d5f1 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -15,6 +15,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hypercalls.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/pvtime.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/pvlock.o
 
 kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 8de4daf25097..36d57e77d3c4 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -383,6 +383,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
kvm_arm_pvtime_vcpu_init(&vcpu->arch);
 
+   kvm_arm_pvlock_preempted_init(&vcpu->arch);
+
return kvm_vgic_vcpu_init(vcpu);
 }
 
@@ -421,6 +423,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu_set_wfx_traps(vcpu);
 
vcpu_ptrauth_setup_lazy(vcpu);
+
+   if (kvm_arm_is_pvlock_preempted_ready(&vcpu->arch))
+   kvm_update_pvlock_preempted(vcpu, 0);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -434,6 +439,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
 
kvm_arm_set_running_vcpu(NULL);
+
+   if (kvm_arm_is_pvlock_preempted_ready(&vcpu->arch))
+   kvm_update_pvlock_preempted(vcpu, 1);
 }
 
 static void vcpu_power_off(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/hypercalls.c b/virt/kvm/arm/hypercalls.c
index 550dfa3e53cd..1c6a11f21bb4 100644
--- a/virt/kvm/arm/hypercalls.c
+++ b/virt/kvm/arm/hypercalls.c
@@ -52,6 +52,9 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
case ARM_SMCCC_HV_PV_TIME_FEATURES:
val = SMCCC_RET_SUCCESS;
break;
+   case ARM_SMCCC_HV_PV_LOCK_FEATURES:
+   val = SMCCC_RET_SUCCESS;
+   break;
}
break;
case ARM_SMCCC_HV_P
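
The host's side of the protocol is then just flipping that field around
scheduling: kvm_arch_vcpu_load() writes 0 (about to run) and
kvm_arch_vcpu_put() writes 1 (scheduled out), as the arm.c hunks above
show. Conceptually the update reduces to the following hedged sketch
(pvlock_update_sketch is an illustrative name; the series' real
kvm_update_pvlock_preempted() does this under the kvm->srcu read lock):

#include <linux/kvm_host.h>
#include <asm/pvlock-abi.h>

static void pvlock_update_sketch(struct kvm_vcpu *vcpu, u64 preempted)
{
        u64 offset = offsetof(struct pvlock_vcpu_state, preempted);
        gpa_t base = vcpu->arch.pv.base;

        /* Write the little-endian value into the guest's shared page */
        kvm_put_guest(vcpu->kvm, base + offset, cpu_to_le64(preempted), u64);
}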

[PATCH v2 1/6] KVM: arm64: Document PV-lock interface

2019-12-26 Thread Zengruan Ye
Introduce a paravirtualization interface for KVM/arm64 that lets the guest
discover whether its VCPU is currently running or not.

The PV lock structure of the guest is allocated by user space.

A hypercall interface is provided for the guest to interrogate the
hypervisor's support for this interface and the location of the shared
memory structures.

Signed-off-by: Zengruan Ye 
---
 Documentation/virt/kvm/arm/pvlock.rst   | 63 +
 Documentation/virt/kvm/devices/vcpu.txt | 14 ++
 2 files changed, 77 insertions(+)
 create mode 100644 Documentation/virt/kvm/arm/pvlock.rst

diff --git a/Documentation/virt/kvm/arm/pvlock.rst b/Documentation/virt/kvm/arm/pvlock.rst
new file mode 100644
index ..58b3b8ee7537
--- /dev/null
+++ b/Documentation/virt/kvm/arm/pvlock.rst
@@ -0,0 +1,63 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Paravirtualized lock support for arm64
+==
+
+KVM/arm64 provides some hypervisor service calls to support a paravirtualized
+guest discovering whether its VCPU is currently running or not.
+
+Two new SMCCC compatible hypercalls are defined:
+
+* PV_LOCK_FEATURES:   0xC6000020
+* PV_LOCK_PREEMPTED:  0xC6000021
+
+The existence of the PV_LOCK hypercall should be probed using the SMCCC 1.1
+ARCH_FEATURES mechanism before calling it.
+
+PV_LOCK_FEATURES
+============= ========    ==========
+Function ID:  (uint32)    0xC6000020
+PV_call_id:   (uint32)    The function to query for support.
+Return value: (int64)     NOT_SUPPORTED (-1) or SUCCESS (0) if the relevant
+                          PV-lock feature is supported by the hypervisor.
+============= ========    ==========
+
+PV_LOCK_PREEMPTED
+============= ========    ==========
+Function ID:  (uint32)    0xC6000021
+Return value: (int64)     NOT_SUPPORTED (-1) or SUCCESS (0) if the IPA of
+                          this VCPU's pv data structure is configured by
+                          the hypervisor.
+============= ========    ==========
+
+The IPA returned by PV_LOCK_PREEMPTED should be mapped by the guest as normal
+memory with inner and outer write back caching attributes, in the inner
+shareable domain.
+
+PV_LOCK_PREEMPTED returns the structure for the calling VCPU.
+
+PV lock state
+-
+
+The structure pointed to by the PV_LOCK_PREEMPTED hypercall is as follows:
+
++-----------+-------------+-------------+---------------------------------+
+| Field     | Byte Length | Byte Offset | Description                     |
++===========+=============+=============+=================================+
+| preempted |      8      |      0      | Indicate the VCPU who owns this |
+|           |             |             | struct is running or not.       |
+|           |             |             | Non-zero values mean the VCPU   |
+|           |             |             | has been preempted. Zero means  |
+|           |             |             | the VCPU is not preempted.      |
++-----------+-------------+-------------+---------------------------------+
+
+The preempted field will be updated to 0 by the hypervisor prior to scheduling
+a VCPU. When the VCPU is scheduled out, the preempted field will be updated
+to 1 by the hypervisor.
+
+The structure will be present within a reserved region of the normal memory
+given to the guest. The guest should not attempt to write into this memory.
+There is a structure per VCPU of the guest.
+
+For the user space interface see Documentation/virt/kvm/devices/vcpu.txt
+section "4. GROUP: KVM_ARM_VCPU_PVLOCK_CTRL".
diff --git a/Documentation/virt/kvm/devices/vcpu.txt b/Documentation/virt/kvm/devices/vcpu.txt
index 6f3bd64a05b0..c10a5945075b 100644
--- a/Documentation/virt/kvm/devices/vcpu.txt
+++ b/Documentation/virt/kvm/devices/vcpu.txt
@@ -74,3 +74,17 @@ Specifies the base address of the stolen time structure for this VCPU. The
 base address must be 64 byte aligned and exist within a valid guest memory
 region. See Documentation/virt/kvm/arm/pvtime.txt for more information
 including the layout of the stolen time structure.
+
+4. GROUP: KVM_ARM_VCPU_PVLOCK_CTRL
+Architectures: ARM64
+
+4.1 ATTRIBUTE: KVM_ARM_VCPU_PVLOCK_IPA
+Parameters: 64-bit base address
+Returns: -ENXIO:  PV lock not implemented
+ -EEXIST: Base address already set for this VCPU
+ -EINVAL: Base address not 64 byte aligned
+
+Specifies the base address of the PV lock structure for this VCPU. The
+base address must be 64 byte aligned and exist within a valid guest memory
+region. See Documentation/virt/kvm/arm/pvlock.rst for more information
+including the layout of the pv lock structure.
-- 
2.19.1
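
As an illustration of the probing rule documented above: a guest would
first use PV_LOCK_FEATURES to ask whether PV_LOCK_PREEMPTED exists,
roughly as in this hypothetical sketch (following the pvtime pattern;
not part of the patch):

#include <linux/arm-smccc.h>

static bool has_pv_lock_preempted(void)
{
        struct arm_smccc_res res;

        /* PV_LOCK_FEATURES takes the function ID to be queried */
        arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_LOCK_FEATURES,
                             ARM_SMCCC_HV_PV_LOCK_PREEMPTED, &res);

        return res.a0 != SMCCC_RET_NOT_SUPPORTED;
}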




[PATCH v2 2/6] KVM: arm64: Add SMCCC paravirtualised lock calls

2019-12-26 Thread Zengruan Ye
Add two new SMCCC compatible hypercalls for PV lock features:
  PV_LOCK_FEATURES:   0xC6000020
  PV_LOCK_PREEMPTED:  0xC6000021

Also add the header file which defines the ABI for the paravirtualized
lock features we're about to add.

Signed-off-by: Zengruan Ye 
---
 arch/arm64/include/asm/pvlock-abi.h | 16 
 include/linux/arm-smccc.h   | 14 ++
 2 files changed, 30 insertions(+)
 create mode 100644 arch/arm64/include/asm/pvlock-abi.h

diff --git a/arch/arm64/include/asm/pvlock-abi.h b/arch/arm64/include/asm/pvlock-abi.h
new file mode 100644
index ..06e0c3d7710a
--- /dev/null
+++ b/arch/arm64/include/asm/pvlock-abi.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2019 Huawei Technologies Co., Ltd
+ * Author: Zengruan Ye 
+ */
+
+#ifndef __ASM_PVLOCK_ABI_H
+#define __ASM_PVLOCK_ABI_H
+
+struct pvlock_vcpu_state {
+   __le64 preempted;
+   /* Structure must be 64 byte aligned, pad to that size */
+   u8 padding[56];
+} __packed;
+
+#endif
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 59494df0f55b..3a5c6b35492f 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -46,6 +46,7 @@
#define ARM_SMCCC_OWNER_OEM   3
 #define ARM_SMCCC_OWNER_STANDARD   4
 #define ARM_SMCCC_OWNER_STANDARD_HYP   5
+#define ARM_SMCCC_OWNER_VENDOR_HYP 6
#define ARM_SMCCC_OWNER_TRUSTED_APP   48
#define ARM_SMCCC_OWNER_TRUSTED_APP_END   49
 #define ARM_SMCCC_OWNER_TRUSTED_OS 50
@@ -377,5 +378,18 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
   ARM_SMCCC_OWNER_STANDARD_HYP,\
   0x21)
 
+/* Paravirtualised lock calls */
+#define ARM_SMCCC_HV_PV_LOCK_FEATURES  \
+   ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+  ARM_SMCCC_SMC_64,\
+  ARM_SMCCC_OWNER_VENDOR_HYP,  \
+  0x20)
+
+#define ARM_SMCCC_HV_PV_LOCK_PREEMPTED \
+   ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+  ARM_SMCCC_SMC_64,\
+  ARM_SMCCC_OWNER_VENDOR_HYP,  \
+  0x21)
+
 #endif /*__ASSEMBLY__*/
 #endif /*__LINUX_ARM_SMCCC_H*/
-- 
2.19.1
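
For reference, the two function IDs decompose as follows under the
SMCCC 1.1 encoding (a worked example; the static_asserts are
illustrative, not part of the patch):

#include <linux/arm-smccc.h>
#include <linux/build_bug.h>

/*
 * ARM_SMCCC_CALL_VAL() ORs together:
 *   ARM_SMCCC_FAST_CALL          bit 31    0x80000000
 *   ARM_SMCCC_SMC_64             bit 30    0x40000000
 *   ARM_SMCCC_OWNER_VENDOR_HYP   6 << 24   0x06000000
 *   function number                        0x20 / 0x21
 */
static_assert(ARM_SMCCC_HV_PV_LOCK_FEATURES  == 0xC6000020);
static_assert(ARM_SMCCC_HV_PV_LOCK_PREEMPTED == 0xC6000021);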




[PATCH v2 4/6] KVM: arm64: Provide VCPU attributes for PV lock

2019-12-26 Thread Zengruan Ye
Allow user space to inform the KVM host where in the physical memory
map the paravirtualized lock structures should be located.

User space can set an attribute on the VCPU providing the IPA base
address of the PV lock structure for that VCPU. This must be
repeated for every VCPU in the VM.

The address is given in terms of the physical address visible to
the guest and must be 64 byte aligned. The guest will discover the
address via a hypercall.

Signed-off-by: Zengruan Ye 
---
 arch/arm64/include/asm/kvm_host.h |  9 +
 arch/arm64/include/uapi/asm/kvm.h |  2 ++
 arch/arm64/kvm/guest.c|  9 +
 include/uapi/linux/kvm.h  |  2 ++
 virt/kvm/arm/pvlock.c | 57 +++
 5 files changed, 79 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2818a2330f92..63b6e204676b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -521,6 +521,15 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
return (vcpu_arch->steal.base != GPA_INVALID);
 }
 
+int kvm_arm_pvlock_set_attr(struct kvm_vcpu *vcpu,
+   struct kvm_device_attr *attr);
+
+int kvm_arm_pvlock_get_attr(struct kvm_vcpu *vcpu,
+   struct kvm_device_attr *attr);
+
+int kvm_arm_pvlock_has_attr(struct kvm_vcpu *vcpu,
+   struct kvm_device_attr *attr);
+
static inline void kvm_arm_pvlock_preempted_init(struct kvm_vcpu_arch *vcpu_arch)
 {
vcpu_arch->pv.base = GPA_INVALID;
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 820e5751ada7..137d966b57c7 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -326,6 +326,8 @@ struct kvm_vcpu_events {
#define   KVM_ARM_VCPU_TIMER_IRQ_PTIMER   1
 #define KVM_ARM_VCPU_PVTIME_CTRL   2
 #define   KVM_ARM_VCPU_PVTIME_IPA  0
+#define KVM_ARM_VCPU_PVLOCK_CTRL   3
+#define   KVM_ARM_VCPU_PVLOCK_IPA  0
 
 /* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_VCPU2_SHIFT   28
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 2fff06114a8f..6a5c12f3b08b 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -875,6 +875,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_PVTIME_CTRL:
ret = kvm_arm_pvtime_set_attr(vcpu, attr);
break;
+   case KVM_ARM_VCPU_PVLOCK_CTRL:
+   ret = kvm_arm_pvlock_set_attr(vcpu, attr);
+   break;
default:
ret = -ENXIO;
break;
@@ -898,6 +901,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_PVTIME_CTRL:
ret = kvm_arm_pvtime_get_attr(vcpu, attr);
break;
+   case KVM_ARM_VCPU_PVLOCK_CTRL:
+   ret = kvm_arm_pvlock_get_attr(vcpu, attr);
+   break;
default:
ret = -ENXIO;
break;
@@ -921,6 +927,9 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_PVTIME_CTRL:
ret = kvm_arm_pvtime_has_attr(vcpu, attr);
break;
+   case KVM_ARM_VCPU_PVLOCK_CTRL:
+   ret = kvm_arm_pvlock_has_attr(vcpu, attr);
+   break;
default:
ret = -ENXIO;
break;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index f0a16b4adbbd..bfc628c580d4 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1238,6 +1238,8 @@ enum kvm_device_type {
 #define KVM_DEV_TYPE_XIVE  KVM_DEV_TYPE_XIVE
KVM_DEV_TYPE_ARM_PV_TIME,
 #define KVM_DEV_TYPE_ARM_PV_TIME   KVM_DEV_TYPE_ARM_PV_TIME
+   KVM_DEV_TYPE_ARM_PV_LOCK,
+#define KVM_DEV_TYPE_ARM_PV_LOCK   KVM_DEV_TYPE_ARM_PV_LOCK
KVM_DEV_TYPE_MAX,
 };
 
diff --git a/virt/kvm/arm/pvlock.c b/virt/kvm/arm/pvlock.c
index cdfd30a903b9..cbc562274e5e 100644
--- a/virt/kvm/arm/pvlock.c
+++ b/virt/kvm/arm/pvlock.c
@@ -44,3 +44,60 @@ void kvm_update_pvlock_preempted(struct kvm_vcpu *vcpu, u64 preempted)
kvm_put_guest(kvm, base + offset, preempted_le, u64);
srcu_read_unlock(&kvm->srcu, idx);
 }
+
+int kvm_arm_pvlock_set_attr(struct kvm_vcpu *vcpu,
+   struct kvm_device_attr *attr)
+{
+   u64 __user *user = (u64 __user *)attr->addr;
+   struct kvm *kvm = vcpu->kvm;
+   u64 ipa;
+   int ret = 0;
+   int idx;
+
+   if (attr->attr != KVM_ARM_VCPU_PVLOCK_IPA)
+   return -ENXIO;
+
+   if (get_user(ipa, user))
+   return -EFAULT;
+   if (!IS_ALIGNED(ipa, 64))
+   return -EINVAL;
+   if (vcpu->arch.pv.base != GPA_INVALID)
+   return -EEXIST;
+
+   /* Check the address is in a valid memslot */
+   idx = srcu_read_lock(&kvm->srcu);
+ 
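
On the user space side, setting this attribute might look like the
following hypothetical VMM sketch (vcpu_fd and pvlock_ipa are supplied
by the caller; the attribute constants come from this patch):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Register the guest-physical address of this VCPU's PV lock struct;
 * it must be 64-byte aligned and inside a valid memslot. */
static int set_pvlock_ipa(int vcpu_fd, uint64_t pvlock_ipa)
{
        struct kvm_device_attr attr = {
                .group = KVM_ARM_VCPU_PVLOCK_CTRL,
                .attr  = KVM_ARM_VCPU_PVLOCK_IPA,
                .addr  = (uint64_t)(uintptr_t)&pvlock_ipa,
        };

        return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}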

[PATCH v2 5/6] KVM: arm64: Add interface to support VCPU preempted check

2019-12-26 Thread Zengruan Ye
This is to fix some lock holder preemption issues. Some lock
implementations do a spin loop before acquiring the lock itself. The
kernel already has an interface, bool vcpu_is_preempted(int cpu), which
takes a CPU number as its parameter and returns true if that CPU is
preempted. The kernel can then break out of such spin loops based on
the return value of vcpu_is_preempted().

As the kernel already uses this interface, let's support it on arm64.

Signed-off-by: Zengruan Ye 
---
 arch/arm64/include/asm/paravirt.h  | 12 
 arch/arm64/include/asm/spinlock.h  |  7 +++
 arch/arm64/kernel/Makefile |  2 +-
 arch/arm64/kernel/paravirt-spinlocks.c | 13 +
 arch/arm64/kernel/paravirt.c   |  4 +++-
 5 files changed, 36 insertions(+), 2 deletions(-)
 create mode 100644 arch/arm64/kernel/paravirt-spinlocks.c

diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index cf3a0fd7c1a7..7b1c81b544bb 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -11,8 +11,13 @@ struct pv_time_ops {
unsigned long long (*steal_clock)(int cpu);
 };
 
+struct pv_lock_ops {
+   bool (*vcpu_is_preempted)(int cpu);
+};
+
 struct paravirt_patch_template {
struct pv_time_ops time;
+   struct pv_lock_ops lock;
 };
 
 extern struct paravirt_patch_template pv_ops;
@@ -24,6 +29,13 @@ static inline u64 paravirt_steal_clock(int cpu)
 
 int __init pv_time_init(void);
 
+__visible bool __native_vcpu_is_preempted(int cpu);
+
+static inline bool pv_vcpu_is_preempted(int cpu)
+{
+   return pv_ops.lock.vcpu_is_preempted(cpu);
+}
+
 #else
 
 #define pv_time_init() do {} while (0)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index b093b287babf..45ff1b2949a6 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -7,8 +7,15 @@
 
 #include 
 #include 
+#include <asm/paravirt.h>
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()   smp_mb()
 
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(long cpu)
+{
+   return pv_vcpu_is_preempted(cpu);
+}
+
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index fc6488660f64..b23cdae433a4 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -50,7 +50,7 @@ obj-$(CONFIG_ARMV8_DEPRECATED)   += armv8_deprecated.o
 obj-$(CONFIG_ACPI) += acpi.o
 obj-$(CONFIG_ACPI_NUMA)   += acpi_numa.o
 obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)  += acpi_parking_protocol.o
-obj-$(CONFIG_PARAVIRT) += paravirt.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt-spinlocks.o
 obj-$(CONFIG_RANDOMIZE_BASE)   += kaslr.o
 obj-$(CONFIG_HIBERNATION)  += hibernate.o hibernate-asm.o
 obj-$(CONFIG_KEXEC_CORE)   += machine_kexec.o relocate_kernel.o \
diff --git a/arch/arm64/kernel/paravirt-spinlocks.c b/arch/arm64/kernel/paravirt-spinlocks.c
new file mode 100644
index ..718aa773d45c
--- /dev/null
+++ b/arch/arm64/kernel/paravirt-spinlocks.c
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2019 Huawei Technologies Co., Ltd
+ * Author: Zengruan Ye 
+ */
+
+#include <linux/spinlock.h>
+#include <asm/paravirt.h>
+
+__visible bool __native_vcpu_is_preempted(int cpu)
+{
+   return false;
+}
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 1ef702b0be2d..d8f1ba8c22ce 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -26,7 +26,9 @@
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
 
-struct paravirt_patch_template pv_ops;
+struct paravirt_patch_template pv_ops = {
+   .lock.vcpu_is_preempted = __native_vcpu_is_preempted,
+};
 EXPORT_SYMBOL_GPL(pv_ops);
 
 struct pv_time_stolen_time_region {
-- 
2.19.1
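
For context on who consumes this hook: the optimistic spin queue is one
existing user, bailing out of its spin when the previous waiter's CPU
has been preempted (roughly, from kernel/locking/osq_lock.c):

	while (!READ_ONCE(node->locked)) {
		/*
		 * If we need to reschedule, or the previous waiter's
		 * VCPU has been preempted, stop spinning and unqueue.
		 */
		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
			goto unqueue;

		cpu_relax();
	}

With __native_vcpu_is_preempted() always returning false, bare-metal
behaviour is unchanged; a PV backend only has to override
pv_ops.lock.vcpu_is_preempted, which the next patch wires up for KVM
guests.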




Re: [PATCH v2 5/6] KVM: arm64: Add interface to support VCPU preempted check

2019-12-26 Thread kbuild test robot
Hi Zengruan,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on kvmarm/next]
[also build test ERROR on kvm/linux-next linus/master v5.5-rc3 next-20191220]
[cannot apply to arm64/for-next/core]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]

url:
https://github.com/0day-ci/linux/commits/Zengruan-Ye/KVM-arm64-VCPU-preempted-check-support/20191227-000637
base:   https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git next
config: arm64-alldefconfig (attached as .config)
compiler: aarch64-linux-gcc (GCC) 7.5.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
GCC_VERSION=7.5.0 make.cross ARCH=arm64 

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot 

All errors (new ones prefixed by >>):

   In file included from include/linux/spinlock.h:89:0,
from include/linux/radix-tree.h:16,
from include/linux/idr.h:15,
from include/linux/kernfs.h:13,
from include/linux/sysfs.h:16,
from include/linux/kobject.h:20,
from include/linux/of.h:17,
from include/linux/irqdomain.h:35,
from include/linux/acpi.h:13,
from include/acpi/apei.h:9,
from include/acpi/ghes.h:5,
from include/linux/arm_sdei.h:8,
from arch/arm64/kernel/asm-offsets.c:10:
   arch/arm64/include/asm/spinlock.h: In function 'vcpu_is_preempted':
>> arch/arm64/include/asm/spinlock.h:18:9: error: implicit declaration of function 'pv_vcpu_is_preempted'; did you mean 'vcpu_is_preempted'? [-Werror=implicit-function-declaration]
 return pv_vcpu_is_preempted(cpu);
^~~~
vcpu_is_preempted
   cc1: some warnings being treated as errors
   make[2]: *** [arch/arm64/kernel/asm-offsets.s] Error 1
   make[2]: Target '__build' not remade because of errors.
   make[1]: *** [prepare0] Error 2
   make[1]: Target 'prepare' not remade because of errors.
   make: *** [sub-make] Error 2
   27 real  5 user  7 sys  48.63% cpu   make prepare

vim +18 arch/arm64/include/asm/spinlock.h

14  
15  #define vcpu_is_preempted vcpu_is_preempted
16  static inline bool vcpu_is_preempted(long cpu)
17  {
  > 18  return pv_vcpu_is_preempted(cpu);
19  }
20  

---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org Intel Corporation



Re: [PATCH v2 5/6] KVM: arm64: Add interface to support VCPU preempted check

2019-12-26 Thread yezengruan
Hi,

On 2019/12/27 2:51, kbuild test robot wrote:
> Hi Zengruan,
> 
> Thank you for the patch! Yet something to improve:
> 
> [auto build test ERROR on kvmarm/next]
> [also build test ERROR on kvm/linux-next linus/master v5.5-rc3 next-20191220]
> [cannot apply to arm64/for-next/core]
> [if your patch is applied to the wrong git tree, please drop us a note to help
> improve the system. BTW, we also suggest to use '--base' option to specify the
> base tree in git format-patch, please see https://stackoverflow.com/a/37406982]
> 
> url:
> https://github.com/0day-ci/linux/commits/Zengruan-Ye/KVM-arm64-VCPU-preempted-check-support/20191227-000637
> base:   https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git next
> config: arm64-alldefconfig (attached as .config)
> compiler: aarch64-linux-gcc (GCC) 7.5.0
> reproduce:
> wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
> chmod +x ~/bin/make.cross
> # save the attached .config to linux build tree
> GCC_VERSION=7.5.0 make.cross ARCH=arm64 
> 
> If you fix the issue, kindly add following tag
> Reported-by: kbuild test robot 
> 
> All errors (new ones prefixed by >>):
> 
>In file included from include/linux/spinlock.h:89:0,
> from include/linux/radix-tree.h:16,
> from include/linux/idr.h:15,
> from include/linux/kernfs.h:13,
> from include/linux/sysfs.h:16,
> from include/linux/kobject.h:20,
> from include/linux/of.h:17,
> from include/linux/irqdomain.h:35,
> from include/linux/acpi.h:13,
> from include/acpi/apei.h:9,
> from include/acpi/ghes.h:5,
> from include/linux/arm_sdei.h:8,
> from arch/arm64/kernel/asm-offsets.c:10:
>arch/arm64/include/asm/spinlock.h: In function 'vcpu_is_preempted':
>>> arch/arm64/include/asm/spinlock.h:18:9: error: implicit declaration of function 'pv_vcpu_is_preempted'; did you mean 'vcpu_is_preempted'? [-Werror=implicit-function-declaration]
>  return pv_vcpu_is_preempted(cpu);
> ^~~~
> vcpu_is_preempted
>cc1: some warnings being treated as errors
>make[2]: *** [arch/arm64/kernel/asm-offsets.s] Error 1
>make[2]: Target '__build' not remade because of errors.
>make[1]: *** [prepare0] Error 2
>make[1]: Target 'prepare' not remade because of errors.
>make: *** [sub-make] Error 2
>27 real  5 user  7 sys  48.63% cpu make prepare
> 
> vim +18 arch/arm64/include/asm/spinlock.h
> 
> 14
> 15#define vcpu_is_preempted vcpu_is_preempted
> 16static inline bool vcpu_is_preempted(long cpu)
> 17{
>   > 18return pv_vcpu_is_preempted(cpu);
> 19}
> 20
> 
> ---
> 0-DAY kernel test infrastructure Open Source Technology Center
> https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org Intel Corporation
> 

Thanks for posting this, I'll update the code to fix this issue.

Thanks,

Zengruan


---
 arch/arm64/include/asm/spinlock.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 45ff1b2949a6..b5d1982414c5 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -12,10 +12,12 @@
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()   smp_mb()

+#ifdef CONFIG_PARAVIRT
 #define vcpu_is_preempted vcpu_is_preempted
 static inline bool vcpu_is_preempted(long cpu)
 {
return pv_vcpu_is_preempted(cpu);
 }
+#endif // CONFIG_PARAVIRT

 #endif /* __ASM_SPINLOCK_H */
-- 
2.19.1



