Re: [PATCH 5/5] LoongArch: Add pv ipi support on LoongArch system

2024-01-08 Thread kernel test robot
Hi Bibo,

kernel test robot noticed the following build warnings:

[auto build test WARNING on 610a9b8f49fbcf1100716370d3b5f6f884a2835a]

url:    https://github.com/intel-lab-lkp/linux/commits/Bibo-Mao/LoongArch-KVM-Add-hypercall-instruction-emulation-support/20240103-151946
base:   610a9b8f49fbcf1100716370d3b5f6f884a2835a
patch link:    https://lore.kernel.org/r/20240103071615.3422264-6-maobibo%40loongson.cn
patch subject: [PATCH 5/5] LoongArch: Add pv ipi support on LoongArch system
config: loongarch-randconfig-r061-20240109 (https://download.01.org/0day-ci/archive/20240109/202401091354.gtbergqj-...@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 13.2.0

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot 
| Closes: https://lore.kernel.org/oe-kbuild-all/202401091354.gtbergqj-...@intel.com/

cocci warnings: (new ones prefixed by >>)
>> arch/loongarch/kvm/exit.c:681:5-8: Unneeded variable: "ret". Return "0" on line 701
--
>> arch/loongarch/kvm/exit.c:720:2-3: Unneeded semicolon

vim +681 arch/loongarch/kvm/exit.c

   678  
   679  static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu, int sgi)
   680  {
 > 681  	int ret = 0;
   682  	u64 ipi_bitmap;
   683  	unsigned int min, cpu;
   684  	struct kvm_vcpu *dest;
   685  
   686  	ipi_bitmap = vcpu->arch.gprs[LOONGARCH_GPR_A1];
   687  	min = vcpu->arch.gprs[LOONGARCH_GPR_A2];
   688  
   689  	if (ipi_bitmap) {
   690  		cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
   691  		while (cpu < BITS_PER_LONG) {
   692  			if ((cpu + min) < KVM_MAX_VCPUS) {
   693  				dest = kvm_get_vcpu_by_id(vcpu->kvm, cpu + min);
   694  				kvm_queue_irq(dest, sgi);
   695  				kvm_vcpu_kick(dest);
   696  			}
   697  			cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
   698  		}
   699  	}
   700  
 > 701  	return ret;
   702  }
   703  
   704  /*
   705   * hypcall emulation always return to guest, Caller should check retval.
   706   */
   707  static void kvm_handle_pv_hcall(struct kvm_vcpu *vcpu)
   708  {
   709  	unsigned long func = vcpu->arch.gprs[LOONGARCH_GPR_A0];
   710  	long ret;
   711  
   712  	switch (func) {
   713  	case KVM_HC_FUNC_IPI:
   714  		kvm_pv_send_ipi(vcpu, INT_SWI0);
   715  		ret = KVM_HC_STATUS_SUCCESS;
   716  		break;
   717  	default:
   718  		ret = KVM_HC_INVALID_CODE;
   719  		break;
 > 720  	};
   721  
   722  	vcpu->arch.gprs[LOONGARCH_GPR_A0] = ret;
   723  }
   724  
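Both findings are mechanical: kvm_pv_send_ipi() only ever returns the initial
value of "ret", and the switch statement in kvm_handle_pv_hcall() carries a
stray semicolon after its closing brace. A minimal sketch of what the cleaned-up
code could look like (an editorial illustration of the coccinelle suggestions,
assuming no other changes to the two functions) is:

static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu, int sgi)
{
	u64 ipi_bitmap;
	unsigned int min, cpu;
	struct kvm_vcpu *dest;

	ipi_bitmap = vcpu->arch.gprs[LOONGARCH_GPR_A1];
	min = vcpu->arch.gprs[LOONGARCH_GPR_A2];

	if (ipi_bitmap) {
		cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
		while (cpu < BITS_PER_LONG) {
			if ((cpu + min) < KVM_MAX_VCPUS) {
				dest = kvm_get_vcpu_by_id(vcpu->kvm, cpu + min);
				kvm_queue_irq(dest, sgi);
				kvm_vcpu_kick(dest);
			}
			cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
		}
	}

	/* The write-only "ret" variable is dropped; return 0 directly. */
	return 0;
}

static void kvm_handle_pv_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long func = vcpu->arch.gprs[LOONGARCH_GPR_A0];
	long ret;

	switch (func) {
	case KVM_HC_FUNC_IPI:
		kvm_pv_send_ipi(vcpu, INT_SWI0);
		ret = KVM_HC_STATUS_SUCCESS;
		break;
	default:
		ret = KVM_HC_INVALID_CODE;
		break;
	}	/* no semicolon after the closing brace of the switch */

	vcpu->arch.gprs[LOONGARCH_GPR_A0] = ret;
}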

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki



[PATCH 5/5] LoongArch: Add pv ipi support on LoongArch system

2024-01-02 Thread Bibo Mao
On LoongArch systems, IPI hardware is accessed through IOCSR registers: the
IPI sender performs one IOCSR register access and the IPI interrupt handler
performs two. In VM mode, every IOCSR register access traps into the
hypervisor.

This patch adds PV IPI support for VMs. The IPI sender uses a hypercall
instruction, and the hypervisor injects SWI into the VM. The SWI interrupt
handler only reads and writes the ESTAT CSR register, and ESTAT CSR accesses
do not trap into the hypervisor. So with PV IPI support, sending an IPI still
traps into the hypervisor, but handling the IPI interrupt does not.

This patch also adds IPI multicast support, using a method similar to x86:
an IPI notification can be sent to up to 64 vCPUs at a time. The hardware
cpuid is currently equal to the logical cpuid in the LoongArch KVM
hypervisor; hardware cpuid lookup logic will be added to the hypervisor in
the next patch.
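
For reference, the sender side implied by this interface boils down to packing
the destination CPUs into a 64-bit bitmap relative to the lowest cpuid and
issuing a single hypercall, with the bitmap in a1 and the base cpuid in a2
(matching what kvm_pv_send_ipi() reads on the hypervisor side). The sketch
below only illustrates that encoding; the helper name and the omission of the
per-CPU message bookkeeping are assumptions, not the actual paravirt.c code
from this patch.

/*
 * Illustrative sketch (not the patch's paravirt.c): send an IPI to every
 * CPU in @mask with one hypercall.  The hypervisor reads a1 as a bitmap of
 * cpuids relative to the base cpuid passed in a2.
 */
static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int cpu, base;
	u64 bitmap = 0;

	if (cpumask_empty(mask))
		return;

	/* The lowest cpuid in the mask becomes the base cpuid (a2). */
	base = cpumask_first(mask);

	/* Each destination is one bit relative to the base; up to 64 CPUs fit. */
	for_each_cpu(cpu, mask) {
		if (cpu - base < BITS_PER_LONG)
			bitmap |= 1ULL << (cpu - base);
		/* CPUs outside the 64-bit window would need another hypercall. */
	}

	/*
	 * Recording @action for the interrupt handler (e.g. in the new per-CPU
	 * "messages" word added to irq_cpustat_t) is omitted from this sketch.
	 */

	/* One trap for the whole mask: a1 = bitmap, a2 = base cpuid. */
	kvm_hypercall2(KVM_HC_FUNC_IPI, bitmap, base);
}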

Signed-off-by: Bibo Mao 
---
 arch/loongarch/include/asm/hardirq.h   |   1 +
 arch/loongarch/include/asm/kvm_para.h  | 124 +
 arch/loongarch/include/asm/loongarch.h |   1 +
 arch/loongarch/kernel/irq.c            |   2 +-
 arch/loongarch/kernel/paravirt.c       | 103
 arch/loongarch/kernel/smp.c            |   2 +-
 arch/loongarch/kvm/exit.c              |  66 -
 7 files changed, 295 insertions(+), 4 deletions(-)

diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h
index 9f0038e19c7f..998011f162d0 100644
--- a/arch/loongarch/include/asm/hardirq.h
+++ b/arch/loongarch/include/asm/hardirq.h
@@ -21,6 +21,7 @@ enum ipi_msg_type {
 typedef struct {
 	unsigned int ipi_irqs[NR_IPI];
 	unsigned int __softirq_pending;
+	atomic_t messages;
 } ____cacheline_aligned irq_cpustat_t;
 
 DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
index 41200e922a82..a25a84e372b9 100644
--- a/arch/loongarch/include/asm/kvm_para.h
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -9,6 +9,10 @@
 #define HYPERVISOR_VENDOR_SHIFT	8
 #define HYPERCALL_CODE(vendor, code)	((vendor << HYPERVISOR_VENDOR_SHIFT) + code)
 
+#define KVM_HC_CODE_SERVICE	0
+#define KVM_HC_SERVICE		HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HC_CODE_SERVICE)
+#define  KVM_HC_FUNC_IPI	1
+
 /*
  * LoongArch hypcall return code
  */
@@ -16,6 +20,126 @@
 #define KVM_HC_INVALID_CODE		-1UL
 #define KVM_HC_INVALID_PARAMETER	-2UL
 
+/*
+ * Hypercalls interface for KVM hypervisor
+ *
+ * a0: function identifier
+ * a1-a6: args
+ * Return value will be placed in v0.
+ * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6.
+ */
+static __always_inline long kvm_hypercall(u64 fid)
+{
+   register long ret asm("v0");
+   register unsigned long fun asm("a0") = fid;
+
+   __asm__ __volatile__(
+   "hvcl "__stringify(KVM_HC_SERVICE)
+   : "=r" (ret)
+   : "r" (fun)
+   : "memory"
+   );
+
+   return ret;
+}
+
+static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
+{
+   register long ret asm("v0");
+   register unsigned long fun asm("a0") = fid;
+   register unsigned long a1  asm("a1") = arg0;
+
+   __asm__ __volatile__(
+   "hvcl "__stringify(KVM_HC_SERVICE)
+   : "=r" (ret)
+   : "r" (fun), "r" (a1)
+   : "memory"
+   );
+
+   return ret;
+}
+
+static __always_inline long kvm_hypercall2(u64 fid,
+   unsigned long arg0, unsigned long arg1)
+{
+   register long ret asm("v0");
+   register unsigned long fun asm("a0") = fid;
+   register unsigned long a1  asm("a1") = arg0;
+   register unsigned long a2  asm("a2") = arg1;
+
+   __asm__ __volatile__(
+   "hvcl "__stringify(KVM_HC_SERVICE)
+   : "=r" (ret)
+   : "r" (fun), "r" (a1), "r" (a2)
+   : "memory"
+   );
+
+   return ret;
+}
+
+static __always_inline long kvm_hypercall3(u64 fid,
+   unsigned long arg0, unsigned long arg1, unsigned long arg2)
+{
+   register long ret asm("v0");
+   register unsigned long fun asm("a0") = fid;
+   register unsigned long a1  asm("a1") = arg0;
+   register unsigned long a2  asm("a2") = arg1;
+   register unsigned long a3  asm("a3") = arg2;
+
+   __asm__ __volatile__(
+   "hvcl "__stringify(KVM_HC_SERVICE)
+   : "=r" (ret)
+   : "r" (fun), "r" (a1), "r" (a2), "r" (a3)
+   : "memory"
+   );
+
+   return ret;
+}
+
+static __always_inline long kvm_hypercall4(u64 fid,
+   unsigned long arg0, unsigned long arg1, unsigned long arg2,
+   unsigned long arg3)
+{
+   register long ret asm("v0");
+   register unsigned long fun asm("a0") = fid;
+