From: Naman Jain <[email protected]> Sent: Monday, March 16, 2026 5:13 AM
> 
> Add necessary support to make MSHV_VTL work for the arm64 architecture.
> * Add stub implementation for mshv_vtl_return_call_init(): not required
>   for arm64
> * Remove fpu/legacy.h header inclusion, as this is not required
> * Handle HV_REGISTER_VSM_CODE_PAGE_OFFSETS register: not supported
>   on arm64
> * Configure custom percpu_vmbus_handler by using
>   hv_setup_percpu_vmbus_handler()
> * Handle hugepage functions by config checks
> 
> Signed-off-by: Roman Kisel <[email protected]>
> Signed-off-by: Naman Jain <[email protected]>
> ---
>  arch/arm64/include/asm/mshyperv.h |  2 ++
>  drivers/hv/mshv_vtl_main.c        | 21 ++++++++++++++-------
>  2 files changed, 16 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/mshyperv.h b/arch/arm64/include/asm/mshyperv.h
> index 36803f0386cc..027a7f062d70 100644
> --- a/arch/arm64/include/asm/mshyperv.h
> +++ b/arch/arm64/include/asm/mshyperv.h
> @@ -83,6 +83,8 @@ static inline int hv_vtl_get_set_reg(struct hv_register_assoc *regs, bool set, u
>       return 1;
>  }
> 
> +static inline void mshv_vtl_return_call_init(u64 vtl_return_offset) {}
> +
>  void mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0);
>  bool hv_vtl_configure_reg_page(struct mshv_vtl_per_cpu *per_cpu);
>  #endif
> diff --git a/drivers/hv/mshv_vtl_main.c b/drivers/hv/mshv_vtl_main.c
> index 4c9ae65ad3e8..5702fe258500 100644
> --- a/drivers/hv/mshv_vtl_main.c
> +++ b/drivers/hv/mshv_vtl_main.c
> @@ -23,8 +23,6 @@
>  #include <trace/events/ipi.h>
>  #include <uapi/linux/mshv.h>
>  #include <hyperv/hvhdk.h>
> -
> -#include "../../kernel/fpu/legacy.h"

Was there a particular code change that made this unnecessary? Or was it
unnecessary from the start of this source code file? Just curious ....

>  #include "mshv.h"
>  #include "mshv_vtl.h"
>  #include "hyperv_vmbus.h"
> @@ -206,18 +204,21 @@ static void mshv_vtl_synic_enable_regs(unsigned int cpu)
>  static int mshv_vtl_get_vsm_regs(void)
>  {
>       struct hv_register_assoc registers[2];
> -     int ret, count = 2;
> +     int ret, count = 0;
> 
> -     registers[0].name = HV_REGISTER_VSM_CODE_PAGE_OFFSETS;
> -     registers[1].name = HV_REGISTER_VSM_CAPABILITIES;
> +     registers[count++].name = HV_REGISTER_VSM_CAPABILITIES;
> +     /* Code page offset register is not supported on ARM */
> +     if (IS_ENABLED(CONFIG_X86_64))
> +             registers[count++].name = HV_REGISTER_VSM_CODE_PAGE_OFFSETS;
> 
>       ret = hv_call_get_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
>                                      count, input_vtl_zero, registers);
>       if (ret)
>               return ret;
> 
> -     mshv_vsm_page_offsets.as_uint64 = registers[0].value.reg64;
> -     mshv_vsm_capabilities.as_uint64 = registers[1].value.reg64;
> +     mshv_vsm_capabilities.as_uint64 = registers[0].value.reg64;
> +     if (IS_ENABLED(CONFIG_X86_64))
> +             mshv_vsm_page_offsets.as_uint64 = registers[1].value.reg64;
> 
>       return ret;
>  }

This function has gotten somewhat messy to handle the x86 and arm64
differences. Let me suggest a different approach. Have this function only
get the VSM capabilities register, as that is generic across x86 and
arm64. Then, update the x86 mshv_vtl_return_call_init() to get the
CODE_PAGE_OFFSETS register and immediately use the value to update
the static call. The global variable mshv_vsm_page_offsets is no longer
necessary.

My suggestion might be a little more code because hv_call_get_vp_registers()
is invoked in two different places. But it cleanly separates the two use
cases, and keeps the x86 hackery under arch/x86.

> @@ -280,10 +281,13 @@ static int hv_vtl_setup_synic(void)
> 
>       /* Use our isr to first filter out packets destined for userspace */
>       hv_setup_vmbus_handler(mshv_vtl_vmbus_isr);
> +     /* hv_setup_vmbus_handler() is stubbed for ARM64, add per-cpu VMBus handlers instead */
> +     hv_setup_percpu_vmbus_handler(mshv_vtl_vmbus_isr);
> 
>       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vtl:online",
>                               mshv_vtl_alloc_context, NULL);
>       if (ret < 0) {
> +             hv_setup_percpu_vmbus_handler(vmbus_isr);
>               hv_setup_vmbus_handler(vmbus_isr);
>               return ret;
>       }
> @@ -296,6 +300,7 @@ static int hv_vtl_setup_synic(void)
>  static void hv_vtl_remove_synic(void)
>  {
>       cpuhp_remove_state(mshv_vtl_cpuhp_online);
> +     hv_setup_percpu_vmbus_handler(vmbus_isr);
>       hv_setup_vmbus_handler(vmbus_isr);
>  }
> 
> @@ -1080,10 +1085,12 @@ static vm_fault_t mshv_vtl_low_huge_fault(struct vm_fault *vmf, unsigned int ord
>                       ret = vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
>               return ret;
> 
> +#if defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
>       case PUD_ORDER:
>               if (can_fault(vmf, PUD_SIZE, &pfn))
>                       ret = vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
>               return ret;
> +#endif
> 
>       default:
>               return VM_FAULT_SIGBUS;
> --
> 2.43.0
> 

