Changes look good to me.

Reviewed-by: Oleksii Kurochko <oleksii.kuroc...@gmail.com>

I'll update the RISC-V part correspondingly.
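For anyone following along, here's a minimal standalone sketch of the idiom the
patch relies on (outside the Xen tree; the typedefs and the fixed offset below
are made up for illustration): a function-like macro can shadow the inline
function of the same name, because the macro's own name is not re-expanded
inside its replacement list, so the macro merely adds the cast at every call
site.

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;
typedef uint64_t paddr_t;

/* Stand-in for the real translation; the offset is invented. */
static inline paddr_t virt_to_maddr(vaddr_t va)
{
    return (paddr_t)va + 0x40000000UL;
}

/*
 * The macro shadows the function of the same name.  Its own name is not
 * re-expanded inside the replacement list (C99 6.10.3.4), so the inner
 * call resolves to the inline function above, and callers get the
 * (vaddr_t) cast for free.
 */
#define virt_to_maddr(va) virt_to_maddr((vaddr_t)(va))

int main(void)
{
    int dummy;
    /* A pointer argument is fine here thanks to the cast in the macro. */
    printf("%#llx\n", (unsigned long long)virt_to_maddr(&dummy));
    return 0;
}

With the x86 maddr_to_virt() wrapper macro gone, by contrast, passing a
pointer there is now diagnosed by the compiler instead of being silently
cast away, which is the point of the cleanup.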

~ Oleksii

On Tue, 2024-03-05 at 09:33 +0100, Jan Beulich wrote:
> There's no use of them anymore except in the definitions of the
> non-underscore-prefixed aliases. Rename the inline functions, adjust
> the virt_to_maddr() #define-s, and purge the (x86-only)
> maddr_to_virt() one, thus eliminating a bogus cast which would have
> allowed the passing of a pointer type variable into maddr_to_virt()
> to go silently.
> 
> Signed-off-by: Jan Beulich <jbeul...@suse.com>
> 
> --- a/xen/arch/arm/include/asm/mm.h
> +++ b/xen/arch/arm/include/asm/mm.h
> @@ -256,12 +256,12 @@ static inline void __iomem *ioremap_wc(p
>  /* Page-align address and convert to frame number format */
>  #define paddr_to_pfn_aligned(paddr)   paddr_to_pfn(PAGE_ALIGN(paddr))
>  
> -static inline paddr_t __virt_to_maddr(vaddr_t va)
> +static inline paddr_t virt_to_maddr(vaddr_t va)
>  {
>      uint64_t par = va_to_par(va);
>      return (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK);
>  }
> -#define virt_to_maddr(va)   __virt_to_maddr((vaddr_t)(va))
> +#define virt_to_maddr(va) virt_to_maddr((vaddr_t)(va))
>  
>  #ifdef CONFIG_ARM_32
>  /**
> --- a/xen/arch/x86/hvm/nestedhvm.c
> +++ b/xen/arch/x86/hvm/nestedhvm.c
> @@ -125,7 +125,7 @@ static int __init cf_check nestedhvm_set
>      /* shadow_io_bitmaps can't be declared static because
>       *   they must fulfill hw requirements (page aligned section)
>       *   and doing so triggers the ASSERT(va >= XEN_VIRT_START)
> -     *   in __virt_to_maddr()
> +     *   in virt_to_maddr()
>       *
>       * So as a compromise pre-allocate them when xen boots.
>       * This function must be called from within start_xen() when
> --- a/xen/arch/x86/include/asm/page.h
> +++ b/xen/arch/x86/include/asm/page.h
> @@ -269,8 +269,6 @@ void scrub_page_cold(void *);
>  #define mfn_valid(mfn)      __mfn_valid(mfn_x(mfn))
>  #define virt_to_mfn(va)     __virt_to_mfn(va)
>  #define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
> -#define virt_to_maddr(va)   __virt_to_maddr((unsigned long)(va))
> -#define maddr_to_virt(ma)   __maddr_to_virt((unsigned long)(ma))
>  #define maddr_to_page(ma)   __maddr_to_page(ma)
>  #define page_to_maddr(pg)   __page_to_maddr(pg)
>  #define virt_to_page(va)    __virt_to_page(va)
> --- a/xen/arch/x86/include/asm/x86_64/page.h
> +++ b/xen/arch/x86/include/asm/x86_64/page.h
> @@ -34,7 +34,7 @@ static inline unsigned long canonicalise
>  #define pdx_to_virt(pdx) ((void *)(DIRECTMAP_VIRT_START + \
>                                     ((unsigned long)(pdx) << PAGE_SHIFT)))
>  
> -static inline unsigned long __virt_to_maddr(unsigned long va)
> +static inline unsigned long virt_to_maddr(unsigned long va)
>  {
>      ASSERT(va < DIRECTMAP_VIRT_END);
>      if ( va >= DIRECTMAP_VIRT_START )
> @@ -47,8 +47,9 @@ static inline unsigned long __virt_to_ma
>  
>      return xen_phys_start + va - XEN_VIRT_START;
>  }
> +#define virt_to_maddr(va) virt_to_maddr((unsigned long)(va))
>  
> -static inline void *__maddr_to_virt(unsigned long ma)
> +static inline void *maddr_to_virt(unsigned long ma)
>  {
>      /* Offset in the direct map, accounting for pdx compression */
>      unsigned long va_offset = maddr_to_directmapoff(ma);

