On Mon, Feb 24, 2025 at 10:52:43PM +0000, [email protected] wrote:
> From: Jeff Xu <[email protected]>
>
> Provide support for CONFIG_MSEAL_SYSTEM_MAPPINGS on arm64, covering
> the vdso, vvar, and compat-mode vectors and sigpage mappings.
>
> Production release testing passes on Android and Chrome OS.

This is pretty limited (yes yes I know android is massive etc. but we must
account for all the weird and wonderful arm64 devices out there in context of
upstream :)

Have you looked through all the arm64 code relating to vdso, vvar, compat-mode
vectors, and sigpage mapping, and ensured nothing kernel-side relies upon relocation?
Some arches actually seem to want to do this. Pretty sure PPC does... so a bit
nervous of that.

At any rate, some comment about having checked/confirmed this would be good; arm
concerns me a lot more than x86 on this front.

Thanks however for doing extensive testing android/chrome side! This is of
course, very important for sheer volume (and probably worldwide % of deployed
arm64 devices...)

Just need to dot our i's and cross our t's...

>
> Signed-off-by: Jeff Xu <[email protected]>
> ---
>  arch/arm64/Kconfig       |  1 +
>  arch/arm64/kernel/vdso.c | 22 +++++++++++++++-------
>  2 files changed, 16 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index fcdd0ed3eca8..39202aa9a5af 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -38,6 +38,7 @@ config ARM64
>       select ARCH_HAS_KEEPINITRD
>       select ARCH_HAS_MEMBARRIER_SYNC_CORE
>       select ARCH_HAS_MEM_ENCRYPT
> +     select ARCH_HAS_MSEAL_SYSTEM_MAPPINGS
>       select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
>       select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
>       select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT
> diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
> index e8ed8e5b713b..12e6ab396018 100644
> --- a/arch/arm64/kernel/vdso.c
> +++ b/arch/arm64/kernel/vdso.c
> @@ -183,6 +183,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
>  {
>       unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
>       unsigned long gp_flags = 0;
> +     unsigned long vm_flags;
>       void *ret;
>
>       BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
> @@ -197,8 +198,10 @@ static int __setup_additional_pages(enum vdso_abi abi,
>               goto up_fail;
>       }
>
> +     vm_flags = VM_READ|VM_MAYREAD|VM_PFNMAP;
> +     vm_flags |= VM_SEALED_SYSMAP;
>       ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
> -                                    VM_READ|VM_MAYREAD|VM_PFNMAP,
> +                                    vm_flags,
>                                      &vvar_map);
>       if (IS_ERR(ret))
>               goto up_fail;
> @@ -208,9 +211,10 @@ static int __setup_additional_pages(enum vdso_abi abi,
>
>       vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
>       mm->context.vdso = (void *)vdso_base;
> +     vm_flags = VM_READ|VM_EXEC|gp_flags|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
> +     vm_flags |= VM_SEALED_SYSMAP;
>       ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
> -                                    VM_READ|VM_EXEC|gp_flags|
> -                                    VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
> +                                    vm_flags,
>                                      vdso_info[abi].cm);
>       if (IS_ERR(ret))
>               goto up_fail;
> @@ -326,6 +330,7 @@ arch_initcall(aarch32_alloc_vdso_pages);
>  static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
>  {
>       void *ret;
> +     unsigned long vm_flags;
>
>       if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
>               return 0;
> @@ -334,9 +339,10 @@ static int aarch32_kuser_helpers_setup(struct mm_struct 
> *mm)
>        * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
>        * not safe to CoW the page containing the CPU exception vectors.
>        */
> +     vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC;
> +     vm_flags |= VM_SEALED_SYSMAP;
>       ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
> -                                    VM_READ | VM_EXEC |
> -                                    VM_MAYREAD | VM_MAYEXEC,
> +                                    vm_flags,
>                                      &aarch32_vdso_maps[AA32_MAP_VECTORS]);
>
>       return PTR_ERR_OR_ZERO(ret);
> @@ -345,6 +351,7 @@ static int aarch32_kuser_helpers_setup(struct mm_struct 
> *mm)
>  static int aarch32_sigreturn_setup(struct mm_struct *mm)
>  {
>       unsigned long addr;
> +     unsigned long vm_flags;
>       void *ret;
>
>       addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
> @@ -357,9 +364,10 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
>        * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
>        * set breakpoints.
>        */
> +     vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC;
> +     vm_flags |= VM_SEALED_SYSMAP;
>       ret = _install_special_mapping(mm, addr, PAGE_SIZE,
> -                                    VM_READ | VM_EXEC | VM_MAYREAD |
> -                                    VM_MAYWRITE | VM_MAYEXEC,
> +                                    vm_flags,
>                                      &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
>       if (IS_ERR(ret))
>               goto out;
> --
> 2.48.1.658.g4767266eb4-goog
>

Patch looks fine for purposes of what you're trying to achieve, though — just
need to have some calming reassurances about the arch side :)

Thanks!

Reply via email to