On Tue, May 20, 2025 at 02:01:17PM +0100, Lorenz Bauer wrote:
> User space needs access to kernel BTF for many modern features of BPF.
> Right now each process needs to read the BTF blob either in pieces or
> as a whole. Allow mmaping the sysfs file so that processes can directly
> access the memory allocated for it in the kernel.
>
> remap_pfn_range is used instead of vm_insert_page due to aarch64
> compatibility issues.
>
> Tested-by: Alan Maguire <alan.magu...@oracle.com>
> Signed-off-by: Lorenz Bauer <l...@isovalent.com>
> ---
>  include/asm-generic/vmlinux.lds.h |  3 ++-
>  kernel/bpf/sysfs_btf.c            | 32 ++++++++++++++++++++++++++++++++
>  2 files changed, 34 insertions(+), 1 deletion(-)
>
> diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
> index 58a635a6d5bdf0c53c267c2a3d21a5ed8678ce73..1750390735fac7637cc4d2fa05f96cb2a36aa448 100644
> --- a/include/asm-generic/vmlinux.lds.h
> +++ b/include/asm-generic/vmlinux.lds.h
> @@ -667,10 +667,11 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
>   */
>  #ifdef CONFIG_DEBUG_INFO_BTF
>  #define BTF							\
> +	. = ALIGN(PAGE_SIZE);					\
>  	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {			\
>  		BOUNDED_SECTION_BY(.BTF, _BTF)			\
>  	}							\
> -	. = ALIGN(4);						\
> +	. = ALIGN(PAGE_SIZE);					\
>  	.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) {		\
>  		*(.BTF_ids)					\
>  	}
> diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
> index 81d6cf90584a7157929c50f62a5c6862e7a3d081..941d0d2427e3a2d27e8f1cff7b6424d0d41817c1 100644
> --- a/kernel/bpf/sysfs_btf.c
> +++ b/kernel/bpf/sysfs_btf.c
> @@ -7,14 +7,46 @@
>  #include <linux/kobject.h>
>  #include <linux/init.h>
>  #include <linux/sysfs.h>
> +#include <linux/mm.h>
> +#include <linux/io.h>
> +#include <linux/btf.h>
>
>  /* See scripts/link-vmlinux.sh, gen_btf() func for details */
>  extern char __start_BTF[];
>  extern char __stop_BTF[];
>
> +static int btf_sysfs_vmlinux_mmap(struct file *filp, struct kobject *kobj,
> +				  const struct bin_attribute *attr,
> +				  struct vm_area_struct *vma)
> +{
> +	unsigned long pages = PAGE_ALIGN(attr->size) >> PAGE_SHIFT;
> +	size_t vm_size = vma->vm_end - vma->vm_start;
> +	phys_addr_t addr = virt_to_phys(__start_BTF);
> +	unsigned long pfn = addr >> PAGE_SHIFT;
> +
> +	if (attr->private != __start_BTF || !PAGE_ALIGNED(addr))
With the vmlinux.lds.h change above, is the PAGE_ALIGNED() check still
needed? Also, can the size of the BTF region be non-page-aligned?

> +		return -EINVAL;
> +
> +	if (vma->vm_pgoff)
> +		return -EINVAL;
> +
> +	if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_MAYSHARE))
> +		return -EACCES;
> +
> +	if (pfn + pages < pfn)
> +		return -EINVAL;
> +
> +	if ((vm_size >> PAGE_SHIFT) > pages)
> +		return -EINVAL;
> +
> +	vm_flags_mod(vma, VM_DONTDUMP, VM_MAYEXEC | VM_MAYWRITE);

Is it ok for fork() to keep the mapping in the child, i.e. do you need
VM_DONTCOPY? BTW, VM_DONTDUMP is added by remap_pfn_range(), so you can
drop it here if you want.

> +	return remap_pfn_range(vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot);
> +}
> +
>  static struct bin_attribute bin_attr_btf_vmlinux __ro_after_init = {
>  	.attr = { .name = "vmlinux", .mode = 0444, },
>  	.read_new = sysfs_bin_attr_simple_read,
> +	.mmap = btf_sysfs_vmlinux_mmap,
>  };
>
>  struct kobject *btf_kobj;
>

Overall this looks good to me, so you can add:

Reviewed-by: Shakeel Butt <shakeel.b...@linux.dev>
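P.S. For anyone wanting to try this from userspace, below is a rough
sketch (not part of the patch, minimal error handling) of what a
consumer of /sys/kernel/btf/vmlinux could look like. It mainly shows
that, to pass the checks in btf_sysfs_vmlinux_mmap(), the mapping has
to be read-only, MAP_PRIVATE and at offset zero; a MAP_SHARED or
PROT_WRITE mapping would get -EACCES from the handler.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	void *btf;
	int fd;

	fd = open("/sys/kernel/btf/vmlinux", O_RDONLY);
	if (fd < 0 || fstat(fd, &st))
		return 1;

	/* Read-only, private mapping at offset 0: matches the checks
	 * in btf_sysfs_vmlinux_mmap() above. */
	btf = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);
	if (btf == MAP_FAILED)
		return 1;

	/* struct btf_header starts with a 16-bit magic, 0xeb9f. */
	printf("BTF magic: 0x%x, size: %lld\n",
	       *(uint16_t *)btf, (long long)st.st_size);

	munmap(btf, st.st_size);
	return 0;
}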