On Sun, Jun 15, 2025 at 10:02:03PM -0700, Nicolin Chen wrote:
>  FIXTURE_TEARDOWN(iommufd_dirty_tracking)
>  {
> -     munmap(self->buffer, variant->buffer_size);
> -     munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
> +     unsigned long size = variant->buffer_size;
> +
> +     if (variant->hugepages)
> +             size = __ALIGN_KERNEL(variant->buffer_size, HUGEPAGE_SIZE);
> +     munmap(self->buffer, size);
> +     free(self->buffer);
> +     free(self->bitmap);
>       teardown_iommufd(self->fd, _metadata);

munmap followed by free isn't right..

This code is using the glibc allocator to get a bunch of pages mmap'd
to an aligned location then replacing the pages with MAP_SHARED and
maybe MAP_HUGETLB versions.

It is fine to let the glibc allocator unmap them via free().

I think something like this will do the job:

diff --git a/tools/testing/selftests/iommu/iommufd.c 
b/tools/testing/selftests/iommu/iommufd.c
index 1a8e85afe9aa51..e0f4f1541a1f4a 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -2008,6 +2008,7 @@ FIXTURE_VARIANT(iommufd_dirty_tracking)
 
 FIXTURE_SETUP(iommufd_dirty_tracking)
 {
+       size_t mmap_buffer_size;
        unsigned long size;
        int mmap_flags;
        void *vrc;
@@ -2022,12 +2023,7 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
        self->fd = open("/dev/iommu", O_RDWR);
        ASSERT_NE(-1, self->fd);
 
-       rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
-       if (rc || !self->buffer) {
-               SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
-                          variant->buffer_size, rc);
-       }
-
+       mmap_buffer_size = variant->buffer_size;
        mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
        if (variant->hugepages) {
                /*
@@ -2035,9 +2031,25 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
                 * not available.
                 */
                mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
+
+               /*
+                * Allocation must be aligned to the HUGEPAGE_SIZE, because the
+                * following mmap() will automatically align the length to be a
+                * multiple of the underlying huge page size. Failing to do the
+                * same at this allocation will result in a memory overwrite by
+                * the mmap().
+                */
+               if (mmap_buffer_size < HUGEPAGE_SIZE)
+                       mmap_buffer_size = HUGEPAGE_SIZE;
+       }
+
+       rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, mmap_buffer_size);
+       if (rc || !self->buffer) {
+               SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
+                          mmap_buffer_size, rc);
        }
        assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
-       vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
+       vrc = mmap(self->buffer, mmap_buffer_size, PROT_READ | PROT_WRITE,
                   mmap_flags, -1, 0);
        assert(vrc == self->buffer);
 
@@ -2066,8 +2078,8 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
 
 FIXTURE_TEARDOWN(iommufd_dirty_tracking)
 {
-       munmap(self->buffer, variant->buffer_size);
-       munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
+       free(self->buffer);
+       free(self->bitmap);
        teardown_iommufd(self->fd, _metadata);
 }
 

Reply via email to