The intent of exercising parallel page faults is not necessarily to
exercise parallel swap-in (we can safely rely on swap-in being well
tested, and it is orthogonal to page faulting), but to make sure that
our object and GGTT locking is exercised. We can therefore safely
reduce our RSS without loss of coverage. Furthermore, by using varying
object sizes we exercise different code paths within page faulting,
rather than all faults being serviced as partial mmaps. Instead of
allocating 32 surfaces of 16MiB each, we allocate 32 surfaces in
incremental 512KiB sizes, halving the memory requirement.
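For reference, the arithmetic behind the "halving" claim (worked out
here, not spelled out in the original message): 32 surfaces of 16MiB
pin 32 * 16MiB = 512MiB of backing store, whereas 32 surfaces in
incremental 512KiB sizes pin (1 + 2 + ... + 32) * 512KiB
= 528 * 512KiB = 264MiB, roughly half.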
References: https://bugs.freedesktop.org/show_bug.cgi?id=111864
Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 tests/i915/gem_mmap_gtt.c | 40 +++++++++++++++++++++++++++------------
 1 file changed, 28 insertions(+), 12 deletions(-)

diff --git a/tests/i915/gem_mmap_gtt.c b/tests/i915/gem_mmap_gtt.c
index 91da5a37b..6ebbdd8c7 100644
--- a/tests/i915/gem_mmap_gtt.c
+++ b/tests/i915/gem_mmap_gtt.c
@@ -57,30 +57,36 @@ set_domain_gtt(int fd, uint32_t handle)
 }
 
 static void *
-mmap_bo(int fd, uint32_t handle)
+mmap_bo(int fd, uint32_t handle, uint64_t size)
 {
 	void *ptr;
 
-	ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+	ptr = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
 
 	return ptr;
 }
 
 static void *
-create_pointer(int fd)
+create_pointer_size(int fd, uint64_t size)
 {
 	uint32_t handle;
 	void *ptr;
 
-	handle = gem_create(fd, OBJECT_SIZE);
+	handle = gem_create(fd, size);
 
-	ptr = mmap_bo(fd, handle);
+	ptr = mmap_bo(fd, handle, size);
 
 	gem_close(fd, handle);
 
 	return ptr;
 }
 
+static void *
+create_pointer(int fd)
+{
+	return create_pointer_size(fd, OBJECT_SIZE);
+}
+
 static void
 test_access(int fd)
 {
@@ -468,7 +474,7 @@ test_write_gtt(int fd)
 	dst = gem_create(fd, OBJECT_SIZE);
 
 	/* prefault object into gtt */
-	dst_gtt = mmap_bo(fd, dst);
+	dst_gtt = mmap_bo(fd, dst, OBJECT_SIZE);
 	set_domain_gtt(fd, dst);
 	memset(dst_gtt, 0, OBJECT_SIZE);
 	munmap(dst_gtt, OBJECT_SIZE);
@@ -958,10 +964,16 @@ thread_fault_concurrent(void *closure)
 	int n;
 
 	for (n = 0; n < 32; n++) {
+		unsigned int id = (n + t->id) % 32;
+		uint32_t sz = *t->ptr[id] - 1;
+		int idx = rand() % sz + 1;
+
 		if (n & 1)
-			*t->ptr[(n + t->id) % 32] = val;
+			t->ptr[id][idx] = val;
 		else
-			val = *t->ptr[(n + t->id) % 32];
+			val = t->ptr[id][idx];
+
+		val++;
 	}
 
 	return NULL;
@@ -975,7 +987,10 @@ test_fault_concurrent(int fd)
 	int n;
 
 	for (n = 0; n < 32; n++) {
-		ptr[n] = create_pointer(fd);
+		uint32_t sz = (n + 1) << 19; /* 512KiB increments */
+
+		ptr[n] = create_pointer_size(fd, sz);
+		*ptr[n] = sz / sizeof(uint32_t); /* num_elems for convenience */
 	}
 
 	for (n = 0; n < 64; n++) {
@@ -984,12 +999,13 @@ test_fault_concurrent(int fd)
 		pthread_create(&thread[n].thread, NULL, thread_fault_concurrent, &thread[n]);
 	}
 
+	sleep(2);
+
 	for (n = 0; n < 64; n++)
 		pthread_join(thread[n].thread, NULL);
 
-	for (n = 0; n < 32; n++) {
-		munmap(ptr[n], OBJECT_SIZE);
-	}
+	for (n = 0; n < 32; n++)
+		munmap(ptr[n], *ptr[n] * sizeof(uint32_t));
 }
 
 static void
-- 
2.23.0