Now that we do randomised testing for all the relevant backends, we still need to sanity check the obvious cases that might blow up, so that introducing a temporary regression is less likely. Rather than doing this for every backend, limit it to our two memory types: system and local.
Suggested-by: Chris Wilson <ch...@chris-wilson.co.uk> Signed-off-by: Matthew Auld <matthew.a...@intel.com> Cc: Chris Wilson <ch...@chris-wilson.co.uk> --- .../gpu/drm/i915/gem/selftests/huge_pages.c | 103 ++++++++++++++++++ 1 file changed, 103 insertions(+) diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 1d7c2a50d636..fee8a6c338b8 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1505,6 +1505,108 @@ static int igt_ppgtt_lmem_huge(void *arg) return err; } +static struct drm_i915_gem_object * +igt_create_local(struct drm_i915_private *i915, u32 size) +{ + return i915_gem_object_create_lmem(i915, size, I915_ALLOC_CONTIGUOUS); +} + +static struct drm_i915_gem_object * +igt_create_system(struct drm_i915_private *i915, u32 size) +{ + return huge_pages_object(i915, size, size); +} + +typedef struct drm_i915_gem_object * +(*igt_create_fn)(struct drm_i915_private *i915, u32 size); + +static int igt_ppgtt_sanity_check(void *arg) +{ + struct i915_gem_context *ctx = arg; + struct drm_i915_private *i915 = ctx->i915; + unsigned int supported = INTEL_INFO(i915)->page_sizes; + struct { + u32 size; + u32 pages; + } combos[] = { + { SZ_64K, SZ_64K, }, + { SZ_2M, SZ_2M, }, + { SZ_2M, SZ_64K, }, + { SZ_2M + SZ_4K, SZ_64K | SZ_4K, }, + { SZ_2M + SZ_4K, SZ_2M | SZ_4K, }, + { SZ_2M + SZ_64K, SZ_2M | SZ_64K, }, + }; + igt_create_fn fns[] = { + igt_create_local, + igt_create_system, + }; + int i, j; + int err; + + if (supported == I915_GTT_PAGE_SIZE_4K) + return 0; + + /* + * Sanity check that the HW behaves with a limited set of combinations. + * We already have a bunch of randomised testing, which should give us + * a decent amount of variation between runs, however we should keep + * this to limit the chances of introducing a temporary regression, by + * testing the most obvious cases that might make something blow up. 
+ */ + + for (i = 0; i < ARRAY_SIZE(fns); ++i) { + for (j = 0; j < ARRAY_SIZE(combos); ++j) { + struct drm_i915_gem_object *obj; + u32 size = combos[j].size; + u32 pages = combos[j].pages; + + obj = fns[i](i915, size); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + if (err == -ENODEV) { + pr_info("Device lacks local memory, skipping\n"); + err = 0; + break; + } + + return err; + } + + err = i915_gem_object_pin_pages(obj); + if (err) { + i915_gem_object_put(obj); + goto out; + } + + GEM_BUG_ON(pages > obj->base.size); + pages = pages & supported; + + if (pages) + obj->mm.page_sizes.sg = pages; + + err = igt_write_huge(ctx, obj); + + i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + i915_gem_object_put(obj); + + if (err) { + pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n", + __func__, size, pages, i, j); + goto out; + } + } + + cond_resched(); + } + +out: + if (err == -ENOMEM) + err = 0; + + return err; +} + static int igt_ppgtt_pin_update(void *arg) { struct i915_gem_context *ctx = arg; @@ -1867,6 +1969,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_ppgtt_gemfs_huge), SUBTEST(igt_ppgtt_internal_huge), SUBTEST(igt_ppgtt_lmem_huge), + SUBTEST(igt_ppgtt_sanity_check), }; struct drm_file *file; struct i915_gem_context *ctx; -- 2.20.1 _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx