Commit 23741bc8 authored by Matthew Auld, committed by Chris Wilson

drm/i915/selftests: extend coverage to include LMEM huge-pages

Add LMEM objects to the list of backends we test for huge-GTT-pages.
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191025153728.23689-5-chris@chris-wilson.co.uk
parent 340be48f
@@ -9,6 +9,7 @@
 #include "i915_selftest.h"
 
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_pm.h"
 
 #include "gt/intel_gt.h"
@@ -983,7 +984,8 @@ static int gpu_write(struct intel_context *ce,
 			       vma->size >> PAGE_SHIFT, val);
 }
 
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int
+__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 {
 	unsigned int needs_flush;
 	unsigned long n;
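
The rename clears the way for a second backend: __cpu_check_shmem() keeps the existing kmap-based read-back, which is only valid for objects backed by struct pages. As a hedged sketch (read_dword_stub is illustrative and not part of the patch; kmap_atomic() and drm_clflush_virt_range() are real kernel helpers), the per-page check reduces to:

	static u32 read_dword_stub(struct page *page, unsigned int dword,
				   bool needs_flush)
	{
		u32 *vaddr = kmap_atomic(page);	/* short-lived CPU mapping */
		u32 val;

		/* flush stale cachelines if the read domain requires it */
		if (needs_flush)
			drm_clflush_virt_range(vaddr, PAGE_SIZE);

		val = vaddr[dword];
		kunmap_atomic(vaddr);

		return val;
	}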
@@ -1015,6 +1017,51 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	return err;
 }
 
+static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+	unsigned long n;
+	int err;
+
+	i915_gem_object_lock(obj);
+	err = i915_gem_object_set_to_wc_domain(obj, false);
+	i915_gem_object_unlock(obj);
+	if (err)
+		return err;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		return err;
+
+	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+		u32 __iomem *base;
+		u32 read_val;
+
+		base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+		read_val = ioread32(base + dword);
+		io_mapping_unmap_atomic(base);
+		if (read_val != val) {
+			pr_err("n=%lu base[%u]=%u, val=%u\n",
+			       n, dword, read_val, val);
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	i915_gem_object_unpin_pages(obj);
+	return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+	if (i915_gem_object_has_struct_page(obj))
+		return __cpu_check_shmem(obj, dword, val);
+	else if (i915_gem_object_is_lmem(obj))
+		return __cpu_check_lmem(obj, dword, val);
+
+	return -ENODEV;
+}
+
 static int __igt_write_huge(struct intel_context *ce,
 			    struct drm_i915_gem_object *obj,
 			    u64 size, u64 offset,
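
cpu_check() now dispatches on the backing store: shmem objects have struct pages and can be kmapped, whereas LMEM objects sit behind the device aperture and must be read through an atomic io-mapping with ioread32() after being moved to the WC domain. A hedged usage sketch (write_then_check() is illustrative; gpu_write() and cpu_check() are the functions from this file):

	static int write_then_check(struct intel_context *ce,
				    struct i915_vma *vma,
				    struct drm_i915_gem_object *obj,
				    u32 dword, u32 val)
	{
		int err;

		/* stamp 'val' into slot 'dword' of every page from the GPU */
		err = gpu_write(ce, vma, dword, val);
		if (err)
			return err;

		/* read it back from the CPU, whatever the backing store */
		return cpu_check(obj, dword, val);
	}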
@@ -1399,6 +1446,79 @@ static int igt_ppgtt_gemfs_huge(void *arg)
 	return err;
 }
 
+static int igt_ppgtt_lmem_huge(void *arg)
+{
+	struct i915_gem_context *ctx = arg;
+	struct drm_i915_private *i915 = ctx->i915;
+	struct drm_i915_gem_object *obj;
+	static const unsigned int sizes[] = {
+		SZ_64K,
+		SZ_512K,
+		SZ_1M,
+		SZ_2M,
+	};
+	int i;
+	int err;
+
+	if (!HAS_LMEM(i915)) {
+		pr_info("device lacks LMEM support, skipping\n");
+		return 0;
+	}
+
+	/*
+	 * Sanity check that the HW uses huge pages correctly through LMEM
+	 * -- ensure that our writes land in the right place.
+	 */
+
+	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+		unsigned int size = sizes[i];
+
+		obj = i915_gem_object_create_lmem(i915, size,
+						  I915_BO_ALLOC_CONTIGUOUS);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			if (err == -E2BIG) {
+				pr_info("object too big for region!\n");
+				return 0;
+			}
+
+			return err;
+		}
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			goto out_put;
+
+		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+			pr_info("LMEM unable to allocate huge-page(s) with size=%u\n",
+				size);
+			goto out_unpin;
+		}
+
+		err = igt_write_huge(ctx, obj);
+		if (err) {
+			pr_err("LMEM write-huge failed with size=%u\n", size);
+			goto out_unpin;
+		}
+
+		i915_gem_object_unpin_pages(obj);
+		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+		i915_gem_object_put(obj);
+	}
+
+	return 0;
+
+out_unpin:
+	i915_gem_object_unpin_pages(obj);
+out_put:
+	i915_gem_object_put(obj);
+
+	if (err == -ENOMEM)
+		err = 0;
+
+	return err;
+}
+
 static int igt_ppgtt_pin_update(void *arg)
 {
 	struct i915_gem_context *ctx = arg;
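
Two design choices worth noting: I915_BO_ALLOC_CONTIGUOUS requests physically contiguous LMEM so the allocation can actually be fitted with 64K (or larger) GTT pages, and -E2BIG/-ENOMEM are downgraded to skips so parts with little LMEM do not fail the run. A hedged helper (largest_fitting_page_size() is illustrative, not from the patch) showing the property the obj->mm.page_sizes.phys check guards:

	/* pick the largest supported GTT page size that tiles the object */
	static u32 largest_fitting_page_size(u32 supported, u64 size)
	{
		if ((supported & I915_GTT_PAGE_SIZE_2M) && IS_ALIGNED(size, SZ_2M))
			return I915_GTT_PAGE_SIZE_2M;
		if ((supported & I915_GTT_PAGE_SIZE_64K) && IS_ALIGNED(size, SZ_64K))
			return I915_GTT_PAGE_SIZE_64K;
		return I915_GTT_PAGE_SIZE_4K;
	}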
@@ -1760,6 +1880,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_ppgtt_exhaust_huge),
 		SUBTEST(igt_ppgtt_gemfs_huge),
 		SUBTEST(igt_ppgtt_internal_huge),
+		SUBTEST(igt_ppgtt_lmem_huge),
 	};
 	struct drm_file *file;
 	struct i915_gem_context *ctx;
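
The SUBTEST() entry registers igt_ppgtt_lmem_huge alongside the other huge-page live tests, and the selftest harness walks this table against a live GPU. A hedged stub of the dispatch shape (subtest_stub and run_subtests_stub are illustrative; the real machinery is the i915_subtests() infrastructure in i915_selftest.h):

	struct subtest_stub {
		const char *name;
		int (*func)(void *data);
	};

	static int run_subtests_stub(const struct subtest_stub *st,
				     unsigned int count, void *data)
	{
		unsigned int i;

		for (i = 0; i < count; i++) {
			int err = st[i].func(data);	/* run one subtest */

			if (err) {
				pr_err("%s failed: %d\n", st[i].name, err);
				return err;	/* abort on first failure */
			}
		}

		return 0;
	}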