Commit a9fe9ca4 authored by Chris Wilson

drm/i915/gtt: Rename i915_vm_is_48b to i915_vm_is_4lvl

Large ppGTTs are differentiated by the requirement to go to four levels
to address more than 32b. Given the introduction of more 4-level ppGTTs
with different numbers of addressable bits, rename i915_vm_is_48bit() to
better reflect the commonality of using 4 levels.
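
For reference, the renamed predicate (shown in the i915_gem_gtt.h hunk
below) only checks whether the address space needs more than 32 address
bits. A minimal standalone sketch of that check, using a stand-in struct
in place of the real i915_address_space:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the vm->total field of i915_address_space. */
struct vm_sketch {
	uint64_t total;	/* size of the address space in bytes */
};

/* Mirrors the check in i915_vm_is_4lvl(): any VM whose size needs more
 * than 32 address bits must use the four-level (PML4) layout. */
static inline bool vm_is_4lvl(const struct vm_sketch *vm)
{
	return (vm->total - 1) >> 32;
}

int main(void)
{
	/* Illustrative sizes: a 4 GiB (32b) ppGTT stays on the three-level
	 * path, a full 48b ppGTT takes the four-level path. */
	struct vm_sketch legacy = { .total = 1ull << 32 };
	struct vm_sketch full48 = { .total = 1ull << 48 };

	assert(!vm_is_4lvl(&legacy));
	assert(vm_is_4lvl(&full48));
	return 0;
}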

Based on a patch by Bob Paauwe.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Bob Paauwe <bob.j.paauwe@intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190314223839.28258-4-chris@chris-wilson.co.uk
parent 51d623b6
@@ -1101,9 +1101,9 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
 	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
 	int i;
 
-	if (i915_vm_is_48bit(&i915_ppgtt->vm))
+	if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
 		px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
-	else {
+	} else {
 		for (i = 0; i < GEN8_3LVL_PDPES; i++)
 			px_dma(i915_ppgtt->pdp.page_directory[i]) =
 						s->i915_context_pdps[i];
@@ -1154,7 +1154,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
 	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
 	int i;
 
-	if (i915_vm_is_48bit(&i915_ppgtt->vm))
+	if (i915_vm_is_4lvl(&i915_ppgtt->vm))
 		s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
 	else {
 		for (i = 0; i < GEN8_3LVL_PDPES; i++)
@@ -321,7 +321,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
 	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
 
 	address_mode = INTEL_LEGACY_32B_CONTEXT;
-	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
+	if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
 		address_mode = INTEL_LEGACY_64B_CONTEXT;
 	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
@@ -584,7 +584,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 	 * for all.
 	 */
 	size = I915_GTT_PAGE_SIZE_4K;
-	if (i915_vm_is_48bit(vm) &&
+	if (i915_vm_is_4lvl(vm) &&
 	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
 		size = I915_GTT_PAGE_SIZE_64K;
 		gfp |= __GFP_NOWARN;
@@ -727,18 +727,13 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
 	pdp->page_directory = NULL;
 }
 
-static inline bool use_4lvl(const struct i915_address_space *vm)
-{
-	return i915_vm_is_48bit(vm);
-}
-
 static struct i915_page_directory_pointer *
 alloc_pdp(struct i915_address_space *vm)
 {
 	struct i915_page_directory_pointer *pdp;
 	int ret = -ENOMEM;
 
-	GEM_BUG_ON(!use_4lvl(vm));
+	GEM_BUG_ON(!i915_vm_is_4lvl(vm));
 
 	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
 	if (!pdp)
@@ -767,7 +762,7 @@ static void free_pdp(struct i915_address_space *vm,
 {
 	__pdp_fini(pdp);
 
-	if (!use_4lvl(vm))
+	if (!i915_vm_is_4lvl(vm))
 		return;
 
 	cleanup_px(vm, pdp);
@@ -871,7 +866,7 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
 	gen8_ppgtt_pdpe_t *vaddr;
 
 	pdp->page_directory[pdpe] = pd;
-	if (!use_4lvl(vm))
+	if (!i915_vm_is_4lvl(vm))
 		return;
 
 	vaddr = kmap_atomic_px(pdp);
@@ -936,7 +931,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
 	struct i915_page_directory_pointer *pdp;
 	unsigned int pml4e;
 
-	GEM_BUG_ON(!use_4lvl(vm));
+	GEM_BUG_ON(!i915_vm_is_4lvl(vm));
 
 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 		GEM_BUG_ON(pdp == vm->scratch_pdp);
@@ -1247,7 +1242,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 			goto free_pt;
 	}
 
-	if (use_4lvl(vm)) {
+	if (i915_vm_is_4lvl(vm)) {
 		vm->scratch_pdp = alloc_pdp(vm);
 		if (IS_ERR(vm->scratch_pdp)) {
 			ret = PTR_ERR(vm->scratch_pdp);
@@ -1257,7 +1252,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 
 	gen8_initialize_pt(vm, vm->scratch_pt);
 	gen8_initialize_pd(vm, vm->scratch_pd);
-	if (use_4lvl(vm))
+	if (i915_vm_is_4lvl(vm))
 		gen8_initialize_pdp(vm, vm->scratch_pdp);
 
 	return 0;
@@ -1279,7 +1274,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 	enum vgt_g2v_type msg;
 	int i;
 
-	if (use_4lvl(vm)) {
+	if (i915_vm_is_4lvl(vm)) {
 		const u64 daddr = px_dma(&ppgtt->pml4);
 
 		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@@ -1309,7 +1304,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
 	if (!vm->scratch_page.daddr)
 		return;
 
-	if (use_4lvl(vm))
+	if (i915_vm_is_4lvl(vm))
 		free_pdp(vm, vm->scratch_pdp);
 	free_pd(vm, vm->scratch_pd);
 	free_pt(vm, vm->scratch_pt);
@@ -1355,7 +1350,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	if (intel_vgpu_active(dev_priv))
 		gen8_ppgtt_notify_vgt(ppgtt, false);
 
-	if (use_4lvl(vm))
+	if (i915_vm_is_4lvl(vm))
 		gen8_ppgtt_cleanup_4lvl(ppgtt);
 	else
 		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
@@ -1555,7 +1550,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	if (err)
 		goto err_free;
 
-	if (use_4lvl(&ppgtt->vm)) {
+	if (i915_vm_is_4lvl(&ppgtt->vm)) {
 		err = setup_px(&ppgtt->vm, &ppgtt->pml4);
 		if (err)
 			goto err_scratch;
@@ -348,7 +348,7 @@ struct i915_address_space {
 #define i915_is_ggtt(vm) ((vm)->is_ggtt)
 
 static inline bool
-i915_vm_is_48bit(const struct i915_address_space *vm)
+i915_vm_is_4lvl(const struct i915_address_space *vm)
 {
 	return (vm->total - 1) >> 32;
 }
@@ -488,7 +488,7 @@ static inline u32 gen6_pde_index(u32 addr)
 static inline unsigned int
 i915_pdpes_per_pdp(const struct i915_address_space *vm)
 {
-	if (i915_vm_is_48bit(vm))
+	if (i915_vm_is_4lvl(vm))
 		return GEN8_PML4ES_PER_PML4;
 
 	return GEN8_3LVL_PDPES;
@@ -1499,7 +1499,7 @@ static int execlists_request_alloc(struct i915_request *request)
 	 */
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
+	if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
 		ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
 	else
 		ret = emit_pdps(request);
@@ -2719,7 +2719,7 @@ static void execlists_init_reg_state(u32 *regs,
 	CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
 	CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
 
-	if (i915_vm_is_48bit(&ppgtt->vm)) {
+	if (i915_vm_is_4lvl(&ppgtt->vm)) {
 		/* 64b PPGTT (48bit canonical)
 		 * PDP0_DESCRIPTOR contains the base address to PML4 and
 		 * other PDP Descriptors are ignored.
@@ -1449,7 +1449,7 @@ static int igt_ppgtt_pin_update(void *arg)
 	 * huge-gtt-pages.
 	 */
 
-	if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) {
+	if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
 		pr_info("48b PPGTT not supported, skipping\n");
 		return 0;
 	}
@@ -1719,7 +1719,7 @@ int i915_gem_huge_page_mock_selftests(void)
 		goto out_unlock;
 	}
 
-	if (!i915_vm_is_48bit(&ppgtt->vm)) {
+	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
 		pr_err("failed to create 48b PPGTT\n");
 		err = -EINVAL;
 		goto out_close;