Commit b5b7bef9 authored by Mika Kuoppala

drm/i915/gtt: Use a common type for page directories

All page directories are identical in function; only their position in the
hierarchy differs. Use the same base type for directory functionality.

v2: cleanup, size always 512, init to null

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190614164350.30415-2-mika.kuoppala@linux.intel.com
parent 7d82cc35
...@@ -1027,7 +1027,7 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) ...@@ -1027,7 +1027,7 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
if (i915_vm_is_4lvl(vm)) { if (i915_vm_is_4lvl(vm)) {
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4); const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
cs = intel_ring_begin(rq, 6); cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs)) if (IS_ERR(cs))
......
...@@ -55,7 +55,7 @@ ...@@ -55,7 +55,7 @@
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \ #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
u32 *reg_state__ = (reg_state); \ u32 *reg_state__ = (reg_state); \
const u64 addr__ = px_dma(&ppgtt->pml4); \ const u64 addr__ = px_dma(ppgtt->pd); \
(reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \ (reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \
(reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \ (reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \
} while (0) } while (0)
......
...@@ -1484,7 +1484,7 @@ static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt) ...@@ -1484,7 +1484,7 @@ static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
*cs++ = MI_LOAD_REGISTER_IMM(1); *cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = ppgtt->pd.base.ggtt_offset << 10; *cs++ = ppgtt->pd->base.ggtt_offset << 10;
intel_ring_advance(rq, cs); intel_ring_advance(rq, cs);
......
...@@ -375,11 +375,13 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, ...@@ -375,11 +375,13 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
return -EINVAL; return -EINVAL;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0]; px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
} else { } else {
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) { for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
px_dma(ppgtt->pdp.page_directory[i]) = struct i915_page_directory * const pd =
mm->ppgtt_mm.shadow_pdps[i]; i915_pd_entry(ppgtt->pd, i);
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
} }
} }
...@@ -1107,11 +1109,14 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s, ...@@ -1107,11 +1109,14 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
int i; int i;
if (i915_vm_is_4lvl(&ppgtt->vm)) { if (i915_vm_is_4lvl(&ppgtt->vm)) {
px_dma(&ppgtt->pml4) = s->i915_context_pml4; px_dma(ppgtt->pd) = s->i915_context_pml4;
} else { } else {
for (i = 0; i < GEN8_3LVL_PDPES; i++) for (i = 0; i < GEN8_3LVL_PDPES; i++) {
px_dma(ppgtt->pdp.page_directory[i]) = struct i915_page_directory * const pd =
s->i915_context_pdps[i]; i915_pd_entry(ppgtt->pd, i);
px_dma(pd) = s->i915_context_pdps[i];
}
} }
} }
...@@ -1165,11 +1170,14 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s, ...@@ -1165,11 +1170,14 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
int i; int i;
if (i915_vm_is_4lvl(&ppgtt->vm)) { if (i915_vm_is_4lvl(&ppgtt->vm)) {
s->i915_context_pml4 = px_dma(&ppgtt->pml4); s->i915_context_pml4 = px_dma(ppgtt->pd);
} else { } else {
for (i = 0; i < GEN8_3LVL_PDPES; i++) for (i = 0; i < GEN8_3LVL_PDPES; i++) {
s->i915_context_pdps[i] = struct i915_page_directory * const pd =
px_dma(ppgtt->pdp.page_directory[i]); i915_pd_entry(ppgtt->pd, i);
s->i915_context_pdps[i] = px_dma(pd);
}
} }
} }
......
This diff is collapsed.
...@@ -248,28 +248,14 @@ struct i915_page_dma { ...@@ -248,28 +248,14 @@ struct i915_page_dma {
struct i915_page_table { struct i915_page_table {
struct i915_page_dma base; struct i915_page_dma base;
atomic_t used_ptes; atomic_t used;
}; };
struct i915_page_directory { struct i915_page_directory {
struct i915_page_dma base; struct i915_page_dma base;
atomic_t used;
struct i915_page_table *page_table[I915_PDES]; /* PDEs */
atomic_t used_pdes;
spinlock_t lock;
};
struct i915_page_directory_pointer {
struct i915_page_dma base;
struct i915_page_directory **page_directory;
atomic_t used_pdpes;
spinlock_t lock;
};
struct i915_pml4 {
struct i915_page_dma base;
struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
spinlock_t lock; spinlock_t lock;
void *entry[512];
}; };
struct i915_vma_ops { struct i915_vma_ops {
...@@ -321,7 +307,7 @@ struct i915_address_space { ...@@ -321,7 +307,7 @@ struct i915_address_space {
struct i915_page_dma scratch_page; struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt; struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd; struct i915_page_directory *scratch_pd;
struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */ struct i915_page_directory *scratch_pdp; /* GEN8+ & 48b PPGTT */
/** /**
* List of vma currently bound. * List of vma currently bound.
...@@ -428,11 +414,7 @@ struct i915_ppgtt { ...@@ -428,11 +414,7 @@ struct i915_ppgtt {
struct i915_address_space vm; struct i915_address_space vm;
intel_engine_mask_t pd_dirty_engines; intel_engine_mask_t pd_dirty_engines;
union { struct i915_page_directory *pd;
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};
}; };
struct gen6_ppgtt { struct gen6_ppgtt {
...@@ -466,7 +448,7 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) ...@@ -466,7 +448,7 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
#define gen6_for_each_pde(pt, pd, start, length, iter) \ #define gen6_for_each_pde(pt, pd, start, length, iter) \
for (iter = gen6_pde_index(start); \ for (iter = gen6_pde_index(start); \
length > 0 && iter < I915_PDES && \ length > 0 && iter < I915_PDES && \
(pt = (pd)->page_table[iter], true); \ (pt = i915_pt_entry(pd, iter), true); \
({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \ ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
temp = min(temp - start, length); \ temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter) start += temp, length -= temp; }), ++iter)
...@@ -474,7 +456,7 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) ...@@ -474,7 +456,7 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
#define gen6_for_all_pdes(pt, pd, iter) \ #define gen6_for_all_pdes(pt, pd, iter) \
for (iter = 0; \ for (iter = 0; \
iter < I915_PDES && \ iter < I915_PDES && \
(pt = (pd)->page_table[iter], true); \ (pt = i915_pt_entry(pd, iter), true); \
++iter) ++iter)
static inline u32 i915_pte_index(u64 address, unsigned int pde_shift) static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
...@@ -533,6 +515,27 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm) ...@@ -533,6 +515,27 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
return GEN8_3LVL_PDPES; return GEN8_3LVL_PDPES;
} }
/*
 * i915_pt_entry - fetch the page table at slot @n of page directory @pd.
 *
 * The directory stores its children as untyped pointers (void *entry[512]);
 * this helper casts the requested slot back to a page table.  No bounds or
 * NULL checking is performed; callers must pass a valid index.
 */
static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	struct i915_page_table * const pt = pd->entry[n];

	return pt;
}
/*
 * i915_pd_entry - fetch the page directory at slot @n of directory
 * pointer @pdp.
 *
 * With the unified directory type, a PDP is just another
 * i915_page_directory whose entries happen to be directories themselves.
 * No bounds or NULL checking is performed on @n.
 */
static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	struct i915_page_directory * const pd = pdp->entry[n];

	return pd;
}
/*
 * i915_pdp_entry - fetch the page directory pointer at slot @n of the
 * top-level (PML4) directory @pml4.
 *
 * Identical in mechanism to i915_pd_entry(); the separate name documents
 * which level of the 4-level hierarchy is being indexed.  No bounds or
 * NULL checking is performed on @n.
 */
static inline struct i915_page_directory *
i915_pdp_entry(const struct i915_page_directory * const pml4,
	       const unsigned short n)
{
	struct i915_page_directory * const pdp = pml4->entry[n];

	return pdp;
}
/* Equivalent to the gen6 version, For each pde iterates over every pde /* Equivalent to the gen6 version, For each pde iterates over every pde
* between from start until start + length. On gen8+ it simply iterates * between from start until start + length. On gen8+ it simply iterates
* over every page directory entry in a page directory. * over every page directory entry in a page directory.
...@@ -540,7 +543,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm) ...@@ -540,7 +543,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
#define gen8_for_each_pde(pt, pd, start, length, iter) \ #define gen8_for_each_pde(pt, pd, start, length, iter) \
for (iter = gen8_pde_index(start); \ for (iter = gen8_pde_index(start); \
length > 0 && iter < I915_PDES && \ length > 0 && iter < I915_PDES && \
(pt = (pd)->page_table[iter], true); \ (pt = i915_pt_entry(pd, iter), true); \
({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \ ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
temp = min(temp - start, length); \ temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter) start += temp, length -= temp; }), ++iter)
...@@ -548,7 +551,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm) ...@@ -548,7 +551,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \ #define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
for (iter = gen8_pdpe_index(start); \ for (iter = gen8_pdpe_index(start); \
length > 0 && iter < i915_pdpes_per_pdp(vm) && \ length > 0 && iter < i915_pdpes_per_pdp(vm) && \
(pd = (pdp)->page_directory[iter], true); \ (pd = i915_pd_entry(pdp, iter), true); \
({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \ ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
temp = min(temp - start, length); \ temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter) start += temp, length -= temp; }), ++iter)
...@@ -556,7 +559,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm) ...@@ -556,7 +559,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \ #define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
for (iter = gen8_pml4e_index(start); \ for (iter = gen8_pml4e_index(start); \
length > 0 && iter < GEN8_PML4ES_PER_PML4 && \ length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
(pdp = (pml4)->pdps[iter], true); \ (pdp = i915_pdp_entry(pml4, iter), true); \
({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \ ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
temp = min(temp - start, length); \ temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter) start += temp, length -= temp; }), ++iter)
...@@ -589,7 +592,10 @@ static inline u64 gen8_pte_count(u64 address, u64 length) ...@@ -589,7 +592,10 @@ static inline u64 gen8_pte_count(u64 address, u64 length)
static inline dma_addr_t static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{ {
return px_dma(ppgtt->pdp.page_directory[n]); struct i915_page_directory *pd;
pd = i915_pdp_entry(ppgtt->pd, n);
return px_dma(pd);
} }
static inline struct i915_ggtt * static inline struct i915_ggtt *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment