Commit 6b777607 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: expand pte flags to uint64_t

Necessary for new asics.
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 7ccf5aa8
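Why the widening matters: on newer ASICs some PTE/PDE fields sit above bit 31, so any uint32_t parameter in the flags call chain silently drops them. A minimal standalone sketch of that failure mode, using a made-up flag value rather than the driver's real AMDGPU_PTE_* definitions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag occupying a bit above bit 31, the situation the
 * commit is preparing for; not an actual AMDGPU_PTE_* value. */
#define FAKE_PTE_HIGH_BIT (1ULL << 57)
#define FAKE_PTE_VALID    (1ULL << 0)

/* Old-style callback: flags narrowed to 32 bits on the way in. */
static void set_pte_u32(uint32_t flags)
{
	printf("u32 path: 0x%08" PRIx32 " (high bits lost)\n", flags);
}

/* New-style callback: the full 64-bit flag set survives. */
static void set_pte_u64(uint64_t flags)
{
	printf("u64 path: 0x%016" PRIx64 "\n", flags);
}

int main(void)
{
	uint64_t flags = FAKE_PTE_VALID | FAKE_PTE_HIGH_BIT;

	set_pte_u32(flags);	/* implicit truncation: bit 57 disappears */
	set_pte_u64(flags);	/* whole value reaches the hardware path */
	return 0;
}

Because every producer and consumer of the flags is converted in the same patch, no mixed-width call site survives to truncate the value.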
@@ -280,7 +280,7 @@ struct amdgpu_vm_pte_funcs {
 	void (*set_pte_pde)(struct amdgpu_ib *ib,
 			    uint64_t pe,
 			    uint64_t addr, unsigned count,
-			    uint32_t incr, uint32_t flags);
+			    uint32_t incr, uint64_t flags);
 };
 
 /* provided by the gmc block */
@@ -293,7 +293,7 @@ struct amdgpu_gart_funcs {
 			   void *cpu_pt_addr, /* cpu addr of page table */
 			   uint32_t gpu_page_idx, /* pte/pde to update */
 			   uint64_t addr, /* addr to write into pte/pde */
-			   uint32_t flags); /* access flags */
+			   uint64_t flags); /* access flags */
 	/* enable/disable PRT support */
 	void (*set_prt)(struct amdgpu_device *adev, bool enable);
 };
@@ -539,7 +539,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages);
 int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
-		     dma_addr_t *dma_addr, uint32_t flags);
+		     dma_addr_t *dma_addr, uint64_t flags);
 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
 
 /*
@@ -1746,7 +1746,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 				       int *last_invalidated);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem);
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
 void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
...
@@ -229,7 +229,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	unsigned p;
 	int i, j;
 	u64 page_base;
-	uint32_t flags = AMDGPU_PTE_SYSTEM;
+	uint64_t flags = AMDGPU_PTE_SYSTEM;
 
 	if (!adev->gart.ready) {
 		WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -271,7 +271,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
  */
 int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
-		     uint32_t flags)
+		     uint64_t flags)
 {
 	unsigned t;
 	unsigned p;
...
@@ -746,7 +746,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-	uint32_t flags;
+	uint64_t flags;
 	int r;
 
 	if (!ttm || amdgpu_ttm_is_bound(ttm))
@@ -1027,10 +1027,10 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 }
 
-uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem)
 {
-	uint32_t flags = 0;
+	uint64_t flags = 0;
 
 	if (mem && mem->mem_type != TTM_PL_SYSTEM)
 		flags |= AMDGPU_PTE_VALID;
...
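For orientation: amdgpu_ttm_tt_pte_flags() builds the flag word by ORing access bits together, as the hunk above shows for AMDGPU_PTE_VALID, and with the widened return type any bit above 31 can now flow out of it intact. A simplified standalone sketch of that accumulation pattern; the constants and the readonly/placement checks below are stand-ins, not the driver's actual values or logic:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in flag values; the real ones live in amdgpu.h. */
#define PTE_VALID     (1ULL << 0)
#define PTE_SYSTEM    (1ULL << 1)
#define PTE_READABLE  (1ULL << 5)
#define PTE_WRITEABLE (1ULL << 6)

/* Mirrors the shape of amdgpu_ttm_tt_pte_flags(): start from zero and
 * OR in bits based on where the memory lives and how it may be used. */
static uint64_t pte_flags(bool backed_by_real_memory, bool readonly)
{
	uint64_t flags = 0;	/* 64-bit accumulator, as in the patch */

	if (backed_by_real_memory)
		flags |= PTE_VALID;
	else
		flags |= PTE_SYSTEM;	/* system pages routed via GART */

	flags |= PTE_READABLE;
	if (!readonly)
		flags |= PTE_WRITEABLE;

	return flags;
}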
@@ -64,7 +64,7 @@ struct amdgpu_pte_update_params {
 	/* Function which actually does the update */
 	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
 		     uint64_t addr, unsigned count, uint32_t incr,
-		     uint32_t flags);
+		     uint64_t flags);
 	/* indicate update pt or its shadow */
 	bool shadow;
 };
@@ -496,7 +496,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
 				  uint64_t pe, uint64_t addr,
 				  unsigned count, uint32_t incr,
-				  uint32_t flags)
+				  uint64_t flags)
 {
 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
 
@@ -525,7 +525,7 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
 				   uint64_t pe, uint64_t addr,
 				   unsigned count, uint32_t incr,
-				   uint32_t flags)
+				   uint64_t flags)
 {
 	uint64_t src = (params->src + (addr >> 12) * 8);
 
@@ -718,7 +718,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 				  struct amdgpu_vm *vm,
 				  uint64_t start, uint64_t end,
-				  uint64_t dst, uint32_t flags)
+				  uint64_t dst, uint64_t flags)
 {
 	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 
@@ -808,7 +808,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
 				struct amdgpu_vm *vm,
 				uint64_t start, uint64_t end,
-				uint64_t dst, uint32_t flags)
+				uint64_t dst, uint64_t flags)
 {
 	/**
 	 * The MC L1 TLB supports variable sized pages, based on a fragment
@@ -885,7 +885,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 				       dma_addr_t *pages_addr,
 				       struct amdgpu_vm *vm,
 				       uint64_t start, uint64_t last,
-				       uint32_t flags, uint64_t addr,
+				       uint64_t flags, uint64_t addr,
 				       struct dma_fence **fence)
 {
 	struct amdgpu_ring *ring;
@@ -1023,11 +1023,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				      struct dma_fence *exclusive,
-				      uint32_t gtt_flags,
+				      uint64_t gtt_flags,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
-				      uint32_t flags,
+				      uint64_t flags,
 				      struct drm_mm_node *nodes,
 				      struct dma_fence **fence)
 {
@@ -1114,7 +1114,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	struct amdgpu_vm *vm = bo_va->vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	dma_addr_t *pages_addr = NULL;
-	uint32_t gtt_flags, flags;
+	uint64_t gtt_flags, flags;
 	struct ttm_mem_reg *mem;
 	struct drm_mm_node *nodes;
 	struct dma_fence *exclusive;
...
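The amdgpu_vm.c changes above all serve one chain: the flags travel through struct amdgpu_pte_update_params into whichever func callback is selected (direct PTE writes or a DMA copy), so every link must be 64-bit wide or the widening at either end is defeated. A hedged standalone sketch of that dispatch shape; the names and values are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct pte_update_params;

/* Matches the widened callback shape in struct amdgpu_pte_update_params. */
typedef void (*pte_func)(struct pte_update_params *p, uint64_t pe,
			 uint64_t addr, unsigned count, uint32_t incr,
			 uint64_t flags);

struct pte_update_params {
	pte_func func;	/* the updater actually used for this submission */
};

static void do_set_ptes(struct pte_update_params *p, uint64_t pe,
			uint64_t addr, unsigned count, uint32_t incr,
			uint64_t flags)
{
	(void)p; (void)addr; (void)incr;
	printf("set %u PTEs at 0x%llx, flags 0x%llx\n", count,
	       (unsigned long long)pe, (unsigned long long)flags);
}

int main(void)
{
	struct pte_update_params params = { .func = do_set_ptes };

	/* Any narrowing in this call chain would drop flag bits >= 32. */
	params.func(&params, 0x100000, 0x0, 16, 8, (1ULL << 57) | 1);
	return 0;
}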
@@ -749,7 +749,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 				    uint64_t addr, unsigned count,
-				    uint32_t incr, uint32_t flags)
+				    uint32_t incr, uint64_t flags)
 {
 	/* for physically contiguous pages (vram) */
 	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
...
@@ -367,7 +367,7 @@ static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
 				     void *cpu_pt_addr,
 				     uint32_t gpu_page_idx,
 				     uint64_t addr,
-				     uint32_t flags)
+				     uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
...
@@ -439,7 +439,7 @@ static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
 				     void *cpu_pt_addr,
 				     uint32_t gpu_page_idx,
 				     uint64_t addr,
-				     uint32_t flags)
+				     uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
...
@@ -531,7 +531,7 @@ static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
 				     void *cpu_pt_addr,
 				     uint32_t gpu_page_idx,
 				     uint64_t addr,
-				     uint32_t flags)
+				     uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
...
@@ -798,7 +798,7 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 				     uint64_t addr, unsigned count,
-				     uint32_t incr, uint32_t flags)
+				     uint32_t incr, uint64_t flags)
 {
 	/* for physically contiguous pages (vram) */
 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
...
@@ -1007,7 +1007,7 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
  */
 static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 				     uint64_t addr, unsigned count,
-				     uint32_t incr, uint32_t flags)
+				     uint32_t incr, uint64_t flags)
 {
 	/* for physically contiguous pages (vram) */
 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
...
@@ -398,7 +398,7 @@ static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
 static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
 				  uint64_t pe,
 				  uint64_t addr, unsigned count,
-				  uint32_t incr, uint32_t flags)
+				  uint32_t incr, uint64_t flags)
 {
 	uint64_t value;
 	unsigned ndw;
...
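One practical consequence for the SDMA/si_dma implementations: the command stream is built from 32-bit dwords, so a 64-bit flags value must be split into low and high halves when it is emitted. The exact packet layouts differ per engine and are not shown in the hunks above; the sketch below only illustrates the lower/upper split, with the helper macros modeled on the kernel's lower_32_bits()/upper_32_bits() and a toy buffer standing in for struct amdgpu_ib:

#include <stdint.h>

#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffu))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

/* Toy indirect buffer; the real one is struct amdgpu_ib with a
 * length_dw cursor into a dword array. */
struct toy_ib {
	uint32_t ptr[64];
	unsigned length_dw;
};

/* Illustrative only: carry a 64-bit destination and 64-bit flags as
 * pairs of dwords, the way a 32-bit command stream must. */
static void emit_pte_pde(struct toy_ib *ib, uint64_t pe, uint64_t flags)
{
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);	/* dst addr low  */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);	/* dst addr high */
	ib->ptr[ib->length_dw++] = lower_32_bits(flags);/* flags low     */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);/* flags high    */
}

With uint32_t flags, the high dword in a scheme like this could only ever be zero; widening the parameter is what lets the upper flag bits reach the hardware at all.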