Commit 132f34e4 authored by Christian König, committed by Alex Deucher

drm/amdgpu: move struct gart_funcs into amdgpu_gmc.h

And rename it to struct gmc_funcs.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Samuel Li <Samuel.Li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 770d13b1
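
The substance of the change: every consumer of the old adev->gart.gart_funcs table now dispatches through adev->gmc.gmc_funcs, and the function-pointer struct lives in amdgpu_gmc.h under its new name. A minimal, self-contained C sketch of the indirection pattern before and after (reduced structs and a dummy backend for illustration only; these are not the full kernel definitions):

#include <stdint.h>

/* Illustrative, reduced versions of the structures touched by this commit;
 * the real kernel structs carry many more members. */
struct amdgpu_device;

struct amdgpu_gmc_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid);
};

struct amdgpu_gmc {
	const struct amdgpu_gmc_funcs *gmc_funcs; /* was adev->gart.gart_funcs */
};

struct amdgpu_device {
	struct amdgpu_gmc gmc;
};

/* Call sites keep using a wrapper macro; only its expansion moved:
 *   before: (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 *   after:  (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid)) */
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) \
	(adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))

/* Dummy per-ASIC backend standing in for e.g. gmc_v9_0_flush_gpu_tlb(). */
static void dummy_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
{
	(void)adev;
	(void)vmid;
}

static const struct amdgpu_gmc_funcs dummy_gmc_funcs = {
	.flush_gpu_tlb = dummy_flush_gpu_tlb,
};

int main(void)
{
	struct amdgpu_device adev = { .gmc = { .gmc_funcs = &dummy_gmc_funcs } };

	amdgpu_gmc_flush_gpu_tlb(&adev, 0); /* dispatches through gmc_funcs */
	return 0;
}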
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -333,28 +333,6 @@ struct amdgpu_vm_pte_funcs {
 			    uint32_t incr, uint64_t flags);
 };
 
-/* provided by the gmc block */
-struct amdgpu_gart_funcs {
-	/* flush the vm tlb via mmio */
-	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-			      uint32_t vmid);
-	/* write pte/pde updates using the cpu */
-	int (*set_pte_pde)(struct amdgpu_device *adev,
-			   void *cpu_pt_addr, /* cpu addr of page table */
-			   uint32_t gpu_page_idx, /* pte/pde to update */
-			   uint64_t addr, /* addr to write into pte/pde */
-			   uint64_t flags); /* access flags */
-	/* enable/disable PRT support */
-	void (*set_prt)(struct amdgpu_device *adev, bool enable);
-	/* set pte flags based per asic */
-	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
-				     uint32_t flags);
-	/* get the pde for a given mc addr */
-	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
-			   u64 *dst, u64 *flags);
-	uint32_t (*get_invalidate_req)(unsigned int vmid);
-};
-
 /* provided by the ih block */
 struct amdgpu_ih_funcs {
 	/* ring read/write ptr handling, called from interrupt context */
@@ -1797,13 +1775,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
 #define amdgpu_asic_flush_hdp(adev) (adev)->asic_funcs->flush_hdp((adev))
 #define amdgpu_asic_invalidate_hdp(adev) (adev)->asic_funcs->invalidate_hdp((adev))
-#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
-#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, level, dst, flags) (adev)->gart.gart_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1775,7 +1775,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->mman.buffer_funcs_ring = NULL;
 	adev->vm_manager.vm_pte_funcs = NULL;
 	adev->vm_manager.vm_pte_num_rings = 0;
-	adev->gart.gart_funcs = NULL;
+	adev->gmc.gmc_funcs = NULL;
 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -241,14 +241,14 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			continue;
 
 		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
-						t, page_base, flags);
+			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+					       t, page_base, flags);
 			page_base += AMDGPU_GPU_PAGE_SIZE;
 		}
 	}
 	mb();
 	amdgpu_asic_flush_hdp(adev);
-	amdgpu_gart_flush_gpu_tlb(adev, 0);
+	amdgpu_gmc_flush_gpu_tlb(adev, 0);
 	return 0;
 }
@@ -280,7 +280,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
 	for (i = 0; i < pages; i++) {
 		page_base = dma_addr[i];
 		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-			amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
+			amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
 			page_base += AMDGPU_GPU_PAGE_SIZE;
 		}
 	}
@@ -331,7 +331,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 	mb();
 	amdgpu_asic_flush_hdp(adev);
-	amdgpu_gart_flush_gpu_tlb(adev, 0);
+	amdgpu_gmc_flush_gpu_tlb(adev, 0);
 	return 0;
 }
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -31,7 +31,6 @@
  */
 struct amdgpu_device;
 struct amdgpu_bo;
-struct amdgpu_gart_funcs;
 
 #define AMDGPU_GPU_PAGE_SIZE 4096
 #define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
@@ -52,8 +51,6 @@ struct amdgpu_gart {
 
 	/* Asic default pte flags */
 	uint64_t			gart_pte_flags;
-
-	const struct amdgpu_gart_funcs *gart_funcs;
 };
 
 int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -634,7 +634,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		if (r)
 			goto error_backoff;
 
-		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
+		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
 		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
 				     args->offset_in_bo, args->map_size,
 				     va_flags);
@@ -654,7 +654,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		if (r)
 			goto error_backoff;
 
-		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
+		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
 		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
 					     args->offset_in_bo, args->map_size,
 					     va_flags);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -48,6 +48,27 @@ struct amdgpu_vmhub {
 /*
  * GPU MC structures, functions & helpers
  */
+struct amdgpu_gmc_funcs {
+	/* flush the vm tlb via mmio */
+	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
+			      uint32_t vmid);
+	/* write pte/pde updates using the cpu */
+	int (*set_pte_pde)(struct amdgpu_device *adev,
+			   void *cpu_pt_addr, /* cpu addr of page table */
+			   uint32_t gpu_page_idx, /* pte/pde to update */
+			   uint64_t addr, /* addr to write into pte/pde */
+			   uint64_t flags); /* access flags */
+	/* enable/disable PRT support */
+	void (*set_prt)(struct amdgpu_device *adev, bool enable);
+	/* set pte flags based per asic */
+	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
+				     uint32_t flags);
+	/* get the pde for a given mc addr */
+	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
+			   u64 *dst, u64 *flags);
+	uint32_t (*get_invalidate_req)(unsigned int vmid);
+};
+
 struct amdgpu_gmc {
 	resource_size_t		aper_size;
 	resource_size_t		aper_base;
@@ -79,6 +100,8 @@ struct amdgpu_gmc {
 	/* protects concurrent invalidation */
 	spinlock_t		invalidate_lock;
 	bool			translate_further;
+
+	const struct amdgpu_gmc_funcs	*gmc_funcs;
 };
 
 #endif
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -679,7 +679,7 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
 		value = params->pages_addr ?
 			amdgpu_vm_map_gart(params->pages_addr, addr) :
 			addr;
-		amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
-					i, value, flags);
+		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
+				       i, value, flags);
 		addr += incr;
 	}
@@ -738,7 +738,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
 	level += params->adev->vm_manager.root_level;
 	pt = amdgpu_bo_gpu_offset(bo);
 	flags = AMDGPU_PTE_VALID;
-	amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags);
+	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
 	if (shadow) {
 		pde = shadow_addr + (entry - parent->entries) * 8;
 		params->func(params, pde, pt, 1, 0, flags);
@@ -967,8 +967,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
 	}
 
 	entry->huge = true;
-	amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
-			       &dst, &flags);
+	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
 
 	if (p->func == amdgpu_vm_cpu_set_ptes) {
 		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
@@ -1485,7 +1484,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 
 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
-	adev->gart.gart_funcs->set_prt(adev, enable);
+	adev->gmc.gmc_funcs->set_prt(adev, enable);
 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
 }
@@ -1494,7 +1493,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
  */
 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
 {
-	if (!adev->gart.gart_funcs->set_prt)
+	if (!adev->gmc.gmc_funcs->set_prt)
 		return;
 
 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
@@ -1529,7 +1528,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
 {
 	struct amdgpu_prt_cb *cb;
 
-	if (!adev->gart.gart_funcs->set_prt)
+	if (!adev->gmc.gmc_funcs->set_prt)
 		return;
 
 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
@@ -2405,7 +2404,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
-	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
 	struct amdgpu_bo *root;
 	u64 fault;
 	int i, r;
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3688,11 +3688,11 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -37,7 +37,7 @@
 #include "dce/dce_6_0_sh_mask.h"
 #include "si_enums.h"
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v6_0_wait_for_idle(void *handle);
@@ -357,16 +357,13 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
 {
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
-static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
@@ -559,7 +556,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	else
 		gmc_v6_0_set_fault_enable_default(adev, true);
 
-	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v6_0_flush_gpu_tlb(adev, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
@@ -793,7 +790,7 @@ static int gmc_v6_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v6_0_set_gart_funcs(adev);
+	gmc_v6_0_set_gmc_funcs(adev);
 	gmc_v6_0_set_irq_funcs(adev);
 
 	return 0;
@@ -1127,9 +1124,9 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
 	.set_powergating_state = gmc_v6_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
+	.set_pte_pde = gmc_v6_0_set_pte_pde,
 	.set_prt = gmc_v6_0_set_prt,
 	.get_vm_pde = gmc_v6_0_get_vm_pde,
 	.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
@@ -1140,10 +1137,10 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
 	.process = gmc_v6_0_process_interrupt,
 };
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
 }
 
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -43,7 +43,7 @@
 #include "amdgpu_atombios.h"
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v7_0_wait_for_idle(void *handle);
@@ -422,22 +422,21 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
  */
 
 /**
- * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
  *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
-static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
 {
 	/* bits 0-15 are the VM contexts0-15 */
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
 /**
- * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v7_0_set_pte_pde - update the page tables using MMIO
  *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
@@ -447,10 +446,8 @@ static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 *
 * Update the page tables using the CPU.
 */
-static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
@@ -672,7 +669,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 		WREG32(mmCHUB_CONTROL, tmp);
 	}
 
-	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v7_0_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
@@ -919,7 +916,7 @@ static int gmc_v7_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v7_0_set_gart_funcs(adev);
+	gmc_v7_0_set_gmc_funcs(adev);
 	gmc_v7_0_set_irq_funcs(adev);
 
 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1306,9 +1303,9 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 	.set_powergating_state = gmc_v7_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
+	.set_pte_pde = gmc_v7_0_set_pte_pde,
 	.set_prt = gmc_v7_0_set_prt,
 	.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v7_0_get_vm_pde
@@ -1319,10 +1316,10 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
 	.process = gmc_v7_0_process_interrupt,
 };
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
 }
 
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -45,7 +45,7 @@
 #include "amdgpu_atombios.h"
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v8_0_wait_for_idle(void *handle);
@@ -597,14 +597,14 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
  */
 
 /**
- * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
  *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
-static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
+				   uint32_t vmid)
 {
 	/* bits 0-15 are the VM contexts0-15 */
@@ -612,7 +612,7 @@ static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 }
 
 /**
- * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v8_0_set_pte_pde - update the page tables using MMIO
  *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
@@ -622,10 +622,8 @@ static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 *
 * Update the page tables using the CPU.
 */
-static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
@@ -888,7 +886,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	else
 		gmc_v8_0_set_fault_enable_default(adev, true);
 
-	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v8_0_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
@@ -1009,7 +1007,7 @@ static int gmc_v8_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v8_0_set_gart_funcs(adev);
+	gmc_v8_0_set_gmc_funcs(adev);
 	gmc_v8_0_set_irq_funcs(adev);
 
 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1640,9 +1638,9 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+	.set_pte_pde = gmc_v8_0_set_pte_pde,
 	.set_prt = gmc_v8_0_set_prt,
 	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v8_0_get_vm_pde
@@ -1653,10 +1651,10 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
 	.process = gmc_v8_0_process_interrupt,
 };
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
 }
 
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -316,14 +316,14 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
  */
 
 /**
- * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
  *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
-static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
+				   uint32_t vmid)
 {
 	/* Use register 17 for GART */
@@ -367,7 +367,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 }
 
 /**
- * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v9_0_set_pte_pde - update the page tables using MMIO
  *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
@@ -377,10 +377,8 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 *
 * Update the page tables using the CPU.
 */
-static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
@@ -491,25 +489,25 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
 	}
 }
 
-static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
+	.set_pte_pde = gmc_v9_0_set_pte_pde,
 	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
 	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v9_0_get_vm_pde
 };
 
-static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
 }
 
 static int gmc_v9_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v9_0_set_gart_funcs(adev);
+	gmc_v9_0_set_gmc_funcs(adev);
 	gmc_v9_0_set_irq_funcs(adev);
 
 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
@@ -981,7 +979,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	gfxhub_v1_0_set_fault_enable_default(adev, value);
 	mmhub_v1_0_set_fault_enable_default(adev, value);
-	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v9_0_flush_gpu_tlb(adev, 0);
 
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 		 (unsigned)(adev->gmc.gart_size >> 20),
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1136,11 +1136,11 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					 unsigned vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1294,12 +1294,12 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 	uint32_t data0, data1, mask;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
@@ -1346,11 +1346,11 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					    unsigned int vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -968,11 +968,11 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
 				   unsigned int vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -891,12 +891,12 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					    unsigned vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 	uint32_t data0, data1, mask;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
@@ -1024,11 +1024,11 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					    unsigned int vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+	uint32_t req = ring->adev->gmc.gmc_funcs->get_invalidate_req(vmid);
 	uint64_t flags = AMDGPU_PTE_VALID;
 	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
 	pd_addr |= flags;
 
 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);