Commit 95045783 authored by Christian König, committed by Alex Deucher

drm/amdgpu: add full TMZ support into amdgpu_ttm_map_buffer v2

This should allow us to also support VRAM->GTT moves.

v2: fix missing vram_base_adjustment
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f0ee63cb
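
Context for the diff below: amdgpu_gart_map() takes an array of per-page DMA addresses. A GTT-backed buffer already has such an array (ttm_dma_tt.dma_address), but a VRAM mm_node is only a contiguous physical range, so the patch walks it one page at a time. The following is a minimal stand-alone model of that address walk (plain user-space C, not kernel code; PAGE_SHIFT and all numeric values are illustrative assumptions):

/*
 * Stand-alone model (user-space C, not kernel code) of the per-page
 * address walk the VRAM path in the diff performs before each
 * amdgpu_gart_map() call. All numeric values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                  /* assumed 4 KiB pages */
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

int main(void)
{
	uint64_t mm_node_start = 0x100;              /* hypothetical mm_node->start, in pages */
	uint64_t offset = 0x2000;                    /* hypothetical byte offset into the node */
	uint64_t vram_base_offset = 0x8000000000ULL; /* hypothetical adev->vm_manager.vram_base_offset */
	unsigned int num_pages = 4;

	/*
	 * Mirrors the VRAM-path setup in the patch (adding
	 * vram_base_offset is the v2 fix):
	 *   dma_address = (mm_node->start << PAGE_SHIFT) + offset;
	 *   dma_address += adev->vm_manager.vram_base_offset;
	 */
	uint64_t dma_address = (mm_node_start << PAGE_SHIFT) + offset;
	dma_address += vram_base_offset;

	for (unsigned int i = 0; i < num_pages; ++i) {
		/*
		 * The real code maps one page per iteration via
		 * amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address, ...).
		 */
		printf("GART offset 0x%llx <- VRAM address 0x%llx\n",
		       (unsigned long long)((uint64_t)i << PAGE_SHIFT),
		       (unsigned long long)dma_address);
		dma_address += PAGE_SIZE;
	}
	return 0;
}
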
@@ -305,21 +305,21 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 				 unsigned window, struct amdgpu_ring *ring,
 				 bool tmz, uint64_t *addr)
 {
-	struct ttm_dma_tt *dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	unsigned num_dw, num_bytes;
-	dma_addr_t *dma_address;
 	struct dma_fence *fence;
 	uint64_t src_addr, dst_addr;
+	void *cpu_addr;
 	uint64_t flags;
+	unsigned int i;
 	int r;
 
 	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
 	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
 
 	/* Map only what can't be accessed directly */
-	if (mem->start != AMDGPU_BO_INVALID_OFFSET) {
+	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
 		*addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
 		return 0;
 	}
@@ -348,16 +348,38 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
 
-	dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
 	if (tmz)
 		flags |= AMDGPU_PTE_TMZ;
 
-	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
-			    &job->ibs[0].ptr[num_dw]);
-	if (r)
-		goto error_free;
+	cpu_addr = &job->ibs[0].ptr[num_dw];
+	if (mem->mem_type == TTM_PL_TT) {
+		struct ttm_dma_tt *dma;
+		dma_addr_t *dma_address;
+
+		dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
+		dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+		r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+				    cpu_addr);
+		if (r)
+			goto error_free;
+	} else {
+		dma_addr_t dma_address;
+
+		dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+		dma_address += adev->vm_manager.vram_base_offset;
+
+		for (i = 0; i < num_pages; ++i) {
+			r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
+					    &dma_address, flags, cpu_addr);
+			if (r)
+				goto error_free;
+
+			dma_address += PAGE_SIZE;
+		}
+	}
 
 	r = amdgpu_job_submit(job, &adev->mman.entity,
 			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
 	if (r)
...
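
Design note, as the diff shows: the GTT path hands amdgpu_gart_map() the whole dma_address array in one call, while the VRAM path reuses a single dma_addr_t and advances it by PAGE_SIZE per iteration, since an mm_node's pages are physically contiguous. The !tmz addition to the early-out routes TMZ buffers through the mapped window even when mem->start would allow direct access, so their mappings carry the AMDGPU_PTE_TMZ flag.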