Commit effb97cc authored by Christian König, committed by Alex Deucher

drm/amdgpu: add TMZ handling to amdgpu_move_blit

This way we should at least be able to move buffers from VRAM to GTT.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent bffc8c5c
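For context: TMZ (Trusted Memory Zone) is the encrypted-memory feature on recent AMD GPUs. Pages of an encrypted buffer object are only accessible through GPU mappings that carry the TMZ bit, so a blit-based move has to both tell the copy engine the transfer is secure and tag the temporary GART windows it maps for the source and destination. The patch below threads a single tmz bool through exactly those two paths. The standalone C sketch that follows models that flow; the toy struct, helper names, and flag values are illustrative stand-ins, not the kernel's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real bits live in amdgpu_vm.h and
 * the GEM creation flags respectively. */
#define PTE_TMZ	     (1ULL << 3)
#define BO_ENCRYPTED (1ULL << 7)

struct bo { uint64_t flags; };	/* toy buffer object */

/* Models amdgpu_bo_encrypted(): the move path derives "tmz" from the
 * BO's creation flags once, at the top of amdgpu_move_blit(). */
static bool bo_encrypted(const struct bo *bo)
{
	return bo->flags & BO_ENCRYPTED;
}

/* Models the new hunk in amdgpu_map_buffer(): a secure copy ORs the
 * TMZ bit into the PTE flags of the temporary GART window. */
static uint64_t window_pte_flags(uint64_t base, bool tmz)
{
	if (tmz)
		base |= PTE_TMZ;
	return base;
}

int main(void)
{
	struct bo secure = { .flags = BO_ENCRYPTED };
	bool tmz = bo_encrypted(&secure);	/* passed down the call chain */

	printf("window PTE flags: %#llx\n",
	       (unsigned long long)window_pte_flags(0, tmz));
	return 0;
}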
@@ -65,7 +65,7 @@
 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
-			     struct amdgpu_ring *ring,
+			     struct amdgpu_ring *ring, bool tmz,
			     uint64_t *addr);

 /**
@@ -290,17 +290,23 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,

 /**
  * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ * @adev: amdgpu device
+ * @src: buffer/address where to read from
+ * @dst: buffer/address where to write to
+ * @size: number of bytes to copy
+ * @tmz: if a secure copy should be used
+ * @resv: resv object to sync to
+ * @f: Returns the last fence if multiple jobs are submitted.
  *
  * The function copies @size bytes from {src->mem + src->offset} to
  * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
  * move and different for a BO to BO copy.
  *
- * @f: Returns the last fence if multiple jobs are submitted.
  */
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
-			       uint64_t size,
+			       uint64_t size, bool tmz,
			       struct dma_resv *resv,
			       struct dma_fence **f)
 {
@@ -352,7 +358,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
		if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					      PFN_UP(cur_size + src_page_offset),
-					      src_node_start, 0, ring,
+					      src_node_start, 0, ring, tmz,
					      &from);
			if (r)
				goto error;
@@ -365,7 +371,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
		if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					      PFN_UP(cur_size + dst_page_offset),
-					      dst_node_start, 1, ring,
+					      dst_node_start, 1, ring, tmz,
					      &to);
			if (r)
				goto error;
@@ -373,7 +379,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
-				       resv, &next, false, true, false);
+				       resv, &next, false, true, tmz);
		if (r)
			goto error;
@@ -425,6 +431,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *old_mem)
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;
@@ -438,14 +445,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
+				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
-	    (ttm_to_amdgpu_bo(bo)->flags &
-	     AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
@@ -2022,7 +2029,7 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
-			     struct amdgpu_ring *ring,
+			     struct amdgpu_ring *ring, bool tmz,
			     uint64_t *addr)
 {
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
@@ -2064,6 +2071,9 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
+	if (tmz)
+		flags |= AMDGPU_PTE_TMZ;
+
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
...
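In the hunks above, the last argument of amdgpu_copy_buffer() changes from a hard-coded false to the caller's tmz. For orientation, this is the declaration it feeds into; the first line is confirmed by the hunk header below, while the remaining parameter names are a best-effort reconstruction of this kernel version and should be checked against the tree:

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush, bool tmz);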
@@ -91,7 +91,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
-			       uint64_t size,
+			       uint64_t size, bool tmz,
			       struct dma_resv *resv,
			       struct dma_fence **f);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
...
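amdgpu_move_blit() computes the new argument with amdgpu_bo_encrypted(abo). That helper is not part of this diff; as introduced elsewhere in the TMZ series it is, to the best of my reading (verify in amdgpu_object.h), a one-line check of the BO's creation flags:

static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

This is why the decision to use a secure copy is made per-BO at move time rather than plumbed through TTM itself.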