Commit 498ad8ec authored by Christian König, committed by Alex Deucher

drm/amdgpu: use the new cursor in amdgpu_ttm_access_memory

Separate the drm_mm_node walking from the actual handling.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Oak Zeng <Oak.Zeng@amd.com>
Tested-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Arunpravin <Arunpravin.PaneerSelvam@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 10ebcd95
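
The change replaces the open-coded drm_mm_node walk with the amdgpu_res_cursor helpers (amdgpu_res_first()/amdgpu_res_next()), so the access loop only ever sees contiguous (start, size) chunks. As a reading aid, here is a minimal userspace sketch of that cursor pattern; the names mirror the kernel's amdgpu_res_cursor API, but the node and cursor types are simplified stand-ins, not the real kernel structures.

/* Minimal userspace sketch of the cursor pattern: the cursor hides the
 * node walk so the consumer only sees contiguous (start, size) chunks.
 * res_first/res_next mirror amdgpu_res_first()/amdgpu_res_next(); the
 * types here are simplified stand-ins, not the kernel API. */
#include <stdint.h>
#include <stdio.h>

struct mm_node {		/* stand-in for struct drm_mm_node */
	uint64_t start;		/* node start, in bytes for simplicity */
	uint64_t size;		/* node size in bytes */
};

struct res_cursor {
	struct mm_node *node;	/* node the cursor currently points into */
	uint64_t start;		/* absolute offset of the cursor position */
	uint64_t size;		/* contiguous bytes left in the current node */
	uint64_t remaining;	/* total bytes left in the whole walk */
};

/* Position the cursor 'offset' bytes into the allocation, 'size' bytes
 * long. The caller must keep offset + size within the node array. */
static void res_first(struct mm_node *nodes, uint64_t offset, uint64_t size,
		      struct res_cursor *cur)
{
	while (offset >= nodes->size) {	/* skip whole nodes, like the old
					 * amdgpu_find_mm_node() helper */
		offset -= nodes->size;
		++nodes;
	}
	cur->node = nodes;
	cur->start = nodes->start + offset;
	cur->size = nodes->size - offset;
	if (cur->size > size)
		cur->size = size;
	cur->remaining = size;
}

/* Advance the cursor by 'size' bytes, stepping to the next node when the
 * current one is exhausted. */
static void res_next(struct res_cursor *cur, uint64_t size)
{
	cur->remaining -= size;
	cur->size -= size;
	cur->start += size;
	if (!cur->size && cur->remaining) {
		++cur->node;
		cur->start = cur->node->start;
		cur->size = cur->node->size;
		if (cur->size > cur->remaining)
			cur->size = cur->remaining;
	}
}

int main(void)
{
	struct mm_node nodes[] = { { 0x1000, 32 }, { 0x8000, 64 } };
	struct res_cursor cur;

	/* Walk 48 bytes starting 16 bytes into the allocation: the consumer
	 * loop never touches the nodes, which is exactly the separation this
	 * commit makes. Prints a 16-byte chunk at 0x1010, then a 32-byte
	 * chunk at 0x8000. */
	for (res_first(nodes, 16, 48, &cur); cur.remaining;
	     res_next(&cur, cur.size))
		printf("chunk at 0x%llx, %llu bytes\n",
		       (unsigned long long)cur.start,
		       (unsigned long long)cur.size);
	return 0;
}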
@@ -178,26 +178,6 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 					  filp->private_data);
 }
 
-/**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
- * @offset. It also modifies the offset to be within the drm_mm_node returned
- *
- * @mem: The region where the bo resides.
- * @offset: The offset that drm_mm_node is used for finding.
- *
- */
-static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
-					       uint64_t *offset)
-{
-	struct drm_mm_node *mm_node = mem->mm_node;
-
-	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
-		*offset -= (mm_node->size << PAGE_SHIFT);
-		++mm_node;
-	}
-	return mm_node;
-}
-
 /**
  * amdgpu_ttm_map_buffer - Map memory into the GART windows
  * @bo: buffer object to map
@@ -1478,40 +1458,35 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
  * access for debugging purposes.
  */
 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
-				    unsigned long offset,
-				    void *buf, int len, int write)
+				    unsigned long offset, void *buf, int len,
+				    int write)
 {
 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
-	struct drm_mm_node *nodes;
+	struct amdgpu_res_cursor cursor;
+	unsigned long flags;
 	uint32_t value = 0;
 	int ret = 0;
-	uint64_t pos;
-	unsigned long flags;
 
 	if (bo->mem.mem_type != TTM_PL_VRAM)
 		return -EIO;
 
-	pos = offset;
-	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
-	pos += (nodes->start << PAGE_SHIFT);
-
-	while (len && pos < adev->gmc.mc_vram_size) {
-		uint64_t aligned_pos = pos & ~(uint64_t)3;
-		uint64_t bytes = 4 - (pos & 3);
-		uint32_t shift = (pos & 3) * 8;
+	amdgpu_res_first(&bo->mem, offset, len, &cursor);
+	while (cursor.remaining) {
+		uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
+		uint64_t bytes = 4 - (cursor.start & 3);
+		uint32_t shift = (cursor.start & 3) * 8;
 		uint32_t mask = 0xffffffff << shift;
 
-		if (len < bytes) {
-			mask &= 0xffffffff >> (bytes - len) * 8;
-			bytes = len;
+		if (cursor.size < bytes) {
+			mask &= 0xffffffff >> (bytes - cursor.size) * 8;
+			bytes = cursor.size;
 		}
 
 		if (mask != 0xffffffff) {
 			spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 			WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
 			WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
-			if (!write || mask != 0xffffffff)
-				value = RREG32_NO_KIQ(mmMM_DATA);
+			value = RREG32_NO_KIQ(mmMM_DATA);
 			if (write) {
 				value &= ~mask;
@@ -1524,21 +1499,15 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 				memcpy(buf, &value, bytes);
 			}
 		} else {
-			bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
-			bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
-
-			amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
-						  bytes, write);
+			bytes = cursor.size & 0x3ull;
+			amdgpu_device_vram_access(adev, cursor.start,
+						  (uint32_t *)buf, bytes,
+						  write);
 		}
 
 		ret += bytes;
 		buf = (uint8_t *)buf + bytes;
-		pos += bytes;
-		len -= bytes;
-		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
-			++nodes;
-			pos = (nodes->start << PAGE_SHIFT);
-		}
+		amdgpu_res_next(&cursor, bytes);
 	}
 
 	return ret;
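
To make the read-modify-write arithmetic in the unaligned path above concrete, here is a small userspace rehearsal of the same math. The vram[] array stands in for the 32-bit MM_INDEX/MM_DATA register window and the example assumes a little-endian host; the shift/mask computation is copied from the loop above, everything else is scaffolding.

/* Userspace rehearsal of the unaligned write path: read the containing
 * 32-bit word, clear the bytes being written via the mask, merge in the
 * new bytes, and write the word back. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t vram[2] = { 0x11223344, 0x55667788 };	/* fake VRAM words */

/* Write 'len' bytes from buf at byte offset 'pos', at most one partial
 * word, mirroring one iteration of the masked branch above. */
static void write_unaligned(uint64_t pos, const void *buf, uint64_t len)
{
	uint64_t aligned_pos = pos & ~(uint64_t)3;	/* word containing pos */
	uint64_t bytes = 4 - (pos & 3);			/* bytes left in word */
	uint32_t shift = (pos & 3) * 8;
	uint32_t mask = 0xffffffff << shift;
	uint32_t value, src = 0;

	if (len < bytes) {				/* access ends mid-word */
		mask &= 0xffffffff >> (bytes - len) * 8;
		bytes = len;
	}

	memcpy(&src, buf, bytes);
	value = vram[aligned_pos / 4];			/* read...   */
	value &= ~mask;					/* ...modify */
	value |= (src << shift) & mask;
	vram[aligned_pos / 4] = value;			/* ...write  */
}

int main(void)
{
	const uint8_t patch[2] = { 0xaa, 0xbb };

	/* Patch bytes 1-2 of the second word; on a little-endian host this
	 * prints "11223344 55bbaa88". */
	write_unaligned(5, patch, 2);
	printf("%08" PRIx32 " %08" PRIx32 "\n", vram[0], vram[1]);
	return 0;
}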