Commit b2dba064 authored by Philip Yang, committed by Alex Deucher

drm/amdgpu: Handle sg size limit for contiguous allocation

Define the macro AMDGPU_MAX_SG_SEGMENT_SIZE as 2GB: struct scatterlist stores
the segment length as an unsigned int, and some of its users cast that length
to a signed int, so every segment of an sg table is limited to a maximum size
of 2GB.
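
To make the sign issue concrete, here is a minimal user-space sketch (not part
of the patch; the 3GB value is only an example, and the exact signed result of
the cast is implementation-defined) showing how a segment length above 2GB
still fits in an unsigned int but is misread once cast to a signed int:

#include <stdio.h>

int main(void)
{
	/* struct scatterlist keeps the segment length in an unsigned int.
	 * A hypothetical 3GB segment still fits in that unsigned field,
	 * but a user that casts the length to a signed int sees a negative
	 * value, hence the 2GB per-segment cap in the patch.
	 */
	unsigned int sg_len = 3U << 30;		/* 3GB, fits in unsigned int */

	printf("unsigned view: %u bytes\n", sg_len);
	printf("signed view:   %d bytes\n", (int)sg_len);	/* negative */
	return 0;
}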

For contiguous VRAM allocation, don't limit the maximum buddy block size, so
that the allocation returns truly contiguous VRAM memory. To work around the
sg table segment size limit, allocate multiple segments if the contiguous size
is bigger than AMDGPU_MAX_SG_SEGMENT_SIZE.
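
The splitting can be illustrated with a small user-space sketch (again not the
kernel code; the 5GB allocation size, the MAX_SG_SEGMENT_SIZE name and the loop
are illustrative only) that walks a contiguous range and caps every sg entry at
the 2GB limit, the same way the patched cursor walk does:

#include <stdio.h>

#define MAX_SG_SEGMENT_SIZE	(2ULL << 30)	/* mirrors AMDGPU_MAX_SG_SEGMENT_SIZE */

int main(void)
{
	unsigned long long remaining = 5ULL << 30;	/* e.g. a 5GB contiguous block */
	unsigned int num_entries = 0;

	/* Cap each sg entry at 2GB, analogous to
	 * min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE) in the patch.
	 */
	while (remaining) {
		unsigned long long seg = remaining < MAX_SG_SEGMENT_SIZE ?
					 remaining : MAX_SG_SEGMENT_SIZE;

		printf("entry %u: %llu bytes\n", num_entries++, seg);
		remaining -= seg;
	}
	printf("total sg entries: %u\n", num_entries);	/* 3 entries for 5GB */
	return 0;
}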
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent fbbbf6fb
@@ -31,6 +31,8 @@
 #include "amdgpu_atomfirmware.h"
 #include "atom.h"
 
+#define AMDGPU_MAX_SG_SEGMENT_SIZE	(2UL << 30)
+
 struct amdgpu_vram_reservation {
 	u64 start;
 	u64 size;
@@ -515,9 +517,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		else
 			min_block_size = mgr->default_page_size;
 
-		/* Limit maximum size to 2GiB due to SG table limitations */
-		size = min(remaining_size, 2ULL << 30);
-
+		size = remaining_size;
 		if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
 		    !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
 			min_block_size = (u64)pages_per_block << PAGE_SHIFT;
@@ -657,7 +657,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 	amdgpu_res_first(res, offset, length, &cursor);
 	while (cursor.remaining) {
 		num_entries++;
-		amdgpu_res_next(&cursor, cursor.size);
+		amdgpu_res_next(&cursor, min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE));
 	}
 
 	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
@@ -677,7 +677,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 	amdgpu_res_first(res, offset, length, &cursor);
 	for_each_sgtable_sg((*sgt), sg, i) {
 		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
-		size_t size = cursor.size;
+		unsigned long size = min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE);
 		dma_addr_t addr;
 
 		addr = dma_map_resource(dev, phys, size, dir,
@@ -690,7 +690,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 		sg_dma_address(sg) = addr;
 		sg_dma_len(sg) = size;
 
-		amdgpu_res_next(&cursor, cursor.size);
+		amdgpu_res_next(&cursor, size);
 	}
 
 	return 0;