Commit 1b0c0f9d authored by Christian König, committed by Alex Deucher

drm/amdgpu: move userptr BOs to CPU domain during CS v2

Instead of moving userptr BOs to the CPU domain in the MMU notifier, move them during CS.

v2: still mark pages as accessed/dirty
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com> (v1)
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent ca666a3c
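
For orientation, a minimal sketch of the CS-time flow this commit introduces (illustrative only, not the driver code itself; the helper name userptr_bind_pages_sketch is hypothetical, while the calls are the ones the amdgpu_cs.c hunk below adds to amdgpu_cs_list_validate):

/*
 * Sketch of the new CS-time handling of a userptr BO that still needs
 * its pages bound. Error handling beyond the validate failure is elided.
 */
static int userptr_bind_pages_sketch(struct amdgpu_bo *bo,
				     struct page **user_pages)
{
	int r;

	/* Request CPU-domain placement so the BO is backed out of GTT. */
	amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);

	/* Validate (interruptible, waiting on the GPU) to perform the move. */
	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		return r;

	/* Attach the freshly acquired user pages to the TTM object. */
	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, user_pages);
	return 0;
}

The MMU notifier side keeps only the accessed/dirty bookkeeping, now factored into amdgpu_ttm_tt_mark_user_pages() as shown in the hunks below.
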
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1794,6 +1794,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			       uint32_t flags);
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -475,6 +475,12 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 		/* Check if we have user pages and nobody bound the BO already */
 		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
 		    lobj->user_pages) {
+			amdgpu_ttm_placement_from_domain(bo,
+							 AMDGPU_GEM_DOMAIN_CPU);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
+					    false);
+			if (r)
+				return r;
 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 						     lobj->user_pages);
 			binding_userptr = true;
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -137,10 +137,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 		if (r <= 0)
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);

-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-		if (r)
-			DRM_ERROR("(%ld) failed to validate user bo\n", r);
+		amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);

 		amdgpu_bo_unreserve(bo);
 	}
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -685,6 +685,24 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 	}
 }

+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	unsigned i;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		struct page *page = ttm->pages[i];
+
+		if (!page)
+			continue;
+
+		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+			set_page_dirty(page);
+
+		mark_page_accessed(page);
+	}
+}
+
 static void amdgpu_trace_dma_map(struct ttm_tt *ttm)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
@@ -740,7 +758,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	struct sg_page_iter sg_iter;
 	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 	enum dma_data_direction direction = write ?
 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -753,13 +770,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 	/* free the sg table and pages again */
 	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

-	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
-			set_page_dirty(page);
-
-		mark_page_accessed(page);
-	}
+	amdgpu_ttm_tt_mark_user_pages(ttm);

 	amdgpu_trace_dma_unmap(ttm);