Commit 8944042d authored by Alex Deucher

Revert "drm/amdgpu: fix HMM config dependency issue"

This reverts commit 6b8f7e3d.

This depends on an HMM fix which is not upstream yet.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 96aa5bfa
@@ -26,7 +26,6 @@ config DRM_AMDGPU_CIK
 config DRM_AMDGPU_USERPTR
 	bool "Always enable userptr write support"
 	depends on DRM_AMDGPU
-	depends on ARCH_HAS_HMM
 	select HMM_MIRROR
 	help
 	  This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if it
......
@@ -710,9 +710,7 @@ struct amdgpu_ttm_tt {
 	uint64_t	userptr;
 	struct task_struct	*usertask;
 	uint32_t	userflags;
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 	struct hmm_range	range;
-#endif
 };

 /**
@@ -722,7 +720,6 @@ struct amdgpu_ttm_tt {
  * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
  * once afterwards to stop HMM tracking
  */
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -805,7 +802,6 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
 	return r;
 }
-#endif

 /**
  * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
@@ -822,6 +818,29 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 		ttm->pages[i] = pages ? pages[i] : NULL;
 }

+/**
+ * amdgpu_ttm_tt_mark_user_page - Mark pages as dirty
+ *
+ * Called while unpinning userptr pages
+ */
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	unsigned i;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		struct page *page = ttm->pages[i];
+
+		if (!page)
+			continue;
+
+		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+			set_page_dirty(page);
+
+		mark_page_accessed(page);
+	}
+}
+
 /**
  * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
  *
@@ -883,11 +902,9 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 	sg_free_table(ttm->sg);

-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 	if (gtt->range.pfns &&
 	    ttm->pages[0] == hmm_pfn_to_page(&gtt->range, gtt->range.pfns[0]))
 		WARN_ONCE(1, "Missing get_user_page_done\n");
-#endif
 }

 int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
......
@@ -101,21 +101,10 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);

-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
-#else
-static inline int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
-{
-	return -EPERM;
-}
-
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
-{
-	return false;
-}
-#endif
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 			      uint32_t flags);
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
......
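Usage note: the kerneldoc context in amdgpu_ttm.c above says a caller of amdgpu_ttm_tt_get_user_pages() must stop HMM tracking exactly once afterwards, and amdgpu_ttm_tt_unpin_userptr() warns with "Missing get_user_page_done" when that did not happen; the declared counterpart in amdgpu_ttm.h is amdgpu_ttm_tt_get_user_pages_done(). A minimal sketch of that calling contract follows; the caller is hypothetical (its name, the -EAGAIN retry convention, and the surrounding setup are assumptions, only the two amdgpu_ttm_tt_* calls and their signatures come from the diff above).

/*
 * Hypothetical caller, for illustration only.  Assumes it sits next to the
 * amdgpu TTM code with amdgpu_ttm.h included.
 */
static int example_map_userptr(struct ttm_tt *ttm, struct page **pages)
{
	int r;

	/* Start HMM tracking of the user address range and fill @pages. */
	r = amdgpu_ttm_tt_get_user_pages(ttm, pages);
	if (r)
		return r;

	/* ... use the pages, e.g. build the SG table / bind to GART ... */

	/*
	 * Stop HMM tracking; must be called once and only once after a
	 * successful get_user_pages.  A false return is taken here to mean
	 * the range was invalidated and the pages must be fetched again.
	 */
	if (!amdgpu_ttm_tt_get_user_pages_done(ttm))
		return -EAGAIN;

	return 0;
}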