Commit 1f961807 authored by Ralph Campbell, committed by Jason Gunthorpe

mm/hmm: replace hmm_update with mmu_notifier_range

The hmm_mirror_ops callback function sync_cpu_device_pagetables() passes a
struct hmm_update which is a simplified version of struct
mmu_notifier_range. This is unnecessary, so replace hmm_update with
mmu_notifier_range directly.

Link: https://lore.kernel.org/r/20190726005650.2566-2-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
[jgg: white space tuning]
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent e709accc
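
As orientation before the diff: a minimal sketch of a driver-side callback under the new signature. The example_ names and the invalidate helper are hypothetical and not part of this commit; only the signature and the mmu_notifier_range_blockable() check mirror the API change below.

#include <linux/hmm.h>
#include <linux/mmu_notifier.h>

/* Hypothetical mirror callback using the post-change signature. */
static int example_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
				const struct mmu_notifier_range *update)
{
	/* The blockable flag now comes from the range itself. */
	if (!mmu_notifier_range_blockable(update))
		return -EAGAIN;	/* cannot sleep; tell the core to retry */

	/* Tear down device mappings covering [update->start, update->end);
	 * example_invalidate() stands in for the driver's own helper. */
	example_invalidate(mirror, update->start, update->end);
	return 0;
}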
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c

@@ -195,13 +195,14 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
  * Block for operations on BOs to finish and mark pages as accessed and
  * potentially dirty.
  */
-static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
-			const struct hmm_update *update)
+static int
+amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
+			      const struct mmu_notifier_range *update)
 {
 	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
 	unsigned long start = update->start;
 	unsigned long end = update->end;
-	bool blockable = update->blockable;
+	bool blockable = mmu_notifier_range_blockable(update);
 	struct interval_tree_node *it;
 
 	/* notification is exclusive, but interval is inclusive */
@@ -243,13 +244,14 @@ static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
  * necessitates evicting all user-mode queues of the process. The BOs
  * are restorted in amdgpu_mn_invalidate_range_end_hsa.
  */
-static int amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
-			const struct hmm_update *update)
+static int
+amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
+			      const struct mmu_notifier_range *update)
 {
 	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
 	unsigned long start = update->start;
 	unsigned long end = update->end;
-	bool blockable = update->blockable;
+	bool blockable = mmu_notifier_range_blockable(update);
 	struct interval_tree_node *it;
 
 	/* notification is exclusive, but interval is inclusive */
drivers/gpu/drm/nouveau/nouveau_svm.c

@@ -252,13 +252,13 @@ nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
 static int
 nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
-					const struct hmm_update *update)
+					const struct mmu_notifier_range *update)
 {
 	struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
 	unsigned long start = update->start;
 	unsigned long limit = update->end;
 
-	if (!update->blockable)
+	if (!mmu_notifier_range_blockable(update))
 		return -EAGAIN;
 
 	SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
include/linux/hmm.h

@@ -340,29 +340,6 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
 
 struct hmm_mirror;
 
-/*
- * enum hmm_update_event - type of update
- * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
- */
-enum hmm_update_event {
-	HMM_UPDATE_INVALIDATE,
-};
-
-/*
- * struct hmm_update - HMM update information for callback
- *
- * @start: virtual start address of the range to update
- * @end: virtual end address of the range to update
- * @event: event triggering the update (what is happening)
- * @blockable: can the callback block/sleep ?
- */
-struct hmm_update {
-	unsigned long start;
-	unsigned long end;
-	enum hmm_update_event event;
-	bool blockable;
-};
-
 /*
  * struct hmm_mirror_ops - HMM mirror device operations callback
  *
@@ -383,9 +360,9 @@ struct hmm_mirror_ops {
 	/* sync_cpu_device_pagetables() - synchronize page tables
 	 *
 	 * @mirror: pointer to struct hmm_mirror
-	 * @update: update information (see struct hmm_update)
-	 * Return: -EAGAIN if update.blockable false and callback need to
-	 *         block, 0 otherwise.
+	 * @update: update information (see struct mmu_notifier_range)
+	 * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false
+	 *         and callback needs to block, 0 otherwise.
 	 *
 	 * This callback ultimately originates from mmu_notifiers when the CPU
 	 * page table is updated. The device driver must update its page table
@@ -396,8 +373,9 @@ struct hmm_mirror_ops {
 	 * page tables are completely updated (TLBs flushed, etc); this is a
 	 * synchronous call.
 	 */
-	int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
-					  const struct hmm_update *update);
+	int (*sync_cpu_device_pagetables)(
+			struct hmm_mirror *mirror,
+			const struct mmu_notifier_range *update);
 };
mm/hmm.c

@@ -165,7 +165,6 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 {
 	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
 	struct hmm_mirror *mirror;
-	struct hmm_update update;
 	struct hmm_range *range;
 	unsigned long flags;
 	int ret = 0;
@@ -173,15 +172,10 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 	if (!kref_get_unless_zero(&hmm->kref))
 		return 0;
 
-	update.start = nrange->start;
-	update.end = nrange->end;
-	update.event = HMM_UPDATE_INVALIDATE;
-	update.blockable = mmu_notifier_range_blockable(nrange);
-
 	spin_lock_irqsave(&hmm->ranges_lock, flags);
 	hmm->notifiers++;
 	list_for_each_entry(range, &hmm->ranges, list) {
-		if (update.end < range->start || update.start >= range->end)
+		if (nrange->end < range->start || nrange->start >= range->end)
 			continue;
 
 		range->valid = false;
@@ -198,9 +192,10 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 	list_for_each_entry(mirror, &hmm->mirrors, list) {
 		int rc;
 
-		rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
+		rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange);
 		if (rc) {
-			if (WARN_ON(update.blockable || rc != -EAGAIN))
+			if (WARN_ON(mmu_notifier_range_blockable(nrange) ||
+				    rc != -EAGAIN))
 				continue;
 			ret = -EAGAIN;
 			break;
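
For completeness, a sketch of how such a callback is wired up. hmm_mirror_register() is the registration entry point of this era's HMM API; the example_ names remain hypothetical and continue the sketch above.

static const struct hmm_mirror_ops example_mirror_ops = {
	.sync_cpu_device_pagetables = example_sync_cpu_device_pagetables,
};

static int example_mirror_setup(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	mirror->ops = &example_mirror_ops;
	/* After registration, the callback runs from the mmu_notifier
	 * invalidate_range_start path shown in mm/hmm.c above. */
	return hmm_mirror_register(mirror, mm);
}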