Commit 5d6527a7 authored by Jérôme Glisse, committed by Linus Torvalds

mm/mmu_notifier: use structure for invalidate_range_start/end callback

Patch series "mmu notifier contextual informations", v2.

This patchset adds contextual information (why an invalidation is
happening) to the mmu notifier callbacks.  This is necessary for users of
mmu notifiers that wish to maintain their own data structures without
having to add new fields to struct vm_area_struct (vma).

For instance, a device can have its own page table that mirrors the
process address space.  When a vma is unmapped (munmap() syscall), the
device driver can free the device page table for the range.

Today we do not have any information on why an mmu notifier callback is
happening, and thus device drivers have to assume that it is always an
munmap().  This is inefficient, as it means the driver needs to
re-allocate the device page table on the next page fault and rebuild the
whole device driver data structure for the range.

Other use cases beside munmap() also exist.  For instance, it is pointless
for a device driver to invalidate the device page table when the
invalidation is only for soft dirty tracking.  Or a device driver can
optimize away an mprotect() that changes the page table access permissions
for the range.

This patchset enables all of these optimizations for device drivers.  I do
not include any of them in this series, but another patchset I am posting
will leverage this.

The patchset is pretty simple from a code point of view.  The first two
patches consolidate all mmu notifier arguments into a struct so that it is
easier to add/change arguments.  The last patch adds the contextual
information (munmap, protection, soft dirty, clear, ...).

This patch (of 3):

To avoid having to change many callback definitions every time we want to
add a parameter, use a structure to group all parameters for the
mmu_notifier invalidate_range_start/end callbacks.  No functional changes
with this patch.
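
For illustration, here is a rough standalone sketch of the new calling
convention: the per-callback argument list is replaced by a single
struct mmu_notifier_range that the caller fills on the stack and passes by
pointer.  This is not kernel code; the example_invalidate_range_start and
notify_start names and the stubbed mm_struct are made up for the sketch,
while the struct fields match what the hunks below add.

#include <stdbool.h>
#include <stdio.h>

struct mm_struct { int dummy; };        /* stand-in for the kernel type */

struct mmu_notifier_range {
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        bool blockable;
};

/* New-style callback: all parameters travel in one struct. */
static int example_invalidate_range_start(const struct mmu_notifier_range *range)
{
        printf("invalidate [%#lx, %#lx) blockable=%d\n",
               range->start, range->end, range->blockable);
        return 0;
}

/* The caller fills the range on the stack and passes a pointer to it. */
static int notify_start(struct mm_struct *mm, unsigned long start,
                        unsigned long end, bool blockable)
{
        struct mmu_notifier_range range = {
                .mm = mm,
                .start = start,
                .end = end,
                .blockable = blockable,
        };

        return example_invalidate_range_start(&range);
}

int main(void)
{
        struct mm_struct mm = { 0 };

        return notify_start(&mm, 0x1000, 0x2000, true);
}

Adding a new piece of contextual information later then only means adding
a field to the structure, without touching every callback signature.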

[akpm@linux-foundation.org: fix drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c kerneldoc]
Link: http://lkml.kernel.org/r/20181205053628.3210-2-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Jason Gunthorpe <jgg@mellanox.com>	[infiniband]
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b15c8726
@@ -238,44 +238,40 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
  * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
  *
  * @mn: our notifier
- * @mm: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @range: mmu notifier context
  *
  * Block for operations on BOs to finish and mark pages as accessed and
  * potentially dirty.
  */
 static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
-						 struct mm_struct *mm,
-						 unsigned long start,
-						 unsigned long end,
-						 bool blockable)
+						 const struct mmu_notifier_range *range)
 {
 	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
+	unsigned long end;
 	/* notification is exclusive, but interval is inclusive */
-	end -= 1;
+	end = range->end - 1;
 	/* TODO we should be able to split locking for interval tree and
 	 * amdgpu_mn_invalidate_node
 	 */
-	if (amdgpu_mn_read_lock(amn, blockable))
+	if (amdgpu_mn_read_lock(amn, range->blockable))
 		return -EAGAIN;
-	it = interval_tree_iter_first(&amn->objects, start, end);
+	it = interval_tree_iter_first(&amn->objects, range->start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
-		if (!blockable) {
+		if (!range->blockable) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
 		node = container_of(it, struct amdgpu_mn_node, it);
-		it = interval_tree_iter_next(it, start, end);
-		amdgpu_mn_invalidate_node(node, start, end);
+		it = interval_tree_iter_next(it, range->start, end);
+		amdgpu_mn_invalidate_node(node, range->start, end);
 	}
 	return 0;
@@ -294,39 +290,38 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
  * are restorted in amdgpu_mn_invalidate_range_end_hsa.
  */
 static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
-						struct mm_struct *mm,
-						unsigned long start,
-						unsigned long end,
-						bool blockable)
+						const struct mmu_notifier_range *range)
 {
 	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
+	unsigned long end;
 	/* notification is exclusive, but interval is inclusive */
-	end -= 1;
+	end = range->end - 1;
-	if (amdgpu_mn_read_lock(amn, blockable))
+	if (amdgpu_mn_read_lock(amn, range->blockable))
 		return -EAGAIN;
-	it = interval_tree_iter_first(&amn->objects, start, end);
+	it = interval_tree_iter_first(&amn->objects, range->start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;
-		if (!blockable) {
+		if (!range->blockable) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
 		node = container_of(it, struct amdgpu_mn_node, it);
-		it = interval_tree_iter_next(it, start, end);
+		it = interval_tree_iter_next(it, range->start, end);
 		list_for_each_entry(bo, &node->bos, mn_list) {
 			struct kgd_mem *mem = bo->kfd_bo;
 			if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
-							 start, end))
-				amdgpu_amdkfd_evict_userptr(mem, mm);
+							 range->start,
+							 end))
+				amdgpu_amdkfd_evict_userptr(mem, range->mm);
 		}
 	}
@@ -344,9 +339,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
  * Release the lock again to allow new command submissions.
  */
 static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
-					   struct mm_struct *mm,
-					   unsigned long start,
-					   unsigned long end)
+					   const struct mmu_notifier_range *range)
 {
 	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
...
@@ -113,27 +113,25 @@ static void del_object(struct i915_mmu_object *mo)
 }
 static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
-						       struct mm_struct *mm,
-						       unsigned long start,
-						       unsigned long end,
-						       bool blockable)
+						       const struct mmu_notifier_range *range)
 {
 	struct i915_mmu_notifier *mn =
 		container_of(_mn, struct i915_mmu_notifier, mn);
 	struct i915_mmu_object *mo;
 	struct interval_tree_node *it;
 	LIST_HEAD(cancelled);
+	unsigned long end;
 	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
 		return 0;
 	/* interval ranges are inclusive, but invalidate range is exclusive */
-	end--;
+	end = range->end - 1;
 	spin_lock(&mn->lock);
-	it = interval_tree_iter_first(&mn->objects, start, end);
+	it = interval_tree_iter_first(&mn->objects, range->start, end);
 	while (it) {
-		if (!blockable) {
+		if (!range->blockable) {
 			spin_unlock(&mn->lock);
 			return -EAGAIN;
 		}
@@ -151,7 +149,7 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		queue_work(mn->wq, &mo->work);
 		list_add(&mo->link, &cancelled);
-		it = interval_tree_iter_next(it, start, end);
+		it = interval_tree_iter_next(it, range->start, end);
 	}
 	list_for_each_entry(mo, &cancelled, link)
 		del_object(mo);
...
@@ -119,40 +119,38 @@ static void radeon_mn_release(struct mmu_notifier *mn,
  * unmap them by move them into system domain again.
  */
 static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
-					     struct mm_struct *mm,
-					     unsigned long start,
-					     unsigned long end,
-					     bool blockable)
+					     const struct mmu_notifier_range *range)
 {
 	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
 	struct ttm_operation_ctx ctx = { false, false };
 	struct interval_tree_node *it;
+	unsigned long end;
 	int ret = 0;
 	/* notification is exclusive, but interval is inclusive */
-	end -= 1;
+	end = range->end - 1;
 	/* TODO we should be able to split locking for interval tree and
 	 * the tear down.
 	 */
-	if (blockable)
+	if (range->blockable)
 		mutex_lock(&rmn->lock);
 	else if (!mutex_trylock(&rmn->lock))
 		return -EAGAIN;
-	it = interval_tree_iter_first(&rmn->objects, start, end);
+	it = interval_tree_iter_first(&rmn->objects, range->start, end);
 	while (it) {
 		struct radeon_mn_node *node;
 		struct radeon_bo *bo;
 		long r;
-		if (!blockable) {
+		if (!range->blockable) {
 			ret = -EAGAIN;
 			goto out_unlock;
 		}
 		node = container_of(it, struct radeon_mn_node, it);
-		it = interval_tree_iter_next(it, start, end);
+		it = interval_tree_iter_next(it, range->start, end);
 		list_for_each_entry(bo, &node->bos, mn_list) {
...
@@ -146,15 +146,12 @@ static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
 }
 static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
-						    struct mm_struct *mm,
-						    unsigned long start,
-						    unsigned long end,
-						    bool blockable)
+						    const struct mmu_notifier_range *range)
 {
 	struct ib_ucontext_per_mm *per_mm =
 		container_of(mn, struct ib_ucontext_per_mm, mn);
-	if (blockable)
+	if (range->blockable)
 		down_read(&per_mm->umem_rwsem);
 	else if (!down_read_trylock(&per_mm->umem_rwsem))
 		return -EAGAIN;
@@ -169,9 +166,10 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		return 0;
 	}
-	return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
+	return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
+					     range->end,
 					     invalidate_range_start_trampoline,
-					     blockable, NULL);
+					     range->blockable, NULL);
 }
 static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
@@ -182,9 +180,7 @@ static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
 }
 static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
-						   struct mm_struct *mm,
-						   unsigned long start,
-						   unsigned long end)
+						   const struct mmu_notifier_range *range)
 {
 	struct ib_ucontext_per_mm *per_mm =
 		container_of(mn, struct ib_ucontext_per_mm, mn);
@@ -192,8 +188,8 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
 	if (unlikely(!per_mm->active))
 		return;
-	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start,
-				      end,
+	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
+				      range->end,
 				      invalidate_range_end_trampoline, true, NULL);
 	up_read(&per_mm->umem_rwsem);
 }
...
@@ -68,8 +68,7 @@ struct mmu_rb_handler {
 static unsigned long mmu_node_start(struct mmu_rb_node *);
 static unsigned long mmu_node_last(struct mmu_rb_node *);
 static int mmu_notifier_range_start(struct mmu_notifier *,
-			struct mm_struct *,
-			unsigned long, unsigned long, bool);
+			const struct mmu_notifier_range *);
 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
 					   unsigned long, unsigned long);
 static void do_remove(struct mmu_rb_handler *handler,
@@ -284,10 +283,7 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
 }
 static int mmu_notifier_range_start(struct mmu_notifier *mn,
-		struct mm_struct *mm,
-		unsigned long start,
-		unsigned long end,
-		bool blockable)
+		const struct mmu_notifier_range *range)
 {
 	struct mmu_rb_handler *handler =
 		container_of(mn, struct mmu_rb_handler, mn);
@@ -297,10 +293,11 @@ static int mmu_notifier_range_start(struct mmu_notifier *mn,
 	bool added = false;
 	spin_lock_irqsave(&handler->lock, flags);
-	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
+	for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1);
 	     node; node = ptr) {
 		/* Guard against node removal. */
-		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
+		ptr = __mmu_int_rb_iter_next(node, range->start,
+					     range->end - 1);
 		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
 		if (handler->ops->invalidate(handler->ops_arg, node)) {
 			__mmu_int_rb_remove(node, root);
...
@@ -201,23 +201,18 @@ static void scif_mmu_notifier_release(struct mmu_notifier *mn,
 }
 static int scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
-						     struct mm_struct *mm,
-						     unsigned long start,
-						     unsigned long end,
-						     bool blockable)
+						     const struct mmu_notifier_range *range)
 {
 	struct scif_mmu_notif	*mmn;
 	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
-	scif_rma_destroy_tcw(mmn, start, end - start);
+	scif_rma_destroy_tcw(mmn, range->start, range->end - range->start);
 	return 0;
 }
 static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
-						    struct mm_struct *mm,
-						    unsigned long start,
-						    unsigned long end)
+						    const struct mmu_notifier_range *range)
 {
 	/*
 	 * Nothing to do here, everything needed was done in
...
@@ -220,9 +220,7 @@ void gru_flush_all_tlb(struct gru_state *gru)
  * MMUOPS notifier callout functions
  */
 static int gru_invalidate_range_start(struct mmu_notifier *mn,
-			struct mm_struct *mm,
-			unsigned long start, unsigned long end,
-			bool blockable)
+			const struct mmu_notifier_range *range)
 {
 	struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
 						 ms_notifier);
@@ -230,15 +228,14 @@ static int gru_invalidate_range_start(struct mmu_notifier *mn,
 	STAT(mmu_invalidate_range);
 	atomic_inc(&gms->ms_range_active);
 	gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms,
-		start, end, atomic_read(&gms->ms_range_active));
-	gru_flush_tlb_range(gms, start, end - start);
+		range->start, range->end, atomic_read(&gms->ms_range_active));
+	gru_flush_tlb_range(gms, range->start, range->end - range->start);
 	return 0;
 }
 static void gru_invalidate_range_end(struct mmu_notifier *mn,
-			struct mm_struct *mm, unsigned long start,
-			unsigned long end)
+			const struct mmu_notifier_range *range)
 {
 	struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
 						 ms_notifier);
@@ -247,7 +244,8 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn,
 	(void)atomic_dec_and_test(&gms->ms_range_active);
 	wake_up_all(&gms->ms_wait_queue);
-	gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end);
+	gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n",
+		gms, range->start, range->end);
 }
 static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
...
@@ -520,26 +520,26 @@ static int unmap_if_in_range(struct gntdev_grant_map *map,
 }
 static int mn_invl_range_start(struct mmu_notifier *mn,
-			       struct mm_struct *mm,
-			       unsigned long start, unsigned long end,
-			       bool blockable)
+			       const struct mmu_notifier_range *range)
 {
 	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
 	struct gntdev_grant_map *map;
 	int ret = 0;
-	if (blockable)
+	if (range->blockable)
 		mutex_lock(&priv->lock);
 	else if (!mutex_trylock(&priv->lock))
 		return -EAGAIN;
 	list_for_each_entry(map, &priv->maps, next) {
-		ret = unmap_if_in_range(map, start, end, blockable);
+		ret = unmap_if_in_range(map, range->start, range->end,
+					range->blockable);
 		if (ret)
 			goto out_unlock;
 	}
 	list_for_each_entry(map, &priv->freeable_maps, next) {
-		ret = unmap_if_in_range(map, start, end, blockable);
+		ret = unmap_if_in_range(map, range->start, range->end,
+					range->blockable);
 		if (ret)
 			goto out_unlock;
 	}
...
@@ -25,6 +25,13 @@ struct mmu_notifier_mm {
 	spinlock_t lock;
 };
+struct mmu_notifier_range {
+	struct mm_struct *mm;
+	unsigned long start;
+	unsigned long end;
+	bool blockable;
+};
 struct mmu_notifier_ops {
 	/*
 	 * Called either by mmu_notifier_unregister or when the mm is
@@ -146,12 +153,9 @@ struct mmu_notifier_ops {
 	 *
 	 */
 	int (*invalidate_range_start)(struct mmu_notifier *mn,
-				      struct mm_struct *mm,
-				      unsigned long start, unsigned long end,
-				      bool blockable);
+				      const struct mmu_notifier_range *range);
 	void (*invalidate_range_end)(struct mmu_notifier *mn,
-				     struct mm_struct *mm,
-				     unsigned long start, unsigned long end);
+				     const struct mmu_notifier_range *range);
 	/*
 	 * invalidate_range() is either called between
...
@@ -189,35 +189,30 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 }
 static int hmm_invalidate_range_start(struct mmu_notifier *mn,
-				      struct mm_struct *mm,
-				      unsigned long start,
-				      unsigned long end,
-				      bool blockable)
+				      const struct mmu_notifier_range *range)
 {
 	struct hmm_update update;
-	struct hmm *hmm = mm->hmm;
+	struct hmm *hmm = range->mm->hmm;
 	VM_BUG_ON(!hmm);
-	update.start = start;
-	update.end = end;
+	update.start = range->start;
+	update.end = range->end;
 	update.event = HMM_UPDATE_INVALIDATE;
-	update.blockable = blockable;
+	update.blockable = range->blockable;
 	return hmm_invalidate_range(hmm, true, &update);
 }
 static void hmm_invalidate_range_end(struct mmu_notifier *mn,
-				     struct mm_struct *mm,
-				     unsigned long start,
-				     unsigned long end)
+				     const struct mmu_notifier_range *range)
 {
 	struct hmm_update update;
-	struct hmm *hmm = mm->hmm;
+	struct hmm *hmm = range->mm->hmm;
 	VM_BUG_ON(!hmm);
-	update.start = start;
-	update.end = end;
+	update.start = range->start;
+	update.end = range->end;
 	update.event = HMM_UPDATE_INVALIDATE;
 	update.blockable = true;
 	hmm_invalidate_range(hmm, false, &update);
...
@@ -171,14 +171,20 @@ int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
 				  unsigned long start, unsigned long end,
 				  bool blockable)
 {
+	struct mmu_notifier_range _range, *range = &_range;
 	struct mmu_notifier *mn;
 	int ret = 0;
 	int id;
+	range->blockable = blockable;
+	range->start = start;
+	range->end = end;
+	range->mm = mm;
 	id = srcu_read_lock(&srcu);
 	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
 		if (mn->ops->invalidate_range_start) {
-			int _ret = mn->ops->invalidate_range_start(mn, mm, start, end, blockable);
+			int _ret = mn->ops->invalidate_range_start(mn, range);
 			if (_ret) {
 				pr_info("%pS callback failed with %d in %sblockable context.\n",
 					mn->ops->invalidate_range_start, _ret,
@@ -198,9 +204,20 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 					 unsigned long end,
 					 bool only_end)
 {
+	struct mmu_notifier_range _range, *range = &_range;
 	struct mmu_notifier *mn;
 	int id;
+	/*
+	 * The end call back will never be call if the start refused to go
+	 * through because of blockable was false so here assume that we
+	 * can block.
+	 */
+	range->blockable = true;
+	range->start = start;
+	range->end = end;
+	range->mm = mm;
 	id = srcu_read_lock(&srcu);
 	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
 		/*
@@ -219,7 +236,7 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 		if (!only_end && mn->ops->invalidate_range)
 			mn->ops->invalidate_range(mn, mm, start, end);
 		if (mn->ops->invalidate_range_end)
-			mn->ops->invalidate_range_end(mn, mm, start, end);
+			mn->ops->invalidate_range_end(mn, range);
 	}
 	srcu_read_unlock(&srcu, id);
 }
...
@@ -363,10 +363,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 }
 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
-						    struct mm_struct *mm,
-						    unsigned long start,
-						    unsigned long end,
-						    bool blockable)
+						    const struct mmu_notifier_range *range)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 	int need_tlb_flush = 0, idx;
@@ -380,7 +377,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 * count is also read inside the mmu_lock critical section.
 	 */
 	kvm->mmu_notifier_count++;
-	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
+	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
 	need_tlb_flush |= kvm->tlbs_dirty;
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)
@@ -388,7 +385,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	spin_unlock(&kvm->mmu_lock);
-	ret = kvm_arch_mmu_notifier_invalidate_range(kvm, start, end, blockable);
+	ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
+						     range->end, range->blockable);
 	srcu_read_unlock(&kvm->srcu, idx);
@@ -396,9 +394,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 }
 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
-						   struct mm_struct *mm,
-						   unsigned long start,
-						   unsigned long end)
+						   const struct mmu_notifier_range *range)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
...