Commit 4300cd63 authored by John Hubbard, committed by Jens Wiklander

tee: convert get_user_pages() --> pin_user_pages()

This code was using get_user_pages*() in a "Case 2" scenario
(DMA/RDMA), per the categorization in [1]. That means it's time to
convert the get_user_pages*() + put_page() calls to
pin_user_pages*() + unpin_user_pages() calls.
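
As a rough sketch (not part of this patch; demo_pin_user_buffer() and
its error handling are invented for illustration), the pairing that
this kind of conversion ends up with looks like:

    /*
     * Hypothetical example: pin_user_pages_fast() pairs with
     * unpin_user_pages(), whereas get_user_pages_fast() paired with
     * put_page() on each page.
     */
    #include <linux/mm.h>
    #include <linux/slab.h>

    static int demo_pin_user_buffer(unsigned long start, int num_pages)
    {
            struct page **pages;
            int rc;

            pages = kcalloc(num_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            /* Replaces get_user_pages_fast(): the pages stay pinned
             * while hardware (or the secure world) may access them.
             */
            rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE, pages);
            if (rc > 0) {
                    /* ... hand the pinned pages to the device ... */

                    /* Pinned pages are released with unpin_user_pages(),
                     * not put_page(), to keep the pin count balanced.
                     */
                    unpin_user_pages(pages, rc);
            }

            kfree(pages);
            /* A real caller would also treat rc < num_pages as an error. */
            return rc < 0 ? rc : 0;
    }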

Factor out a new, small release_registered_pages() function in order
to consolidate the logic for discerning between TEE_SHM_USER_MAPPED
and TEE_SHM_KERNEL_MAPPED pages. This also absorbs the kfree() call
that is required there.

There is some helpful background in [2]: basically, this is a small
part of fixing a long-standing disconnect between pinning pages, and
file systems' use of those pages.

[1] Documentation/core-api/pin_user_pages.rst

[2] "Explicit pinning of user-space pages":
    https://lwn.net/Articles/807108/

Cc: Jens Wiklander <jens.wiklander@linaro.org>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: tee-dev@lists.linaro.org
Cc: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
parent 9123e3a7
@@ -12,6 +12,22 @@
 #include <linux/uio.h>
 #include "tee_private.h"
 
+static void release_registered_pages(struct tee_shm *shm)
+{
+	if (shm->pages) {
+		if (shm->flags & TEE_SHM_USER_MAPPED) {
+			unpin_user_pages(shm->pages, shm->num_pages);
+		} else {
+			size_t n;
+
+			for (n = 0; n < shm->num_pages; n++)
+				put_page(shm->pages[n]);
+		}
+
+		kfree(shm->pages);
+	}
+}
+
 static void tee_shm_release(struct tee_shm *shm)
 {
 	struct tee_device *teedev = shm->ctx->teedev;
@@ -32,17 +48,13 @@ static void tee_shm_release(struct tee_shm *shm)
 		poolm->ops->free(poolm, shm);
 	} else if (shm->flags & TEE_SHM_REGISTER) {
-		size_t n;
 		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
 
 		if (rc)
 			dev_err(teedev->dev.parent,
 				"unregister shm %p failed: %d", shm, rc);
 
-		for (n = 0; n < shm->num_pages; n++)
-			put_page(shm->pages[n]);
-
-		kfree(shm->pages);
+		release_registered_pages(shm);
 	}
 
 	teedev_ctx_put(shm->ctx);
@@ -228,7 +240,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 	}
 
 	if (flags & TEE_SHM_USER_MAPPED) {
-		rc = get_user_pages_fast(start, num_pages, FOLL_WRITE,
+		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
 					 shm->pages);
 	} else {
 		struct kvec *kiov;
@@ -292,18 +304,12 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 	return shm;
 err:
 	if (shm) {
-		size_t n;
-
 		if (shm->id >= 0) {
 			mutex_lock(&teedev->mutex);
 			idr_remove(&teedev->idr, shm->id);
 			mutex_unlock(&teedev->mutex);
 		}
-		if (shm->pages) {
-			for (n = 0; n < shm->num_pages; n++)
-				put_page(shm->pages[n]);
-			kfree(shm->pages);
-		}
+		release_registered_pages(shm);
 	}
 	kfree(shm);
 	teedev_ctx_put(ctx);