Commit 9446fa16 authored by Abel Vesa, committed by Greg Kroah-Hartman

misc: fastrpc: Fix use-after-free and race in fastrpc_map_find

Currently, there is a race window between the point where the mutex is
unlocked in fastrpc_map_lookup and the point where the reference count is
taken (fastrpc_map_get) in fastrpc_map_find; if the map is freed in that
window, the result is a use-after-free.

So let's merge fastrpc_map_find into fastrpc_map_lookup, which lets us
protect both the maps-list walk and the taking of the reference count with
the &fl->lock spinlock, since the spinlock is now released only after the
reference has been taken. Add a take_ref argument to make the function
suitable for all callers.

Fixes: 8f6c1d8c ("misc: fastrpc: Add fdlist implementation")
Cc: stable <stable@kernel.org>
Co-developed-by: Ola Jeppsson <ola@snap.com>
Signed-off-by: Ola Jeppsson <ola@snap.com>
Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Link: https://lore.kernel.org/r/20221124174941.418450-2-srinivas.kandagatla@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 5023adc3
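
To make the race concrete before reading the diff: the old code looked the
map up under the lock, dropped the lock, and only then bumped the reference
count. The sketch below is a minimal userspace analogue, with a pthread
mutex and a plain integer refcount standing in for &fl->lock and the kref;
every name in it is hypothetical and it is not driver code.

/* race_sketch.c - illustration of the bug pattern, not kernel code.
 * Build: gcc -pthread race_sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct buf {
	int fd;
	int refcount;		/* protected by list_lock */
	struct buf *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf *buf_list;

/* Buggy pattern (old fastrpc_map_find): the lookup drops the lock, and
 * only then does the caller take its reference. */
static struct buf *buf_find_racy(int fd)
{
	struct buf *b;

	pthread_mutex_lock(&list_lock);
	for (b = buf_list; b; b = b->next) {
		if (b->fd == fd) {
			pthread_mutex_unlock(&list_lock);
			/* RACE WINDOW: another thread can drop the last
			 * reference and free *b before the next line runs,
			 * so this increment is a potential use-after-free. */
			b->refcount++;
			return b;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

/* Fixed pattern (new fastrpc_map_lookup): the reference is taken while
 * the lock is still held, so the object cannot be freed in between. */
static struct buf *buf_lookup(int fd, int take_ref)
{
	struct buf *b, *found = NULL;

	pthread_mutex_lock(&list_lock);
	for (b = buf_list; b; b = b->next) {
		if (b->fd != fd)
			continue;
		if (take_ref)
			b->refcount++;	/* still under list_lock: safe */
		found = b;
		break;
	}
	pthread_mutex_unlock(&list_lock);
	return found;
}

int main(void)
{
	struct buf b = { .fd = 42, .refcount = 1, .next = NULL };

	buf_list = &b;
	if (buf_find_racy(42) && buf_lookup(42, 1))
		printf("fd 42 found twice, refcount now %d\n", b.refcount);
	return 0;
}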
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -351,30 +351,31 @@ static void fastrpc_map_get(struct fastrpc_map *map)
 }
 
 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
-			      struct fastrpc_map **ppmap)
+			      struct fastrpc_map **ppmap, bool take_ref)
 {
+	struct fastrpc_session_ctx *sess = fl->sctx;
 	struct fastrpc_map *map = NULL;
+	int ret = -ENOENT;
 
-	mutex_lock(&fl->mutex);
+	spin_lock(&fl->lock);
 	list_for_each_entry(map, &fl->maps, node) {
-		if (map->fd == fd) {
-			*ppmap = map;
-			mutex_unlock(&fl->mutex);
-			return 0;
-		}
-	}
-	mutex_unlock(&fl->mutex);
-
-	return -ENOENT;
-}
-
-static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
-			    struct fastrpc_map **ppmap)
-{
-	int ret = fastrpc_map_lookup(fl, fd, ppmap);
-
-	if (!ret)
-		fastrpc_map_get(*ppmap);
+		if (map->fd != fd)
+			continue;
+
+		if (take_ref) {
+			ret = fastrpc_map_get(map);
+			if (ret) {
+				dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
+					__func__, fd, ret);
+				break;
+			}
+		}
+
+		*ppmap = map;
+		ret = 0;
+		break;
+	}
+	spin_unlock(&fl->lock);
 
 	return ret;
 }
@@ -746,7 +747,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
 	struct fastrpc_map *map = NULL;
 	int err = 0;
 
-	if (!fastrpc_map_find(fl, fd, ppmap))
+	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
 		return 0;
 
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -1070,7 +1071,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
 	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
 		if (!fdlist[i])
 			break;
-		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
+		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
 			fastrpc_map_put(mmap);
 	}
 
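
A note on the take_ref flag, paraphrasing the two call sites in the hunks
above: fastrpc_map_create() reuses an existing map and therefore must leave
the lookup holding an extra reference, while fastrpc_put_args() only needs
to confirm that an fd the DSP placed into fdlist still names a known map
before dropping that map's reference. The same helper now serves both, with
the reference taken (or not) while &fl->lock is still held. The calls below
are taken from the diff; the comments are editorial interpretation, not
from the patch:

	/* keep the map: lookup returns with an extra reference held */
	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
		return 0;

	/* release on the DSP's behalf: no extra reference wanted */
	if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
		fastrpc_map_put(mmap);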