Commit 2124d84d authored by Linus Torvalds

module: make waiting for a concurrent module loader interruptible

The recursive aes-arm-bs module load situation reported by Russell King
is getting fixed in the crypto layer, but this in the meantime fixes the
"recursive load hangs forever" by just making the waiting for the first
module load be interruptible.

This should now match the old behavior before commit 9b9879fc
("modules: catch concurrent module loads, treat them as idempotent"),
which used the different "wait for module to be ready" code in
module_patient_check_exists().

End result: a recursive module load will still block, but now a signal
will interrupt it and fail the second module load, at which point the
first module will successfully complete loading.

Fixes: 9b9879fc ("modules: catch concurrent module loads, treat them as idempotent")
Cc: Russell King <linux@armlinux.org.uk>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ee9a43b7
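The pattern the fix adopts — pre-set the waiter's result to -EINTR, wait interruptibly, and on interruption unlink the on-stack entry under the lock so a late completion cannot touch it — can be illustrated with a small user-space analogue. This is only a sketch under stated assumptions: the waiter list, the lock, and the explicit "interrupt" call below are hypothetical stand-ins, not the kernel's idempotent machinery shown in the diff.

/*
 * Hypothetical user-space analogue of the pattern used by the fix.
 * Build with: cc -pthread sketch.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct waiter {
	struct waiter *next;
	bool done;
	bool interrupted;
	int ret;			/* pre-set to -EINTR, like idem.ret */
	pthread_cond_t cond;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *waiters;		/* everyone waiting on the same cookie */

/* Queue ourselves; the result defaults to -EINTR until somebody completes us. */
static void waiter_add(struct waiter *w)
{
	*w = (struct waiter) { .ret = -EINTR };
	pthread_cond_init(&w->cond, NULL);
	pthread_mutex_lock(&lock);
	w->next = waiters;
	waiters = w;
	pthread_mutex_unlock(&lock);
}

/* The winner finished: hand its result to every waiter still on the list. */
static void complete_all(int ret)
{
	pthread_mutex_lock(&lock);
	for (struct waiter *w = waiters; w; w = w->next) {
		w->ret = ret;
		w->done = true;
		pthread_cond_signal(&w->cond);
	}
	waiters = NULL;
	pthread_mutex_unlock(&lock);
}

/* Stand-in for a signal: abort one waiter's wait. */
static void waiter_interrupt(struct waiter *w)
{
	pthread_mutex_lock(&lock);
	w->interrupted = true;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&lock);
}

/*
 * Wait for completion.  If interrupted first, unlink ourselves under the
 * lock so complete_all() can no longer see us; w->ret is then still -EINTR.
 */
static int waiter_wait(struct waiter *w)
{
	pthread_mutex_lock(&lock);
	while (!w->done && !w->interrupted)
		pthread_cond_wait(&w->cond, &lock);
	if (!w->done) {
		for (struct waiter **pp = &waiters; *pp; pp = &(*pp)->next) {
			if (*pp == w) {
				*pp = w->next;
				break;
			}
		}
	}
	pthread_mutex_unlock(&lock);
	return w->ret;
}

int main(void)
{
	struct waiter w;

	/* Interrupted before completion: ret stays -EINTR. */
	waiter_add(&w);
	waiter_interrupt(&w);
	printf("interrupted: ret=%d\n", waiter_wait(&w));

	/* Completed normally: ret is whatever the winner handed out. */
	waiter_add(&w);
	complete_all(0);
	printf("completed:   ret=%d\n", waiter_wait(&w));
	return 0;
}

The same property the diff's comment relies on holds here: the abandoned waiter either returns -EINTR (it removed itself before anyone completed it) or the real result (the completion raced in first), and either value is correct.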
@@ -3104,7 +3104,7 @@ static bool idempotent(struct idempotent *u, const void *cookie)
 	struct idempotent *existing;
 	bool first;
 
-	u->ret = 0;
+	u->ret = -EINTR;
 	u->cookie = cookie;
 	init_completion(&u->complete);
@@ -3140,7 +3140,7 @@ static int idempotent_complete(struct idempotent *u, int ret)
 	hlist_for_each_entry_safe(pos, next, head, entry) {
 		if (pos->cookie != cookie)
 			continue;
-		hlist_del(&pos->entry);
+		hlist_del_init(&pos->entry);
 		pos->ret = ret;
 		complete(&pos->complete);
 	}
@@ -3148,6 +3148,28 @@ static int idempotent_complete(struct idempotent *u, int ret)
 	return ret;
 }
 
+/*
+ * Wait for the idempotent worker.
+ *
+ * If we get interrupted, we need to remove ourselves from
+ * the idempotent list, and the completion may still come in.
+ *
+ * The 'idem_lock' protects against the race, and 'idem.ret' was
+ * initialized to -EINTR and is thus always the right return
+ * value even if the idempotent work then completes between
+ * the wait_for_completion and the cleanup.
+ */
+static int idempotent_wait_for_completion(struct idempotent *u)
+{
+	if (wait_for_completion_interruptible(&u->complete)) {
+		spin_lock(&idem_lock);
+		if (!hlist_unhashed(&u->entry))
+			hlist_del(&u->entry);
+		spin_unlock(&idem_lock);
+	}
+	return u->ret;
+}
+
 static int init_module_from_file(struct file *f, const char __user * uargs, int flags)
 {
 	struct load_info info = { };
@@ -3191,20 +3213,8 @@ static int idempotent_init_module(struct file *f, const char __user * uargs, int
 	/*
 	 * Somebody else won the race and is loading the module.
-	 *
-	 * We have to wait for it forever, since our 'idem' is
-	 * on the stack and the list entry stays there until
-	 * completed (but we could fix it under the idem_lock)
-	 *
-	 * It's also unclear what a real timeout might be,
-	 * but we could maybe at least make this killable
-	 * and remove the idem entry in that case?
	 */
-	for (;;) {
-		if (wait_for_completion_timeout(&idem.complete, 10*HZ))
-			return idem.ret;
-		pr_warn_once("module '%pD' taking a long time to load", f);
-	}
+	return idempotent_wait_for_completion(&idem);
 }
 
 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
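From user space, the observable effect is that a finit_module(2) call blocked behind a concurrent load of the same module can now be interrupted by a signal and fails with EINTR, while the first load still completes. A minimal, hypothetical caller (needs CAP_SYS_MODULE and a real .ko path) that just reports that errno might look like:

/* Hypothetical demo: load a module and report EINTR if the wait was interrupted. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <module.ko>\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* A second concurrent load of the same module may now fail with EINTR
	 * if a signal arrives while it waits for the first one. */
	if (syscall(SYS_finit_module, fd, "", 0) != 0)
		perror("finit_module");

	close(fd);
	return 0;
}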