Commit 7ea600b5 authored by Al Viro

Nest rename_lock inside vfsmount_lock

... lest we get livelocks between path_is_under() and d_path() and friends.

The thing is, with respect to fairness lglocks are more similar to rwsems than to
rwlocks: it is possible for thread B to spin trying to take the lock shared while
thread A already holds it shared, if B is on a lower-numbered CPU than A and a
thread C is spinning trying to take the same lock exclusive.
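
For reference, why lglocks behave that way: br_read_lock() takes only the calling
CPU's spinlock, while br_write_lock() takes every CPU's spinlock in ascending CPU
order. Below is a rough sketch of that acquisition path, simplified from
kernel/lglock.c of this era (lockdep annotations and the br_* wrapper macros are
omitted, so treat it as illustrative rather than exact):

	/* simplified lglock acquisition; see kernel/lglock.c */
	void lg_local_lock(struct lglock *lg)	/* roughly what br_read_lock() does */
	{
		preempt_disable();
		arch_spin_lock(this_cpu_ptr(lg->lock));	/* only this CPU's lock */
	}

	void lg_global_lock(struct lglock *lg)	/* roughly what br_write_lock() does */
	{
		int i;

		preempt_disable();
		for_each_possible_cpu(i)	/* CPUs are locked in ascending order */
			arch_spin_lock(per_cpu_ptr(lg->lock, i));
	}

So a writer C spinning on the per-CPU lock of A's (higher-numbered) CPU already owns
the per-CPU lock of B's (lower-numbered) CPU; B's br_read_lock() then spins behind C
even though the lock is only held shared, which is the rwsem-like behaviour described
above.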

As a result, we need a consistent ordering between vfsmount_lock (an lglock) and
rename_lock (a seqlock), even though everything that takes both is going to take
vfsmount_lock only shared.
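
Concretely, the ordering enforced in the callers looks like this (a condensed sketch
of the __d_path() / d_absolute_path() pattern from the hunks below, not a verbatim
excerpt of fs/dcache.c):

	/*
	 * Lock order after this patch: vfsmount_lock (taken shared) is the
	 * outer lock, rename_lock (taken for write) nests inside it.
	 * prepend_path() no longer takes vfsmount_lock itself; each caller does.
	 */
	br_read_lock(&vfsmount_lock);
	write_seqlock(&rename_lock);
	error = prepend_path(path, root, &res, &buflen);
	write_sequnlock(&rename_lock);
	br_read_unlock(&vfsmount_lock);
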
Spotted-by: Brad Spengler <spender@grsecurity.net>
Cc: stable@vger.kernel.org
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 06ae43f3
fs/dcache.c
@@ -2542,7 +2542,6 @@ static int prepend_path(const struct path *path,
 	bool slash = false;
 	int error = 0;
-	br_read_lock(&vfsmount_lock);
 	while (dentry != root->dentry || vfsmnt != root->mnt) {
 		struct dentry * parent;
@@ -2572,8 +2571,6 @@ static int prepend_path(const struct path *path,
 	if (!error && !slash)
 		error = prepend(buffer, buflen, "/", 1);
-out:
-	br_read_unlock(&vfsmount_lock);
 	return error;
 global_root:
@@ -2590,7 +2587,7 @@ static int prepend_path(const struct path *path,
 		error = prepend(buffer, buflen, "/", 1);
 	if (!error)
 		error = is_mounted(vfsmnt) ? 1 : 2;
-	goto out;
+	return error;
 }
 /**
@@ -2617,9 +2614,11 @@ char *__d_path(const struct path *path,
 	int error;
 	prepend(&res, &buflen, "\0", 1);
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	error = prepend_path(path, root, &res, &buflen);
 	write_sequnlock(&rename_lock);
+	br_read_unlock(&vfsmount_lock);
 	if (error < 0)
 		return ERR_PTR(error);
@@ -2636,9 +2635,11 @@ char *d_absolute_path(const struct path *path,
 	int error;
 	prepend(&res, &buflen, "\0", 1);
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	error = prepend_path(path, &root, &res, &buflen);
 	write_sequnlock(&rename_lock);
+	br_read_unlock(&vfsmount_lock);
 	if (error > 1)
 		error = -EINVAL;
@@ -2702,11 +2703,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
 		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
 	get_fs_root(current->fs, &root);
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	error = path_with_deleted(path, &root, &res, &buflen);
+	write_sequnlock(&rename_lock);
+	br_read_unlock(&vfsmount_lock);
 	if (error < 0)
 		res = ERR_PTR(error);
-	write_sequnlock(&rename_lock);
 	path_put(&root);
 	return res;
 }
@@ -2830,6 +2833,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 	get_fs_root_and_pwd(current->fs, &root, &pwd);
 	error = -ENOENT;
+	br_read_lock(&vfsmount_lock);
 	write_seqlock(&rename_lock);
 	if (!d_unlinked(pwd.dentry)) {
 		unsigned long len;
@@ -2839,6 +2843,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 		prepend(&cwd, &buflen, "\0", 1);
 		error = prepend_path(&pwd, &root, &cwd, &buflen);
 		write_sequnlock(&rename_lock);
+		br_read_unlock(&vfsmount_lock);
 		if (error < 0)
 			goto out;
@@ -2859,6 +2864,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 		}
 	} else {
 		write_sequnlock(&rename_lock);
+		br_read_unlock(&vfsmount_lock);
 	}
 out: