Commit 4c9014f2 authored by Linus Torvalds

Merge tag 'nfs-for-3.8-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client bugfixes from Trond Myklebust:

 - Fix a permissions problem when opening NFSv4 files that only have the
   exec bit set.

 - Fix a couple of typos in pNFS (inverted logic), and the mount parsing
   (missing pointer dereference).

 - Work around a series of deadlock issues due to workqueues using
   struct work_struct pointer address comparisons in the re-entrancy
   tests.  Ensure that we don't free struct work_struct prematurely if
   our work function involves waiting for completion of other work items
   (e.g. by calling rpc_shutdown_client).

 - Revert the part of commit 168e4b39 that is causing unnecessary
   warnings to be issued in the nfsd callback code.

* tag 'nfs-for-3.8-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  nfs: avoid dereferencing null pointer in initiate_bulk_draining
  SUNRPC: Partial revert of commit 168e4b39
  NFS: Ensure that we free the rpc_task after read and write cleanups are done
  SUNRPC: Ensure that we free the rpc_task after cleanups are done
  nfs: fix null checking in nfs_get_option_str()
  pnfs: Increase the refcount when LAYOUTGET fails the first time
  NFS: Fix access to suid/sgid executables
parents 5ce2955e ecf0eb9e

@@ -206,7 +206,7 @@ static u32 initiate_bulk_draining(struct nfs_client *clp,
 		list_for_each_entry(lo, &server->layouts, plh_layouts) {
 			ino = igrab(lo->plh_inode);
-			if (ino)
+			if (!ino)
 				continue;
 			spin_lock(&ino->i_lock);
 			/* Is this layout in the process of being freed? */

@@ -2153,12 +2153,16 @@ static int nfs_open_permission_mask(int openflags)
 {
 	int mask = 0;
-	if ((openflags & O_ACCMODE) != O_WRONLY)
-		mask |= MAY_READ;
-	if ((openflags & O_ACCMODE) != O_RDONLY)
-		mask |= MAY_WRITE;
-	if (openflags & __FMODE_EXEC)
-		mask |= MAY_EXEC;
+	if (openflags & __FMODE_EXEC) {
+		/* ONLY check exec rights */
+		mask = MAY_EXEC;
+	} else {
+		if ((openflags & O_ACCMODE) != O_WRONLY)
+			mask |= MAY_READ;
+		if ((openflags & O_ACCMODE) != O_RDONLY)
+			mask |= MAY_WRITE;
+	}
 	return mask;
 }
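
For the exec-only fix above: an open for execute carries __FMODE_EXEC alongside O_RDONLY, so the old mask also asked for MAY_READ and the resulting ACCESS check failed on files with mode 0111. The same reasoning drives the nfs4_opendata_access() change below. A minimal user-space sketch of the before/after logic; the MAY_* values and FMODE_EXEC_FLAG here are illustrative stand-ins, not the kernel definitions:

/* User-space sketch, not kernel code; constants are illustrative only. */
#include <fcntl.h>
#include <stdio.h>

#define MAY_EXEC	0x01
#define MAY_WRITE	0x02
#define MAY_READ	0x04
#define FMODE_EXEC_FLAG	0x20	/* stand-in for the kernel's __FMODE_EXEC */

static int old_mask(int openflags)
{
	int mask = 0;
	if ((openflags & O_ACCMODE) != O_WRONLY)
		mask |= MAY_READ;
	if ((openflags & O_ACCMODE) != O_RDONLY)
		mask |= MAY_WRITE;
	if (openflags & FMODE_EXEC_FLAG)
		mask |= MAY_EXEC;
	return mask;
}

static int new_mask(int openflags)
{
	int mask = 0;
	if (openflags & FMODE_EXEC_FLAG) {
		mask = MAY_EXEC;	/* exec-only open: don't also demand read */
	} else {
		if ((openflags & O_ACCMODE) != O_WRONLY)
			mask |= MAY_READ;
		if ((openflags & O_ACCMODE) != O_RDONLY)
			mask |= MAY_WRITE;
	}
	return mask;
}

int main(void)
{
	int flags = O_RDONLY | FMODE_EXEC_FLAG;	/* shape of an open-for-exec */

	/* old: MAY_READ|MAY_EXEC -> fails on a mode 0111 file
	 * new: MAY_EXEC only     -> succeeds */
	printf("old mask: %#x, new mask: %#x\n", old_mask(flags), new_mask(flags));
	return 0;
}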

@@ -1626,7 +1626,8 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
 static int nfs4_opendata_access(struct rpc_cred *cred,
 				struct nfs4_opendata *opendata,
-				struct nfs4_state *state, fmode_t fmode)
+				struct nfs4_state *state, fmode_t fmode,
+				int openflags)
 {
 	struct nfs_access_entry cache;
 	u32 mask;
@@ -1638,11 +1639,14 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
 	mask = 0;
 	/* don't check MAY_WRITE - a newly created file may not have
-	 * write mode bits, but POSIX allows the creating process to write */
-	if (fmode & FMODE_READ)
-		mask |= MAY_READ;
-	if (fmode & FMODE_EXEC)
-		mask |= MAY_EXEC;
+	 * write mode bits, but POSIX allows the creating process to write.
+	 * use openflags to check for exec, because fmode won't
+	 * always have FMODE_EXEC set when file open for exec. */
+	if (openflags & __FMODE_EXEC) {
+		/* ONLY check for exec rights */
+		mask = MAY_EXEC;
+	} else if (fmode & FMODE_READ)
+		mask = MAY_READ;
 	cache.cred = cred;
 	cache.jiffies = jiffies;
@@ -1896,7 +1900,7 @@ static int _nfs4_do_open(struct inode *dir,
 	if (server->caps & NFS_CAP_POSIX_LOCK)
 		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
-	status = nfs4_opendata_access(cred, opendata, state, fmode);
+	status = nfs4_opendata_access(cred, opendata, state, fmode, flags);
 	if (status != 0)
 		goto err_opendata_put;

@@ -254,7 +254,7 @@ static void
 pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
 {
 	lo->plh_retry_timestamp = jiffies;
-	if (test_and_set_bit(fail_bit, &lo->plh_flags))
+	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
 		atomic_inc(&lo->plh_refcount);
 }
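
The pnfs fix relies on test_and_set_bit() returning the bit's previous value: the extra layout reference should be taken exactly once, on the 0 -> 1 transition, so the matching clear can drop exactly one reference. A small stand-alone sketch; test_and_set_bit_sketch() and the refcount below are stand-ins for the kernel primitives:

/* User-space sketch; mimics only the return-old-bit behaviour of the helper. */
#include <stdio.h>

static unsigned long flags;
static int refcount = 1;

static int test_and_set_bit_sketch(int nr, unsigned long *addr)
{
	int old = (*addr >> nr) & 1;

	*addr |= 1UL << nr;
	return old;		/* previous value of the bit */
}

static void set_fail_bit(int fail_bit)
{
	/* Old code bumped the count only when the bit was *already* set,
	 * i.e. never on the first failure.  New code takes the reference
	 * exactly once, when the bit goes 0 -> 1. */
	if (!test_and_set_bit_sketch(fail_bit, &flags))
		refcount++;
}

int main(void)
{
	set_fail_bit(0);
	set_fail_bit(0);	/* repeated failures take no extra references */
	printf("refcount = %d\n", refcount);	/* prints 2 */
	return 0;
}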

@@ -91,12 +91,16 @@ void nfs_readdata_release(struct nfs_read_data *rdata)
 	put_nfs_open_context(rdata->args.context);
 	if (rdata->pages.pagevec != rdata->pages.page_array)
 		kfree(rdata->pages.pagevec);
-	if (rdata != &read_header->rpc_data)
-		kfree(rdata);
-	else
+	if (rdata == &read_header->rpc_data) {
 		rdata->header = NULL;
+		rdata = NULL;
+	}
 	if (atomic_dec_and_test(&hdr->refcnt))
 		hdr->completion_ops->completion(hdr);
+	/* Note: we only free the rpc_task after callbacks are done.
+	 * See the comment in rpc_free_task() for why
+	 */
+	kfree(rdata);
 }
 EXPORT_SYMBOL_GPL(nfs_readdata_release);
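
The point of the nfs_readdata_release() change (mirrored in nfs_writedata_release() below) is that rdata, which embeds the rpc_task, must stay allocated until the completion callback has run, since that callback may wait on other work items. Nulling the pointer in the embedded case lets a single kfree() at the end cover both branches, because kfree(NULL) is a no-op. A user-space sketch of the pattern, with purely illustrative names:

/* User-space sketch; free(NULL), like kfree(NULL), is a harmless no-op. */
#include <stdio.h>
#include <stdlib.h>

struct header {
	int refcnt;
	void (*completion)(struct header *);
};

struct rpc_data {
	struct header *hdr;
};

static void release(struct rpc_data *data, struct rpc_data *embedded)
{
	struct header *hdr = data->hdr;

	if (data == embedded) {
		data->hdr = NULL;
		data = NULL;		/* embedded case: must not be freed */
	}
	if (--hdr->refcnt == 0)
		hdr->completion(hdr);	/* may wait; data is still allocated */
	free(data);			/* no-op for the embedded case */
}

static void done(struct header *hdr)
{
	(void)hdr;
	printf("completion ran before anything was freed\n");
}

int main(void)
{
	struct header hdr = { .refcnt = 1, .completion = done };
	struct rpc_data *dyn = malloc(sizeof(*dyn));	/* dynamically allocated case */

	dyn->hdr = &hdr;
	release(dyn, NULL);	/* freed, but only after the completion callback */
	return 0;
}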

@@ -1152,7 +1152,7 @@ static int nfs_get_option_str(substring_t args[], char **option)
 {
 	kfree(*option);
 	*option = match_strdup(args);
-	return !option;
+	return !*option;
 }
 static int nfs_get_option_ul(substring_t args[], unsigned long *option)
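
The nfs_get_option_str() one-liner: option is the address of the caller's pointer and is never NULL here, so the old check (return !option) could not report a failed match_strdup(), which returns NULL on allocation failure; checking *option does. A user-space sketch with strdup() standing in for match_strdup():

/* User-space sketch; strdup() stands in for the kernel's match_strdup(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int get_option_str(const char *arg, char **option)
{
	free(*option);
	*option = strdup(arg);	/* may return NULL on allocation failure */
	return !*option;	/* old code's "!option" was always 0 ("success") */
}

int main(void)
{
	char *value = NULL;

	if (get_option_str("example", &value))
		fprintf(stderr, "out of memory\n");
	else
		printf("parsed option: %s\n", value);
	free(value);
	return 0;
}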

@@ -126,12 +126,16 @@ void nfs_writedata_release(struct nfs_write_data *wdata)
 	put_nfs_open_context(wdata->args.context);
 	if (wdata->pages.pagevec != wdata->pages.page_array)
 		kfree(wdata->pages.pagevec);
-	if (wdata != &write_header->rpc_data)
-		kfree(wdata);
-	else
+	if (wdata == &write_header->rpc_data) {
 		wdata->header = NULL;
+		wdata = NULL;
+	}
 	if (atomic_dec_and_test(&hdr->refcnt))
 		hdr->completion_ops->completion(hdr);
+	/* Note: we only free the rpc_task after callbacks are done.
+	 * See the comment in rpc_free_task() for why
+	 */
+	kfree(wdata);
 }
 EXPORT_SYMBOL_GPL(nfs_writedata_release);

@@ -610,11 +610,6 @@ EXPORT_SYMBOL_GPL(rpc_killall_tasks);
  */
 void rpc_shutdown_client(struct rpc_clnt *clnt)
 {
-	/*
-	 * To avoid deadlock, never call rpc_shutdown_client from a
-	 * workqueue context!
-	 */
-	WARN_ON_ONCE(current->flags & PF_WQ_WORKER);
 	might_sleep();
 	dprintk_rcu("RPC: shutting down %s client for %s\n",

@@ -934,16 +934,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
 	return task;
 }
+/*
+ * rpc_free_task - release rpc task and perform cleanups
+ *
+ * Note that we free up the rpc_task _after_ rpc_release_calldata()
+ * in order to work around a workqueue dependency issue.
+ *
+ * Tejun Heo states:
+ * "Workqueue currently considers two work items to be the same if they're
+ * on the same address and won't execute them concurrently - ie. it
+ * makes a work item which is queued again while being executed wait
+ * for the previous execution to complete.
+ *
+ * If a work function frees the work item, and then waits for an event
+ * which should be performed by another work item and *that* work item
+ * recycles the freed work item, it can create a false dependency loop.
+ * There really is no reliable way to detect this short of verifying
+ * every memory free."
+ *
+ */
 static void rpc_free_task(struct rpc_task *task)
 {
-	const struct rpc_call_ops *tk_ops = task->tk_ops;
-	void *calldata = task->tk_calldata;
+	unsigned short tk_flags = task->tk_flags;
+
+	rpc_release_calldata(task->tk_ops, task->tk_calldata);
-	if (task->tk_flags & RPC_TASK_DYNAMIC) {
+	if (tk_flags & RPC_TASK_DYNAMIC) {
 		dprintk("RPC: %5u freeing task\n", task->tk_pid);
 		mempool_free(task, rpc_task_mempool);
 	}
-	rpc_release_calldata(tk_ops, calldata);
 }
 static void rpc_async_release(struct work_struct *work)
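
The rpc_free_task() reordering, like the NFS read/write changes above, comes down to: copy what you need out of the task, run the potentially blocking cleanup callback first, and only then hand the task's memory back, so the work item's address cannot be reused for a new work item while the callback is waiting. A minimal user-space sketch of that ordering; the types and names are illustrative, not the SUNRPC API:

/* User-space sketch of the "cleanup first, free the task last" ordering. */
#include <stdio.h>
#include <stdlib.h>

#define TASK_DYNAMIC 0x01

struct task {
	unsigned short flags;
	void (*release)(void *calldata);	/* cleanup; may block on other work */
	void *calldata;
};

static void free_task(struct task *task)
{
	/* copy first: the release callback may tear down a structure that
	 * embeds the task */
	unsigned short flags = task->flags;

	/* run the (possibly blocking) cleanup while the task memory is still
	 * owned, so its address cannot be recycled into a new work item */
	task->release(task->calldata);

	if (flags & TASK_DYNAMIC)
		free(task);			/* free the task last */
}

static void demo_release(void *calldata)
{
	printf("cleanup for %s (may sleep here)\n", (const char *)calldata);
}

int main(void)
{
	struct task *t = malloc(sizeof(*t));

	t->flags = TASK_DYNAMIC;
	t->release = demo_release;
	t->calldata = "demo";
	free_task(t);
	return 0;
}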