Commit 1c2a284f authored by Neil Brown, committed by Linus Torvalds

[PATCH] Fix the sunrpc cache/reader management properly.

A recent patch changed the rpc/*/channel files to behave
differently depending on whether they were open for read or not.

This hadn't been tested thoroughly.  The cache_reader structure,
which is now only allocated when the file is opened for read, had a
field that was used when writing.

This patch removes that field and takes a different approach to solving
the issue it addressed.

And it has been tested a bit better.
parent 2ed879ff
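
To make the failure mode concrete: after the earlier patch, filp->private_data is only populated when a channel file is opened for read, so a write through a descriptor that was not opened for read reaches cache_write() with no cache_reader behind it.  A minimal sketch of the old pattern is below; the cache_reader fields and the use of page are taken from the diff that follows, while the function body and includes are illustrative, not the verbatim kernel code.

    #include <linux/fs.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    /* Sketch of the pre-patch layout: the write buffer hung off the
     * reader state, which only exists for descriptors opened for read. */
    struct cache_reader {
            int     offset;
            char    *page;          /* write buffer, before this patch */
    };

    static ssize_t sketch_cache_write(struct file *filp, const char __user *buf,
                                      size_t count, loff_t *ppos)
    {
            /* NULL when the file was not opened for read ...          */
            struct cache_reader *rp = filp->private_data;

            if (count > PAGE_SIZE)
                    return -EINVAL;
            if (rp->page == NULL)   /* ... so this dereference oopses  */
                    rp->page = kmalloc(PAGE_SIZE, GFP_KERNEL);
            if (rp->page == NULL)
                    return -ENOMEM;
            if (copy_from_user(rp->page, buf, count))
                    return -EFAULT;
            return count;
    }
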
@@ -557,7 +557,6 @@ struct cache_request {
 struct cache_reader {
         struct cache_queue      q;
         int                     offset; /* if non-0, we have a refcnt on next request */
-        char                    *page;
 };
 
 static ssize_t
@@ -644,7 +643,7 @@ cache_write(struct file *filp, const char *buf, size_t count,
             loff_t *ppos)
 {
         int err;
-        struct cache_reader *rp = filp->private_data;
+        char *page;
         struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;
 
         if (ppos != &filp->f_pos)
@@ -657,26 +656,26 @@ cache_write(struct file *filp, const char *buf, size_t count,
         down(&queue_io_sem);
 
-        if (rp->page == NULL) {
-                rp->page = kmalloc(PAGE_SIZE, GFP_KERNEL);
-                if (rp->page == NULL) {
-                        up(&queue_io_sem);
-                        return -ENOMEM;
-                }
+        page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+        if (page == NULL) {
+                up(&queue_io_sem);
+                return -ENOMEM;
         }
 
-        if (copy_from_user(rp->page, buf, count)) {
+        if (copy_from_user(page, buf, count)) {
                 up(&queue_io_sem);
+                kfree(page);
                 return -EFAULT;
         }
         if (count < PAGE_SIZE)
-                rp->page[count] = '\0';
+                page[count] = '\0';
 
         if (cd->cache_parse)
-                err = cd->cache_parse(cd, rp->page, count);
+                err = cd->cache_parse(cd, page, count);
         else
                 err = -EINVAL;
 
         up(&queue_io_sem);
+        kfree(page);
         return err ? err : count;
 }
 
@@ -694,6 +693,10 @@ cache_poll(struct file *filp, poll_table *wait)
 
         /* alway allow write */
         mask = POLL_OUT | POLLWRNORM;
+
+        if (!rp)
+                return mask;
+
         spin_lock(&queue_lock);
 
         for (cq= &rp->q; &cq->list != &cd->queue;
@@ -715,8 +718,9 @@ cache_ioctl(struct inode *ino, struct file *filp,
         struct cache_queue *cq;
         struct cache_detail *cd = PDE(ino)->data;
 
-        if (cmd != FIONREAD)
+        if (cmd != FIONREAD || !rp)
                 return -EINVAL;
+
         spin_lock(&queue_lock);
 
         /* only find the length remaining in current request,
@@ -746,7 +750,6 @@ cache_open(struct inode *inode, struct file *filp)
                 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
                 if (!rp)
                         return -ENOMEM;
-                rp->page = NULL;
                 rp->offset = 0;
                 rp->q.reader = 1;
                 atomic_inc(&cd->readers);
@@ -780,9 +783,6 @@ cache_release(struct inode *inode, struct file *filp)
                 list_del(&rp->q.list);
                 spin_unlock(&queue_lock);
 
-                if (rp->page)
-                        kfree(rp->page);
-
                 filp->private_data = NULL;
                 kfree(rp);
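Read together, the cache_write() hunks replace that per-reader buffer with one allocated and freed on every write, so the write path no longer looks at filp->private_data at all.  Pieced together from the hunks above, and with the unchanged ppos/length checks between hunks elided, the resulting function reads roughly as follows (a reconstruction, not the verbatim file):

    static ssize_t
    cache_write(struct file *filp, const char *buf, size_t count,
                loff_t *ppos)
    {
            int err;
            char *page;
            struct cache_detail *cd = PDE(filp->f_dentry->d_inode)->data;

            /* ... unchanged ppos and length checks elided ... */

            down(&queue_io_sem);

            page = kmalloc(PAGE_SIZE, GFP_KERNEL);  /* fresh buffer per write */
            if (page == NULL) {
                    up(&queue_io_sem);
                    return -ENOMEM;
            }

            if (copy_from_user(page, buf, count)) {
                    up(&queue_io_sem);
                    kfree(page);
                    return -EFAULT;
            }
            if (count < PAGE_SIZE)
                    page[count] = '\0';

            if (cd->cache_parse)
                    err = cd->cache_parse(cd, page, count);
            else
                    err = -EINVAL;

            up(&queue_io_sem);
            kfree(page);                            /* buffer never outlives the call */
            return err ? err : count;
    }

Paying for a kmalloc()/kfree() on each write is a small cost next to making the write path independent of whether any reader state exists, which is the same condition the new guards in cache_poll() and cache_ioctl() handle.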