Commit 0f516248 authored by Chuck Lever

NFSD: Watch for rq_pages bounds checking errors in nfsd_splice_actor()

There have been several bugs over the years where the NFSD splice
actor has attempted to write outside the rq_pages array.

This is a "should never happen" condition, but if for some reason
the pipe splice actor should attempt to walk past the end of
rq_pages, it needs to terminate the READ operation to prevent
corruption of the pointer addresses in the fields just beyond the
array.

A server crash is thus prevented. Since the code is not behaving,
the READ operation returns -EIO to the client. None of the READ
payload data can be trusted if the splice actor isn't operating as
expected.
Suggested-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
parent 376bcd9b
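
As a standalone illustration of the pattern this patch adds (a user-space C sketch, not kernel code; page_ring, ring_store, and RING_SLOTS are hypothetical names, and the boundary test here is the conventional strict form rather than the exact check used in svc_rqst_replace_page()): validate the cursor against the array bounds before writing through it, and let the caller convert a false return into an error, much as nfsd_splice_actor() maps it to -EIO.

/*
 * Standalone sketch of the bounds-check-before-write pattern.
 * The struct and function names are hypothetical; the kernel's
 * svc_rqst_replace_page() additionally emits a trace event when
 * the check fails.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SLOTS 4

struct page_ring {
	void *slots[RING_SLOTS];	/* plays the role of rq_pages[] */
	void **cursor;			/* plays the role of rq_next_page */
};

/* Refuse to write through a cursor that points outside slots[]. */
static bool ring_store(struct page_ring *r, void *item)
{
	void **begin = r->slots;
	void **end = &r->slots[RING_SLOTS];

	if (r->cursor < begin || r->cursor >= end)
		return false;
	*(r->cursor++) = item;
	return true;
}

int main(void)
{
	struct page_ring r = { .slots = { NULL } };
	int payload = 42;

	r.cursor = r.slots;

	/*
	 * The fifth store would land one slot past the end of slots[],
	 * so the guard rejects it and the caller reports an error,
	 * the way the splice actor returns -EIO.
	 */
	for (int i = 0; i < RING_SLOTS + 1; i++) {
		if (!ring_store(&r, &payload)) {
			fprintf(stderr, "store %d rejected: out of bounds\n", i);
			return 1;
		}
	}
	return 0;
}

Run as-is, the sketch stores four items and then fails on the fifth rather than scribbling over whatever follows slots[], which is the guarantee the new check gives rq_pages and the fields just beyond it.
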
@@ -930,6 +930,9 @@ nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,
  * Grab and keep cached pages associated with a file in the svc_rqst
  * so that they can be passed to the network sendmsg/sendpage routines
  * directly. They will be released after the sending has completed.
+ *
+ * Return values: Number of bytes consumed, or -EIO if there are no
+ * remaining pages in rqstp->rq_pages.
  */
 static int
 nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
@@ -948,7 +951,8 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 		 */
 		if (page == *(rqstp->rq_next_page - 1))
 			continue;
-		svc_rqst_replace_page(rqstp, page);
+		if (unlikely(!svc_rqst_replace_page(rqstp, page)))
+			return -EIO;
 	}
 	if (rqstp->rq_res.page_len == 0)	// first call
 		rqstp->rq_res.page_base = offset % PAGE_SIZE;
...
@@ -422,7 +422,7 @@ struct svc_serv *svc_create(struct svc_program *, unsigned int,
 			    int (*threadfn)(void *data));
 struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
 					struct svc_pool *pool, int node);
-void		   svc_rqst_replace_page(struct svc_rqst *rqstp,
+bool		   svc_rqst_replace_page(struct svc_rqst *rqstp,
 					struct page *page);
 void		   svc_rqst_free(struct svc_rqst *);
 void		   svc_exit_thread(struct svc_rqst *);
...
@@ -1790,6 +1790,31 @@ DEFINE_EVENT(svc_rqst_status, svc_send,
 	TP_PROTO(const struct svc_rqst *rqst, int status),
 	TP_ARGS(rqst, status));
 
+TRACE_EVENT(svc_replace_page_err,
+	TP_PROTO(const struct svc_rqst *rqst),
+
+	TP_ARGS(rqst),
+	TP_STRUCT__entry(
+		SVC_RQST_ENDPOINT_FIELDS(rqst)
+
+		__field(const void *, begin)
+		__field(const void *, respages)
+		__field(const void *, nextpage)
+	),
+
+	TP_fast_assign(
+		SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
+
+		__entry->begin = rqst->rq_pages;
+		__entry->respages = rqst->rq_respages;
+		__entry->nextpage = rqst->rq_next_page;
+	),
+
+	TP_printk(SVC_RQST_ENDPOINT_FORMAT " begin=%p respages=%p nextpage=%p",
+		SVC_RQST_ENDPOINT_VARARGS,
+		__entry->begin, __entry->respages, __entry->nextpage)
+);
+
 TRACE_EVENT(svc_stats_latency,
 	TP_PROTO(
 		const struct svc_rqst *rqst
...
@@ -842,9 +842,21 @@ EXPORT_SYMBOL_GPL(svc_set_num_threads);
  *
  * When replacing a page in rq_pages, batch the release of the
  * replaced pages to avoid hammering the page allocator.
+ *
+ * Return values:
+ *   %true: page replaced
+ *   %false: array bounds checking failed
  */
-void svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
+bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
 {
+	struct page **begin = rqstp->rq_pages;
+	struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES];
+
+	if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) {
+		trace_svc_replace_page_err(rqstp);
+		return false;
+	}
+
 	if (*rqstp->rq_next_page) {
 		if (!pagevec_space(&rqstp->rq_pvec))
 			__pagevec_release(&rqstp->rq_pvec);
@@ -853,6 +865,7 @@ void svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
 
 	get_page(page);
 	*(rqstp->rq_next_page++) = page;
+	return true;
 }
 EXPORT_SYMBOL_GPL(svc_rqst_replace_page);
...