Commit ca9b688c authored by Li Zefan, committed by Chris Mason

Btrfs: Avoid accessing unmapped kernel address

When decompressing a chunk of data, we'll copy the data out to
a working buffer if the data is stored in more than one page,
otherwise we'll use the mapped page directly to avoid memory
copy.

In the latter case, there is a corner case where we end up accessing
the kernel address after we've unmapped the page.
Reported-by: Juan Francisco Cantero Hurtado <iam@juanfra.info>
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent b4dc2b8c
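The hazard in the commit message is the classic use-after-kunmap ordering problem. A minimal sketch of the broken and the safe ordering (illustrative only, assuming a CONFIG_HIGHMEM kernel; buggy_copy() and safe_copy() are hypothetical helpers, not part of this patch):

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Bug pattern: buf aliases a kmap()ed page, but the page is unmapped
 * before the data is consumed.  With CONFIG_HIGHMEM the virtual address
 * behind buf may already be torn down or reused by then.
 */
static void buggy_copy(struct page *page, char *out, size_t len)
{
        char *buf = kmap(page);

        kunmap(page);           /* mapping dropped too early ...             */
        memcpy(out, buf, len);  /* ... so this can touch an unmapped address */
}

/* Safe ordering: keep the page mapped until the consumer is done with it. */
static void safe_copy(struct page *page, char *out, size_t len)
{
        char *buf = kmap(page);

        memcpy(out, buf, len);
        kunmap(page);
}

In lzo_decompress_biovec() the consumer is lzo1x_decompress_safe(), and on the fast path buf points straight into the mapped input page, so that page has to stay mapped across the call.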
fs/btrfs/lzo.c

@@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
         unsigned long tot_out;
         unsigned long tot_len;
         char *buf;
+        bool may_late_unmap, need_unmap;
 
         data_in = kmap(pages_in[0]);
         tot_len = read_compress_length(data_in);
@@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws,
 
                 tot_in += in_len;
                 working_bytes = in_len;
+                may_late_unmap = need_unmap = false;
 
                 /* fast path: avoid using the working buffer */
                 if (in_page_bytes_left >= in_len) {
                         buf = data_in + in_offset;
                         bytes = in_len;
+                        may_late_unmap = true;
                         goto cont;
                 }
 
@@ -329,14 +332,17 @@ static int lzo_decompress_biovec(struct list_head *ws,
                                 if (working_bytes == 0 && tot_in >= tot_len)
                                         break;
 
-                                kunmap(pages_in[page_in_index]);
-                                page_in_index++;
-                                if (page_in_index >= total_pages_in) {
+                                if (page_in_index + 1 >= total_pages_in) {
                                         ret = -1;
-                                        data_in = NULL;
                                         goto done;
                                 }
-                                data_in = kmap(pages_in[page_in_index]);
+
+                                if (may_late_unmap)
+                                        need_unmap = true;
+                                else
+                                        kunmap(pages_in[page_in_index]);
+
+                                data_in = kmap(pages_in[++page_in_index]);
 
                                 in_page_bytes_left = PAGE_CACHE_SIZE;
                                 in_offset = 0;
@@ -346,6 +352,8 @@ static int lzo_decompress_biovec(struct list_head *ws,
                 out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
                 ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
                                             &out_len);
+                if (need_unmap)
+                        kunmap(pages_in[page_in_index - 1]);
                 if (ret != LZO_E_OK) {
                         printk(KERN_WARNING "btrfs decompress failed\n");
                         ret = -1;
@@ -363,8 +371,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
                         break;
         }
 done:
-        if (data_in)
-                kunmap(pages_in[page_in_index]);
+        kunmap(pages_in[page_in_index]);
         return ret;
 }
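The corner case hits when the fast path is taken and the compressed segment ends right at, or within a few bytes of, the end of the input page: the loop then switches to the next input page (to read the next segment's length) before lzo1x_decompress_safe() has consumed buf. Before this patch that switch kunmap()ed the page buf still pointed into; with the patch the unmap is merely recorded (need_unmap) and performed after the decompress call. A condensed sketch of that bookkeeping (pick_next_page() is a hypothetical helper, not the real lzo_decompress_biovec() loop):

#include <linux/highmem.h>
#include <linux/types.h>

/*
 * Illustrative helper mirroring the patched page switch: if buf still
 * points into the page being left behind (may_late_unmap), only note
 * that it needs a late kunmap() instead of unmapping it right away.
 */
static char *pick_next_page(struct page **pages_in, unsigned long *index,
                            bool may_late_unmap, bool *need_unmap)
{
        if (may_late_unmap)
                *need_unmap = true;             /* old page stays mapped for buf */
        else
                kunmap(pages_in[*index]);       /* nothing still points into it  */

        return kmap(pages_in[++*index]);        /* map the next input page       */
}

The caller runs the decompressor first and, if need_unmap was set, unmaps the previous input page afterwards, which is what the new hunk around lzo1x_decompress_safe() does.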