Commit e3bf460f authored by Nate Diller's avatar Nate Diller Committed by Linus Torvalds

ntfs: use zero_user_page

Use zero_user_page() instead of open-coding it.

[akpm@linux-foundation.org: kmap-type fixes]
Signed-off-by: Nate Diller <nate.diller@gmail.com>
Acked-by: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6d690dca
...@@ -86,19 +86,15 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) ...@@ -86,19 +86,15 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
} }
/* Check for the current buffer head overflowing. */ /* Check for the current buffer head overflowing. */
if (unlikely(file_ofs + bh->b_size > init_size)) { if (unlikely(file_ofs + bh->b_size > init_size)) {
u8 *kaddr;
int ofs; int ofs;
ofs = 0; ofs = 0;
if (file_ofs < init_size) if (file_ofs < init_size)
ofs = init_size - file_ofs; ofs = init_size - file_ofs;
local_irq_save(flags); local_irq_save(flags);
kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); zero_user_page(page, bh_offset(bh) + ofs,
memset(kaddr + bh_offset(bh) + ofs, 0, bh->b_size - ofs, KM_BIO_SRC_IRQ);
bh->b_size - ofs);
kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
local_irq_restore(flags); local_irq_restore(flags);
flush_dcache_page(page);
} }
} else { } else {
clear_buffer_uptodate(bh); clear_buffer_uptodate(bh);
...@@ -245,8 +241,7 @@ static int ntfs_read_block(struct page *page) ...@@ -245,8 +241,7 @@ static int ntfs_read_block(struct page *page)
rl = NULL; rl = NULL;
nr = i = 0; nr = i = 0;
do { do {
u8 *kaddr; int err = 0;
int err;
if (unlikely(buffer_uptodate(bh))) if (unlikely(buffer_uptodate(bh)))
continue; continue;
...@@ -254,7 +249,6 @@ static int ntfs_read_block(struct page *page) ...@@ -254,7 +249,6 @@ static int ntfs_read_block(struct page *page)
arr[nr++] = bh; arr[nr++] = bh;
continue; continue;
} }
err = 0;
bh->b_bdev = vol->sb->s_bdev; bh->b_bdev = vol->sb->s_bdev;
/* Is the block within the allowed limits? */ /* Is the block within the allowed limits? */
if (iblock < lblock) { if (iblock < lblock) {
...@@ -340,10 +334,7 @@ static int ntfs_read_block(struct page *page) ...@@ -340,10 +334,7 @@ static int ntfs_read_block(struct page *page)
bh->b_blocknr = -1UL; bh->b_blocknr = -1UL;
clear_buffer_mapped(bh); clear_buffer_mapped(bh);
handle_zblock: handle_zblock:
kaddr = kmap_atomic(page, KM_USER0); zero_user_page(page, i * blocksize, blocksize, KM_USER0);
memset(kaddr + i * blocksize, 0, blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
if (likely(!err)) if (likely(!err))
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} while (i++, iblock++, (bh = bh->b_this_page) != head); } while (i++, iblock++, (bh = bh->b_this_page) != head);
...@@ -460,10 +451,7 @@ static int ntfs_readpage(struct file *file, struct page *page) ...@@ -460,10 +451,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
* ok to ignore the compressed flag here. * ok to ignore the compressed flag here.
*/ */
if (unlikely(page->index > 0)) { if (unlikely(page->index > 0)) {
kaddr = kmap_atomic(page, KM_USER0); zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
memset(kaddr, 0, PAGE_CACHE_SIZE);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
goto done; goto done;
} }
if (!NInoAttr(ni)) if (!NInoAttr(ni))
...@@ -790,14 +778,10 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc) ...@@ -790,14 +778,10 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
* uptodate so it can get discarded by the VM. * uptodate so it can get discarded by the VM.
*/ */
if (err == -ENOENT || lcn == LCN_ENOENT) { if (err == -ENOENT || lcn == LCN_ENOENT) {
u8 *kaddr;
bh->b_blocknr = -1; bh->b_blocknr = -1;
clear_buffer_dirty(bh); clear_buffer_dirty(bh);
kaddr = kmap_atomic(page, KM_USER0); zero_user_page(page, bh_offset(bh), blocksize,
memset(kaddr + bh_offset(bh), 0, blocksize); KM_USER0);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
err = 0; err = 0;
continue; continue;
...@@ -1422,10 +1406,8 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc) ...@@ -1422,10 +1406,8 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
/* The page straddles i_size. */ /* The page straddles i_size. */
unsigned int ofs = i_size & ~PAGE_CACHE_MASK; unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
kaddr = kmap_atomic(page, KM_USER0); zero_user_page(page, ofs, PAGE_CACHE_SIZE - ofs,
memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs); KM_USER0);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
} }
/* Handle mst protected attributes. */ /* Handle mst protected attributes. */
if (NInoMstProtected(ni)) if (NInoMstProtected(ni))
......
...@@ -606,11 +606,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -606,11 +606,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
ntfs_submit_bh_for_read(bh); ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh; *wait_bh++ = bh;
} else { } else {
u8 *kaddr = kmap_atomic(page, KM_USER0); zero_user_page(page, bh_offset(bh),
memset(kaddr + bh_offset(bh), 0, blocksize, KM_USER0);
blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
} }
...@@ -685,12 +682,9 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -685,12 +682,9 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
ntfs_submit_bh_for_read(bh); ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh; *wait_bh++ = bh;
} else { } else {
u8 *kaddr = kmap_atomic(page, zero_user_page(page,
KM_USER0); bh_offset(bh),
memset(kaddr + bh_offset(bh), blocksize, KM_USER0);
0, blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
} }
...@@ -708,11 +702,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -708,11 +702,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
*/ */
if (bh_end <= pos || bh_pos >= end) { if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh)) { if (!buffer_uptodate(bh)) {
u8 *kaddr = kmap_atomic(page, KM_USER0); zero_user_page(page, bh_offset(bh),
memset(kaddr + bh_offset(bh), 0, blocksize, KM_USER0);
blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
mark_buffer_dirty(bh); mark_buffer_dirty(bh);
...@@ -751,10 +742,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -751,10 +742,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
if (!buffer_uptodate(bh)) if (!buffer_uptodate(bh))
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) { } else if (!buffer_uptodate(bh)) {
u8 *kaddr = kmap_atomic(page, KM_USER0); zero_user_page(page, bh_offset(bh), blocksize,
memset(kaddr + bh_offset(bh), 0, blocksize); KM_USER0);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
continue; continue;
...@@ -878,11 +867,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -878,11 +867,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
if (!buffer_uptodate(bh)) if (!buffer_uptodate(bh))
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) { } else if (!buffer_uptodate(bh)) {
u8 *kaddr = kmap_atomic(page, KM_USER0); zero_user_page(page, bh_offset(bh),
memset(kaddr + bh_offset(bh), 0, blocksize, KM_USER0);
blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
continue; continue;
...@@ -1137,16 +1123,12 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -1137,16 +1123,12 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
* to zero the overflowing region. * to zero the overflowing region.
*/ */
if (unlikely(bh_pos + blocksize > initialized_size)) { if (unlikely(bh_pos + blocksize > initialized_size)) {
u8 *kaddr;
int ofs = 0; int ofs = 0;
if (likely(bh_pos < initialized_size)) if (likely(bh_pos < initialized_size))
ofs = initialized_size - bh_pos; ofs = initialized_size - bh_pos;
kaddr = kmap_atomic(page, KM_USER0); zero_user_page(page, bh_offset(bh) + ofs,
memset(kaddr + bh_offset(bh) + ofs, 0, blocksize - ofs, KM_USER0);
blocksize - ofs);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
} }
} else /* if (unlikely(!buffer_uptodate(bh))) */ } else /* if (unlikely(!buffer_uptodate(bh))) */
err = -EIO; err = -EIO;
...@@ -1286,11 +1268,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, ...@@ -1286,11 +1268,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
if (PageUptodate(page)) if (PageUptodate(page))
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
else { else {
u8 *kaddr = kmap_atomic(page, KM_USER0); zero_user_page(page, bh_offset(bh),
memset(kaddr + bh_offset(bh), 0, blocksize, KM_USER0);
blocksize);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
} }
} }
...@@ -1350,9 +1329,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages, ...@@ -1350,9 +1329,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
len = PAGE_CACHE_SIZE; len = PAGE_CACHE_SIZE;
if (len > bytes) if (len > bytes)
len = bytes; len = bytes;
kaddr = kmap_atomic(*pages, KM_USER0); zero_user_page(*pages, 0, len, KM_USER0);
memset(kaddr, 0, len);
kunmap_atomic(kaddr, KM_USER0);
} }
goto out; goto out;
} }
...@@ -1473,9 +1450,7 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages, ...@@ -1473,9 +1450,7 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
len = PAGE_CACHE_SIZE; len = PAGE_CACHE_SIZE;
if (len > bytes) if (len > bytes)
len = bytes; len = bytes;
kaddr = kmap_atomic(*pages, KM_USER0); zero_user_page(*pages, 0, len, KM_USER0);
memset(kaddr, 0, len);
kunmap_atomic(kaddr, KM_USER0);
} }
goto out; goto out;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment