Commit 1138bf4c authored by Anton Altaparmakov

NTFS: 2.0.19 - Fix race condition, improvements, and optimizations in i/o interface.

- Apply block optimization added to fs/ntfs/aops.c::ntfs_read_block()
  to fs/ntfs/compress.c::ntfs_file_read_compressed_block() as well.
- Drop the "file" from ntfs_file_read_compressed_block().
- Rename fs/ntfs/aops.c::ntfs_end_buffer_read_async() to
  ntfs_end_buffer_async_read() (more like the fs/buffer.c counterpart).
- Update ntfs_end_buffer_async_read() with the improved logic from
  its updated counterpart fs/buffer.c::end_buffer_async_read(). Apply
  further logic improvements to better determine when we set PageError.
- Update submission of buffers in fs/ntfs/aops.c::ntfs_read_block() to
  check for the buffers being uptodate first in line with the updated
  fs/buffer.c::block_read_full_page(). This plugs a small race
  condition.
parent 3968bf66
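The core of the race fix is the last point above. The following is an illustrative condensation of the new submission loop in fs/ntfs/aops.c::ntfs_read_block(), not part of the patch itself; the helper name and the reduced argument list are made up for the sketch, while the calls mirror the aops.c hunk further down.

/*
 * Illustrative sketch only: buffers that are already uptodate by the
 * time we get here are completed by calling the async end_io handler
 * directly instead of being submitted to disk again, which closes the
 * window between collecting the buffers and starting i/o on them.
 */
static void ntfs_submit_collected_bhs(struct buffer_head *arr[], int nr)
{
	struct buffer_head *tbh;
	int i;

	/* Lock the buffers and mark them for async read completion. */
	for (i = 0; i < nr; i++) {
		tbh = arr[i];
		lock_buffer(tbh);
		tbh->b_end_io = ntfs_end_buffer_async_read;
		set_buffer_async_read(tbh);
	}
	/* Start i/o only on the buffers that still need it. */
	for (i = 0; i < nr; i++) {
		tbh = arr[i];
		if (likely(!buffer_uptodate(tbh)))
			submit_bh(READ, tbh);
		else
			ntfs_end_buffer_async_read(tbh, 1);
	}
}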
--- a/Documentation/filesystems/ntfs.txt
+++ b/Documentation/filesystems/ntfs.txt
@@ -247,6 +247,9 @@ ChangeLog
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
 
+2.0.19:
+	- Fix race condition and improvements in block i/o interface.
+	- Optimization when reading compressed files.
 2.0.18:
 	- Fix race condition in reading of compressed files.
 2.0.17:
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -9,12 +9,22 @@ ToDo:
 	  read() will fail when s_maxbytes is reached? -> Investigate this.
 	- Implement/allow non-resident index bitmaps in dir.c::ntfs_readdir()
 	  and then also consider initialized_size w.r.t. the bitmaps, etc.
+	- Consider if ntfs_file_read_compressed_block() shouldn't be coping
+	  with initialized_size < data_size. I don't think it can happen but
+	  it requires more careful consideration.
 	- Enable NFS exporting of NTFS.
-	- Apply block resolution optimization from aops.c::ntfs_read_block() to
-	  compress.c::ntfs_file_read_compressed_block() as well.
 
+2.0.19 - Fix race condition, improvements, and optimizations in i/o interface.
+
+	- Apply block optimization added to fs/ntfs/aops.c::ntfs_read_block()
+	  to fs/ntfs/compress.c::ntfs_file_read_compressed_block() as well.
+	- Drop the "file" from ntfs_file_read_compressed_block().
+	- Rename fs/ntfs/aops.c::ntfs_end_buffer_read_async() to
+	  ntfs_end_buffer_async_read() (more like the fs/buffer.c counterpart).
+	- Update ntfs_end_buffer_async_read() with the improved logic from
+	  its updated counterpart fs/buffer.c::end_buffer_async_read(). Apply
+	  further logic improvements to better determine when we set PageError.
+	- Update submission of buffers in fs/ntfs/aops.c::ntfs_read_block() to
+	  check for the buffers being uptodate first in line with the updated
+	  fs/buffer.c::block_read_full_page(). This plugs a small race
+	  condition.
+
 2.0.18 - Fix race condition in reading of compressed files.
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_NTFS_FS) += ntfs.o
 ntfs-objs := aops.o attrib.o compress.o debug.o dir.o file.o inode.o mft.o \
 	     mst.o namei.o super.o sysctl.o time.o unistr.o upcase.o
 
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.18\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.19\"
 
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -30,7 +30,7 @@
 #include "ntfs.h"
 
 /**
- * ntfs_end_buffer_read_async - async io completion for reading attributes
+ * ntfs_end_buffer_async_read - async io completion for reading attributes
  * @bh: buffer head on which io is completed
  * @uptodate: whether @bh is now uptodate or not
  *
@@ -45,26 +45,23 @@
  * record size, and index_block_size_bits, to the log(base 2) of the ntfs
  * record size.
  */
-static void ntfs_end_buffer_read_async(struct buffer_head *bh, int uptodate)
+static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
 	static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
 	unsigned long flags;
 	struct buffer_head *tmp;
 	struct page *page;
 	ntfs_inode *ni;
+	int page_uptodate = 1;
 
-	if (likely(uptodate))
-		set_buffer_uptodate(bh);
-	else
-		clear_buffer_uptodate(bh);
-
 	page = bh->b_page;
 	ni = NTFS_I(page->mapping->host);
 
 	if (likely(uptodate)) {
 		s64 file_ofs;
 
+		set_buffer_uptodate(bh);
+
 		file_ofs = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
 		/* Check for the current buffer head overflowing. */
 		if (file_ofs + bh->b_size > ni->initialized_size) {
@@ -78,22 +75,28 @@ static void ntfs_end_buffer_read_async(struct buffer_head *bh, int uptodate)
 			flush_dcache_page(page);
 			kunmap_atomic(addr, KM_BIO_SRC_IRQ);
 		}
-	} else
+	} else {
+		clear_buffer_uptodate(bh);
+		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %Lu.",
+				(unsigned long long)bh->b_blocknr);
 		SetPageError(page);
+	}
 
 	spin_lock_irqsave(&page_uptodate_lock, flags);
 	clear_buffer_async_read(bh);
 	unlock_buffer(bh);
-	tmp = bh->b_this_page;
-	while (tmp != bh) {
-		if (buffer_locked(tmp)) {
-			if (buffer_async_read(tmp))
-				goto still_busy;
-		} else if (!buffer_uptodate(tmp))
-			SetPageError(page);
-		tmp = tmp->b_this_page;
-	}
+	tmp = bh;
+	do {
+		if (!buffer_uptodate(tmp))
+			page_uptodate = 0;
+		if (buffer_async_read(tmp)) {
+			if (likely(buffer_locked(tmp)))
+				goto still_busy;
+			/* Async buffers must be locked. */
+			BUG();
+		}
+		tmp = tmp->b_this_page;
+	} while (tmp != bh);
 	spin_unlock_irqrestore(&page_uptodate_lock, flags);
 	/*
 	 * If none of the buffers had errors then we can set the page uptodate,
@@ -101,7 +104,7 @@ static void ntfs_end_buffer_read_async(struct buffer_head *bh, int uptodate)
 	 * attribute is mst protected, i.e. if NInoMstProteced(ni) is true.
 	 */
 	if (!NInoMstProtected(ni)) {
-		if (likely(!PageError(page)))
+		if (likely(page_uptodate && !PageError(page)))
 			SetPageUptodate(page);
 		unlock_page(page);
 		return;
@@ -127,14 +130,17 @@ static void ntfs_end_buffer_read_async(struct buffer_head *bh, int uptodate)
 		}
 		flush_dcache_page(page);
 		kunmap_atomic(addr, KM_BIO_SRC_IRQ);
-		if (likely(!nr_err && recs))
-			SetPageUptodate(page);
-		else {
-			ntfs_error(ni->vol->sb, "Setting page error, index "
-					"0x%lx.", page->index);
-			SetPageError(page);
-		}
+		if (likely(!PageError(page))) {
+			if (likely(!nr_err && recs)) {
+				if (likely(page_uptodate))
+					SetPageUptodate(page);
+			} else {
+				ntfs_error(ni->vol->sb, "Setting page error, "
+						"index 0x%lx.", page->index);
+				SetPageError(page);
+			}
+		}
 	}
 	unlock_page(page);
 	return;
 still_busy:
@@ -282,16 +288,23 @@ static int ntfs_read_block(struct page *page)
 	/* Check we have at least one buffer ready for i/o. */
 	if (nr) {
+		struct buffer_head *tbh;
+
 		/* Lock the buffers. */
 		for (i = 0; i < nr; i++) {
-			struct buffer_head *tbh = arr[i];
+			tbh = arr[i];
 			lock_buffer(tbh);
-			tbh->b_end_io = ntfs_end_buffer_read_async;
+			tbh->b_end_io = ntfs_end_buffer_async_read;
 			set_buffer_async_read(tbh);
 		}
 		/* Finally, start i/o on the buffers. */
-		for (i = 0; i < nr; i++)
-			submit_bh(READ, arr[i]);
+		for (i = 0; i < nr; i++) {
+			tbh = arr[i];
+			if (likely(!buffer_uptodate(tbh)))
+				submit_bh(READ, tbh);
+			else
+				ntfs_end_buffer_async_read(tbh, 1);
+		}
 		return 0;
 	}
 	/* No i/o was scheduled on any of the buffers. */
@@ -349,7 +362,7 @@ int ntfs_readpage(struct file *file, struct page *page)
 		}
 		/* Compressed data streams are handled in compress.c. */
 		if (NInoCompressed(ni))
-			return ntfs_file_read_compressed_block(page);
+			return ntfs_read_compressed_block(page);
 	}
 	/* Normal data stream. */
 	return ntfs_read_block(page);
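For readability, here is the non-MST completion path of ntfs_end_buffer_async_read() assembled from the hunks above into one piece. This is an editorial sketch, not code from the patch: the helper name and parameter list are invented, and the MST fixup branch is omitted.

/*
 * Illustrative sketch (non-MST case only): the page is marked uptodate
 * only once every buffer on its circular b_this_page list has completed
 * and none of them failed; a single buffer that is not uptodate clears
 * page_uptodate here, and the buffer error path sets PageError earlier.
 */
static void ntfs_finish_page_read(struct buffer_head *bh, spinlock_t *lock)
{
	struct page *page = bh->b_page;
	struct buffer_head *tmp;
	unsigned long flags;
	int page_uptodate = 1;

	spin_lock_irqsave(lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			/* Another buffer is still under i/o; not done yet. */
			if (likely(buffer_locked(tmp)))
				goto still_busy;
			/* Async buffers must be locked. */
			BUG();
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(lock, flags);

	/* We were the last outstanding buffer on this page. */
	if (likely(page_uptodate && !PageError(page)))
		SetPageUptodate(page);
	unlock_page(page);
	return;
still_busy:
	spin_unlock_irqrestore(lock, flags);
}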
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -386,7 +386,7 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 }
 
 /**
- * ntfs_file_read_compressed_block - read a compressed block into the page cache
+ * ntfs_read_compressed_block - read a compressed block into the page cache
  * @page: locked page in the compression block(s) we need to read
  *
 * When we are called the page has already been verified to be locked and the
@@ -418,14 +418,15 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 * initialized_size is less than data_size. This should be safe because of the
 * nature of the compression algorithm used. Just in case we check and output
 * an error message in read inode if the two sizes are not equal for a
-* compressed file.
+* compressed file. (AIA)
 */
-int ntfs_file_read_compressed_block(struct page *page)
+int ntfs_read_compressed_block(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 	ntfs_inode *ni = NTFS_I(mapping->host);
 	ntfs_volume *vol = ni->vol;
 	struct super_block *sb = vol->sb;
+	run_list_element *rl;
 	unsigned long block_size = sb->s_blocksize;
 	unsigned char block_size_bits = sb->s_blocksize_bits;
 	u8 *cb, *cb_pos, *cb_end;
@@ -532,14 +533,23 @@ int ntfs_file_read_compressed_block(struct page *page)
 	nr_bhs = 0;
 	/* Read all cb buffer heads one cluster at a time. */
+	rl = NULL;
 	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
 			vcn++) {
 		BOOL is_retry = FALSE;
 
-retry_remap:
-		/* Find lcn of vcn and convert it into blocks. */
-		down_read(&ni->run_list.lock);
-		lcn = vcn_to_lcn(ni->run_list.rl, vcn);
-		up_read(&ni->run_list.lock);
+		if (!rl) {
+lock_retry_remap:
+			down_read(&ni->run_list.lock);
+			rl = ni->run_list.rl;
+		}
+		if (likely(rl != NULL)) {
+			/* Seek to element containing target vcn. */
+			while (rl->length && rl[1].vcn <= vcn)
+				rl++;
+			lcn = vcn_to_lcn(rl, vcn);
+		} else
+			lcn = (LCN)LCN_RL_NOT_MAPPED;
 		ntfs_debug("Reading vcn = 0x%Lx, lcn = 0x%Lx.",
 				(long long)vcn, (long long)lcn);
 		if (lcn < 0) {
@@ -552,9 +562,13 @@ int ntfs_file_read_compressed_block(struct page *page)
 			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
 				goto rl_err;
 			is_retry = TRUE;
-			/* Map run list of current extent and retry. */
+			/*
+			 * Attempt to map run list, dropping lock for the
+			 * duration.
+			 */
+			up_read(&ni->run_list.lock);
 			if (!map_run_list(ni, vcn))
-				goto retry_remap;
+				goto lock_retry_remap;
 			goto map_rl_err;
 		}
 		block = lcn << vol->cluster_size_bits >> block_size_bits;
@@ -568,6 +582,10 @@ int ntfs_file_read_compressed_block(struct page *page)
 		} while (++block < max_block);
 	}
 
+	/* Release the lock if we took it. */
+	if (rl)
+		up_read(&ni->run_list.lock);
+
 	/* Setup and initiate io on all buffer heads. */
 	for (i = 0; i < nr_bhs; i++) {
 		struct buffer_head *tbh = bhs[i];
@@ -828,11 +846,13 @@ int ntfs_file_read_compressed_block(struct page *page)
 	goto err_out;
 
 rl_err:
+	up_read(&ni->run_list.lock);
 	ntfs_error(vol->sb, "vcn_to_lcn() failed. Cannot read compression "
 			"block.");
 	goto err_out;
 
 getblk_err:
+	up_read(&ni->run_list.lock);
 	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");
 
 err_out:
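The change to ntfs_read_compressed_block() is easier to see as a skeleton of its lookup loop. The fragment below is an editorial condensation of the hunks above, with the error labels, is_retry handling, and the buffer-head collection elided; only the code that actually appears in the patch is shown.

	/*
	 * Skeleton only: rl caches the current run list element across
	 * loop iterations, so each vcn seeks forward from the previous
	 * position instead of rescanning the whole run list, and the read
	 * lock is held across the loop instead of being taken and dropped
	 * once per cluster.
	 */
	rl = NULL;
	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
			vcn++) {
		if (!rl) {
lock_retry_remap:
			down_read(&ni->run_list.lock);
			rl = ni->run_list.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to the element containing the target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = vcn_to_lcn(rl, vcn);
		} else
			lcn = (LCN)LCN_RL_NOT_MAPPED;
		if (lcn < 0) {
			/* Drop the lock while mapping the run list. */
			up_read(&ni->run_list.lock);
			if (!map_run_list(ni, vcn))
				goto lock_retry_remap;
			goto map_rl_err;
		}
		/* ... convert lcn to blocks and collect buffer heads ... */
	}
	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->run_list.lock);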
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -1007,19 +1007,15 @@ static int ntfs_read_locked_inode(struct inode *vi)
 				ni->_ICF(compressed_size) = sle64_to_cpu(
 						ctx->attr->_ANR(compressed_size));
 				if (vi->i_size != ni->initialized_size)
-					ntfs_warning(vi->i_sb, "Compressed "
-							"file with data_size "
-							"unequal to "
-							"initialized size "
-							"found. This will "
-							"probably cause "
-							"problems when trying "
-							"to access the file. "
-							"Please notify "
-							"linux-ntfs-dev@"
-							"lists.sf.net that you"
-							"saw this message."
-							"Thanks!");
+					ntfs_warning(vi->i_sb, "BUG: Found "
+							"compressed file with "
+							"data_size not equal to "
+							"initialized_size. This will "
+							"probably cause problems when "
+							"trying to access the file. "
+							"Please notify linux-ntfs-dev@"
+							"lists.sf.net that you saw "
+							"this message. Thanks!");
 			}
 		} else { /* Resident attribute. */
 			/*
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -158,7 +158,7 @@ static inline struct page *ntfs_map_page(struct address_space *mapping,
 /* Declarations of functions and global variables. */
 
 /* From fs/ntfs/compress.c */
-extern int ntfs_file_read_compressed_block(struct page *page);
+extern int ntfs_read_compressed_block(struct page *page);
 
 /* From fs/ntfs/super.c */
 #define default_upcase_len 0x10000