Commit 1138bf4c authored by Anton Altaparmakov

NTFS: 2.0.19 - Fix race condition, improvements, and optimizations in i/o interface.

- Apply block optimization added to fs/ntfs/aops.c::ntfs_read_block()
  to fs/ntfs/compress.c::ntfs_file_read_compressed_block() as well.
- Drop the "file" from ntfs_file_read_compressed_block().
- Rename fs/ntfs/aops.c::ntfs_end_buffer_read_async() to
  ntfs_end_buffer_async_read() (more like the fs/buffer.c counterpart).
- Update ntfs_end_buffer_async_read() with the improved logic from
  its updated counterpart fs/buffer.c::end_buffer_async_read(). Apply
  further logic improvements to better determine when we set PageError.
- Update submission of buffers in fs/ntfs/aops.c::ntfs_read_block() to
  check for the buffers being uptodate first in line with the updated
  fs/buffer.c::block_read_full_page(). This plugs a small race
  condition.
parent 3968bf66
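The last bullet above is the heart of the race fix. A minimal sketch of that submission pattern, assuming the 2.5-era buffer_head API used in the diff below (arr[], nr and ntfs_end_buffer_async_read() are taken from ntfs_read_block(); the wrapper name submit_read_buffers() is purely illustrative):

	#include <linux/fs.h>	/* buffer_head and submit_bh() in this kernel era */

	/*
	 * Sketch only: lock and flag all the buffers first, then start i/o only
	 * on the ones that are not already uptodate. An uptodate buffer is
	 * completed by hand so the page is still finished exactly once.
	 */
	static void submit_read_buffers(struct buffer_head *arr[], int nr)
	{
		int i;

		for (i = 0; i < nr; i++) {
			struct buffer_head *tbh = arr[i];

			lock_buffer(tbh);
			tbh->b_end_io = ntfs_end_buffer_async_read;
			set_buffer_async_read(tbh);
		}
		/*
		 * Checking uptodate only after the buffer is locked closes the
		 * window in which someone else could have filled it in.
		 */
		for (i = 0; i < nr; i++) {
			struct buffer_head *tbh = arr[i];

			if (likely(!buffer_uptodate(tbh)))
				submit_bh(READ, tbh);
			else
				ntfs_end_buffer_async_read(tbh, 1);
		}
	}

Submitting an already uptodate buffer would re-read it needlessly, while skipping it without running the completion handler would leave the async-read accounting unbalanced and the page locked; completing it by hand gives the same end state as a successful read.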
@@ -247,6 +247,9 @@ ChangeLog
Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
2.0.19:
- Fix race condition and improvements in block i/o interface.
- Optimization when reading compressed files.
2.0.18:
- Fix race condition in reading of compressed files.
2.0.17:
@@ -9,12 +9,22 @@ ToDo:
read() will fail when s_maxbytes is reached? -> Investigate this.
- Implement/allow non-resident index bitmaps in dir.c::ntfs_readdir()
and then also consider initialized_size w.r.t. the bitmaps, etc.
- Consider if ntfs_file_read_compressed_block() shouldn't be coping
with initialized_size < data_size. I don't think it can happen but
it requires more careful consideration.
- Enable NFS exporting of NTFS.
- Apply block resolution optimization from aops.c::ntfs_read_block() to
compress.c::ntfs_file_read_compressed_block() as well.
2.0.19 - Fix race condition, improvements, and optimizations in i/o interface.
- Apply block optimization added to fs/ntfs/aops.c::ntfs_read_block()
to fs/ntfs/compress.c::ntfs_file_read_compressed_block() as well.
- Drop the "file" from ntfs_file_read_compressed_block().
- Rename fs/ntfs/aops.c::ntfs_end_buffer_read_async() to
ntfs_end_buffer_async_read() (more like the fs/buffer.c counterpart).
- Update ntfs_end_buffer_async_read() with the improved logic from
its updated counterpart fs/buffer.c::end_buffer_async_read(). Apply
further logic improvements to better determine when we set PageError.
- Update submission of buffers in fs/ntfs/aops.c::ntfs_read_block() to
check for the buffers being uptodate first in line with the updated
fs/buffer.c::block_read_full_page(). This plugs a small race
condition.
2.0.18 - Fix race condition in reading of compressed files.
@@ -5,7 +5,7 @@ obj-$(CONFIG_NTFS_FS) += ntfs.o
ntfs-objs := aops.o attrib.o compress.o debug.o dir.o file.o inode.o mft.o \
mst.o namei.o super.o sysctl.o time.o unistr.o upcase.o
EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.18\"
EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.19\"
ifeq ($(CONFIG_NTFS_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
@@ -30,7 +30,7 @@
#include "ntfs.h"
/**
* ntfs_end_buffer_read_async - async io completion for reading attributes
* ntfs_end_buffer_async_read - async io completion for reading attributes
* @bh: buffer head on which io is completed
* @uptodate: whether @bh is now uptodate or not
*
@@ -45,26 +45,23 @@
* record size, and index_block_size_bits, to the log(base 2) of the ntfs
* record size.
*/
static void ntfs_end_buffer_read_async(struct buffer_head *bh, int uptodate)
static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
unsigned long flags;
struct buffer_head *tmp;
struct page *page;
ntfs_inode *ni;
if (likely(uptodate))
set_buffer_uptodate(bh);
else
clear_buffer_uptodate(bh);
int page_uptodate = 1;
page = bh->b_page;
ni = NTFS_I(page->mapping->host);
if (likely(uptodate)) {
s64 file_ofs;
set_buffer_uptodate(bh);
file_ofs = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
/* Check for the current buffer head overflowing. */
if (file_ofs + bh->b_size > ni->initialized_size) {
@@ -78,22 +75,28 @@ static void ntfs_end_buffer_read_async(struct buffer_head *bh, int uptodate)
flush_dcache_page(page);
kunmap_atomic(addr, KM_BIO_SRC_IRQ);
}
} else
} else {
clear_buffer_uptodate(bh);
ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %Lu.",
(unsigned long long)bh->b_blocknr);
SetPageError(page);
}
spin_lock_irqsave(&page_uptodate_lock, flags);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh->b_this_page;
while (tmp != bh) {
if (buffer_locked(tmp)) {
if (buffer_async_read(tmp))
tmp = bh;
do {
if (!buffer_uptodate(tmp))
page_uptodate = 0;
if (buffer_async_read(tmp)) {
if (likely(buffer_locked(tmp)))
goto still_busy;
} else if (!buffer_uptodate(tmp))
SetPageError(page);
/* Async buffers must be locked. */
BUG();
}
tmp = tmp->b_this_page;
}
} while (tmp != bh);
spin_unlock_irqrestore(&page_uptodate_lock, flags);
/*
* If none of the buffers had errors then we can set the page uptodate,
@@ -101,7 +104,7 @@ static void ntfs_end_buffer_read_async(struct buffer_head *bh, int uptodate)
* attribute is mst protected, i.e. if NInoMstProteced(ni) is true.
*/
if (!NInoMstProtected(ni)) {
if (likely(!PageError(page)))
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
unlock_page(page);
return;
@@ -127,12 +130,15 @@ static void ntfs_end_buffer_read_async(struct buffer_head *bh, int uptodate)
}
flush_dcache_page(page);
kunmap_atomic(addr, KM_BIO_SRC_IRQ);
if (likely(!nr_err && recs))
SetPageUptodate(page);
else {
ntfs_error(ni->vol->sb, "Setting page error, index "
"0x%lx.", page->index);
SetPageError(page);
if (likely(!PageError(page))) {
if (likely(!nr_err && recs)) {
if (likely(page_uptodate))
SetPageUptodate(page);
} else {
ntfs_error(ni->vol->sb, "Setting page error, "
"index 0x%lx.", page->index);
SetPageError(page);
}
}
}
unlock_page(page);
@@ -282,16 +288,23 @@ static int ntfs_read_block(struct page *page)
/* Check we have at least one buffer ready for i/o. */
if (nr) {
struct buffer_head *tbh;
/* Lock the buffers. */
for (i = 0; i < nr; i++) {
struct buffer_head *tbh = arr[i];
tbh = arr[i];
lock_buffer(tbh);
tbh->b_end_io = ntfs_end_buffer_read_async;
tbh->b_end_io = ntfs_end_buffer_async_read;
set_buffer_async_read(tbh);
}
/* Finally, start i/o on the buffers. */
for (i = 0; i < nr; i++)
submit_bh(READ, arr[i]);
for (i = 0; i < nr; i++) {
tbh = arr[i];
if (likely(!buffer_uptodate(tbh)))
submit_bh(READ, tbh);
else
ntfs_end_buffer_async_read(tbh, 1);
}
return 0;
}
/* No i/o was scheduled on any of the buffers. */
@@ -349,7 +362,7 @@ int ntfs_readpage(struct file *file, struct page *page)
}
/* Compressed data streams are handled in compress.c. */
if (NInoCompressed(ni))
return ntfs_file_read_compressed_block(page);
return ntfs_read_compressed_block(page);
}
/* Normal data stream. */
return ntfs_read_block(page);
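The aops.c hunks above converge on the same completion logic as the updated fs/buffer.c::end_buffer_async_read(). A condensed sketch of that generic path, leaving out the NTFS mst post-processing and initialized_size handling that the real ntfs_end_buffer_async_read() keeps (headers as included by fs/ntfs/aops.c):

	/*
	 * Sketch, not the committed function: decide the page state only once
	 * the last async-read buffer on the page completes, and track in
	 * page_uptodate whether every buffer made it, so SetPageUptodate() is
	 * never called on a partially read page.
	 */
	static void end_async_read_sketch(struct buffer_head *bh, int uptodate)
	{
		static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
		unsigned long flags;
		struct buffer_head *tmp;
		struct page *page = bh->b_page;
		int page_uptodate = 1;

		if (likely(uptodate))
			set_buffer_uptodate(bh);
		else {
			clear_buffer_uptodate(bh);
			/* A real i/o error on any buffer marks the page. */
			SetPageError(page);
		}
		spin_lock_irqsave(&page_uptodate_lock, flags);
		clear_buffer_async_read(bh);
		unlock_buffer(bh);
		tmp = bh;
		do {
			if (!buffer_uptodate(tmp))
				page_uptodate = 0;
			if (buffer_async_read(tmp)) {
				if (likely(buffer_locked(tmp)))
					goto still_busy;
				/* Async buffers must be locked. */
				BUG();
			}
			tmp = tmp->b_this_page;
		} while (tmp != bh);
		spin_unlock_irqrestore(&page_uptodate_lock, flags);

		/* Last buffer done: the page is uptodate only if all were. */
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
		unlock_page(page);
		return;
	still_busy:
		spin_unlock_irqrestore(&page_uptodate_lock, flags);
	}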
@@ -386,7 +386,7 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
}
/**
* ntfs_file_read_compressed_block - read a compressed block into the page cache
* ntfs_read_compressed_block - read a compressed block into the page cache
* @page: locked page in the compression block(s) we need to read
*
* When we are called the page has already been verified to be locked and the
@@ -418,14 +418,15 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
* initialized_size is less than data_size. This should be safe because of the
* nature of the compression algorithm used. Just in case we check and output
* an error message in read inode if the two sizes are not equal for a
* compressed file.
* compressed file. (AIA)
*/
int ntfs_file_read_compressed_block(struct page *page)
int ntfs_read_compressed_block(struct page *page)
{
struct address_space *mapping = page->mapping;
ntfs_inode *ni = NTFS_I(mapping->host);
ntfs_volume *vol = ni->vol;
struct super_block *sb = vol->sb;
run_list_element *rl;
unsigned long block_size = sb->s_blocksize;
unsigned char block_size_bits = sb->s_blocksize_bits;
u8 *cb, *cb_pos, *cb_end;
@@ -532,14 +533,23 @@ int ntfs_file_read_compressed_block(struct page *page)
nr_bhs = 0;
/* Read all cb buffer heads one cluster at a time. */
rl = NULL;
for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
vcn++) {
BOOL is_retry = FALSE;
retry_remap:
/* Find lcn of vcn and convert it into blocks. */
down_read(&ni->run_list.lock);
lcn = vcn_to_lcn(ni->run_list.rl, vcn);
up_read(&ni->run_list.lock);
if (!rl) {
lock_retry_remap:
down_read(&ni->run_list.lock);
rl = ni->run_list.rl;
}
if (likely(rl != NULL)) {
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
lcn = vcn_to_lcn(rl, vcn);
} else
lcn = (LCN)LCN_RL_NOT_MAPPED;
ntfs_debug("Reading vcn = 0x%Lx, lcn = 0x%Lx.",
(long long)vcn, (long long)lcn);
if (lcn < 0) {
@@ -552,9 +562,13 @@ int ntfs_file_read_compressed_block(struct page *page)
if (is_retry || lcn != LCN_RL_NOT_MAPPED)
goto rl_err;
is_retry = TRUE;
/* Map run list of current extent and retry. */
/*
* Attempt to map run list, dropping lock for the
* duration.
*/
up_read(&ni->run_list.lock);
if (!map_run_list(ni, vcn))
goto retry_remap;
goto lock_retry_remap;
goto map_rl_err;
}
block = lcn << vol->cluster_size_bits >> block_size_bits;
@@ -568,6 +582,10 @@ int ntfs_file_read_compressed_block(struct page *page)
} while (++block < max_block);
}
/* Release the lock if we took it. */
if (rl)
up_read(&ni->run_list.lock);
/* Setup and initiate io on all buffer heads. */
for (i = 0; i < nr_bhs; i++) {
struct buffer_head *tbh = bhs[i];
@@ -828,11 +846,13 @@ int ntfs_file_read_compressed_block(struct page *page)
goto err_out;
rl_err:
up_read(&ni->run_list.lock);
ntfs_error(vol->sb, "vcn_to_lcn() failed. Cannot read compression "
"block.");
goto err_out;
getblk_err:
up_read(&ni->run_list.lock);
ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");
err_out:
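The compress.c hunks above add the run list optimization that the removed ToDo item asked for. A stripped-down sketch of the walk, with types and helpers taken from the diff (end_vcn stands in for the diff's reuse of start_vcn, walk_cb_clusters_sketch() is only an illustrative name, and the map_run_list()/retry handling of LCN_RL_NOT_MAPPED is omitted):

	/*
	 * Sketch only: take the run list rwsem at most once, remember the
	 * run_list_element we stopped at, and for each following vcn just
	 * seek forward from there instead of searching from the start.
	 */
	static void walk_cb_clusters_sketch(ntfs_inode *ni, VCN start_vcn,
			VCN end_vcn)
	{
		run_list_element *rl = NULL;
		VCN vcn;

		for (vcn = start_vcn; vcn < end_vcn; vcn++) {
			LCN lcn;

			if (!rl) {
				/* Lock and cache the run list on first use. */
				down_read(&ni->run_list.lock);
				rl = ni->run_list.rl;
			}
			if (likely(rl != NULL)) {
				/* Seek to element containing the target vcn. */
				while (rl->length && rl[1].vcn <= vcn)
					rl++;
				lcn = vcn_to_lcn(rl, vcn);
			} else
				lcn = (LCN)LCN_RL_NOT_MAPPED;
			ntfs_debug("Reading vcn = 0x%Lx, lcn = 0x%Lx.",
					(long long)vcn, (long long)lcn);
			/* ... convert lcn to blocks, collect buffer heads ... */
		}
		/* Release the lock if we took it. */
		if (rl)
			up_read(&ni->run_list.lock);
	}

Holding the rwsem across the whole compression block avoids re-taking it for every cluster; the error paths now drop it too, as the rl_err and getblk_err labels above show.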
@@ -1007,19 +1007,15 @@ static int ntfs_read_locked_inode(struct inode *vi)
ni->_ICF(compressed_size) = sle64_to_cpu(
ctx->attr->_ANR(compressed_size));
if (vi->i_size != ni->initialized_size)
ntfs_warning(vi->i_sb, "Compressed "
"file with data_size "
"unequal to "
"initialized size "
"found. This will "
"probably cause "
"problems when trying "
"to access the file. "
"Please notify "
"linux-ntfs-dev@"
"lists.sf.net that you"
"saw this message."
"Thanks!");
ntfs_warning(vi->i_sb, "BUG: Found "
"compressed file with "
"data_size not equal to "
"initialized_size. This will "
"probably cause problems when "
"trying to access the file. "
"Please notify linux-ntfs-dev@"
"lists.sf.net that you saw "
"this message. Thanks!");
}
} else { /* Resident attribute. */
/*
@@ -158,7 +158,7 @@ static inline struct page *ntfs_map_page(struct address_space *mapping,
/* Declarations of functions and global variables. */
/* From fs/ntfs/compress.c */
extern int ntfs_file_read_compressed_block(struct page *page);
extern int ntfs_read_compressed_block(struct page *page);
/* From fs/ntfs/super.c */
#define default_upcase_len 0x10000