Commit eeb382a1 authored by Anton Altaparmakov

NTFS 2.0.3: Small bug fixes, cleanups, and performance improvements.

- Remove some dead code from mft.c.
- Optimize readpage and read_block functions throughout aops.c so that
  only initialized blocks are read. Non-initialized ones have their
  buffer head mapped, zeroed, and set up to date, without scheduling
  any i/o. Thanks to Al Viro for advice on how to avoid the device i/o.
  (A minimal sketch of this block classification follows at the end of
  this message.)
Thanks go to Andrew Morton for spotting the below:
- Fix buglet in allocate_compression_buffers() error code path.
- Call flush_dcache_page() after modifying page cache page contents in
  ntfs_file_readpage().
- Check for existence of page buffers throughout aops.c before calling
  create_empty_buffers(). This happens when an I/O error occurs and the
  read is retried. (Buffers will also already exist once writing is
  implemented, so this needed doing anyway; I had just left it for later...)
- Don't BUG_ON() uptodate and/or mapped buffers throughout aops.c in
  readpage and read_block functions. The reasoning is the same as above
  (i.e. I/O error retries and future write code paths).
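A minimal user-space sketch of the block classification described in the
optimization bullet above (classify_block is a hypothetical helper; the
real code in fs/ntfs/aops.c operates on struct buffer_head and also
handles holes, which are zeroed without being mapped):

#include <stdio.h>

enum action { DO_READ, DO_ZERO, SKIP };

/*
 * Classify one block of a page. iblock is the block in question;
 * lblock and zblock are the first blocks beyond the allocated and
 * initialized sizes respectively, computed as in the patch:
 *	lblock = (allocated_size   + blocksize - 1) >> blocksize_bits;
 *	zblock = (initialized_size + blocksize - 1) >> blocksize_bits;
 */
static enum action classify_block(long long iblock, long long lblock,
		long long zblock)
{
	if (iblock >= lblock)
		return SKIP;	/* beyond the allocated size */
	if (iblock < zblock)
		return DO_READ;	/* initialized data: schedule device i/o */
	/*
	 * Allocated but not initialized: map the buffer, zero it and
	 * mark it uptodate, without scheduling any i/o.
	 */
	return DO_ZERO;
}

int main(void)
{
	static const char *name[] = { "read", "zero", "skip" };
	/* Example: 8 blocks allocated, only the first 3 initialized. */
	for (long long i = 0; i < 10; i++)
		printf("block %lld -> %s\n", i, name[classify_block(i, 8, 3)]);
	return 0;
}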
parent 43b706e9
@@ -168,6 +168,8 @@ ChangeLog
Note that a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
2.0.3:
- Small bug fixes, cleanups, and performance improvements.
2.0.2:
- Use default fmask of 0177 so that files are not executable by default.
If you want owner executable files, just use fmask=0077.
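A note on the mask arithmetic (standard umask-style semantics are assumed
here; the worked octal values are illustrative, not text from the commit):
the fmask bits are cleared from the full 0777 permissions, so fmask=0177
gives 0777 & ~0177 = 0600 (owner read/write, not executable by anyone),
while fmask=0077 gives 0777 & ~0077 = 0700 (owner read/write/execute, no
access for group or other).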
@@ -177,7 +179,6 @@ Note that a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
2.0.1:
- Minor updates, primarily set the executable bit by default on files
so they can be executed.
2.0.0:
- Started ChangeLog.
@@ -27,6 +27,25 @@ ToDo:
quite big. Modularising them a bit, e.g. a-la get_block(), will make
them cleaner and make code reuse easier.
2.0.3 - Small bug fixes, cleanups, and performance improvements.
- Remove some dead code from mft.c.
- Optimize readpage and read_block functions throughout aops.c so that
only initialized blocks are read. Non-initialized ones have their
buffer head mapped, zeroed, and set up to date, without scheduling
any i/o. Thanks to Al Viro for advice on how to avoid the device i/o.
Thanks go to Andrew Morton for spotting the below:
- Fix buglet in allocate_compression_buffers() error code path.
- Call flush_dcache_page() after modifying page cache page contents in
ntfs_file_readpage().
- Check for existence of page buffers throughout aops.c before calling
create_empty_buffers(). This happens when an I/O error occurs and the
read is retried. (Buffers will also already exist once writing is
implemented, so this needed doing anyway; I had just left it for later...)
- Don't BUG_ON() uptodate and/or mapped buffers throughout aops.c in
readpage and read_block functions. The reasoning is the same as above
(i.e. I/O error retries and future write code paths).
2.0.2 - Minor updates and cleanups.
- Cleanup: rename mst.c::__post_read_mst_fixup to post_write_mst_fixup
......
@@ -7,7 +7,7 @@ obj-y := time.o unistr.o inode.o file.o mft.o super.o debug.o aops.o \
obj-m := $(O_TARGET)
EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.2\"
EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.3\"
ifeq ($(CONFIG_NTFS_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
......
/*
/**
* aops.c - NTFS kernel address space operations and page cache handling.
* Part of the Linux-NTFS project.
*
@@ -35,7 +35,9 @@
#define page_buffers(page) (page)->buffers
#endif
/*
/**
* end_buffer_read_file_async -
*
* Async io completion handler for accessing files. Adapted from
* end_buffer_read_mst_async().
*/
@@ -94,7 +96,11 @@ static void end_buffer_read_file_async(struct buffer_head *bh, int uptodate)
return;
}
/* NTFS version of block_read_full_page(). Adapted from ntfs_mst_readpage(). */
/**
* ntfs_file_read_block -
*
* NTFS version of block_read_full_page(). Adapted from ntfs_mst_readpage().
*/
static int ntfs_file_read_block(struct page *page)
{
VCN vcn;
@@ -102,7 +108,7 @@ static int ntfs_file_read_block(struct page *page)
ntfs_inode *ni;
ntfs_volume *vol;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
sector_t iblock, lblock;
sector_t iblock, lblock, zblock;
unsigned int blocksize, blocks, vcn_ofs;
int i, nr;
unsigned char blocksize_bits;
@@ -113,6 +119,7 @@ static int ntfs_file_read_block(struct page *page)
blocksize_bits = VFS_I(ni)->i_blkbits;
blocksize = 1 << blocksize_bits;
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize);
bh = head = page_buffers(page);
if (!bh)
@@ -121,6 +128,7 @@ static int ntfs_file_read_block(struct page *page)
blocks = PAGE_CACHE_SIZE >> blocksize_bits;
iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
#ifdef DEBUG
if (unlikely(!ni->mft_no)) {
@@ -133,7 +141,12 @@ static int ntfs_file_read_block(struct page *page)
/* Loop through all the buffers in the page. */
nr = i = 0;
do {
BUG_ON(buffer_mapped(bh) || buffer_uptodate(bh));
if (unlikely(buffer_uptodate(bh)))
continue;
if (unlikely(buffer_mapped(bh))) {
arr[nr++] = bh;
continue;
}
bh->b_dev = VFS_I(ni)->i_dev;
/* Is the block within the allowed limits? */
if (iblock < lblock) {
@@ -155,9 +168,14 @@ static int ntfs_file_read_block(struct page *page)
bh->b_blocknr = ((lcn << vol->cluster_size_bits)
+ vcn_ofs) >> blocksize_bits;
bh->b_state |= (1UL << BH_Mapped);
/* Only read initialized data blocks. */
if (iblock < zblock) {
arr[nr++] = bh;
continue;
}
/* Fully non-initialized data block, zero it. */
goto handle_zblock;
}
/* It is a hole, need to zero it. */
if (lcn == LCN_HOLE)
goto handle_hole;
@@ -183,6 +201,7 @@ static int ntfs_file_read_block(struct page *page)
handle_hole:
bh->b_blocknr = -1UL;
bh->b_state &= ~(1UL << BH_Mapped);
handle_zblock:
memset(kmap(page) + i * blocksize, 0, blocksize);
flush_dcache_page(page);
kunmap(page);
@@ -301,6 +320,7 @@ static int ntfs_file_readpage(struct file *file, struct page *page)
bytes);
} else
memset(addr, 0, PAGE_CACHE_SIZE);
flush_dcache_page(page);
kunmap(page);
SetPageUptodate(page);
@@ -313,7 +333,9 @@ static int ntfs_file_readpage(struct file *file, struct page *page)
return err;
}
/*
/**
* end_buffer_read_mftbmp_async -
*
* Async io completion handler for accessing mft bitmap. Adapted from
* end_buffer_read_mst_async().
*/
@@ -373,13 +395,17 @@ static void end_buffer_read_mftbmp_async(struct buffer_head *bh, int uptodate)
return;
}
/* Readpage for accessing mft bitmap. Adapted from ntfs_mst_readpage(). */
/**
* ntfs_mftbmp_readpage -
*
* Readpage for accessing mft bitmap. Adapted from ntfs_mst_readpage().
*/
static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
{
VCN vcn;
LCN lcn;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
sector_t iblock, lblock;
sector_t iblock, lblock, zblock;
unsigned int blocksize, blocks, vcn_ofs;
int nr, i;
unsigned char blocksize_bits;
@@ -390,6 +416,7 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
blocksize = vol->sb->s_blocksize;
blocksize_bits = vol->sb->s_blocksize_bits;
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize);
bh = head = page_buffers(page);
if (!bh)
@@ -398,11 +425,18 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
blocks = PAGE_CACHE_SIZE >> blocksize_bits;
iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
lblock = (vol->mftbmp_allocated_size + blocksize - 1) >> blocksize_bits;
zblock = (vol->mftbmp_initialized_size + blocksize - 1) >>
blocksize_bits;
/* Loop through all the buffers in the page. */
nr = i = 0;
do {
BUG_ON(buffer_mapped(bh) || buffer_uptodate(bh));
if (unlikely(buffer_uptodate(bh)))
continue;
if (unlikely(buffer_mapped(bh))) {
arr[nr++] = bh;
continue;
}
bh->b_dev = vol->mft_ino->i_dev;
/* Is the block within the allowed limits? */
if (iblock < lblock) {
@@ -421,9 +455,14 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
bh->b_blocknr = ((lcn << vol->cluster_size_bits)
+ vcn_ofs) >> blocksize_bits;
bh->b_state |= (1UL << BH_Mapped);
/* Only read initialized data blocks. */
if (iblock < zblock) {
arr[nr++] = bh;
continue;
}
/* Fully non-initialized data block, zero it. */
goto handle_zblock;
}
if (lcn != LCN_HOLE) {
/* Hard error, zero out region. */
SetPageError(page);
@@ -442,6 +481,7 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
*/
bh->b_blocknr = -1UL;
bh->b_state &= ~(1UL << BH_Mapped);
handle_zblock:
memset(kmap(page) + i * blocksize, 0, blocksize);
flush_dcache_page(page);
kunmap(page);
@@ -593,15 +633,6 @@ static void end_buffer_read_mst_async(struct buffer_head *bh, int uptodate)
* the page before finally marking it uptodate and unlocking it.
*
* Contains an adapted version of fs/buffer.c::block_read_full_page().
*
* TODO:/FIXME: The current implementation is simple but wasteful as we perform
* actual i/o from disk for all data up to allocated size completely ignoring
* the fact that initialized size, and data size for that matter, may well be
* lower and hence there is no point in reading them in. We can just zero the
* page range, which is what is currently done in our async i/o completion
* handler anyway, once the read from disk completes. However, I am not sure how
* to setup the buffer heads in that case, so for now we do the pointless i/o.
* Any help with this would be appreciated...
*/
int ntfs_mst_readpage(struct file *dir, struct page *page)
{
@@ -610,7 +641,7 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
ntfs_inode *ni;
ntfs_volume *vol;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
sector_t iblock, lblock;
sector_t iblock, lblock, zblock;
unsigned int blocksize, blocks, vcn_ofs;
int i, nr;
unsigned char blocksize_bits;
@@ -624,6 +655,7 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
blocksize_bits = VFS_I(ni)->i_blkbits;
blocksize = 1 << blocksize_bits;
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize);
bh = head = page_buffers(page);
if (!bh)
@@ -632,6 +664,7 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
blocks = PAGE_CACHE_SIZE >> blocksize_bits;
iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
#ifdef DEBUG
if (unlikely(!ni->run_list.rl && !ni->mft_no))
@@ -642,7 +675,12 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
/* Loop through all the buffers in the page. */
nr = i = 0;
do {
BUG_ON(buffer_mapped(bh) || buffer_uptodate(bh));
if (unlikely(buffer_uptodate(bh)))
continue;
if (unlikely(buffer_mapped(bh))) {
arr[nr++] = bh;
continue;
}
bh->b_dev = VFS_I(ni)->i_dev;
/* Is the block within the allowed limits? */
if (iblock < lblock) {
@@ -664,9 +702,14 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
bh->b_blocknr = ((lcn << vol->cluster_size_bits)
+ vcn_ofs) >> blocksize_bits;
bh->b_state |= (1UL << BH_Mapped);
/* Only read initialized data blocks. */
if (iblock < zblock) {
arr[nr++] = bh;
continue;
}
/* Fully non-initialized data block, zero it. */
goto handle_zblock;
}
/* It is a hole, need to zero it. */
if (lcn == LCN_HOLE)
goto handle_hole;
@@ -692,6 +735,7 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
handle_hole:
bh->b_blocknr = -1UL;
bh->b_state &= ~(1UL << BH_Mapped);
handle_zblock:
memset(kmap(page) + i * blocksize, 0, blocksize);
flush_dcache_page(page);
kunmap(page);
@@ -721,7 +765,9 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
return nr;
}
/* Address space operations for accessing normal file data. */
/**
* ntfs_file_aops - address space operations for accessing normal file data
*/
struct address_space_operations ntfs_file_aops = {
writepage: NULL, /* Write dirty page to disk. */
readpage: ntfs_file_readpage, /* Fill page with data. */
@@ -733,7 +779,9 @@ struct address_space_operations ntfs_file_aops = {
typedef int readpage_t(struct file *, struct page *);
/* Address space operations for accessing mftbmp. */
/**
* ntfs_mftbmp_aops - address space operations for accessing mftbmp
*/
struct address_space_operations ntfs_mftbmp_aops = {
writepage: NULL, /* Write dirty page to disk. */
readpage: (readpage_t*)ntfs_mftbmp_readpage, /* Fill page with
@@ -744,7 +792,9 @@ struct address_space_operations ntfs_mftbmp_aops = {
commit_write: NULL, /* . */
};
/*
/**
* ntfs_dir_aops -
*
* Address space operations for accessing normal directory data (i.e. index
* allocation attribute). We can't just use the same operations as for files
* because 1) the attribute is different and even more importantly 2) the index
......
@@ -69,7 +69,7 @@ int allocate_compression_buffers(void)
BUG_ON(ntfs_compression_buffers);
ntfs_compression_buffers = (u8**)kmalloc(smp_num_cpus * sizeof(u8 *),
ntfs_compression_buffers = (u8**)kmalloc(smp_num_cpus * sizeof(u8*),
GFP_KERNEL);
if (!ntfs_compression_buffers)
return -ENOMEM;
@@ -81,7 +81,7 @@ int allocate_compression_buffers(void)
if (i == smp_num_cpus)
return 0;
/* Allocation failed, cleanup and return error. */
for (j = 0; i < j; j++)
for (j = 0; j < i; j++)
vfree(ntfs_compression_buffers[j]);
kfree(ntfs_compression_buffers);
return -ENOMEM;
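For clarity, the hunk above replaces "for (j = 0; i < j; j++)" with
"for (j = 0; j < i; j++)": the old condition is false on the very first
pass (j starts at 0 and i is never negative), so the buffers that were
successfully allocated before the failure were never freed. A standalone
user-space sketch of the allocate-all-or-roll-back idiom (NBUFS and BUFSZ
are made-up stand-ins; the real code allocates one decompression buffer
per CPU with vmalloc and frees with vfree):

#include <stdlib.h>

#define NBUFS	4	/* stand-in for smp_num_cpus */
#define BUFSZ	4096	/* stand-in for the per-cpu buffer size */

static unsigned char *bufs[NBUFS];

/* Allocate every buffer, or free the ones that succeeded and fail. */
static int allocate_buffers(void)
{
	int i, j;

	for (i = 0; i < NBUFS; i++) {
		bufs[i] = malloc(BUFSZ);
		if (!bufs[i])
			break;
	}
	if (i == NBUFS)
		return 0;	/* all allocations succeeded */
	/*
	 * Allocation i failed; roll back buffers 0..i-1. The buggy
	 * version tested "i < j" here, which is false when j == 0,
	 * so the loop body never ran and the buffers leaked.
	 */
	for (j = 0; j < i; j++)
		free(bufs[j]);
	return -1;		/* the driver returns -ENOMEM here */
}

int main(void)
{
	return allocate_buffers() ? 1 : 0;
}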
......
@@ -69,44 +69,6 @@ static void __format_mft_record(MFT_RECORD *m, const int size,
a->length = cpu_to_le32(0);
}
/**
* format_mft_record2 - initialize an empty mft record
* @vfs_sb: vfs super block of volume
* @inum: mft record number / inode number to format
* @mft_rec: mapped, pinned and locked mft record (optional)
*
* Initialize an empty mft record. This is used when extending the MFT.
*
* If @mft_rec is NULL, we call map_mft_record() to obtain the record and we
* unmap it again when finished.
*
* We return 0 on success or -errno on error.
*/
#if 0
// Can't do this as iget_map_mft_record no longer exists...
int format_mft_record2(struct super_block *vfs_sb, const unsigned long inum,
MFT_RECORD *mft_rec)
{
MFT_RECORD *m;
ntfs_inode *ni;
if (mft_rec)
m = mft_rec;
else {
m = iget_map_mft_record(WRITE, vfs_sb, inum, &ni);
if (IS_ERR(m))
return PTR_ERR(m);
}
__format_mft_record(m, NTFS_SB(vfs_sb)->mft_record_size, inum);
if (!mft_rec) {
// TODO: dirty mft record
unmap_mft_record(WRITE, ni);
// TODO: Do stuff to get rid of the ntfs_inode
}
return 0;
}
#endif
/**
* format_mft_record - initialize an empty mft record
* @ni: ntfs inode of mft record
@@ -340,71 +302,6 @@ MFT_RECORD *map_mft_record(const int rw, ntfs_inode *ni)
return m;
}
/**
* iget_map_mft_record - iget, map, pin, lock an mft record
* @rw: map for read (rw = READ) or write (rw = WRITE)
* @vfs_sb: vfs super block of mounted volume
* @inum: inode number / MFT record number whose mft record to map
* @vfs_ino: output parameter which we set to the inode on successful return
*
* Does the same as map_mft_record(), except that it starts out only with the
* knowledge of the super block (@vfs_sb) and the mft record number which is of
* course the same as the inode number (@inum).
*
* On success, *@vfs_ino will contain a pointer to the inode structure of the
* mft record on return. On error return, *@vfs_ino is undefined.
*
* See map_mft_record() description for details and for a description of how
* errors are returned and what error codes are defined.
*
* IMPORTANT: The caller is responsible for calling iput(@vfs_ino) when
* finished with the inode, i.e. after unmap_mft_record() has been called. If
* that is omitted you will get busy inodes upon umount...
*/
#if 0
// this is no longer possible. iget() cannot be called as we may be loading
// an ntfs inode which will never have a corresponding vfs inode counterpart.
// this is not going to be pretty. )-:
// we need our own hash for ntfs inodes now, ugh. )-:
// not having vfs inodes associated with all ntfs inodes is a bad mistake I am
// getting the impression. this will in the end turn out uglier than just
// having iget_no_wait().
// my only hope is that we can get away without this functionality in the driver
// altogether. we are ok for extent inodes already because we only handle them
// via map_extent_mft_record().
// if we really need it, we could have a list or hash of "pure ntfs inodes"
// to cope with this situation, so the lookup would be:
// look for the inode and if not present look for pure ntfs inode and if not
// present add a new pure ntfs inode. under this scheme extent inodes have to
// also be added to the list/hash of pure inodes.
MFT_RECORD *iget_map_mft_record(const int rw, struct super_block *vfs_sb,
const unsigned long inum, struct inode **vfs_ino)
{
struct inode *inode;
MFT_RECORD *mrec;
/*
* The corresponding iput() happens when clear_inode() is called on the
* base mft record of this extent mft record.
* When used on base mft records, caller has to perform the iput().
*/
inode = iget(vfs_sb, inum);
if (inode && !is_bad_inode(inode)) {
mrec = map_mft_record(rw, inode);
if (!IS_ERR(mrec)) {
ntfs_debug("Success for i_ino 0x%lx.", inum);
*vfs_ino = inode;
return mrec;
}
} else
mrec = ERR_PTR(-EIO);
if (inode)
iput(inode);
ntfs_debug("Failed for i_ino 0x%lx.", inum);
return mrec;
}
#endif
/**
* unmap_mft_record - release a mapped mft record
* @rw: unmap from read (@rw = READ) or write (@rw = WRITE)
......