Commit 9b1edda1 authored by Anton Altaparmakov, committed by Anton Altaparmakov

- Fix memory leak in reading compressed data.

- Don't read the run list of file data and directory indexes in read_inode.
- Various cleanups.
parent 7b5911d1
@@ -8,7 +8,7 @@ ToDo:
functions need to clone block_read_full_page and modify it to cope
with the significance of the different attribute sizes.
Still need to go through:
aops.c, compress.c, dir.c
aops.c, dir.c
- Find and fix bugs.
- W.r.t. s_maxbytes still need to be careful on reading/truncating as
there are dragons lurking in the details, e.g. read_inode() currently
@@ -17,13 +17,8 @@ ToDo:
truncate the visible i_size? Will the user just get -E2BIG (or
whatever) on open()? Or will (s)he be able to open() but lseek() and
read() will fail when s_maxbytes is reached? -> Investigate this!
- Perhaps don't bother getting the run list in ntfs_read_inode() at
all. But we do have to find the data/index root attribute to get the
inode size so we might want to decompress the mapping pairs of the
first extent in there anyway. -> Ponder this. Directory listings
would have significant speedups but the first access to each file/dir
would have a small speed penalty.
- Implement/allow non-resident index bitmaps in ntfs_readdir().
- Implement/allow non-resident index bitmaps in ntfs_readdir() and
consider initialized_size, etc.
- vcn_to_lcn() should somehow return the correct pointer within the
->run_list so we can get at the lcns for the following vcns; this is
strictly a speed optimization (a possible interface is sketched after
this list). Obviously need to keep the ->run_list
@@ -33,6 +28,9 @@ ToDo:
initialized_size, just zero the buffer heads instead. Question: How
to set up the buffer heads so they point to the on-disk location
correctly (after all they are allocated) but are not read from disk?
- Consider whether ntfs_file_read_compressed_block() should be coping
with initialized_size < data_size. I don't think it can happen, but
it requires more careful consideration.
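A purely illustrative, user-space sketch of the vcn_to_lcn() idea mentioned a few items above: the lookup hands back the run list element it found, so the caller can resume the scan there for the following vcns instead of searching from the start each time. The structure, names, and hint convention here are hypothetical and are not the driver's interface; locking, holes, and incomplete mappings are ignored.

#include <stdio.h>
#include <stddef.h>

typedef long long VCN;
typedef long long LCN;

struct run {            /* one run: @len clusters starting at @vcn map to @lcn */
        VCN vcn;
        LCN lcn;
        long long len;
};

/* Find the run containing @vcn; start the scan at @hint when one is given. */
static const struct run *run_for_vcn(const struct run *rl, size_t nr,
                const struct run *hint, VCN vcn)
{
        const struct run *r = hint ? hint : rl;

        for (; r < rl + nr; r++)
                if (vcn >= r->vcn && vcn < r->vcn + r->len)
                        return r;
        return NULL;
}

int main(void)
{
        const struct run rl[] = { { 0, 1000, 4 }, { 4, 2000, 8 } };
        const struct run *r = NULL;
        VCN vcn;

        /* Sequential lookups reuse the previous result as the hint. */
        for (vcn = 0; vcn < 12; vcn++) {
                r = run_for_vcn(rl, 2, r, vcn);
                printf("vcn %lld -> lcn %lld\n", vcn, r->lcn + (vcn - r->vcn));
        }
        return 0;
}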
tng-0.0.9 - Work in progress
@@ -44,6 +42,12 @@ tng-0.0.9 - Work in progress
mft.c::ntfs_mft_readpage(), aops.c::end_buffer_read_index_async(),
and attrib.c::load_attribute_list().
- Lock the run list in attrib.c::load_attribute_list() while using it.
- Fix memory leak in ntfs_file_read_compressed_block() and generally
clean up compress.c a little, removing some commented-out and unused
debug code.
- Tidy up dir.c a little bit.
- Don't bother getting the run list in inode.c::ntfs_read_inode().
tng-0.0.8 - 08/03/2002 - Now using BitKeeper, http://linux-ntfs.bkbits.net/
@@ -305,7 +305,6 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
/* Get the next tag and advance to first token. */
tag = *cb++;
//ntfs_debug("Found tag = 0x%x.", tag);
/* Parse the eight tokens described by the tag. */
for (token = 0; token < 8; token++, tag >>= 1) {
@@ -319,8 +318,6 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
/* Determine token type and parse appropriately.*/
if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
//ntfs_debug("Found symbol token = %c (0x%x).", *cb,
// *cb);
/*
* We have a symbol token, copy the symbol across, and
* advance the source and destination positions.
@@ -332,7 +329,6 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
continue;
}
//ntfs_debug("Found phrase token = 0x%x.", le16_to_cpup(cb));
/*
* We have a phrase token. Make sure it is not the first tag in
* the sb as this is illegal and would confuse the code below.
@@ -365,11 +361,7 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
/* Now calculate the length of the byte sequence. */
length = (pt & (0xfff >> lg)) + 3;
#if 0
ntfs_debug("starting position = 0x%x, back pointer = 0x%x, "
"length = 0x%x.", *dest_ofs - do_sb_start -
1, (pt >> (12 - lg)) + 1, length);
#endif
/* Advance destination position and verify it is in range. */
*dest_ofs += length;
if (*dest_ofs > do_sb_end)
@@ -379,14 +371,12 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
max_non_overlap = dp_addr - dp_back_addr;
if (length <= max_non_overlap) {
//ntfs_debug("Found non-overlapping byte sequence.");
/* The byte sequence doesn't overlap, just copy it. */
memcpy(dp_addr, dp_back_addr, length);
/* Advance destination pointer. */
dp_addr += length;
} else {
//ntfs_debug("Found overlapping byte sequence.");
/*
* The byte sequence does overlap, copy non-overlapping
* part and then do a slow byte by byte copy for the
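As an aside for readers following the phrase-token arithmetic above: the 16-bit token splits into a back pointer, (pt >> (12 - lg)) + 1, and a length, (pt & (0xfff >> lg)) + 3, where lg grows with the offset already reached inside the current sub-block. The stand-alone sketch below illustrates that split. The computation of lg is not visible in the hunks above and follows the usual LZNT1 rule, so treat it as an assumption; the function name is made up.

#include <stdio.h>
#include <stdint.h>

/* Split a 16-bit LZNT1 phrase token into back pointer and length.
 * @ofs_in_sb is the number of bytes already decompressed in the current
 * sub-block (always >= 1, since a phrase token can never come first). */
static void split_phrase_token(uint16_t pt, unsigned int ofs_in_sb,
                unsigned int *back, unsigned int *length)
{
        unsigned int lg = 0, i;

        /* The further into the sub-block we are, the more bits the back
         * pointer needs and the fewer bits are left for the length. */
        for (i = ofs_in_sb - 1; i >= 0x10; i >>= 1)
                lg++;
        *back = (pt >> (12 - lg)) + 1;      /* distance back from current position */
        *length = (pt & (0xfff >> lg)) + 3; /* bytes to copy, minimum 3 */
}

int main(void)
{
        unsigned int back, length;

        /* Token 0x0003 seen 32 bytes into a sub-block gives back = 1,
         * length = 6: an overlapping sequence of exactly the kind the
         * code above has to copy byte by byte. */
        split_phrase_token(0x0003, 32, &back, &length);
        printf("back = %u, length = %u\n", back, length);
        return 0;
}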
@@ -441,6 +431,12 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
*
* FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
* handling sparse and compressed cbs. (AIA)
*
* FIXME: At the moment we don't do any zeroing out in the case that
* initialized_size is less than data_size. This should be safe because of the
nature of the compression algorithm used. Just in case, we check in
read_inode and output a warning message if the two sizes are not equal
for a compressed file.
*/
int ntfs_file_read_compressed_block(struct page *page)
{
@@ -485,16 +481,6 @@ int ntfs_file_read_compressed_block(struct page *page)
ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
"%i.", index, cb_size, nr_pages);
/*
* Uncommenting the below line results in the compressed data being
* read without any decompression. Compression blocks are padded with
* zeroes in order to give them in their proper alignments. I am
* leaving this here as it is a handy debugging / studying tool for
* compressed data.
*/
#if 0
return block_read_full_page(page, ntfs_file_get_block);
#endif
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);
/* Allocate memory to store the buffer heads we need. */
@@ -558,16 +544,11 @@ int ntfs_file_read_compressed_block(struct page *page)
nr_cbs--;
nr_bhs = 0;
/* Read all cb buffer heads one cluster run at a time. */
/* Read all cb buffer heads one cluster at a time. */
for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
vcn++) {
BOOL is_retry = FALSE;
retry_remap:
/* Make sure we are not overflowing the file limits. */
if (vcn << vol->cluster_size_bits >= ni->initialized_size) {
/* Overflow, just zero this region. */
// TODO: AIA
}
/* Find lcn of vcn and convert it into blocks. */
down_read(&ni->run_list.lock);
lcn = vcn_to_lcn(ni->run_list.rl, vcn);
@@ -593,7 +574,6 @@ int ntfs_file_read_compressed_block(struct page *page)
/* Read the lcn from device in chunks of block_size bytes. */
max_block = block + (vol->cluster_size >> block_size_bits);
do {
// TODO: Need overflow checks here, too! (AIA)
ntfs_debug("block = 0x%x.", block);
if (unlikely(!(bhs[nr_bhs] = getblk(dev, block,
block_size))))
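For orientation, the block arithmetic in this loop boils down to scaling the cluster number by the cluster-to-block size ratio: one cluster spans cluster_size >> block_size_bits device blocks, which is what max_block expresses above. A tiny stand-alone illustration with example constants; the driver's variable names and the shift from lcn to the first block number are not all visible in the hunk above, so take them as assumptions.

#include <stdio.h>

int main(void)
{
        const unsigned int cluster_size_bits = 12;  /* 4096-byte clusters */
        const unsigned int block_size_bits = 9;     /* 512-byte device blocks */
        const long long lcn = 100;                  /* example logical cluster */

        /* First device block of the cluster and one past its last block. */
        long long block = lcn << (cluster_size_bits - block_size_bits);
        long long max_block = block +
                        ((1 << cluster_size_bits) >> block_size_bits);

        printf("blocks [%lld, %lld) cover lcn %lld\n", block, max_block, lcn);
        return 0;
}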
@@ -834,6 +814,9 @@ int ntfs_file_read_compressed_block(struct page *page)
}
}
/* We no longer need the list of pages. */
kfree(pages);
/* If we have completed the requested page, we return success. */
if (likely(xpage_done))
return 0;
@@ -876,6 +859,7 @@ int ntfs_file_read_compressed_block(struct page *page)
page_cache_release(page);
}
}
kfree(pages);
return -EIO;
}
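The memory leak named in the ChangeLog came from return paths that never released the pages array; the two kfree(pages) calls added above cover the success return and the -EIO return. Below is a minimal user-space illustration of the same pattern, with malloc/free standing in for kmalloc/kfree and a made-up helper in place of the real I/O.

#include <stdlib.h>

/* Stand-in for the real work done on the page array. */
static int do_io(void **pages, int nr)
{
        (void)pages;
        return nr > 0 ? 0 : -1;
}

static int read_block(int nr_pages)
{
        void **pages = malloc(nr_pages * sizeof(*pages));
        int err;

        if (!pages)
                return -1;
        err = do_io(pages, nr_pages);
        free(pages);            /* released on success and on failure alike */
        return err;
}

int main(void)
{
        return read_block(4) ? 1 : 0;
}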
@@ -203,8 +203,8 @@ u64 ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const uchar_t *uname,
* of PAGE_CACHE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn << dir_ni->_IDM(index_vcn_size_bits)
>> PAGE_CACHE_SHIFT);
page = ntfs_map_page(ia_mapping, vcn <<
dir_ni->_IDM(index_vcn_size_bits) >> PAGE_CACHE_SHIFT);
if (IS_ERR(page)) {
ntfs_error(sb, "Failed to map directory index page, error %ld.",
-PTR_ERR(page));
@@ -213,8 +213,8 @@ u64 ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const uchar_t *uname,
kaddr = (u8*)page_address(page);
fast_descend_into_child_node:
/* Get to the index allocation block. */
ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << dir_ni->_IDM(index_vcn_size_bits)) &
~PAGE_CACHE_MASK));
ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
dir_ni->_IDM(index_vcn_size_bits)) & ~PAGE_CACHE_MASK));
/* Bounds checks. */
if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
@@ -373,7 +373,8 @@ u64 ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const uchar_t *uname,
}
/* Child node present, descend into it. */
old_vcn = vcn;
vcn = sle64_to_cpup((u8*)ie + le16_to_cpu(ie->_IEH(length)) - 8);
vcn = sle64_to_cpup((u8*)ie +
le16_to_cpu(ie->_IEH(length)) - 8);
if (vcn >= 0) {
/* If vcn is in the same page cache page as old_vcn we
* recycle the mapped page. */
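The page cache arithmetic used in this function reads as: byte offset of the index block = vcn << index_vcn_size_bits, page index = byte offset >> PAGE_CACHE_SHIFT, and position inside the mapped page = byte offset & ~PAGE_CACHE_MASK. A stand-alone sketch follows; the constants (4096-byte pages, 512-byte index vcns) are illustrative values, not taken from the code above.

#include <stdio.h>

#define PAGE_CACHE_SHIFT        12
#define PAGE_CACHE_SIZE         (1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK         (~(PAGE_CACHE_SIZE - 1))

int main(void)
{
        const unsigned int index_vcn_size_bits = 9;     /* one index vcn = 512 bytes */
        const long long vcn = 21;                       /* example index block vcn */
        unsigned long long byte_ofs =
                        (unsigned long long)vcn << index_vcn_size_bits;

        unsigned long page_index = byte_ofs >> PAGE_CACHE_SHIFT;
        unsigned long ofs_in_page = byte_ofs & ~PAGE_CACHE_MASK;

        /* The index block starts ofs_in_page bytes into page page_index. */
        printf("page %lu, offset 0x%lx\n", page_index, ofs_in_page);
        return 0;
}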
@@ -641,7 +642,8 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
err = -EIO;
goto kf_unm_err_out;
}
bmp = (u8*)ctx->attr + le16_to_cpu(ctx->attr->_ARA(value_offset));
bmp = (u8*)ctx->attr +
le16_to_cpu(ctx->attr->_ARA(value_offset));
}
/* Get the offset into the index allocation attribute. */
ia_pos = (s64)filp->f_pos - vol->mft_record_size;
@@ -698,7 +700,8 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
"Directory inode 0x%Lx is corrupt or driver "
"bug. ",
(long long)sle64_to_cpu(ia->index_block_vcn),
(long long)ia_pos >> ndir->_IDM(index_vcn_size_bits),
(long long)ia_pos >>
ndir->_IDM(index_vcn_size_bits),
(unsigned long long)ndir->mft_no);
err = -EIO;
goto unm_dir_err_out;
@@ -732,7 +735,8 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
if (index_end > (u8*)ia + ndir->_IDM(index_block_size)) {
ntfs_error(sb, "Size of index buffer (VCN 0x%Lx) of directory "
"inode 0x%Lx exceeds maximum size.",
(long long)ia_pos >> ndir->_IDM(index_vcn_size_bits),
(long long)ia_pos >>
ndir->_IDM(index_vcn_size_bits),
(unsigned long long)ndir->mft_no);
err = -EIO;
goto unm_dir_err_out;
@@ -810,30 +814,3 @@ struct file_operations ntfs_dir_ops = {
readdir: ntfs_readdir, /* Read directory. */
};
#if 0
/* NOTE: write, poll, fsync, readv, writev can be called without the big
* kernel lock held in all filesystems. */
struct file_operations {
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*write) (struct file *, const char *, size_t, loff_t *);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
int (*ioctl) (struct inode *, struct file *, unsigned int,
unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, struct dentry *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
loff_t *);
ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
loff_t *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
};
#endif
@@ -569,19 +569,6 @@ void ntfs_read_inode(struct inode *vi)
ctx->attr->_ANR(initialized_size));
ni->allocated_size = sle64_to_cpu(
ctx->attr->_ANR(allocated_size));
/*
* Setup the run list. No need for locking as we have exclusive
* access to the inode at this time.
*/
ni->run_list.rl = decompress_mapping_pairs(vol, ctx->attr,
NULL);
if (IS_ERR(ni->run_list.rl)) {
err = PTR_ERR(ni->run_list.rl);
ni->run_list.rl = NULL;
ntfs_error(vi->i_sb, "Mapping pairs decompression "
"failed with error code %i.", -err);
goto ec_put_unm_err_out;
}
/* Find bitmap attribute. */
reinit_attr_search_ctx(ctx);
if (!lookup_attr(AT_BITMAP, I30, 4, CASE_SENSITIVE, 0, NULL, 0,
@@ -737,34 +724,30 @@ void ntfs_read_inode(struct inode *vi)
"You should run chkdsk.");
goto put_unm_err_out;
}
/* $MFT is special as we have the run_list already. */
if (likely(vi->i_ino != FILE_MFT)) {
/*
* Setup the run list. No need for locking as
* we have exclusive access to the inode at
* this time.
*/
ni->run_list.rl = decompress_mapping_pairs(vol,
ctx->attr, NULL);
if (IS_ERR(ni->run_list.rl)) {
err = PTR_ERR(ni->run_list.rl);
ni->run_list.rl = NULL;
ntfs_error(vi->i_sb, "Mapping pairs "
"decompression failed "
"with error code %i.",
-err);
goto ec_put_unm_err_out;
}
}
/* Setup all the sizes. */
vi->i_size = sle64_to_cpu(ctx->attr->_ANR(data_size));
ni->initialized_size = sle64_to_cpu(
ctx->attr->_ANR(initialized_size));
ni->allocated_size = sle64_to_cpu(
ctx->attr->_ANR(allocated_size));
if (NInoCompressed(ni))
if (NInoCompressed(ni)) {
ni->_ICF(compressed_size) = sle64_to_cpu(
ctx->attr->_ANR(compressed_size));
if (vi->i_size != ni->initialized_size)
ntfs_warning(vi->i_sb, "Compressed "
"file with data_size "
"unequal to "
"initialized size "
"found. This will "
"probably cause "
"problems when trying "
"to access the file. "
"Please notify "
"linux-ntfs-dev@"
"lists.sf.net that you "
"saw this message. "
"Thanks!");
}
} else { /* Resident attribute. */
/*
* Make all sizes equal for simplicity in read code