Commit 7ff4e129 authored by Anton Altaparmakov

NTFS: 2.1.9 release - Fix two bugs in the decompression engine's handling of corner cases.
parent b404fc90
@@ -273,6 +273,8 @@ ChangeLog
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
+2.1.9:
+        - Fix two bugs in handling of corner cases in the decompression engine.
 2.1.8:
         - Read the $MFT mirror and compare it to the $MFT and if the two do not
           match, force a read-only mount and do not allow read-write remounts.
@@ -19,7 +19,19 @@ ToDo:
           sufficient for synchronisation here. We then just need to make sure
           ntfs_readpage/writepage/truncate interoperate properly with us.
-2.1.8 - Handle $MFT mirror and $LogFile, improve time ihandling, and cleanups.
+2.1.9 - Fix two bugs in decompression engine.
+        - Fix a bug where we would not always detect that we have reached the
+          end of a compression block because we were ending at minus one byte
+          which is effectively the same as being at the end. The fix is to
+          check whether the uncompressed buffer has been fully filled and if so
+          we assume we have reached the end of the compression block. A big
+          thank you to Marcin Gibuła for the bug report, the assistance in
+          tracking down the bug and testing the fix.
+        - Fix a possible bug where when a compressed read is truncated to the
+          end of the file, the offset inside the last page was not truncated.
+2.1.8 - Handle $MFT mirror and $LogFile, improve time handling, and cleanups.
         - Use get_bh() instead of manual atomic_inc() in fs/ntfs/compress.c.
         - Modify fs/ntfs/time.c::ntfs2utc(), get_current_ntfs_time(), and
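The termination test described in the first 2.1.9 entry above can be illustrated outside the kernel. The following user-space sketch is hypothetical (the helper sub_block_loop_done() and its exact parameters are not part of the NTFS driver); it only mirrors the three conditions the patch checks in ntfs_decompress(): end of the compression block, a completely filled destination buffer, and a zero sub-block header.

/*
 * Hypothetical user-space sketch (not the kernel function): it mirrors the
 * three-way termination test that 2.1.9 adds to ntfs_decompress(). The
 * parameter names follow the kernel variables (cb, cb_end, dest_index,
 * dest_ofs), but this helper itself does not exist in fs/ntfs/compress.c.
 */
#include <stdint.h>

int sub_block_loop_done(const uint8_t *cb, const uint8_t *cb_end,
                unsigned int dest_index, unsigned int dest_ofs,
                unsigned int dest_max_index, unsigned int dest_max_ofs)
{
        /* The position in the compression block has reached its end. */
        if (cb == cb_end)
                return 1;
        /*
         * The uncompressed destination is completely filled. This is the
         * 2.1.9 addition: it catches the corner case where cb stops one byte
         * short of cb_end, which neither of the other two tests detects.
         * (The kernel ORs this test last; the sketch checks it before
         * reading the next sub-block header.)
         */
        if (dest_index == dest_max_index && dest_ofs == dest_max_ofs)
                return 1;
        /* A zero little-endian u16 sub-block header also ends the data. */
        if (!(cb[0] | ((unsigned int)cb[1] << 8)))
                return 1;
        return 0;
}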
@@ -5,7 +5,7 @@ obj-$(CONFIG_NTFS_FS) += ntfs.o
 ntfs-objs := aops.o attrib.o compress.o debug.o dir.o file.o inode.o logfile.o \
              mft.o mst.o namei.o super.o sysctl.o unistr.o upcase.o
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.8\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.9\"
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
@@ -197,9 +197,15 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 do_next_sb:
         ntfs_debug("Beginning sub-block at offset = 0x%x in the cb.",
                         cb - cb_start);
-        /* Have we reached the end of the compression block? */
-        if (cb == cb_end || !le16_to_cpup((u16*)cb)) {
+        /*
+         * Have we reached the end of the compression block or the end of the
+         * decompressed data? The latter can happen for example if the current
+         * position in the compression block is one byte before its end so the
+         * first two checks do not detect it.
+         */
+        if (cb == cb_end || !le16_to_cpup((u16*)cb) ||
+                        (*dest_index == dest_max_index &&
+                        *dest_ofs == dest_max_ofs)) {
                 int i;
                 ntfs_debug("Completed. Returning success (0).");
@@ -501,7 +507,7 @@ int ntfs_read_compressed_block(struct page *page)
          */
         unsigned int nr_pages = (end_vcn - start_vcn) <<
                         vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
-        unsigned int xpage, max_page, cur_page, cur_ofs, i;
+        unsigned int xpage, max_page, max_ofs, cur_page, cur_ofs, i;
         unsigned int cb_clusters, cb_max_ofs;
         int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
         struct page **pages;
@@ -544,8 +550,11 @@ int ntfs_read_compressed_block(struct page *page)
          */
         max_page = ((VFS_I(ni)->i_size + PAGE_CACHE_SIZE - 1) >>
                         PAGE_CACHE_SHIFT) - offset;
-        if (nr_pages < max_page)
+        max_ofs = (VFS_I(ni)->i_size + PAGE_CACHE_SIZE - 1) & ~PAGE_CACHE_MASK;
+        if (nr_pages < max_page) {
                 max_page = nr_pages;
+                max_ofs = 0;
+        }
         for (i = 0; i < max_page; i++, offset++) {
                 if (i != xpage)
                         pages[i] = grab_cache_page_nowait(mapping, offset);
@@ -713,8 +722,14 @@ int ntfs_read_compressed_block(struct page *page)
         cb_max_page >>= PAGE_CACHE_SHIFT;
         /* Catch end of file inside a compression block. */
-        if (cb_max_page > max_page)
-                cb_max_page = max_page;
+        if (cb_max_page >= max_page) {
+                if (cb_max_page > max_page) {
+                        cb_max_page = max_page;
+                        cb_max_ofs = max_ofs;
+                } else if (cb_max_ofs > max_ofs) {
+                        cb_max_ofs = max_ofs;
+                }
+        }
         if (vcn == start_vcn - cb_clusters) {
                 /* Sparse cb, zero out page range overlapping the cb. */
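The second fix, the max_ofs/cb_max_ofs handling in the two hunks above, amounts to limiting the last page of a compression block to the portion actually covered by i_size. The small user-space program below is a sketch under assumed values (a 4096-byte page standing in for PAGE_CACHE_SIZE, an example i_size, and example compression-block bounds); it reproduces only the arithmetic and clamping, it is not the driver code.

/*
 * Hypothetical user-space sketch of the i_size clamping added in 2.1.9; the
 * variable names mirror ntfs_read_compressed_block() but the program itself
 * is illustrative only.
 */
#include <stdio.h>

#define PAGE_CACHE_SHIFT        12                      /* assumed: 4096-byte pages */
#define PAGE_CACHE_SIZE         (1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK         (~(PAGE_CACHE_SIZE - 1))

int main(void)
{
        unsigned long i_size = 10000;   /* example file size in bytes */
        unsigned long offset = 0;       /* first page index of this read */
        unsigned long max_page, max_ofs;
        unsigned long cb_max_page = 4, cb_max_ofs = 0;  /* example cb bounds */

        /* Number of pages needed to cover i_size, relative to offset. */
        max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - offset;
        /* Limit inside the last page; 2.1.8 did not compute this at all. */
        max_ofs = (i_size + PAGE_CACHE_SIZE - 1) & ~PAGE_CACHE_MASK;

        /*
         * Catch end of file inside a compression block (mirrors the last
         * hunk above): clamp the cb's page/offset bounds to the file size.
         */
        if (cb_max_page >= max_page) {
                if (cb_max_page > max_page) {
                        cb_max_page = max_page;
                        cb_max_ofs = max_ofs;
                } else if (cb_max_ofs > max_ofs) {
                        cb_max_ofs = max_ofs;
                }
        }
        /*
         * With the example values this prints:
         * max_page=3 max_ofs=1807 cb_max_page=3 cb_max_ofs=1807
         */
        printf("max_page=%lu max_ofs=%lu cb_max_page=%lu cb_max_ofs=%lu\n",
                        max_page, max_ofs, cb_max_page, cb_max_ofs);
        return 0;
}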