Commit 07de3c3d authored by Anton Altaparmakov

Merge ssh://linux-ntfs@bkbits.net/ntfs-2.5

into cantab.net:/usr/src/ntfs-2.5
parents fd0dacff 53f99682
fs/ntfs/ChangeLog
@@ -20,6 +20,10 @@ ToDo:
 	  sufficient for synchronisation here. We then just need to make sure
 	  ntfs_readpage/writepage/truncate interoperate properly with us.
 
+2.1.1 - WIP
+
+	- Add handling for initialized_size != data_size in compressed files.
+
 2.1.0 - First steps towards write support: implement file overwrite.
 
 	- Add configuration option for developmental write support with an
fs/ntfs/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_NTFS_FS) += ntfs.o
 ntfs-objs := aops.o attrib.o compress.o debug.o dir.o file.o inode.o mft.o \
 	     mst.o namei.o super.o sysctl.o time.o unistr.o upcase.o
 
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.0\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.1-WIP\"
 
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
fs/ntfs/compress.c
@@ -2,8 +2,8 @@
  * compress.c - NTFS kernel compressed attributes handling.
  * Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
- * Copyright (C) 2002 Richard Russon.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -44,7 +44,7 @@ typedef enum {
 	 * The maximum compression block size is by definition 16 * the cluster
 	 * size, with the maximum supported cluster size being 4kiB. Thus the
 	 * maximum compression buffer size is 64kiB, so we use this when
-	 * initializing the per-CPU buffers.
+	 * initializing the compression buffer.
 	 */
 	NTFS_MAX_CB_SIZE	= 64 * 1024,
 } ntfs_compression_constants;
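
For reference, the 64kiB figure is just the product of the two limits the comment names: at most 16 clusters per compression block, and a maximum supported cluster size of 4kiB. A standalone sketch of the arithmetic (illustration only, not driver code):

#include <assert.h>

int main(void)
{
	/* 16 clusters per compression block, 4kiB maximum cluster size. */
	const unsigned long max_cluster_size = 4UL * 1024;
	const unsigned long max_cb_size = 16 * max_cluster_size;

	assert(max_cb_size == 64UL * 1024);	/* == NTFS_MAX_CB_SIZE */
	return 0;
}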
@@ -88,6 +88,40 @@ void free_compression_buffers(void)
 	ntfs_compression_buffer = NULL;
 }
 
+/**
+ * zero_partial_compressed_page - zero out of bounds compressed page region
+ */
+static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
+{
+	u8 *kp = page_address(page);
+	unsigned int kp_ofs;
+
+	ntfs_debug("Zeroing page region outside initialized size.");
+	if (((s64)page->index << PAGE_CACHE_SHIFT) >= ni->initialized_size) {
+		/*
+		 * FIXME: Using clear_page() will become wrong when we get
+		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
+		 */
+		clear_page(kp);
+		return;
+	}
+	kp_ofs = ni->initialized_size & ~PAGE_CACHE_MASK;
+	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
+	return;
+}
+
+/**
+ * handle_bounds_compressed_page - test for&handle out of bounds compressed page
+ */
+static inline void handle_bounds_compressed_page(ntfs_inode *ni,
+		struct page *page)
+{
+	if ((page->index >= (ni->initialized_size >> PAGE_CACHE_SHIFT)) &&
+			(ni->initialized_size < VFS_I(ni)->i_size))
+		zero_partial_compressed_page(ni, page);
+	return;
+}
+
 /**
  * ntfs_decompress - decompress a compression block into an array of pages
  * @dest_pages:		destination array of pages
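
Taken together, the two helpers split the bounds handling: handle_bounds_compressed_page() tests whether the page is at or beyond the page containing initialized_size while the file has a tail between initialized_size and i_size, and zero_partial_compressed_page() then clears either the whole page or only the part past initialized_size. A minimal userspace model of the same tests and offset arithmetic, assuming 4kiB pages; the sizes and page index below are made up for the example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_CACHE_SHIFT	12	/* assumed: 4kiB pages */
#define PAGE_CACHE_SIZE		(1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK		(~(PAGE_CACHE_SIZE - 1))

int main(void)
{
	int64_t initialized_size = 10000;	/* hypothetical */
	int64_t i_size = 16384;			/* hypothetical data size */
	unsigned long index = 2;		/* page covers bytes 8192-12287 */

	/* The test done by handle_bounds_compressed_page(). */
	if (index >= (uint64_t)(initialized_size >> PAGE_CACHE_SHIFT) &&
			initialized_size < i_size) {
		/* The two cases of zero_partial_compressed_page(). */
		if ((int64_t)index << PAGE_CACHE_SHIFT >= initialized_size)
			printf("clear the whole page %lu\n", index);
		else
			printf("zero page %lu from offset %lu\n", index,
					(unsigned long)(initialized_size &
					~PAGE_CACHE_MASK));
	}
	return 0;
}

For these values the page straddles initialized_size, so only bytes 1808 to 4095 of the page would be zeroed.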
@@ -173,19 +207,29 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 	/* We can sleep from now on, so we drop lock. */
 	spin_unlock(&ntfs_cb_lock);
 	/* Second stage: finalize completed pages. */
-	for (i = 0; i < nr_completed_pages; i++) {
-		int di = completed_pages[i];
-
-		dp = dest_pages[di];
-		flush_dcache_page(dp);
-		kunmap(dp);
-		SetPageUptodate(dp);
-		unlock_page(dp);
-		if (di == xpage)
-			*xpage_done = 1;
-		else
-			page_cache_release(dp);
-		dest_pages[di] = NULL;
+	if (nr_completed_pages > 0) {
+		struct page *page = dest_pages[completed_pages[0]];
+		ntfs_inode *ni = NTFS_I(page->mapping->host);
+
+		for (i = 0; i < nr_completed_pages; i++) {
+			int di = completed_pages[i];
+
+			dp = dest_pages[di];
+			/*
+			 * If we are outside the initialized size, zero
+			 * the out of bounds page range.
+			 */
+			handle_bounds_compressed_page(ni, dp);
+			flush_dcache_page(dp);
+			kunmap(dp);
+			SetPageUptodate(dp);
+			unlock_page(dp);
+			if (di == xpage)
+				*xpage_done = 1;
+			else
+				page_cache_release(dp);
+			dest_pages[di] = NULL;
+		}
 	}
 	return err;
 }
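
A note on the reshaped second stage: every completed destination page belongs to the same inode, so the new code looks the ntfs_inode up once via the first completed page instead of once per page, and the surrounding if (nr_completed_pages > 0) guard is what makes that first dereference safe. The same guard-then-hoist pattern in isolation, with hypothetical names (illustration only):

#include <stdio.h>

struct item {
	int owner;	/* stand-in for the shared inode */
};

static void finalize_all(struct item *items[], const int done[], int n)
{
	/* Guard the empty case before touching done[0]. */
	if (n > 0) {
		int owner = items[done[0]]->owner;	/* invariant: shared */
		int i;

		for (i = 0; i < n; i++)
			printf("finalize item %d (owner %d)\n", done[i], owner);
	}
}

int main(void)
{
	struct item a = { 7 }, b = { 7 };
	struct item *items[] = { &a, &b };
	const int done[] = { 0, 1 };

	finalize_all(items, done, 2);
	return 0;
}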
@@ -763,6 +807,11 @@ int ntfs_read_compressed_block(struct page *page)
 		for (; cur2_page < cb_max_page; cur2_page++) {
 			page = pages[cur2_page];
 			if (page) {
+				/*
+				 * If we are outside the initialized size, zero
+				 * the out of bounds page range.
+				 */
+				handle_bounds_compressed_page(ni, page);
 				flush_dcache_page(page);
 				kunmap(page);
 				SetPageUptodate(page);
fs/ntfs/inode.c
@@ -1,7 +1,7 @@
 /**
  * inode.c - NTFS kernel inode handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -996,16 +996,6 @@ static int ntfs_read_locked_inode(struct inode *vi)
 			if (NInoCompressed(ni)) {
 				ni->_ICF(compressed_size) = sle64_to_cpu(
 						ctx->attr->_ANR(compressed_size));
-				if (vi->i_size != ni->initialized_size)
-					ntfs_warning(vi->i_sb, "BUG: Found "
-							"compressed file with "
-							"data_size not equal to "
-							"initialized_size. This will "
-							"probably cause problems when "
-							"trying to access the file. "
-							"Please notify linux-ntfs-dev@"
-							"lists.sf.net that you saw "
-							"this message. Thanks!");
 			}
 		} else { /* Resident attribute. */
 			/*
fs/ntfs/super.c
@@ -1,8 +1,8 @@
 /*
  * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
- * Copyright (c) 2001,2002 Richard Russon.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
+ * Copyright (c) 2001,2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -1761,7 +1761,7 @@ static void __exit exit_ntfs_fs(void)
 }
 
 MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>");
-MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2002 Anton Altaparmakov");
+MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2003 Anton Altaparmakov");
 MODULE_LICENSE("GPL");
 #ifdef DEBUG
 MODULE_PARM(debug_msgs, "i");