Commit 42f79ff7
Authored Feb 18, 2003 by Anton Altaparmakov

NTFS: Add handling for initialized_size != data_size in compressed files.
Parent: 063e9f83
Showing 3 changed files with 70 additions and 27 deletions (+70 -27):

  fs/ntfs/ChangeLog    +4   -0
  fs/ntfs/compress.c   +65  -16
  fs/ntfs/inode.c      +1   -11
fs/ntfs/ChangeLog (view file @ 42f79ff7)

@@ -20,6 +20,10 @@ ToDo:
           sufficient for synchronisation here. We then just need to make sure
           ntfs_readpage/writepage/truncate interoperate properly with us.
 
+2.1.1 - WIP
+
+        - Add handling for initialized_size != data_size in compressed files.
+
 2.1.0 - First steps towards write support: implement file overwrite.
 
         - Add configuration option for developmental write support with an

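In NTFS terms, initialized_size records how far into the uncompressed data valid contents have actually been written, while data_size (exposed to the VFS as i_size) may be larger; any bytes in between must read back as zeroes rather than stale buffer contents. The compress.c changes below add exactly that zeroing for compressed files. As a rough model of the per-page arithmetic involved, here is a minimal userspace sketch; the fixed 4 KiB page size and the bytes_to_zero() helper are illustrative assumptions, not part of the patch:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL                /* assumed page cache page size */

/*
 * For the page at index idx, return how many of its trailing bytes lie
 * beyond initialized_size and must therefore read back as zeroes
 * (a rough model of the per-page split the new compress.c helpers make).
 */
static unsigned long bytes_to_zero(int64_t initialized_size, unsigned long idx)
{
        int64_t page_start = (int64_t)idx * (int64_t)PAGE_SIZE;

        if (page_start >= initialized_size)
                return PAGE_SIZE;       /* page lies entirely past the valid data */
        if (page_start + (int64_t)PAGE_SIZE <= initialized_size)
                return 0;               /* page is fully initialized */
        /* Partial page: zero from the in-page offset up to the page end. */
        return PAGE_SIZE - (unsigned long)(initialized_size & (int64_t)(PAGE_SIZE - 1));
}

int main(void)
{
        int64_t initialized_size = 10000;       /* example: two full pages + 1808 bytes */
        unsigned long idx;

        for (idx = 0; idx < 4; idx++)
                printf("page %lu: zero last %lu bytes\n",
                       idx, bytes_to_zero(initialized_size, idx));
        return 0;
}

With initialized_size = 10000 this reports 0 bytes for pages 0 and 1, 2288 bytes for page 2, and the full 4096 bytes for every later page up to data_size, which is the per-page split the helpers below implement.
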
fs/ntfs/compress.c (view file @ 42f79ff7)

@@ -2,8 +2,8 @@
  * compress.c - NTFS kernel compressed attributes handling.
  * Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
- * Copyright (C) 2002 Richard Russon.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published

@@ -44,7 +44,7 @@ typedef enum {
          * The maximum compression block size is by definition 16 * the cluster
          * size, with the maximum supported cluster size being 4kiB. Thus the
          * maximum compression buffer size is 64kiB, so we use this when
-         * initializing the per-CPU buffers.
+         * initializing the compression buffer.
          */
         NTFS_MAX_CB_SIZE = 64 * 1024,
 } ntfs_compression_constants;

@@ -88,6 +88,40 @@ void free_compression_buffers(void)
         ntfs_compression_buffer = NULL;
 }
 
+/**
+ * zero_partial_compressed_page - zero out of bounds compressed page region
+ */
+static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
+{
+        u8 *kp = page_address(page);
+        unsigned int kp_ofs;
+
+        ntfs_debug("Zeroing page region outside initialized size.");
+        if (((s64)page->index << PAGE_CACHE_SHIFT) >= ni->initialized_size) {
+                /*
+                 * FIXME: Using clear_page() will become wrong when we get
+                 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
+                 */
+                clear_page(kp);
+                return;
+        }
+        kp_ofs = ni->initialized_size & ~PAGE_CACHE_MASK;
+        memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
+        return;
+}
+
+/**
+ * handle_bounds_compressed_page - test for&handle out of bounds compressed page
+ */
+static inline void handle_bounds_compressed_page(ntfs_inode *ni,
+                struct page *page)
+{
+        if ((page->index >= (ni->initialized_size >> PAGE_CACHE_SHIFT)) &&
+                        (ni->initialized_size < VFS_I(ni)->i_size))
+                zero_partial_compressed_page(ni, page);
+        return;
+}
+
 /**
  * ntfs_decompress - decompress a compression block into an array of pages
  * @dest_pages: destination array of pages

@@ -173,19 +207,29 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
         /* We can sleep from now on, so we drop lock. */
         spin_unlock(&ntfs_cb_lock);
         /* Second stage: finalize completed pages. */
-        for (i = 0; i < nr_completed_pages; i++) {
-                int di = completed_pages[i];
-
-                dp = dest_pages[di];
-                flush_dcache_page(dp);
-                kunmap(dp);
-                SetPageUptodate(dp);
-                unlock_page(dp);
-                if (di == xpage)
-                        *xpage_done = 1;
-                else
-                        page_cache_release(dp);
-                dest_pages[di] = NULL;
+        if (nr_completed_pages > 0) {
+                struct page *page = dest_pages[completed_pages[0]];
+                ntfs_inode *ni = NTFS_I(page->mapping->host);
+
+                for (i = 0; i < nr_completed_pages; i++) {
+                        int di = completed_pages[i];
+
+                        dp = dest_pages[di];
+                        /*
+                         * If we are outside the initialized size, zero
+                         * the out of bounds page range.
+                         */
+                        handle_bounds_compressed_page(ni, dp);
+                        flush_dcache_page(dp);
+                        kunmap(dp);
+                        SetPageUptodate(dp);
+                        unlock_page(dp);
+                        if (di == xpage)
+                                *xpage_done = 1;
+                        else
+                                page_cache_release(dp);
+                        dest_pages[di] = NULL;
+                }
         }
         return err;
 }

@@ -763,6 +807,11 @@ int ntfs_read_compressed_block(struct page *page)
                 for (; cur2_page < cb_max_page; cur2_page++) {
                         page = pages[cur2_page];
                         if (page) {
+                                /*
+                                 * If we are outside the initialized size, zero
+                                 * the out of bounds page range.
+                                 */
+                                handle_bounds_compressed_page(ni, page);
                                 flush_dcache_page(page);
                                 kunmap(page);
                                 SetPageUptodate(page);

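The two new helpers divide the work: handle_bounds_compressed_page() is a cheap inline gate that only calls into the zeroing routine when the page index is at or past initialized_size >> PAGE_CACHE_SHIFT and the file actually has an uninitialized tail (initialized_size < i_size), and zero_partial_compressed_page() then chooses between clearing the whole page and clearing only its tail. A standalone sketch of that gate follows; the model_inode stand-in, the page_needs_zeroing() name and the fixed 4 KiB pages are assumptions for illustration only:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                   /* assumed: 4 KiB page cache pages */

/* Stand-in for the two inode fields the kernel helper reads. */
struct model_inode {
        int64_t initialized_size;
        int64_t i_size;                 /* the file's data_size */
};

/*
 * Mirrors the condition in handle_bounds_compressed_page(): a page needs
 * zeroing only if it lies at or beyond the page containing the end of the
 * initialized data, and only if an uninitialized tail exists at all.
 */
static int page_needs_zeroing(const struct model_inode *ni, unsigned long idx)
{
        return idx >= (unsigned long)(ni->initialized_size >> PAGE_SHIFT) &&
               ni->initialized_size < ni->i_size;
}

int main(void)
{
        struct model_inode ni = { .initialized_size = 10000, .i_size = 20000 };
        unsigned long idx;

        for (idx = 0; idx < 5; idx++)
                printf("page %lu: %s\n", idx,
                       page_needs_zeroing(&ni, idx) ? "zero (part of) it" : "leave as is");
        return 0;
}

Note also that the new ntfs_decompress() code looks up the ntfs_inode once, from the first completed page, and reuses it for the whole finalization loop instead of re-deriving it per page.
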
fs/ntfs/inode.c (view file @ 42f79ff7)

 /**
  * inode.c - NTFS kernel inode handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published

@@ -996,16 +996,6 @@ static int ntfs_read_locked_inode(struct inode *vi)
                 if (NInoCompressed(ni)) {
                         ni->_ICF(compressed_size) = sle64_to_cpu(
                                         ctx->attr->_ANR(compressed_size));
-                        if (vi->i_size != ni->initialized_size)
-                                ntfs_warning(vi->i_sb, "BUG: Found "
-                                                "compressed file with "
-                                                "data_size not equal to "
-                                                "initialized_size. This will "
-                                                "probably cause problems when "
-                                                "trying to access the file. "
-                                                "Please notify linux-ntfs-dev@"
-                                                "lists.sf.net that you saw "
-                                                "this message. Thanks!");
                 }
         } else { /* Resident attribute. */
                 /*