Commit d73e26f5, authored Apr 29, 2003 by Anton Altaparmakov
Merge cantab.net:/home/aia21/bklinux-2.5
into cantab.net:/home/aia21/ntfs-2.5
Parents: a00ad804 75f40dac
Showing 9 changed files with 231 additions and 128 deletions.
Documentation/filesystems/ntfs.txt   +10  -0
fs/ntfs/ChangeLog                    +19  -0
fs/ntfs/Makefile                     +1   -1
fs/ntfs/attrib.c                     +12  -11
fs/ntfs/compress.c                   +70  -20
fs/ntfs/inode.c                      +1   -11
fs/ntfs/super.c                      +108 -77
fs/ntfs/unistr.c                     +5   -3
fs/ntfs/upcase.c                     +5   -5
Documentation/filesystems/ntfs.txt

@@ -247,6 +247,16 @@ ChangeLog
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
+2.1.3:
+	- Major bug fixes for reading files and volumes in corner cases which
+	  were being hit by Windows 2k/XP users.
+2.1.2:
+	- Major bug fixes aleviating the hangs in statfs experienced by some
+	  users.
+2.1.1:
+	- Update handling of compressed files so people no longer get the
+	  frequently reported warning messages about initialized_size !=
+	  data_size.
 2.1.0:
 	- Add configuration option for developmental write support.
 	- Initial implementation of file overwriting. (Writes to resident files
fs/ntfs/ChangeLog

@@ -20,6 +20,25 @@ ToDo:
 	  sufficient for synchronisation here. We then just need to make sure
 	  ntfs_readpage/writepage/truncate interoperate properly with us.
 
+2.1.3 - Important bug fixes in corner cases.
+
+	- super.c::parse_ntfs_boot_sector(): Correct the check for 64-bit
+	  clusters. (Philipp Thomas)
+	- attrib.c::load_attribute_list(): Fix bug when initialized_size is a
+	  multiple of the block_size but not the cluster size. (Szabolcs
+	  Szakacsits <szaka@sienet.hu>)
+
+2.1.2 - Important bug fixes aleviating the hangs in statfs.
+
+	- Fix buggy free cluster and free inode determination logic.
+
+2.1.1 - Minor updates.
+
+	- Add handling for initialized_size != data_size in compressed files.
+	- Reduce function local stack usage from 0x3d4 bytes to just noise in
+	  fs/ntfs/upcase.c. (Randy Dunlap <rddunlap@osdl.ord>)
+	- Remove compiler warnings for newer gcc.
+
 2.1.0 - First steps towards write support: implement file overwrite.
 
 	- Add configuration option for developmental write support with an
fs/ntfs/Makefile

@@ -5,7 +5,7 @@ obj-$(CONFIG_NTFS_FS) += ntfs.o
 ntfs-objs := aops.o attrib.o compress.o debug.o dir.o file.o inode.o mft.o \
 	     mst.o namei.o super.o sysctl.o time.o unistr.o upcase.o
 
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.0\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.3\"
 
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
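The only change here is the version string passed in through -DNTFS_VERSION. As an aside, a minimal standalone C sketch of how such an escaped define reaches the code; the file name and the fallback value are made up for the illustration, not taken from the Makefile:

/* version.c - print a version string injected at compile time.
 * Build e.g.:  cc -DNTFS_VERSION='"2.1.3"' -o version version.c
 * (single quotes keep the shell from eating the escaped double quotes).
 */
#include <stdio.h>

#ifndef NTFS_VERSION
#define NTFS_VERSION "unknown"	/* hypothetical fallback, not in the real build */
#endif

int main(void)
{
	printf("driver version %s\n", NTFS_VERSION);
	return 0;
}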
fs/ntfs/attrib.c

 /**
  * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
- * Copyright (C) 2002 Richard Russon.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published

@@ -1180,12 +1180,15 @@ BOOL find_attr(const ATTR_TYPES type, const uchar_t *name, const u32 name_len,
 			return TRUE;
 		/* @val is present; compare values. */
 		else {
+			u32 vl;
 			register int rc;
 
+			vl = le32_to_cpu(a->_ARA(value_length));
+			if (vl > val_len)
+				vl = val_len;
+
 			rc = memcmp(val, (u8*)a + le16_to_cpu(
-					a->_ARA(value_offset)),
-					min_t(const u32, val_len,
-					le32_to_cpu(a->_ARA(value_length))));
+					a->_ARA(value_offset)), vl);
 			/*
 			 * If @val collates before the current attribute's
 			 * value, there is no matching attribute.

@@ -1235,11 +1238,9 @@ int load_attribute_list(ntfs_volume *vol, run_list *run_list, u8 *al,
 	unsigned char block_size_bits = sb->s_blocksize_bits;
 
 	ntfs_debug("Entering.");
-#ifdef DEBUG
 	if (!vol || !run_list || !al || size <= 0 || initialized_size < 0 ||
 			initialized_size > size)
 		return -EINVAL;
-#endif
 	if (!initialized_size) {
 		memset(al, 0, size);
 		return 0;

@@ -1270,8 +1271,8 @@ int load_attribute_list(ntfs_volume *vol, run_list *run_list, u8 *al,
 					"read attribute list.");
 			goto err_out;
 		}
-		if (al + block_size > al_end)
-			goto do_partial;
+		if (al + block_size >= al_end)
+			goto do_final;
 		memcpy(al, bh->b_data, block_size);
 		brelse(bh);
 		al += block_size;

@@ -1285,7 +1286,7 @@ int load_attribute_list(ntfs_volume *vol, run_list *run_list, u8 *al,
 done:
 	up_read(&run_list->lock);
 	return err;
-do_partial:
+do_final:
 	if (al < al_end) {
 		/* Partial block. */
 		memcpy(al, bh->b_data, al_end - al);
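The last two hunks are the load_attribute_list() fix named in the ChangeLog: with ">" the final block was never routed through the do_final path when initialized_size is an exact multiple of the block size. A small userspace sketch of that loop-termination pattern, with made-up names and sizes; it mirrors the shape of the fix, not the kernel code itself:

/* blockcopy.c - illustration of the ">=" boundary fix: copy init_size bytes
 * in block_size chunks, always handling the last block (partial or exactly
 * block-aligned) through the single tail path.
 */
#include <stdio.h>
#include <string.h>

static void copy_blocks(unsigned char *dst, const unsigned char *src,
		size_t init_size, size_t block_size)
{
	unsigned char *d = dst;
	unsigned char *d_end = dst + init_size;

	while (d < d_end) {
		/* With ">" an exactly block-aligned init_size would never take
		 * the tail path below; ">=" is the fixed test. */
		if (d + block_size >= d_end)
			break;			/* the "do_final" case */
		memcpy(d, src, block_size);
		src += block_size;
		d += block_size;
	}
	if (d < d_end)				/* last block, possibly partial */
		memcpy(d, src, (size_t)(d_end - d));
}

int main(void)
{
	unsigned char src[32], dst[32] = { 0 };

	memset(src, 0xab, sizeof(src));
	copy_blocks(dst, src, 32, 8);	/* init_size is a multiple of block_size */
	printf("last byte: 0x%x\n", (unsigned)dst[31]);
	return 0;
}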
fs/ntfs/compress.c

@@ -2,8 +2,8 @@
  * compress.c - NTFS kernel compressed attributes handling.
  * Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
- * Copyright (C) 2002 Richard Russon.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
+ * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published

@@ -44,7 +44,7 @@ typedef enum {
 	 * The maximum compression block size is by definition 16 * the cluster
 	 * size, with the maximum supported cluster size being 4kiB. Thus the
 	 * maximum compression buffer size is 64kiB, so we use this when
-	 * initializing the per-CPU buffers.
+	 * initializing the compression buffer.
 	 */
 	NTFS_MAX_CB_SIZE	= 64 * 1024,
 } ntfs_compression_constants;

@@ -88,6 +88,40 @@ void free_compression_buffers(void)
 	ntfs_compression_buffer = NULL;
 }
 
+/**
+ * zero_partial_compressed_page - zero out of bounds compressed page region
+ */
+static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
+{
+	u8 *kp = page_address(page);
+	unsigned int kp_ofs;
+
+	ntfs_debug("Zeroing page region outside initialized size.");
+	if (((s64)page->index << PAGE_CACHE_SHIFT) >= ni->initialized_size) {
+		/*
+		 * FIXME: Using clear_page() will become wrong when we get
+		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
+		 */
+		clear_page(kp);
+		return;
+	}
+	kp_ofs = ni->initialized_size & ~PAGE_CACHE_MASK;
+	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
+	return;
+}
+
+/**
+ * handle_bounds_compressed_page - test for&handle out of bounds compressed page
+ */
+static inline void handle_bounds_compressed_page(ntfs_inode *ni,
+		struct page *page)
+{
+	if ((page->index >= (ni->initialized_size >> PAGE_CACHE_SHIFT)) &&
+			(ni->initialized_size < VFS_I(ni)->i_size))
+		zero_partial_compressed_page(ni, page);
+	return;
+}
+
 /**
  * ntfs_decompress - decompress a compression block into an array of pages
  * @dest_pages:	destination array of pages

@@ -164,7 +198,7 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 			cb - cb_start);
 	/* Have we reached the end of the compression block? */
-	if (cb == cb_end || !le16_to_cpup(cb)) {
+	if (cb == cb_end || !le16_to_cpup((u16*)cb)) {
 		int i;
 		ntfs_debug("Completed. Returning success (0).");

@@ -173,19 +207,29 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 		/* We can sleep from now on, so we drop lock. */
 		spin_unlock(&ntfs_cb_lock);
 		/* Second stage: finalize completed pages. */
-		for (i = 0; i < nr_completed_pages; i++) {
-			int di = completed_pages[i];
-			dp = dest_pages[di];
-			flush_dcache_page(dp);
-			kunmap(dp);
-			SetPageUptodate(dp);
-			unlock_page(dp);
-			if (di == xpage)
-				*xpage_done = 1;
-			else
-				page_cache_release(dp);
-			dest_pages[di] = NULL;
+		if (nr_completed_pages > 0) {
+			struct page *page = dest_pages[completed_pages[0]];
+			ntfs_inode *ni = NTFS_I(page->mapping->host);
+
+			for (i = 0; i < nr_completed_pages; i++) {
+				int di = completed_pages[i];
+				dp = dest_pages[di];
+				/*
+				 * If we are outside the initialized size, zero
+				 * the out of bounds page range.
+				 */
+				handle_bounds_compressed_page(ni, dp);
+				flush_dcache_page(dp);
+				kunmap(dp);
+				SetPageUptodate(dp);
+				unlock_page(dp);
+				if (di == xpage)
+					*xpage_done = 1;
+				else
+					page_cache_release(dp);
+				dest_pages[di] = NULL;
+			}
 		}
 		return err;
 	}

@@ -204,7 +248,8 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 	/* Setup the current sub-block source pointers and validate range. */
 	cb_sb_start = cb;
-	cb_sb_end = cb_sb_start + (le16_to_cpup(cb) & NTFS_SB_SIZE_MASK) + 3;
+	cb_sb_end = cb_sb_start + (le16_to_cpup((u16*)cb) & NTFS_SB_SIZE_MASK)
+			+ 3;
 	if (cb_sb_end > cb_end)
 		goto return_overflow;

@@ -225,7 +270,7 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 	dp_addr = (u8*)page_address(dp) + do_sb_start;
 	/* Now, we are ready to process the current sub-block (sb). */
-	if (!(le16_to_cpup(cb) & NTFS_SB_IS_COMPRESSED)) {
+	if (!(le16_to_cpup((u16*)cb) & NTFS_SB_IS_COMPRESSED)) {
 		ntfs_debug("Found uncompressed sub-block.");
 		/* This sb is not compressed, just copy it into destination. */

@@ -330,7 +375,7 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 		lg++;
 		/* Get the phrase token into i. */
-		pt = le16_to_cpup(cb);
+		pt = le16_to_cpup((u16*)cb);
 		/*
 		 * Calculate starting position of the byte sequence in

@@ -763,6 +808,11 @@ int ntfs_read_compressed_block(struct page *page)
 		for (; cur2_page < cb_max_page; cur2_page++) {
 			page = pages[cur2_page];
 			if (page) {
+				/*
+				 * If we are outside the initialized size, zero
+				 * the out of bounds page range.
+				 */
+				handle_bounds_compressed_page(ni, page);
 				flush_dcache_page(page);
 				kunmap(page);
 				SetPageUptodate(page);
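The new zero_partial_compressed_page()/handle_bounds_compressed_page() helpers clear whatever the decompressor left beyond the attribute's initialized_size, so such page ranges read back as zeroes. A rough userspace sketch of the same bounds handling over a plain buffer; PAGE_SIZE, the names and the example sizes are assumptions for the sketch, and the i_size check done by handle_bounds_compressed_page() is omitted:

/* zero_tail.c - zero the part of a "page" that lies past initialized_size. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static void zero_beyond_initialized(uint8_t *page, uint64_t page_index,
		uint64_t initialized_size)
{
	uint64_t page_start = page_index * PAGE_SIZE;
	uint64_t ofs;

	if (page_start >= initialized_size) {
		memset(page, 0, PAGE_SIZE);	/* page entirely past the data */
		return;
	}
	if (page_start + PAGE_SIZE <= initialized_size)
		return;				/* page fully initialized */
	/* Partial page: zero from the in-page offset of initialized_size
	 * (the kernel computes this as initialized_size & ~PAGE_CACHE_MASK). */
	ofs = initialized_size - page_start;
	memset(page + ofs, 0, PAGE_SIZE - ofs);
}

int main(void)
{
	static uint8_t page[PAGE_SIZE];

	memset(page, 0xff, sizeof(page));
	/* Page index 1 covers bytes 4096..8191; pretend 5000 bytes are initialized. */
	zero_beyond_initialized(page, 1, 5000);
	printf("byte 903 = 0x%x, byte 904 = 0x%x\n",
			(unsigned)page[903], (unsigned)page[904]);
	return 0;
}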
fs/ntfs/inode.c

 /**
  * inode.c - NTFS kernel inode handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published

@@ -996,16 +996,6 @@ static int ntfs_read_locked_inode(struct inode *vi)
 			if (NInoCompressed(ni)) {
 				ni->_ICF(compressed_size) = sle64_to_cpu(
 						ctx->attr->_ANR(compressed_size));
-				if (vi->i_size != ni->initialized_size)
-					ntfs_warning(vi->i_sb, "BUG: Found "
-							"compressed file with "
-							"data_size not equal to "
-							"initialized_size. This will "
-							"probably cause problems when "
-							"trying to access the file. "
-							"Please notify linux-ntfs-dev@"
-							"lists.sf.net that you saw "
-							"this message. Thanks!");
 			}
 		} else { /* Resident attribute. */
 			/*
fs/ntfs/super.c

 /*
  * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001,2002 Anton Altaparmakov.
- * Copyright (c) 2001,2002 Richard Russon.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
+ * Copyright (c) 2001,2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -619,9 +619,8 @@ static BOOL parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
 	 * the same as it is much faster on 32-bit CPUs.
 	 */
 	ll = sle64_to_cpu(b->number_of_sectors) >> sectors_per_cluster_bits;
-	if ((u64)ll >= 1ULL << (sizeof(unsigned long) * 8)) {
-		ntfs_error(vol->sb, "Cannot handle %i-bit clusters. Sorry.",
-				sizeof(unsigned long) * 4);
+	if ((u64)ll >= 1ULL << 32) {
+		ntfs_error(vol->sb, "Cannot handle 64-bit clusters. Sorry.");
 		return FALSE;
 	}
 	vol->nr_clusters = ll;
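The hunk above replaces a word-size-dependent bound with an explicit 32-bit limit on the cluster count. As a standalone sketch of the corrected check; the types, function name and sample inputs are invented for illustration:

/* cluster_check.c - the cluster count is computed in 64 bits and must fit in
 * 32 bits regardless of how wide the host's unsigned long is.
 */
#include <stdint.h>
#include <stdio.h>

static int clusters_fit_in_32bits(uint64_t number_of_sectors,
		unsigned sectors_per_cluster_bits)
{
	uint64_t nr_clusters = number_of_sectors >> sectors_per_cluster_bits;

	/* The old test compared against the bit width of unsigned long, so it
	 * behaved differently on 32-bit and 64-bit hosts; an explicit
	 * 1ULL << 32 bound is the fix. */
	if (nr_clusters >= 1ULL << 32) {
		fprintf(stderr, "Cannot handle 64-bit clusters. Sorry.\n");
		return 0;
	}
	return 1;
}

int main(void)
{
	/* 2^35 sectors, 8 sectors per cluster -> 2^32 clusters: rejected. */
	printf("%d\n", clusters_fit_in_32bits(1ULL << 35, 3));
	/* 2^34 sectors, 8 sectors per cluster -> 2^31 clusters: accepted. */
	printf("%d\n", clusters_fit_in_32bits(1ULL << 34, 3));
	return 0;
}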
@@ -1060,78 +1059,93 @@ static void ntfs_put_super(struct super_block *vfs_sb)
  * get_nr_free_clusters - return the number of free clusters on a volume
  * @vol:	ntfs volume for which to obtain free cluster count
  *
- * Calculate the number of free clusters on the mounted NTFS volume @vol.
+ * Calculate the number of free clusters on the mounted NTFS volume @vol. We
+ * actually calculate the number of clusters in use instead because this
+ * allows us to not care about partial pages as these will be just zero filled
+ * and hence not be counted as allocated clusters.
  *
- * Errors are ignored and we just return the number of free clusters we have
- * found. This means we return an underestimate on error.
+ * The only particularity is that clusters beyond the end of the logical ntfs
+ * volume will be marked as allocated to prevent errors which means we have to
+ * discount those at the end. This is important as the cluster bitmap always
+ * has a size in multiples of 8 bytes, i.e. up to 63 clusters could be outside
+ * the logical volume and marked in use when they are not as they do not exist.
+ *
+ * If any pages cannot be read we assume all clusters in the erroring pages are
+ * in use. This means we return an underestimate on errors which is better than
+ * an overestimate.
  */
 static s64 get_nr_free_clusters(ntfs_volume *vol)
 {
+	s64 nr_free = vol->nr_clusters;
+	u32 *kaddr;
 	struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
 	filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
 	struct page *page;
 	unsigned long index, max_index;
-	unsigned int max_size, i;
-	s64 nr_free = 0LL;
-	u32 *b;
+	unsigned int max_size;
 
 	ntfs_debug("Entering.");
 	/* Serialize accesses to the cluster bitmap. */
 	down_read(&vol->lcnbmp_lock);
 	/*
 	 * Convert the number of bits into bytes rounded up, then convert into
-	 * multiples of PAGE_CACHE_SIZE.
+	 * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+	 * full and one partial page max_index = 2.
 	 */
-	max_index = (vol->nr_clusters + 7) >> (3 + PAGE_CACHE_SHIFT);
+	max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >>
+			PAGE_CACHE_SHIFT;
 	/* Use multiples of 4 bytes. */
 	max_size = PAGE_CACHE_SIZE >> 2;
-	ntfs_debug("Reading $BITMAP, max_index = 0x%lx, max_size = 0x%x.",
+	ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%x.",
 			max_index, max_size);
-	for (index = 0UL; index < max_index;) {
-handle_partial_page:
+	for (index = 0UL; index < max_index; index++) {
+		unsigned int i;
 		/*
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
 		 */
-		page = read_cache_page(mapping, index++, (filler_t*)readpage,
+		page = read_cache_page(mapping, index, (filler_t*)readpage,
 				NULL);
 		/* Ignore pages which errored synchronously. */
 		if (IS_ERR(page)) {
 			ntfs_debug("Sync read_cache_page() error. Skipping "
-					"page (index 0x%lx).", index - 1);
+					"page (index 0x%lx).", index);
+			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
 		wait_on_page_locked(page);
 		/* Ignore pages which errored asynchronously. */
 		if (!PageUptodate(page)) {
 			ntfs_debug("Async read_cache_page() error. Skipping "
-					"page (index 0x%lx).", index - 1);
+					"page (index 0x%lx).", index);
 			page_cache_release(page);
+			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		b = (u32*)kmap(page);
-		/* For each 4 bytes, add up the number zero bits. */
-		for (i = 0; i < max_size; i++)
-			nr_free += (s64)(32 - hweight32(b[i]));
-		kunmap(page);
-		page_cache_release(page);
-	}
-	if (max_size == PAGE_CACHE_SIZE >> 2) {
+		kaddr = (u32*)kmap_atomic(page, KM_USER0);
 		/*
-		 * Get the multiples of 4 bytes in use in the final partial
-		 * page.
+		 * For each 4 bytes, subtract the number of set bits. If this
+		 * is the last page and it is partial we don't really care as
+		 * it just means we do a little extra work but it won't affect
+		 * the result as all out of range bytes are set to zero by
+		 * ntfs_readpage().
 		 */
-		max_size = ((((vol->nr_clusters + 7) >> 3) & ~PAGE_CACHE_MASK)
-				+ 3) >> 2;
-		/* If there is a partial page go back and do it. */
-		if (max_size) {
-			ntfs_debug("Handling partial page, max_size = 0x%x.",
-					max_size);
-			goto handle_partial_page;
-		}
+		for (i = 0; i < max_size; i++)
+			nr_free -= (s64)hweight32(kaddr[i]);
+		kunmap_atomic(kaddr, KM_USER0);
+		page_cache_release(page);
 	}
-	ntfs_debug("Finished reading $BITMAP, last index = 0x%lx", index - 1);
+	ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
+	/*
+	 * Fixup for eventual bits outside logical ntfs volume (see function
+	 * description above).
+	 */
+	if (vol->nr_clusters & 63)
+		nr_free += 64 - (vol->nr_clusters & 63);
 	up_read(&vol->lcnbmp_lock);
 	/* If errors occured we may well have gone below zero, fix this. */
 	if (nr_free < 0)
 		nr_free = 0;
 	ntfs_debug("Exiting.");
 	return nr_free;
 }
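The rewritten get_nr_free_clusters() starts from the total cluster count, subtracts one per set bitmap bit, and then adds back the up-to-63 padding bits that $Bitmap keeps set past the end of the volume. A compact userspace sketch of that accounting, using __builtin_popcount() in place of the kernel's hweight32(); the bitmap contents in main() are a made-up example:

/* free_clusters.c - count free clusters as "total minus bits in use". */
#include <stdint.h>
#include <stdio.h>

static int64_t count_free(const uint32_t *bitmap, size_t nwords,
		int64_t nr_clusters)
{
	int64_t nr_free = nr_clusters;
	size_t i;

	for (i = 0; i < nwords; i++)
		nr_free -= __builtin_popcount(bitmap[i]);
	/*
	 * The bitmap is sized in multiples of 8 bytes (64 bits) and bits past
	 * nr_clusters are marked in use even though those clusters do not
	 * exist; add them back, as the kernel fixup does.
	 */
	if (nr_clusters & 63)
		nr_free += 64 - (nr_clusters & 63);
	if (nr_free < 0)	/* e.g. if unreadable pages were assumed in use */
		nr_free = 0;
	return nr_free;
}

int main(void)
{
	/* 70 clusters -> bitmap padded to 128 bits (four 32-bit words).
	 * Clusters 0..31 in use, 32..69 free, padding bits 70..127 set. */
	uint32_t bitmap[4] = { 0xffffffffu, 0x00000000u, 0xffffffc0u, 0xffffffffu };

	printf("free = %lld\n", (long long)count_free(bitmap, 4, 70));	/* 38 */
	return 0;
}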
@@ -1141,64 +1155,81 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
  * @vol:	ntfs volume for which to obtain free inode count
  *
  * Calculate the number of free mft records (inodes) on the mounted NTFS
- * volume @vol.
+ * volume @vol. We actually calculate the number of mft records in use instead
+ * because this allows us to not care about partial pages as these will be just
+ * zero filled and hence not be counted as allocated mft record.
  *
- * Errors are ignored and we just return the number of free inodes we have
- * found. This means we return an underestimate on error.
+ * If any pages cannot be read we assume all mft records in the erroring pages
+ * are in use. This means we return an underestimate on errors which is better
+ * than an overestimate.
  *
  * NOTE: Caller must hold mftbmp_lock rw_semaphore for reading or writing.
  */
 static unsigned long __get_nr_free_mft_records(ntfs_volume *vol)
 {
-	struct address_space *mapping;
+	s64 nr_free = vol->nr_mft_records;
+	u32 *kaddr;
+	struct address_space *mapping = vol->mftbmp_ino->i_mapping;
+	filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
 	struct page *page;
-	unsigned long index, max_index, nr_free = 0;
-	unsigned int max_size, i;
-	u32 *b;
+	unsigned long index, max_index;
+	unsigned int max_size;
 
-	mapping = vol->mftbmp_ino->i_mapping;
+	ntfs_debug("Entering.");
 	/*
-	 * Convert the number of bits into bytes rounded up to a multiple of 8
-	 * bytes, then convert into multiples of PAGE_CACHE_SIZE.
+	 * Convert the number of bits into bytes rounded up, then convert into
+	 * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+	 * full and one partial page max_index = 2.
 	 */
-	max_index = (((vol->nr_mft_records + 7) >> 3) + 7) >> PAGE_CACHE_SHIFT;
+	max_index = (((vol->nr_mft_records + 7) >> 3) + PAGE_CACHE_SIZE - 1) >>
+			PAGE_CACHE_SHIFT;
 	/* Use multiples of 4 bytes. */
 	max_size = PAGE_CACHE_SIZE >> 2;
 	ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
 			"0x%x.", max_index, max_size);
-	for (index = 0UL; index < max_index;) {
-handle_partial_page:
-		page = ntfs_map_page(mapping, index++);
+	for (index = 0UL; index < max_index; index++) {
+		unsigned int i;
+		/*
+		 * Read the page from page cache, getting it from backing store
+		 * if necessary, and increment the use count.
+		 */
+		page = read_cache_page(mapping, index, (filler_t*)readpage,
+				NULL);
+		/* Ignore pages which errored synchronously. */
 		if (IS_ERR(page)) {
-			ntfs_debug("ntfs_map_page() error. Skipping page "
-					"(index 0x%lx).", index - 1);
+			ntfs_debug("Sync read_cache_page() error. Skipping "
+					"page (index 0x%lx).", index);
+			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		b = (u32*)page_address(page);
-		/* For each 4 bytes, add up the number of zero bits. */
-		for (i = 0; i < max_size; i++)
-			nr_free += 32 - hweight32(b[i]);
-		ntfs_unmap_page(page);
-	}
-	if (index == max_index) {
+		wait_on_page_locked(page);
+		/* Ignore pages which errored asynchronously. */
+		if (!PageUptodate(page)) {
+			ntfs_debug("Async read_cache_page() error. Skipping "
+					"page (index 0x%lx).", index);
+			page_cache_release(page);
+			nr_free -= PAGE_CACHE_SIZE * 8;
+			continue;
+		}
+		kaddr = (u32*)kmap_atomic(page, KM_USER0);
 		/*
-		 * Get the multiples of 4 bytes in use in the final partial
-		 * page.
+		 * For each 4 bytes, subtract the number of set bits. If this
+		 * is the last page and it is partial we don't really care as
+		 * it just means we do a little extra work but it won't affect
+		 * the result as all out of range bytes are set to zero by
+		 * ntfs_readpage().
 		 */
-		max_size = ((((((vol->nr_mft_records + 7) >> 3) + 7) & ~7) &
-				~PAGE_CACHE_MASK) + 3) >> 2;
-		/* If there is a partial page go back and do it. */
-		if (max_size) {
-			/* Compensate for out of bounds zero bits. */
-			if ((i = vol->nr_mft_records & 31))
-				nr_free -= 32 - i;
-			ntfs_debug("Handling partial page, max_size = 0x%x",
-					max_size);
-			goto handle_partial_page;
-		}
+		for (i = 0; i < max_size; i++)
+			nr_free -= (s64)hweight32(kaddr[i]);
+		kunmap_atomic(kaddr, KM_USER0);
+		page_cache_release(page);
 	}
-	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx",
+	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
 			index - 1);
+	/* If errors occured we may well have gone below zero, fix this. */
+	if (nr_free < 0)
+		nr_free = 0;
+	ntfs_debug("Exiting.");
 	return nr_free;
 }
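Both bitmap walkers also switch to a max_index that rounds the bitmap's byte count up to whole pages, so a trailing partial page is no longer skipped. A sketch of that ceiling division in isolation; PAGE_SHIFT, the function name and the sample inputs are assumptions for the illustration:

/* max_index.c - number of pages needed to cover nr_bits of bitmap. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

static unsigned long bitmap_pages(uint64_t nr_bits)
{
	uint64_t nr_bytes = (nr_bits + 7) >> 3;		/* round bits up to bytes */

	/* The old computations rounded down (e.g. (nr_bits + 7) >> (3 + PAGE_SHIFT))
	 * and so dropped a trailing partial page; rounding the byte count up to
	 * whole pages is the corrected form. */
	return (unsigned long)((nr_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT);
}

int main(void)
{
	printf("%lu\n", bitmap_pages(8 * PAGE_SIZE));		/* exactly 1 page -> 1 */
	printf("%lu\n", bitmap_pages(8 * PAGE_SIZE + 1));	/* 1 page + 1 bit -> 2 */
	return 0;
}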
@@ -1761,7 +1792,7 @@ static void __exit exit_ntfs_fs(void)
 }
 
 MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>");
-MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2002 Anton Altaparmakov");
+MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2003 Anton Altaparmakov");
 MODULE_LICENSE("GPL");
 #ifdef DEBUG
 MODULE_PARM(debug_msgs, "i");
fs/ntfs/unistr.c

 /*
  * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001 Anton Altaparmakov.
+ * Copyright (c) 2001-2003 Anton Altaparmakov
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -96,10 +96,12 @@ int ntfs_collate_names(const uchar_t *name1, const u32 name1_len,
 		const int err_val, const IGNORE_CASE_BOOL ic,
 		const uchar_t *upcase, const u32 upcase_len)
 {
-	u32 cnt;
-	const u32 min_len = min_t(const u32, name1_len, name2_len);
+	u32 cnt, min_len;
 	uchar_t c1, c2;
 
+	min_len = name1_len;
+	if (name1_len > name2_len)
+		min_len = name2_len;
 	for (cnt = 0; cnt < min_len; ++cnt) {
 		c1 = le16_to_cpu(*name1++);
 		c2 = le16_to_cpu(*name2++);
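The unistr.c hunk replaces a min_t(const u32, ...) initializer, which newer gcc warned about, with an explicit computation of the shared prefix length. A simplified userspace sketch of that collation loop over UTF-16LE code units; case folding and the err_val/upcase parameters of the real function are left out, and the names here are invented:

/* collate.c - compare two UTF-16LE names up to their common length. */
#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)	/* simplified helper, not the kernel macro */
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static int collate_names(const uint8_t *name1, uint32_t name1_len,
		const uint8_t *name2, uint32_t name2_len)
{
	uint32_t cnt, min_len;

	min_len = name1_len;
	if (name1_len > name2_len)
		min_len = name2_len;
	for (cnt = 0; cnt < min_len; ++cnt) {
		uint16_t c1 = get_le16(name1 + 2 * cnt);
		uint16_t c2 = get_le16(name2 + 2 * cnt);

		if (c1 < c2)
			return -1;
		if (c1 > c2)
			return 1;
	}
	/* Common prefix matches: the shorter name collates first. */
	if (name1_len < name2_len)
		return -1;
	return name1_len > name2_len;
}

int main(void)
{
	const uint8_t a[] = { 'a', 0, 'b', 0 };		/* "ab" in UTF-16LE */
	const uint8_t b[] = { 'a', 0, 'b', 0, 'c', 0 };	/* "abc" */

	printf("%d\n", collate_names(a, 2, b, 3));	/* -1: "ab" sorts first */
	return 0;
}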
fs/ntfs/upcase.c

@@ -2,8 +2,8 @@
  * upcase.c - Generate the full NTFS Unicode upcase table in little endian.
  * Part of the Linux-NTFS project.
  *
- * Copyright (C) 2001 Richard Russon <ntfs@flatcap.org>
- * Copyright (c) 2001,2002 Anton Altaparmakov
+ * Copyright (c) 2001 Richard Russon <ntfs@flatcap.org>
+ * Copyright (c) 2001-2003 Anton Altaparmakov
  *
  * Modified for mkntfs inclusion 9 June 2001 by Anton Altaparmakov.
  * Modified for kernel inclusion 10 September 2001 by Anton Altparmakov.

@@ -28,7 +28,7 @@
 uchar_t *generate_default_upcase(void)
 {
-	const int uc_run_table[][3] = { /* Start, End, Add */
+	static const int uc_run_table[][3] = { /* Start, End, Add */
 	{0x0061, 0x007B,  -32}, {0x0451, 0x045D, -80}, {0x1F70, 0x1F72,  74},
 	{0x00E0, 0x00F7,  -32}, {0x045E, 0x0460, -80}, {0x1F72, 0x1F76,  86},
 	{0x00F8, 0x00FF,  -32}, {0x0561, 0x0587, -48}, {0x1F76, 0x1F78, 100},

@@ -45,7 +45,7 @@ uchar_t *generate_default_upcase(void)
 	{0}
 	};
 
-	const int uc_dup_table[][2] = { /* Start, End */
+	static const int uc_dup_table[][2] = { /* Start, End */
 	{0x0100, 0x012F}, {0x01A0, 0x01A6}, {0x03E2, 0x03EF}, {0x04CB, 0x04CC},
 	{0x0132, 0x0137}, {0x01B3, 0x01B7}, {0x0460, 0x0481}, {0x04D0, 0x04EB},
 	{0x0139, 0x0149}, {0x01CD, 0x01DD}, {0x0490, 0x04BF}, {0x04EE, 0x04F5},

@@ -55,7 +55,7 @@ uchar_t *generate_default_upcase(void)
 	{0}
 	};
 
-	const int uc_word_table[][2] = { /* Offset, Value */
+	static const int uc_word_table[][2] = { /* Offset, Value */
 	{0x00FF, 0x0178}, {0x01AD, 0x01AC}, {0x01F3, 0x01F1}, {0x0269, 0x0196},
 	{0x0183, 0x0182}, {0x01B0, 0x01AF}, {0x0253, 0x0181}, {0x026F, 0x019C},
 	{0x0185, 0x0184}, {0x01B9, 0x01B8}, {0x0254, 0x0186}, {0x0272, 0x019D},
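Marking the three tables static const keeps around a kilobyte of initializer data out of generate_default_upcase()'s stack frame, which is the stack-usage item noted in the 2.1.1 ChangeLog entry. A toy sketch of how such run/dup/word tables can be expanded into an upcase table; the table contents and the exact dup-range rule here are simplified stand-ins, not the real NTFS data:

/* upcase_sketch.c - build a case-folding table from compact descriptions:
 * ranges shifted by a constant (runs), ranges where the lower-case (odd)
 * character maps to the preceding even one (dups), and single overrides
 * (words). "static const" keeps the tables out of the stack frame.
 */
#include <stdint.h>
#include <stdio.h>

#define UPCASE_LEN 0x10000

static uint16_t upcase[UPCASE_LEN];

static void generate_upcase(void)
{
	/* Start, End, Add - e.g. ASCII a-z maps to A-Z via -32. */
	static const int run_table[][3] = { { 0x0061, 0x007B, -32 }, { 0 } };
	/* Start, End - odd characters map to the even character before them. */
	static const int dup_table[][2] = { { 0x0100, 0x012F }, { 0 } };
	/* Offset, Value - single-character overrides. */
	static const int word_table[][2] = { { 0x00FF, 0x0178 }, { 0 } };
	uint32_t i, r;

	for (i = 0; i < UPCASE_LEN; i++)	/* identity mapping by default */
		upcase[i] = (uint16_t)i;
	for (r = 0; run_table[r][0]; r++)
		for (i = run_table[r][0]; i < (uint32_t)run_table[r][1]; i++)
			upcase[i] = (uint16_t)(i + run_table[r][2]);
	for (r = 0; dup_table[r][0]; r++)
		for (i = dup_table[r][0]; i < (uint32_t)dup_table[r][1]; i += 2)
			upcase[i + 1] = (uint16_t)i;
	for (r = 0; word_table[r][0]; r++)
		upcase[word_table[r][0]] = (uint16_t)word_table[r][1];
}

int main(void)
{
	generate_upcase();
	printf("upcase('a') = '%c', upcase(0x0101) = 0x%04x, upcase(0x00ff) = 0x%04x\n",
			upcase['a'], (unsigned)upcase[0x0101], (unsigned)upcase[0x00FF]);
	return 0;
}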