Commit 98a58fbe authored Jul 05, 2002 by Anton Altaparmakov
Merge cantab.net:/usr/src/bklinux-2.5 into cantab.net:/usr/src/tng
parents 6fe152cf 1d209b00
Showing 6 changed files with 514 additions and 357 deletions (+514 -357)
Documentation/filesystems/ntfs.txt   +3   -0
fs/ntfs/ChangeLog                    +11  -0
fs/ntfs/Makefile                     +1   -1
fs/ntfs/attrib.c                     +487 -346
fs/ntfs/mst.c                        +2   -2
fs/ntfs/super.c                      +10  -8
Documentation/filesystems/ntfs.txt
...
...
@@ -247,6 +247,9 @@ ChangeLog
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
+2.0.14:
+	- Internal changes improving run list merging code and minor locking
+	  change to not rely on BKL in ntfs_statfs().
 2.0.13:
 	- Internal changes towards using iget5_locked() in preparation for
 	  fake inodes and small cleanups to ntfs_volume structure.
...
...
fs/ntfs/ChangeLog
...
...
@@ -28,6 +28,17 @@ ToDo:
 	- Enable NFS exporting of NTFS.
 	- Use fake inodes for address space i/o.
 
+2.0.14 - Run list merging code cleanup, minor locking changes, typo fixes.
+
+	- Change fs/ntfs/super.c::ntfs_statfs() to not rely on BKL by moving
+	  the locking out of super.c::get_nr_free_mft_records() and taking and
+	  dropping the mftbmp_lock rw_semaphore in ntfs_statfs() itself.
+	- Bring attribute run list merging code (fs/ntfs/attrib.c) in sync with
+	  current userspace ntfs library code. This means that if a merge
+	  fails the original run lists are always left unmodified instead of
+	  being silently corrupted.
+	- Misc typo fixes.
+
 2.0.13 - Use iget5_locked() in preparation for fake inodes and small cleanups.
 
 	- Remove nr_mft_bits and the now superfluous union with nr_mft_records
...
...
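The guarantee in the 2.0.14 entry above — a failed merge leaves the original run lists untouched — is what lets callers fall back safely. A hedged caller-side sketch (hypothetical caller; only the ntfs_merge_run_lists(), IS_ERR() and PTR_ERR() interfaces visible in this commit are assumed):

	/*
	 * Hedged sketch, not code from this commit: because a failed merge
	 * leaves both inputs unmodified, the caller can keep using them.
	 */
	run_list_element *merged;

	merged = ntfs_merge_run_lists(drl, srl);
	if (IS_ERR(merged)) {
		/* drl and srl are still valid; possible errors are
		 * -ENOMEM, -EINVAL and -ERANGE. */
		ntfs_error(NULL, "Merge failed, error %ld.", PTR_ERR(merged));
		return merged;
	}
	/* On success both inputs were consumed; only use the result. */
	drl = merged;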
fs/ntfs/Makefile
...
...
@@ -5,7 +5,7 @@ obj-$(CONFIG_NTFS_FS) += ntfs.o
 ntfs-objs := aops.o attrib.o compress.o debug.o dir.o file.o inode.o mft.o \
 	     mst.o namei.o super.o sysctl.o time.o unistr.o upcase.o
 
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.13\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.0.14\"
 
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
...
...
fs/ntfs/attrib.c
...
...
@@ -27,338 +27,433 @@
 /* Temporary helper functions -- might become macros */
 
 /**
- * rl_mm - run_list memmove
+ * ntfs_rl_mm - run_list memmove
  *
  * It is up to the caller to serialize access to the run list @base.
  */
-static inline void rl_mm(run_list_element *base, int dst, int src, int size)
+static inline void ntfs_rl_mm(run_list_element *base, int dst, int src, int size)
 {
-	if ((dst != src) && (size > 0))
+	if (likely((dst != src) && (size > 0)))
 		memmove(base + dst, base + src, size * sizeof(*base));
 }
 
 /**
- * rl_mc - run_list memory copy
+ * ntfs_rl_mc - run_list memory copy
  *
  * It is up to the caller to serialize access to the run lists @dstbase and
  * @srcbase.
  */
-static inline void rl_mc(run_list_element *dstbase, int dst,
+static inline void ntfs_rl_mc(run_list_element *dstbase, int dst,
 		run_list_element *srcbase, int src, int size)
 {
-	if (size > 0)
+	if (likely(size > 0))
 		memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase));
 }
 
 /**
  * ntfs_rl_realloc - Reallocate memory for run_lists
- * @orig:	The original memory allocation
- * @old:	The number of run_lists in the original
- * @new:	The number of run_lists we need space for
+ * @rl:		original run list
+ * @old_size:	number of run list elements in the original run list @rl
+ * @new_size:	number of run list elements we need space for
  *
  * As the run_lists grow, more memory will be required. To prevent the
  * kernel having to allocate and reallocate large numbers of small bits of
  * memory, this function returns and entire page of memory.
  *
- * It is up to the caller to serialize access to the run list @orig.
+ * It is up to the caller to serialize access to the run list @rl.
  *
  * N.B. If the new allocation doesn't require a different number of pages in
  * memory, the function will return the original pointer.
  *
- * Return: Pointer	The newly allocated, or recycled, memory.
- *
- * Errors: -ENOMEM, Not enough memory to allocate run list array.
- *	    -EINVAL, Invalid parameters were passed in.
+ * On success, return a pointer to the newly allocated, or recycled, memory.
+ *
+ * On error, return -errno. The following error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate run list array.
+ *	-EINVAL	- Invalid parameters were passed in.
  */
-static inline run_list_element *ntfs_rl_realloc(run_list_element *orig,
-		int old, int new)
+static inline run_list_element *ntfs_rl_realloc(run_list_element *rl,
+		int old_size, int new_size)
 {
-	run_list_element *nrl;
+	run_list_element *new_rl;
 
-	old = PAGE_ALIGN(old * sizeof(*orig));
-	new = PAGE_ALIGN(new * sizeof(*orig));
-	if (old == new)
-		return orig;
+	old_size = PAGE_ALIGN(old_size * sizeof(*rl));
+	new_size = PAGE_ALIGN(new_size * sizeof(*rl));
+	if (old_size == new_size)
+		return rl;
 
-	nrl = ntfs_malloc_nofs(new);
-	if (!nrl)
+	new_rl = ntfs_malloc_nofs(new_size);
+	if (unlikely(!new_rl))
 		return ERR_PTR(-ENOMEM);
 
-	if (orig) {
-		memcpy(nrl, orig, min(old, new));
-		ntfs_free(orig);
+	if (likely(rl != NULL)) {
+		if (unlikely(old_size > new_size))
+			old_size = new_size;
+		memcpy(new_rl, rl, old_size);
+		ntfs_free(rl);
 	}
-	return nrl;
+	return new_rl;
 }
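Since both sizes above are rounded up with PAGE_ALIGN() before being compared, ntfs_rl_realloc() only reallocates when the element count crosses a page boundary; growth within the same page hands back the original pointer. A minimal sketch of that behaviour (illustrative element counts, not taken from the commit):

	/* Sketch only: 3 -> 5 elements aligns to the same single page, so
	 * the original pointer is returned untouched; only when the aligned
	 * sizes differ does an allocate-copy-free cycle happen. */
	run_list_element *rl, *tmp;

	rl = ntfs_malloc_nofs(PAGE_SIZE);
	if (unlikely(!rl))
		return ERR_PTR(-ENOMEM);
	tmp = ntfs_rl_realloc(rl, 3, 5);
	if (IS_ERR(tmp))
		return tmp;	/* rl is left unmodified on error. */
	rl = tmp;		/* On success the old pointer must not be reused. */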
 /**
- * ntfs_rl_merge - Join together two run_lists
- * @one:  The first run_list and destination
- * @two:  The second run_list
+ * ntfs_are_rl_mergeable - test if two run lists can be joined together
+ * @dst: original run list
+ * @src: new run list to test for mergeability with @dst
  *
- * If possible merge together two run_lists. For this, their VCNs and LCNs
+ * Test if two run lists can be joined together. For this, their VCNs and LCNs
  * must be adjacent.
  *
- * It is up to the caller to serialize access to the run lists @one and @two.
+ * It is up to the caller to serialize access to the run lists @dst and @src.
  *
- * Return: TRUE   Success, the run_lists were merged
- *	   FALSE  Failure, the run_lists were not merged
+ * Return: TRUE   Success, the run lists can be merged.
+ *	   FALSE  Failure, the run lists cannot be merged.
  */
-static inline BOOL ntfs_rl_merge(run_list_element *one, run_list_element *two)
+static inline BOOL ntfs_are_rl_mergeable(run_list_element *dst,
+		run_list_element *src)
 {
-	BUG_ON(!one || !two);
+	BUG_ON(!dst || !src);
 
-	if ((one->lcn < 0) || (two->lcn < 0))     /* Are we merging holes? */
+	if ((dst->lcn < 0) || (src->lcn < 0))     /* Are we merging holes? */
 		return FALSE;
-	if ((one->lcn + one->length) != two->lcn) /* Are the runs contiguous? */
+	if ((dst->lcn + dst->length) != src->lcn) /* Are the runs contiguous? */
 		return FALSE;
-	if ((one->vcn + one->length) != two->vcn) /* Are the runs misaligned? */
+	if ((dst->vcn + dst->length) != src->vcn) /* Are the runs misaligned? */
 		return FALSE;
 
-	one->length += two->length;
 	return TRUE;
 }
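The three checks above mean two runs are mergeable only when they are adjacent in both the virtual (VCN) and logical (LCN) address spaces and neither is a hole. A concrete, made-up pair that passes all three checks:

	/* Illustrative values only: dst maps VCNs 0-9 to LCNs 100-109 and src
	 * starts exactly where dst ends in both spaces, so
	 * ntfs_are_rl_mergeable(&dst, &src) returns TRUE and a merge would
	 * simply grow dst->length to 15. A gap in either space, or a negative
	 * (hole/unmapped) lcn, makes it return FALSE. */
	run_list_element dst = { .vcn = 0,  .lcn = 100, .length = 10 };
	run_list_element src = { .vcn = 10, .lcn = 110, .length = 5 };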
 /**
- * ntfs_rl_append - Append a run_list after the given element
- * @orig:  The original run_list to be worked on.
- * @osize: The number of elements in @orig (including end marker).
- * @new:   The run_list to be inserted.
- * @nsize: The number of elements in @new (excluding end marker).
- * @loc:   Append the new run_list after this element in @orig.
- *
- * Append a run_list after element @loc in @orig. Merge the right end of
- * the new run_list, if necessary. Adjust the size of the hole before the
- * appended run_list.
- *
- * It is up to the caller to serialize access to the run lists @orig and @new.
- *
- * Return: Pointer, The new, combined, run_list
- *
- * Errors: -ENOMEM, Not enough memory to allocate run list array.
- *	    -EINVAL, Invalid parameters were passed in.
+ * __ntfs_rl_merge - merge two run lists without testing if they can be merged
+ * @dst: original, destination run list
+ * @src: new run list to merge with @dst
+ *
+ * Merge the two run lists, writing into the destination run list @dst. The
+ * caller must make sure the run lists can be merged or this will corrupt the
+ * destination run list.
+ *
+ * It is up to the caller to serialize access to the run lists @dst and @src.
  */
+static inline void __ntfs_rl_merge(run_list_element *dst, run_list_element *src)
+{
+	dst->length += src->length;
+}
+
+/**
+ * ntfs_rl_merge - test if two run lists can be joined together and merge them
+ * @dst: original, destination run list
+ * @src: new run list to merge with @dst
+ *
+ * Test if two run lists can be joined together. For this, their VCNs and LCNs
+ * must be adjacent. If they can be merged, perform the merge, writing into
+ * the destination run list @dst.
+ *
+ * It is up to the caller to serialize access to the run lists @dst and @src.
+ *
+ * Return: TRUE   Success, the run lists have been merged.
+ *	   FALSE  Failure, the run lists cannot be merged and have not been
+ *		  modified.
+ */
+static inline BOOL ntfs_rl_merge(run_list_element *dst, run_list_element *src)
+{
+	BOOL merge = ntfs_are_rl_mergeable(dst, src);
+
+	if (merge)
+		__ntfs_rl_merge(dst, src);
+	return merge;
+}
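Splitting the old ntfs_rl_merge() into a test half (ntfs_are_rl_mergeable()) and a commit half (__ntfs_rl_merge()) lets a caller find out whether a merge is possible before it reallocates, and only apply the destructive step once nothing can fail. A hedged sketch of the pattern, mirroring how ntfs_rl_append() below uses it (dst, src, dsize, ssize and loc are assumed from that context):

	/* Sketch of the check-then-commit pattern used by the functions below. */
	BOOL right;

	right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right);
	if (IS_ERR(dst))
		return dst;		/* Nothing has been modified yet. */
	/* From here on we cannot fail, so the destructive merge is safe. */
	if (right)
		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);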
+/**
+ * ntfs_rl_append - append a run list after a given element
+ * @dst:   original run list to be worked on
+ * @dsize: number of elements in @dst (including end marker)
+ * @src:   run list to be inserted into @dst
+ * @ssize: number of elements in @src (excluding end marker)
+ * @loc:   append the new run list @src after this element in @dst
+ *
+ * Append the run list @src after element @loc in @dst. Merge the right end of
+ * the new run list, if necessary. Adjust the size of the hole before the
+ * appended run list.
+ *
+ * It is up to the caller to serialize access to the run lists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, run list. Note, both
+ * run lists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned run list
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both run lists are left unmodified. The following
+ * error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate run list array.
+ *	-EINVAL	- Invalid parameters were passed in.
+ */
-static inline run_list_element *ntfs_rl_append(run_list_element *orig,
-		int osize, run_list_element *new, int nsize, int loc)
+static inline run_list_element *ntfs_rl_append(run_list_element *dst,
+		int dsize, run_list_element *src, int ssize, int loc)
 {
-	run_list_element *res;
 	BOOL right;
+	int magic;
 
-	BUG_ON(!orig || !new);
+	BUG_ON(!dst || !src);
 
+	/* First, check if the right hand end needs merging. */
+	right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
+
+	/* Space required: @dst size + @src size, less one if we merged. */
+	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right);
+	if (IS_ERR(dst))
+		return dst;
+	/*
+	 * We are guaranteed to succeed from here so can start modifying the
+	 * original run lists.
+	 */
+
 	/* First, merge the right hand end, if necessary. */
-	right = ntfs_rl_merge(new + nsize - 1, orig + loc + 1);
+	if (right)
+		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
 
-	/* Space required: Orig size + New size, less one if we merged. */
-	res = ntfs_rl_realloc(orig, osize, osize + nsize - right);
-	if (IS_ERR(res))
-		return res;
+	magic = loc + ssize;
 
-	/* Move the tail of Orig out of the way, then copy in New. */
-	rl_mm(res, loc + 1 + nsize, loc + 1 + right, osize - loc - 1 - right);
-	rl_mc(res, loc + 1, new, 0, nsize);
+	/* Move the tail of @dst out of the way, then copy in @src. */
+	ntfs_rl_mm(dst, magic + 1, loc + 1 + right, dsize - loc - 1 - right);
+	ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
 
 	/* Adjust the size of the preceding hole. */
-	res[loc].length = res[loc + 1].vcn - res[loc].vcn;
+	dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
 
 	/* We may have changed the length of the file, so fix the end marker */
-	if (res[loc + nsize + 1].lcn == LCN_ENOENT)
-		res[loc + nsize + 1].vcn = res[loc + nsize].vcn +
-				res[loc + nsize].length;
-	return res;
+	if (dst[magic + 1].lcn == LCN_ENOENT)
+		dst[magic + 1].vcn = dst[magic].vcn + dst[magic].length;
+
+	return dst;
 }
 /**
- * ntfs_rl_insert - Insert a run_list into another
- * @orig:  The original run_list to be worked on.
- * @osize: The number of elements in @orig (including end marker).
- * @new:   The run_list to be inserted.
- * @nsize: The number of elements in @new (excluding end marker).
- * @loc:   Insert the new run_list before this element in @orig.
- *
- * Insert a run_list before element @loc in @orig. Merge the left end of
- * the new run_list, if necessary. Adjust the size of the hole after the
- * inserted run_list.
- *
- * It is up to the caller to serialize access to the run lists @orig and @new.
- *
- * Return: Pointer, The new, combined, run_list
- *
- * Errors: -ENOMEM, Not enough memory to allocate run list array.
- *	    -EINVAL, Invalid parameters were passed in.
+ * ntfs_rl_insert - insert a run list into another
+ * @dst:   original run list to be worked on
+ * @dsize: number of elements in @dst (including end marker)
+ * @src:   new run list to be inserted
+ * @ssize: number of elements in @src (excluding end marker)
+ * @loc:   insert the new run list @src before this element in @dst
+ *
+ * Insert the run list @src before element @loc in the run list @dst. Merge the
+ * left end of the new run list, if necessary. Adjust the size of the hole
+ * after the inserted run list.
+ *
+ * It is up to the caller to serialize access to the run lists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, run list. Note, both
+ * run lists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned run list
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both run lists are left unmodified. The following
+ * error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate run list array.
+ *	-EINVAL	- Invalid parameters were passed in.
  */
-static inline run_list_element *ntfs_rl_insert(run_list_element *orig,
-		int osize, run_list_element *new, int nsize, int loc)
+static inline run_list_element *ntfs_rl_insert(run_list_element *dst,
+		int dsize, run_list_element *src, int ssize, int loc)
 {
-	run_list_element *res;
 	BOOL left = FALSE;
 	BOOL disc = FALSE;	/* Discontinuity */
 	BOOL hole = FALSE;	/* Following a hole */
+	int magic;
 
-	BUG_ON(!orig || !new);
+	BUG_ON(!dst || !src);
 
-	/* disc => Discontinuity between the end of Orig and the start of New.
+	/* disc => Discontinuity between the end of @dst and the start of @src.
 	 *	   This means we might need to insert a hole.
-	 * hole => Orig ends with a hole or an unmapped region which we can
+	 * hole => @dst ends with a hole or an unmapped region which we can
 	 *	   extend to match the discontinuity. */
-	if (loc == 0) {
-		disc = (new[0].vcn > 0);
-	} else {
-		left = ntfs_rl_merge(orig + loc - 1, new);
-
-		disc = (new[0].vcn > (orig[loc - 1].vcn + orig[loc - 1].length));
+	if (loc == 0)
+		disc = (src[0].vcn > 0);
+	else {
+		s64 merged_length;
+
+		left = ntfs_are_rl_mergeable(dst + loc - 1, src);
+
+		merged_length = dst[loc - 1].length;
+		if (left)
+			merged_length += src->length;
+
+		disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
 		if (disc)
-			hole = (orig[loc - 1].lcn == LCN_HOLE);
+			hole = (dst[loc - 1].lcn == LCN_HOLE);
 	}
 
-	/* Space required: Orig size + New size, less one if we merged,
-	 * plus one if there was a discontinuity, less one for a trailing hole */
-	res = ntfs_rl_realloc(orig, osize, osize + nsize - left + disc - hole);
-	if (IS_ERR(res))
-		return res;
+	/* Space required: @dst size + @src size, less one if we merged, plus
+	 * one if there was a discontinuity, less one for a trailing hole. */
+	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc - hole);
+	if (IS_ERR(dst))
+		return dst;
+	/*
+	 * We are guaranteed to succeed from here so can start modifying the
+	 * original run list.
+	 */
+	if (left)
+		__ntfs_rl_merge(dst + loc - 1, src);
+
+	magic = loc + ssize - left + disc - hole;
 
-	/* Move the tail of Orig out of the way, then copy in New. */
-	rl_mm(res, loc + nsize - left + disc - hole, loc, osize - loc);
-	rl_mc(res, loc + disc - hole, new, left, nsize - left);
+	/* Move the tail of @dst out of the way, then copy in @src. */
+	ntfs_rl_mm(dst, magic, loc, dsize - loc);
+	ntfs_rl_mc(dst, loc + disc - hole, src, left, ssize - left);
 
 	/* Adjust the VCN of the last run ... */
-	if (res[loc + nsize - left + disc - hole].lcn <= LCN_HOLE) {
-		res[loc + nsize - left + disc - hole].vcn =
-				res[loc + nsize - left + disc - hole - 1].vcn +
-				res[loc + nsize - left + disc - hole - 1].length;
-	}
+	if (dst[magic].lcn <= LCN_HOLE)
+		dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
 	/* ... and the length. */
-	if ((res[loc + nsize - left + disc - hole].lcn == LCN_HOLE) ||
-	    (res[loc + nsize - left + disc - hole].lcn == LCN_RL_NOT_MAPPED)) {
-		res[loc + nsize - left + disc - hole].length =
-				res[loc + nsize - left + disc - hole + 1].vcn -
-				res[loc + nsize - left + disc - hole].vcn;
-	}
+	if (dst[magic].lcn == LCN_HOLE || dst[magic].lcn == LCN_RL_NOT_MAPPED)
+		dst[magic].length = dst[magic + 1].vcn - dst[magic].vcn;
 
 	/* Writing beyond the end of the file and there's a discontinuity. */
 	if (disc) {
-		if (hole) {
-			res[loc - 1].length = res[loc].vcn - res[loc - 1].vcn;
-		} else {
+		if (hole)
+			dst[loc - 1].length = dst[loc].vcn - dst[loc - 1].vcn;
+		else {
 			if (loc > 0) {
-				res[loc].vcn = res[loc - 1].vcn +
-						res[loc - 1].length;
-				res[loc].length = res[loc + 1].vcn -
-						res[loc].vcn;
+				dst[loc].vcn = dst[loc - 1].vcn +
+						dst[loc - 1].length;
+				dst[loc].length = dst[loc + 1].vcn -
+						dst[loc].vcn;
 			} else {
-				res[loc].vcn = 0;
-				res[loc].length = res[loc + 1].vcn;
+				dst[loc].vcn = 0;
+				dst[loc].length = dst[loc + 1].vcn;
 			}
-			res[loc].lcn = LCN_RL_NOT_MAPPED;
+			dst[loc].lcn = LCN_RL_NOT_MAPPED;
 		}
 
-		if (res[loc + nsize - left + disc].lcn == LCN_ENOENT)
-			res[loc + nsize - left + disc].vcn =
-					res[loc + nsize - left + disc - 1].vcn +
-					res[loc + nsize - left + disc - 1].length;
+		magic += hole;
+
+		if (dst[magic].lcn == LCN_ENOENT)
+			dst[magic].vcn = dst[magic - 1].vcn +
+					dst[magic - 1].length;
 	}
-	return res;
+	return dst;
 }
 /**
- * ntfs_rl_replace - Overwrite a run_list element with another run_list
- * @orig:  The original run_list to be worked on.
- * @osize: The number of elements in @orig (including end marker).
- * @new:   The run_list to be inserted.
- * @nsize: The number of elements in @new (excluding end marker).
- * @loc:   Index of run_list @orig to overwrite with @new.
- *
- * Replace the run_list at @loc with @new. Merge the left and right ends of
- * the inserted run_list, if necessary.
- *
- * It is up to the caller to serialize access to the run lists @orig and @new.
- *
- * Return: Pointer, The new, combined, run_list
- *
- * Errors: -ENOMEM, Not enough memory to allocate run list array.
- *	    -EINVAL, Invalid parameters were passed in.
+ * ntfs_rl_replace - overwrite a run_list element with another run list
+ * @dst:   original run list to be worked on
+ * @dsize: number of elements in @dst (including end marker)
+ * @src:   new run list to be inserted
+ * @ssize: number of elements in @src (excluding end marker)
+ * @loc:   index in run list @dst to overwrite with @src
+ *
+ * Replace the run list element @dst at @loc with @src. Merge the left and
+ * right ends of the inserted run list, if necessary.
+ *
+ * It is up to the caller to serialize access to the run lists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, run list. Note, both
+ * run lists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned run list
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both run lists are left unmodified. The following
+ * error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate run list array.
+ *	-EINVAL	- Invalid parameters were passed in.
  */
-static inline run_list_element *ntfs_rl_replace(run_list_element *orig,
-		int osize, run_list_element *new, int nsize, int loc)
+static inline run_list_element *ntfs_rl_replace(run_list_element *dst,
+		int dsize, run_list_element *src, int ssize, int loc)
 {
-	run_list_element *res;
 	BOOL left = FALSE;
 	BOOL right;
+	int magic;
 
-	BUG_ON(!orig || !new);
+	BUG_ON(!dst || !src);
 
 	/* First, merge the left and right ends, if necessary. */
-	right = ntfs_rl_merge(new + nsize - 1, orig + loc + 1);
+	right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
 	if (loc > 0)
-		left = ntfs_rl_merge(orig + loc - 1, new);
+		left = ntfs_are_rl_mergeable(dst + loc - 1, src);
 
-	/* Allocate some space. We'll need less if the left, right
-	 * or both ends were merged. */
-	res = ntfs_rl_realloc(orig, osize, osize + nsize - left - right);
-	if (IS_ERR(res))
-		return res;
+	/* Allocate some space. We'll need less if the left, right, or both
+	 * ends were merged. */
+	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left - right);
+	if (IS_ERR(dst))
+		return dst;
+	/*
+	 * We are guaranteed to succeed from here so can start modifying the
+	 * original run lists.
+	 */
+	if (right)
+		__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
+	if (left)
+		__ntfs_rl_merge(dst + loc - 1, src);
 
-	/* Move the tail of Orig out of the way, then copy in New. */
-	rl_mm(res, loc + nsize - left, loc + right + 1, osize - loc - right - 1);
-	rl_mc(res, loc, new, left, nsize - left);
+	/* FIXME: What does this mean? (AIA) */
+	magic = loc + ssize - left;
+
+	/* Move the tail of @dst out of the way, then copy in @src. */
+	ntfs_rl_mm(dst, magic, loc + right + 1, dsize - loc - right - 1);
+	ntfs_rl_mc(dst, loc, src, left, ssize - left);
 
 	/* We may have changed the length of the file, so fix the end marker */
-	if (res[loc + nsize - left].lcn == LCN_ENOENT)
-		res[loc + nsize - left].vcn = res[loc + nsize - left - 1].vcn +
-				res[loc + nsize - left - 1].length;
-	return res;
+	if (dst[magic].lcn == LCN_ENOENT)
+		dst[magic].vcn = dst[magic - 1].vcn + dst[magic - 1].length;
+	return dst;
 }
 /**
- * ntfs_rl_split - Insert a run_list into the centre of a hole
- * @orig:  The original run_list to be worked on.
- * @osize: The number of elements in @orig (including end marker).
- * @new:   The run_list to be inserted.
- * @nsize: The number of elements in @new (excluding end marker).
- * @loc:   Index of run_list in @orig to split with @new.
- *
- * Split the run_list at @loc into two and insert @new. No merging of
- * run_lists is necessary. Adjust the size of the holes either side.
- *
- * It is up to the caller to serialize access to the run lists @orig and @new.
- *
- * Return: Pointer, The new, combined, run_list
- *
- * Errors: -ENOMEM, Not enough memory to allocate run list array.
- *	    -EINVAL, Invalid parameters were passed in.
+ * ntfs_rl_split - insert a run list into the centre of a hole
+ * @dst:   original run list to be worked on
+ * @dsize: number of elements in @dst (including end marker)
+ * @src:   new run list to be inserted
+ * @ssize: number of elements in @src (excluding end marker)
+ * @loc:   index in run list @dst at which to split and insert @src
+ *
+ * Split the run list @dst at @loc into two and insert @new in between the two
+ * fragments. No merging of run lists is necessary. Adjust the size of the
+ * holes either side.
+ *
+ * It is up to the caller to serialize access to the run lists @dst and @src.
+ *
+ * On success, return a pointer to the new, combined, run list. Note, both
+ * run lists @dst and @src are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned run list
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both run lists are left unmodified. The following
+ * error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate run list array.
+ *	-EINVAL	- Invalid parameters were passed in.
  */
-static inline run_list_element *ntfs_rl_split(run_list_element *orig, int osize,
-		run_list_element *new, int nsize, int loc)
+static inline run_list_element *ntfs_rl_split(run_list_element *dst, int dsize,
+		run_list_element *src, int ssize, int loc)
 {
-	run_list_element *res;
-
-	BUG_ON(!orig || !new);
+	BUG_ON(!dst || !src);
 
-	/* Space required: Orig size + New size + One new hole. */
-	res = ntfs_rl_realloc(orig, osize, osize + nsize + 1);
-	if (IS_ERR(res))
-		return res;
+	/* Space required: @dst size + @src size + one new hole. */
+	dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1);
+	if (IS_ERR(dst))
+		return dst;
+	/*
+	 * We are guaranteed to succeed from here so can start modifying the
+	 * original run lists.
+	 */
 
-	/* Move the tail of Orig out of the way, then copy in New. */
-	rl_mm(res, loc + 1 + nsize, loc, osize - loc);
-	rl_mc(res, loc + 1, new, 0, nsize);
+	/* Move the tail of @dst out of the way, then copy in @src. */
+	ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc);
+	ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
 
-	/* Adjust the size of the holes either size of New. */
-	res[loc].length = res[loc + 1].vcn - res[loc].vcn;
-	res[loc + nsize + 1].vcn = res[loc + nsize].vcn + res[loc + nsize].length;
-	res[loc + nsize + 1].length = res[loc + nsize + 2].vcn -
-			res[loc + nsize + 1].vcn;
+	/* Adjust the size of the holes either size of @src. */
+	dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
+	dst[loc + ssize + 1].vcn = dst[loc + ssize].vcn + dst[loc + ssize].length;
+	dst[loc + ssize + 1].length = dst[loc + ssize + 2].vcn -
+			dst[loc + ssize + 1].vcn;
 
-	return res;
+	return dst;
 }
 /**
- * merge_run_lists - merge two run_lists into one
- * @drl:	The original run_list.
- * @srl:	The new run_list to be merge into @drl.
+ * ntfs_merge_run_lists - merge two run_lists into one
+ * @drl:	original run list to be worked on
+ * @srl:	new run list to be merged into @drl
  *
- * First we sanity check the two run_lists to make sure that they are sensible
- * and can be merged. The @srl run_list must be either after the @drl run_list
- * or completely within a hole in @drl.
+ * First we sanity check the two run lists @srl and @drl to make sure that they
+ * are sensible and can be merged. The run list @srl must be either after the
+ * run list @drl or completely within a hole (or unmapped region) in @drl.
  *
  * It is up to the caller to serialize access to the run lists @drl and @srl.
  *
...
...
@@ -366,25 +461,28 @@ static inline run_list_element *ntfs_rl_split(run_list_element *orig, int osize,
  * 1. When attribute lists are used and a further extent is being mapped.
  * 2. When new clusters are allocated to fill a hole or extend a file.
  *
- * There are four possible ways @srl can be merged. It can be inserted at
- * the beginning of a hole; split the hole in two; appended at the end of
- * a hole; replace the whole hole. It can also be appended to the end of
- * the run_list, which is just a variant of the insert case.
- *
- * N.B. Either, or both, of the input pointers may be freed if the function
- * is successful. Only the returned pointer may be used.
- *
- * If the function fails, neither of the input run_lists may be safe.
- *
- * Return: Pointer, The resultant merged run_list.
- *
- * Errors: -ENOMEM, Not enough memory to allocate run list array.
- *	    -EINVAL, Invalid parameters were passed in.
- *	    -ERANGE, The run_lists overlap and cannot be merged.
+ * There are four possible ways @srl can be merged. It can:
+ *	- be inserted at the beginning of a hole,
+ *	- split the hole in two and be inserted between the two fragments,
+ *	- be appended at the end of a hole, or it can
+ *	- replace the whole hole.
+ * It can also be appended to the end of the run list, which is just a variant
+ * of the insert case.
+ *
+ * On success, return a pointer to the new, combined, run list. Note, both
+ * run lists @drl and @srl are deallocated before returning so you cannot use
+ * the pointers for anything any more. (Strictly speaking the returned run list
+ * may be the same as @dst but this is irrelevant.)
+ *
+ * On error, return -errno. Both run lists are left unmodified. The following
+ * error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate run list array.
+ *	-EINVAL	- Invalid parameters were passed in.
+ *	-ERANGE	- The run lists overlap and cannot be merged.
  */
-run_list_element *merge_run_lists(run_list_element *drl, run_list_element *srl)
+run_list_element *ntfs_merge_run_lists(run_list_element *drl,
+		run_list_element *srl)
 {
-	run_list_element *nrl;	/* New run list. */
 	int di, si;		/* Current index into @[ds]rl. */
 	int sstart;		/* First index with lcn > LCN_RL_NOT_MAPPED. */
 	int dins;		/* Index into @drl at which to insert @srl. */
...
...
@@ -392,49 +490,49 @@ run_list_element *merge_run_lists(run_list_element *drl, run_list_element *srl)
 	int dfinal, sfinal;	/* The last index into @[ds]rl with
 				   lcn >= LCN_HOLE. */
 	int marker = 0;
 	VCN marker_vcn = 0;
 
-#if 1
+#ifdef DEBUG
 	ntfs_debug("dst:");
 	ntfs_debug_dump_runlist(drl);
 	ntfs_debug("src:");
 	ntfs_debug_dump_runlist(srl);
 #endif
 
 	/* Check for silly calling... */
 	if (unlikely(!srl))
 		return drl;
 	if (unlikely(IS_ERR(srl) || IS_ERR(drl)))
 		return ERR_PTR(-EINVAL);
 
 	/* Check for the case where the first mapping is being done now. */
 	if (unlikely(!drl)) {
-		nrl = srl;
+		drl = srl;
 		/* Complete the source run list if necessary. */
-		if (unlikely(srl[0].vcn)) {
+		if (unlikely(drl[0].vcn)) {
 			/* Scan to the end of the source run list. */
-			for (send = 0; likely(srl[send].length); send++)
+			for (dend = 0; likely(drl[dend].length); dend++)
 				;
-			nrl = ntfs_rl_realloc(srl, send, send + 1);
-			if (!nrl)
-				return ERR_PTR(-ENOMEM);
-			rl_mm(nrl, 1, 0, send);
-			nrl[0].vcn = 0;		/* Add start element. */
-			nrl[0].lcn = LCN_RL_NOT_MAPPED;
-			nrl[0].length = nrl[1].vcn;
+			drl = ntfs_rl_realloc(drl, dend, dend + 1);
+			if (IS_ERR(drl))
+				return drl;
+			/* Insert start element at the front of the run list. */
+			ntfs_rl_mm(drl, 1, 0, dend);
+			drl[0].vcn = 0;
+			drl[0].lcn = LCN_RL_NOT_MAPPED;
+			drl[0].length = drl[1].vcn;
 		}
 		goto finished;
 	}
 
 	si = di = 0;
 
-	/* Skip the unmapped start element(s) in each run_list if present. */
+	/* Skip any unmapped start element(s) in the source run_list. */
 	while (srl[si].length && srl[si].lcn < (LCN)LCN_HOLE)
 		si++;
 
-	/* Can't have an entirely unmapped srl run_list. */
-	BUG_ON(!srl[si].length);
+	/* Can't have an entirely unmapped source run list. */
+	BUG_ON(!srl[si].length);
 
 	/* Record the starting points. */
 	sstart = si;
...
...
@@ -445,19 +543,16 @@ run_list_element *merge_run_lists(run_list_element *drl, run_list_element *srl)
 	 * appended to @drl.
 	 */
 	for (; drl[di].length; di++) {
-		if ((drl[di].vcn + drl[di].length) > srl[sstart].vcn)
+		if (drl[di].vcn + drl[di].length > srl[sstart].vcn)
 			break;
 	}
 	dins = di;
 
 	/* Sanity check for illegal overlaps. */
 	if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
 			(srl[si].lcn >= 0)) {
-		ntfs_error(NULL, "Run lists overlap. Cannot merge! Returning "
-				"ERANGE.");
-		nrl = ERR_PTR(-ERANGE);
-		goto exit;
+		ntfs_error(NULL, "Run lists overlap. Cannot merge!");
+		return ERR_PTR(-ERANGE);
 	}
 
 	/* Scan to the end of both run lists in order to know their sizes. */
...
...
@@ -466,9 +561,8 @@ run_list_element *merge_run_lists(run_list_element *drl, run_list_element *srl)
 	for (dend = di; drl[dend].length; dend++)
 		;
-	if (srl[send].lcn == LCN_ENOENT) {
-		marker = send;
-	}
+	if (srl[send].lcn == (LCN)LCN_ENOENT)
+		marker_vcn = srl[marker = send].vcn;
 
 	/* Scan to the last element with lcn >= LCN_HOLE. */
 	for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--)
...
...
@@ -486,89 +580,135 @@ run_list_element *merge_run_lists(run_list_element *drl, run_list_element *srl)
 			(drl[dins].vcn == srl[sstart].vcn));	/* Start of hole */
 	finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) &&	/* End of file   */
 		 ((drl[dins].vcn + drl[dins].length) <=		/* End of hole   */
 		  (srl[send - 1].vcn + srl[send - 1].length)));
 
+	/* Or we'll lose an end marker */
+	if (start && finish && (drl[dins].length == 0))
+		ss++;
 	if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
 		finish = FALSE;
 #if 0
 	ntfs_debug("dfinal = %i, dend = %i", dfinal, dend);
 	ntfs_debug("sstart = %i, sfinal = %i, send = %i", sstart, sfinal, send);
 	ntfs_debug("start = %i, finish = %i", start, finish);
 	ntfs_debug("ds = %i, ss = %i, dins = %i", ds, ss, dins);
 #endif
-	if (start)
+	if (start) {
 		if (finish)
-			nrl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins);
+			drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins);
 		else
-			nrl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins);
-	else {
+			drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins);
+	} else {
 		if (finish)
-			nrl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins);
+			drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins);
 		else
-			nrl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins);
+			drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins);
 	}
-	if (marker && !IS_ERR(nrl)) {
-		for (ds = 0; nrl[ds].length; ds++)
-			;
-		nrl = ntfs_rl_insert(nrl, ds + 1, srl + marker, 1, ds);
+	if (IS_ERR(drl)) {
+		ntfs_error(NULL, "Merge failed.");
+		return drl;
 	}
+	ntfs_free(srl);
+	if (marker) {
+		ntfs_debug("Triggering marker code.");
+		for (ds = dend; drl[ds].length; ds++)
+			;
+		/* We only need to care if @srl ended after @drl. */
+		if (drl[ds].vcn <= marker_vcn) {
+			int slots = 0;
+
+			if (drl[ds].vcn == marker_vcn) {
+				ntfs_debug("Old marker = 0x%Lx, replacing with "
+						"LCN_ENOENT.\n",
+						(unsigned long long)drl[ds].lcn);
+				drl[ds].lcn = (LCN)LCN_ENOENT;
+				goto finished;
+			}
+			/*
+			 * We need to create an unmapped run list element in
+			 * @drl or extend an existing one before adding the
+			 * ENOENT terminator.
+			 */
+			if (drl[ds].lcn == (LCN)LCN_ENOENT) {
+				ds--;
+				slots = 1;
+			}
+			if (drl[ds].lcn != (LCN)LCN_RL_NOT_MAPPED) {
+				/* Add an unmapped run list element. */
+				if (!slots) {
+					/* FIXME/TODO: We need to have the
+					 * extra memory already! (AIA) */
+					drl = ntfs_rl_realloc(drl, ds, ds + 2);
+					if (!drl)
+						goto critical_error;
+					slots = 2;
+				}
+				ds++;
+				/* Need to set vcn if it isn't set already. */
+				if (slots != 1)
+					drl[ds].vcn = drl[ds - 1].vcn +
+							drl[ds - 1].length;
+				drl[ds].lcn = (LCN)LCN_RL_NOT_MAPPED;
+				/* We now used up a slot. */
+				slots--;
+			}
+			drl[ds].length = marker_vcn - drl[ds].vcn;
+			/* Finally add the ENOENT terminator. */
+			ds++;
+			if (!slots) {
+				/* FIXME/TODO: We need to have the extra
+				 * memory already! (AIA) */
+				drl = ntfs_rl_realloc(drl, ds, ds + 1);
+				if (!drl)
+					goto critical_error;
+			}
+			drl[ds].vcn = marker_vcn;
+			drl[ds].lcn = (LCN)LCN_ENOENT;
+			drl[ds].length = (s64)0;
+		}
+	}
 
-	if (likely(!IS_ERR(nrl))) {
-		/* The merge was completed successfully. */
 finished:
-		if (nrl != srl)
-			ntfs_free(srl);
-		/*ntfs_debug ("Done.");*/
-		/*ntfs_debug ("Merged run list:");*/
-#if 1
-		ntfs_debug("res:");
-		ntfs_debug_dump_runlist(nrl);
-#endif
-	} else {
-		ntfs_error(NULL, "Merge failed, returning error code %ld.",
-				-PTR_ERR(nrl));
-	}
-exit:
-	return nrl;
+	/* The merge was completed successfully. */
+	ntfs_debug("Merged run list:");
+	ntfs_debug_dump_runlist(drl);
+	return drl;
+critical_error:
+	/* Critical error! We cannot afford to fail here. */
+	ntfs_error(NULL, "Critical error! Not enough memory.");
+	panic("NTFS: Cannot continue.");
 }
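The dispatch among the four strategies listed in the ntfs_merge_run_lists() comment reduces to the two booleans computed above: does @srl start at the beginning of the hole in @drl ("start") and does it end at or before the hole's end ("finish")? Restated compactly for reference (same ds, ss, dins and sstart variables as in the function; this is a paraphrase of the hunk above, not additional commit code):

	/* start && finish   -> @srl covers the whole hole:     ntfs_rl_replace()
	 * start && !finish  -> @srl begins at the hole:        ntfs_rl_insert()
	 * !start && finish  -> @srl ends at the hole's end:    ntfs_rl_append()
	 * !start && !finish -> @srl sits inside the hole:      ntfs_rl_split()
	 */
	if (start)
		drl = finish ? ntfs_rl_replace(drl, ds, srl + sstart, ss, dins) :
			       ntfs_rl_insert(drl, ds, srl + sstart, ss, dins);
	else
		drl = finish ? ntfs_rl_append(drl, ds, srl + sstart, ss, dins) :
			       ntfs_rl_split(drl, ds, srl + sstart, ss, dins);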
 /**
  * decompress_mapping_pairs - convert mapping pairs array to run list
  * @vol: ntfs volume on which the attribute resides
  * @attr: attribute record whose mapping pairs array to decompress
- * @run_list: optional run list in which to insert @attr's run list
- *
- * Decompress the attribute @attr's mapping pairs array into a run_list and
- * return the run list or -errno on error. If @run_list is not NULL then
- * the mapping pairs array of @attr is decompressed and the run list inserted
- * into the appropriate place in @run_list. If this is the case and the
- * function returns success, the original pointer passed into @run_list is no
- * longer valid.
- *
- * Check the return value for error with IS_ERR(ret_val). If this is FALSE,
- * the function was successful, the return value is the new run list, and if
- * an existing run list pointer was passed in, this is no longer valid.
- * If IS_ERR(ret_val) returns true, there was an error, the return value is not
- * a run_list pointer and the existing run list pointer if one was passed in
- * has not been touched. In this case use PTR_ERR(ret_val) to obtain the error
- * code. Following error codes are defined:
- *	-ENOMEM	Not enough memory to allocate run list array.
- *	-EIO	Corrupt run list.
- *	-EINVAL	Invalid parameters were passed in.
- *	-ERANGE	The two run lists overlap.
+ * @old_rl: optional run list in which to insert @attr's run list
+ *
+ * It is up to the caller to serialize access to the run list @old_rl.
+ *
+ * Decompress the attribute @attr's mapping pairs array into a run list. On
+ * success, return the decompressed run list.
+ *
+ * If @old_rl is not NULL, decompressed run list is inserted into the
+ * appropriate place in @old_rl and the resultant, combined run list is
+ * returned. The original @old_rl is deallocated.
+ *
+ * On error, return -errno. @old_rl is left unmodified in that case.
+ *
+ * The following error codes are defined:
+ *	-ENOMEM	- Not enough memory to allocate run list array.
+ *	-EIO	- Corrupt run list.
+ *	-EINVAL	- Invalid parameters were passed in.
+ *	-ERANGE	- The two run lists overlap.
  *
  * FIXME: For now we take the conceptionally simplest approach of creating the
  * new run list disregarding the already existing one and then splicing the
- * two into one if that is possible (we check for overlap and discard the new
- * run list if overlap present and return error).
+ * two into one, if that is possible (we check for overlap and discard the new
+ * run list if overlap present before returning ERR_PTR(-ERANGE)).
  */
 run_list_element *decompress_mapping_pairs(const ntfs_volume *vol,
 		const ATTR_RECORD *attr, run_list_element *old_rl)
...
...
@@ -576,12 +716,12 @@ run_list_element *decompress_mapping_pairs(const ntfs_volume *vol,
 	VCN vcn;		/* Current vcn. */
 	LCN lcn;		/* Current lcn. */
 	s64 deltaxcn;		/* Change in [vl]cn. */
-	run_list_element *rl = NULL;	/* The output run_list. */
-	run_list_element *rl2;	/* Temporary run_list. */
+	run_list_element *rl;	/* The output run list. */
 	u8 *buf;		/* Current position in mapping pairs array. */
 	u8 *attr_end;		/* End of attribute. */
-	int rlsize;		/* Size of run_list buffer. */
-	int rlpos;		/* Current run_list position. */
+	int rlsize;		/* Size of run list buffer. */
+	u16 rlpos;		/* Current run list position in units of
+				   run_list_elements. */
 	u8 b;			/* Current byte offset in buf. */
 #ifdef DEBUG
...
...
@@ -602,14 +742,12 @@ run_list_element *decompress_mapping_pairs(const ntfs_volume *vol,
 		ntfs_error(vol->sb, "Corrupt attribute.");
 		return ERR_PTR(-EIO);
 	}
-	/* Current position in run_list array. */
+	/* Current position in run list array. */
 	rlpos = 0;
-	/* Allocate first page. */
-	rl = ntfs_malloc_nofs(PAGE_SIZE);
+	/* Allocate first page and set current run list size to one page. */
+	rl = ntfs_malloc_nofs(rlsize = PAGE_SIZE);
 	if (unlikely(!rl))
 		return ERR_PTR(-ENOMEM);
-	/* Current run_list buffer size in bytes. */
-	rlsize = PAGE_SIZE;
 	/* Insert unmapped starting element if necessary. */
 	if (vcn) {
 		rl->vcn = (VCN)0;
...
...
@@ -624,18 +762,20 @@ run_list_element *decompress_mapping_pairs(const ntfs_volume *vol,
 		 * operates on whole pages only.
 		 */
 		if (((rlpos + 3) * sizeof(*old_rl)) > rlsize) {
+			run_list_element *rl2;
+
 			rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
 			if (unlikely(!rl2)) {
 				ntfs_free(rl);
 				return ERR_PTR(-ENOMEM);
 			}
-			memmove(rl2, rl, rlsize);
+			memcpy(rl2, rl, rlsize);
 			ntfs_free(rl);
 			rl = rl2;
 			rlsize += PAGE_SIZE;
 		}
 		/* Enter the current vcn into the current run_list element. */
-		(rl + rlpos)->vcn = vcn;
+		rl[rlpos].vcn = vcn;
 		/*
 		 * Get the change in vcn, i.e. the run length in clusters.
 		 * Doing it this way ensures that we signextend negative values.
...
...
@@ -664,10 +804,10 @@ run_list_element *decompress_mapping_pairs(const ntfs_volume *vol,
 			goto err_out;
 		}
 		/*
-		 * Enter the current run length into the current run_list
+		 * Enter the current run length into the current run list
 		 * element.
 		 */
-		(rl + rlpos)->length = deltaxcn;
+		rl[rlpos].length = deltaxcn;
 		/* Increment the current vcn by the current run length. */
 		vcn += deltaxcn;
 		/*
...
...
@@ -676,7 +816,7 @@ run_list_element *decompress_mapping_pairs(const ntfs_volume *vol,
 		 * to LCN_HOLE.
 		 */
 		if (!(*buf & 0xf0))
-			(rl + rlpos)->lcn = (LCN)LCN_HOLE;
+			rl[rlpos].lcn = (LCN)LCN_HOLE;
 		else {
 			/* Get the lcn change which really can be negative. */
 			u8 b2 = *buf & 0xf;
...
...
@@ -709,7 +849,7 @@ run_list_element *decompress_mapping_pairs(const ntfs_volume *vol,
 				goto err_out;
 			}
 			/* Enter the current lcn into the run_list element. */
-			(rl + rlpos)->lcn = lcn;
+			rl[rlpos].lcn = lcn;
 		}
 		/* Get to the next run_list element. */
 		rlpos++;
...
...
@@ -729,7 +869,7 @@ run_list_element *decompress_mapping_pairs(const ntfs_volume *vol,
"non-resident attribute."
);
goto
err_out
;
}
/* Setup not mapped run
_
list element if this is the base extent. */
/* Setup not mapped run
list element if this is the base extent. */
if
(
!
attr
->
_ANR
(
lowest_vcn
))
{
VCN
max_cluster
;
...
...
@@ -742,13 +882,13 @@ run_list_element *decompress_mapping_pairs(const ntfs_volume *vol,
 		 * likely, there are more extents following this one.
 		 */
 		if (deltaxcn < --max_cluster) {
-//RAR			ntfs_debug("More extents to follow; deltaxcn = 0x%Lx, "
-//RAR					"max_cluster = 0x%Lx",
-//RAR					(long long)deltaxcn,
-//RAR					(long long)max_cluster);
-			(rl + rlpos)->vcn = vcn;
-			vcn += (rl + rlpos)->length = max_cluster - deltaxcn;
-			(rl + rlpos)->lcn = (LCN)LCN_RL_NOT_MAPPED;
+			ntfs_debug("More extents to follow; deltaxcn = 0x%Lx, "
+					"max_cluster = 0x%Lx",
+					(long long)deltaxcn,
+					(long long)max_cluster);
+			rl[rlpos].vcn = vcn;
+			vcn += rl[rlpos].length = max_cluster - deltaxcn;
+			rl[rlpos].lcn = (LCN)LCN_RL_NOT_MAPPED;
 			rlpos++;
 		} else if (unlikely(deltaxcn > max_cluster)) {
 			ntfs_error(vol->sb, "Corrupt attribute. deltaxcn = "
...
...
@@ -757,25 +897,26 @@ run_list_element *decompress_mapping_pairs(const ntfs_volume *vol,
 					(long long)max_cluster);
 			goto mpa_err;
 		}
-		(rl + rlpos)->lcn = (LCN)LCN_ENOENT;
+		rl[rlpos].lcn = (LCN)LCN_ENOENT;
 	} else /* Not the base extent. There may be more extents to follow. */
-		(rl + rlpos)->lcn = (LCN)LCN_RL_NOT_MAPPED;
+		rl[rlpos].lcn = (LCN)LCN_RL_NOT_MAPPED;
 
 	/* Setup terminating run_list element. */
-	(rl + rlpos)->vcn = vcn;
-	(rl + rlpos)->length = (s64)0;
-//RAR	ntfs_debug("Mapping pairs array successfully decompressed.");
-//RAR	ntfs_debug_dump_runlist(rl);
+	rl[rlpos].vcn = vcn;
+	rl[rlpos].length = (s64)0;
 	/* If no existing run list was specified, we are done. */
-	if (!old_rl)
+	if (!old_rl) {
+		ntfs_debug("Mapping pairs array successfully decompressed:");
+		ntfs_debug_dump_runlist(rl);
 		return rl;
+	}
 	/* Now combine the new and old run lists checking for overlaps. */
-	rl2 = merge_run_lists(old_rl, rl);
-	if (likely(!IS_ERR(rl2)))
-		return rl2;
+	old_rl = ntfs_merge_run_lists(old_rl, rl);
+	if (likely(!IS_ERR(old_rl)))
+		return old_rl;
 	ntfs_free(rl);
 	ntfs_error(vol->sb, "Failed to merge run lists.");
-	return rl2;
+	return old_rl;
 io_error:
 	ntfs_error(vol->sb, "Corrupt attribute.");
 err_out:
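The return convention documented at the top of decompress_mapping_pairs() — a run list pointer on success, an ERR_PTR() on failure, with a passed-in @old_rl consumed only on success — implies the following caller-side pattern (hypothetical caller, sketched from that comment, not taken from this commit):

	/* Hedged sketch of a caller honouring the documented convention. */
	run_list_element *rl;

	rl = decompress_mapping_pairs(vol, attr, old_rl);
	if (IS_ERR(rl)) {
		/* old_rl is untouched; just report the error code. */
		ntfs_error(vol->sb, "Mapping pairs decompression failed "
				"(error %ld).", PTR_ERR(rl));
		return PTR_ERR(rl);
	}
	old_rl = rl;	/* The previous run list was consumed on success. */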
...
...
fs/ntfs/mst.c
...
...
@@ -48,7 +48,7 @@ int post_read_mst_fixup(NTFS_RECORD *b, const u32 size)
 	usa_ofs = le16_to_cpu(b->usa_ofs);
 	/* Decrement usa_count to get number of fixups. */
 	usa_count = le16_to_cpu(b->usa_count) - 1;
-	/* Size and alignement checks. */
+	/* Size and alignment checks. */
 	if (size & (NTFS_BLOCK_SIZE - 1) || usa_ofs & 1 ||
 			usa_ofs + (usa_count * 2) > size ||
...
...
@@ -132,7 +132,7 @@ int pre_write_mst_fixup(NTFS_RECORD *b, const u32 size)
 	usa_ofs = le16_to_cpu(b->usa_ofs);
 	/* Decrement usa_count to get number of fixups. */
 	usa_count = le16_to_cpu(b->usa_count) - 1;
-	/* Size and alignement checks. */
+	/* Size and alignment checks. */
 	if (size & (NTFS_BLOCK_SIZE - 1) || usa_ofs & 1 ||
 			usa_ofs + (usa_count * 2) > size ||
...
...
fs/ntfs/super.c
...
...
@@ -1077,7 +1077,7 @@ static BOOL load_system_files(ntfs_volume *vol)
  * releases all inodes and memory belonging to the NTFS specific part of the
  * super block.
  */
-void ntfs_put_super(struct super_block *vfs_sb)
+static void ntfs_put_super(struct super_block *vfs_sb)
 {
 	ntfs_volume *vol = NTFS_SB(vfs_sb);
...
...
@@ -1155,7 +1155,7 @@ void ntfs_put_super(struct super_block *vfs_sb)
  * Errors are ignored and we just return the number of free clusters we have
  * found. This means we return an underestimate on error.
  */
-s64 get_nr_free_clusters(ntfs_volume *vol)
+static s64 get_nr_free_clusters(ntfs_volume *vol)
 {
 	struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
 	filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
...
...
@@ -1227,7 +1227,7 @@ s64 get_nr_free_clusters(ntfs_volume *vol)
 }
 
 /**
- * get_nr_free_mft_records - return the number of free inodes on a volume
+ * __get_nr_free_mft_records - return the number of free inodes on a volume
  * @vol: ntfs volume for which to obtain free inode count
  *
  * Calculate the number of free mft records (inodes) on the mounted NTFS
...
...
@@ -1235,8 +1235,10 @@ s64 get_nr_free_clusters(ntfs_volume *vol)
  *
  * Errors are ignored and we just return the number of free inodes we have
  * found. This means we return an underestimate on error.
+ *
+ * NOTE: Caller must hold mftbmp_lock rw_semaphore for reading or writing.
  */
-unsigned long get_nr_free_mft_records(ntfs_volume *vol)
+static unsigned long __get_nr_free_mft_records(ntfs_volume *vol)
 {
 	struct address_space *mapping;
 	filler_t *readpage;
...
...
@@ -1247,7 +1249,6 @@ unsigned long get_nr_free_mft_records(ntfs_volume *vol)
 	ntfs_debug("Entering.");
-	/* Serialize accesses to the inode bitmap. */
-	down_read(&vol->mftbmp_lock);
 	mapping = &vol->mftbmp_mapping;
 	readpage = (filler_t*)mapping->a_ops->readpage;
 	/*
...
...
@@ -1307,7 +1308,6 @@ unsigned long get_nr_free_mft_records(ntfs_volume *vol)
 	}
 	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx",
 			index - 1);
-	up_read(&vol->mftbmp_lock);
 	ntfs_debug("Exiting.");
 	return nr_free;
 }
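With the down_read()/up_read() pair removed here, the helper no longer serializes access itself; per the new NOTE above, every caller must hold mftbmp_lock around the call, exactly as the ntfs_statfs() hunk further down does. A one-line restatement of that contract (sketch, not additional commit code):

	down_read(&vol->mftbmp_lock);
	nr_free = __get_nr_free_mft_records(vol);	/* lock held by caller */
	up_read(&vol->mftbmp_lock);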
...
...
@@ -1330,7 +1330,7 @@ unsigned long get_nr_free_mft_records(ntfs_volume *vol)
  *
  * Return 0 on success or -errno on error.
  */
-int ntfs_statfs(struct super_block *sb, struct statfs *sfs)
+static int ntfs_statfs(struct super_block *sb, struct statfs *sfs)
 {
 	ntfs_volume *vol = NTFS_SB(sb);
 	s64 size;
...
@@ -1354,10 +1354,12 @@ int ntfs_statfs(struct super_block *sb, struct statfs *sfs)
size
=
0LL
;
/* Free blocks avail to non-superuser, same as above on NTFS. */
sfs
->
f_bavail
=
sfs
->
f_bfree
=
size
;
down_read
(
&
vol
->
mftbmp_lock
);
/* Total file nodes in file system (at this moment in time). */
sfs
->
f_files
=
vol
->
mft_ino
->
i_size
>>
vol
->
mft_record_size_bits
;
/* Free file nodes in fs (based on current total count). */
sfs
->
f_ffree
=
get_nr_free_mft_records
(
vol
);
sfs
->
f_ffree
=
__get_nr_free_mft_records
(
vol
);
up_read
(
&
vol
->
mftbmp_lock
);
/*
* File system id. This is extremely *nix flavour dependent and even
* within Linux itself all fs do their own thing. I interpret this to
...
...