Commit b888db2b, authored Aug 27, 2007 by Chris Mason; committed by David Woodhouse, Aug 27, 2007
Btrfs: Add delayed allocation to the extent based page tree code
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Parent: a52d9a80
Showing 6 changed files with 216 additions and 135 deletions
fs/btrfs/extent-tree.c   +0   -9
fs/btrfs/extent_map.c    +112 -35
fs/btrfs/extent_map.h    +3   -0
fs/btrfs/file.c          +21  -29
fs/btrfs/inode.c         +79  -62
fs/btrfs/super.c         +1   -0
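Note (not part of the commit): this change moves btrfs buffered writes to delayed allocation. At write time the affected byte range is only tagged EXTENT_DELALLOC in the per-inode extent map tree; the real extent allocation happens later, at writepage time, through a new fill_delalloc callback. The user-space sketch below models that flow under those assumptions; buffered_write, writepage and the fill_delalloc stand-in are illustrative names, not the kernel API.

#include <stdio.h>
#include <stdint.h>

#define EXTENT_DIRTY    0x1
#define EXTENT_DELALLOC 0x2

/* one tagged byte range; illustrative, not the kernel extent_state */
struct tagged_range {
        uint64_t start;
        uint64_t end;           /* inclusive */
        int bits;
};

static struct tagged_range range;       /* a single range is enough for the demo */

/* buffered write path: remember that the bytes need allocation later */
static void buffered_write(uint64_t start, uint64_t end)
{
        range.start = start;
        range.end = end;
        range.bits |= EXTENT_DIRTY | EXTENT_DELALLOC;
        printf("write: tagged %llu..%llu as delalloc\n",
               (unsigned long long)start, (unsigned long long)end);
}

/* stand-in for tree->fill_delalloc(): do the real allocation at writeback */
static int fill_delalloc(uint64_t start, uint64_t end)
{
        printf("writepage: allocating one extent for %llu..%llu\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}

/* writepage path: allocate, then drop the delalloc tag */
static void writepage(void)
{
        if (range.bits & EXTENT_DELALLOC) {
                fill_delalloc(range.start, range.end);
                range.bits &= ~EXTENT_DELALLOC;
        }
}

int main(void)
{
        buffered_write(0, 8191);        /* two 4K pages, no blocks allocated yet */
        writepage();                    /* allocation happens here, once */
        return 0;
}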
fs/btrfs/extent-tree.c
@@ -39,7 +39,6 @@ static int cache_block_group(struct btrfs_root *root,
         u64 i;
         u64 last = 0;
         u64 hole_size;
-        u64 limit;
         int found = 0;
 
         root = root->fs_info->extent_root;
@@ -62,7 +61,6 @@ static int cache_block_group(struct btrfs_root *root,
                 return ret;
         if (ret && path->slots[0] > 0)
                 path->slots[0]--;
-        limit = block_group->key.objectid + block_group->key.offset;
         while(1) {
                 leaf = btrfs_buffer_leaf(path->nodes[0]);
                 slot = path->slots[0];
@@ -982,7 +980,6 @@ static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
         struct btrfs_block_group_cache *block_group;
         int full_scan = 0;
         int wrapped = 0;
-        u64 limit;
 
         WARN_ON(num_blocks < 1);
         ins->flags = 0;
@@ -1049,12 +1046,6 @@ static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
                 l = btrfs_buffer_leaf(path->nodes[0]);
                 slot = path->slots[0];
                 if (slot >= btrfs_header_nritems(&l->header)) {
-                        if (start_found)
-                                limit = last_block +
-                                        (block_group->key.offset >> 1);
-                        else
-                                limit = search_start +
-                                        (block_group->key.offset >> 1);
                         ret = btrfs_next_leaf(root, path);
                         if (ret == 0)
                                 continue;
fs/btrfs/extent_map.c
@@ -30,9 +30,6 @@ struct tree_entry {
 
 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
 
-static LIST_HEAD(all_states);
-spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
-
 void __init extent_map_init(void)
 {
         extent_map_cache = kmem_cache_create("extent_map",
@@ -49,15 +46,6 @@ void __init extent_map_init(void)
 
 void __exit extent_map_exit(void)
 {
-        while(!list_empty(&all_states)) {
-                struct extent_state *state;
-                struct list_head *cur = all_states.next;
-                state = list_entry(cur, struct extent_state, list);
-                printk("found leaked state %Lu %Lu state %d in_tree %d\n",
-                       state->start, state->end, state->state, state->in_tree);
-                list_del(&state->list);
-                kfree(state);
-        }
         if (extent_map_cache)
                 kmem_cache_destroy(extent_map_cache);
         if (extent_state_cache)
@@ -69,6 +57,7 @@ void extent_map_tree_init(struct extent_map_tree *tree,
 {
         tree->map.rb_node = NULL;
         tree->state.rb_node = NULL;
+        tree->fill_delalloc = NULL;
         rwlock_init(&tree->lock);
         tree->mapping = mapping;
 }
@@ -106,9 +95,6 @@ struct extent_state *alloc_extent_state(gfp_t mask)
         state->in_tree = 0;
         atomic_set(&state->refs, 1);
         init_waitqueue_head(&state->wq);
-        spin_lock_irq(&state_lock);
-        list_add(&state->list, &all_states);
-        spin_unlock_irq(&state_lock);
         return state;
 }
 EXPORT_SYMBOL(alloc_extent_state);
@@ -117,9 +103,6 @@ void free_extent_state(struct extent_state *state)
 {
         if (atomic_dec_and_test(&state->refs)) {
                 WARN_ON(state->in_tree);
-                spin_lock_irq(&state_lock);
-                list_del_init(&state->list);
-                spin_unlock_irq(&state_lock);
                 kmem_cache_free(extent_state_cache, state);
         }
 }
@@ -369,7 +352,7 @@ static int insert_state(struct extent_map_tree *tree,
         if (node) {
                 struct extent_state *found;
                 found = rb_entry(node, struct extent_state, rb_node);
-                printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
+                printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
                 free_extent_state(state);
                 return -EEXIST;
         }
@@ -408,7 +391,7 @@ static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
         if (node) {
                 struct extent_state *found;
                 found = rb_entry(node, struct extent_state, rb_node);
-                printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
+                printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
                 free_extent_state(prealloc);
                 return -EEXIST;
         }
@@ -792,10 +775,20 @@ int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
 }
 EXPORT_SYMBOL(set_extent_dirty);
 
+int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
+                        gfp_t mask)
+{
+        return set_extent_bit(tree, start, end,
+                              EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL, mask);
+}
+EXPORT_SYMBOL(set_extent_delalloc);
+
 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
                        gfp_t mask)
 {
-        return clear_extent_bit(tree, start, end, EXTENT_DIRTY, 0, 0, mask);
+        return clear_extent_bit(tree, start, end,
+                                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
 }
 EXPORT_SYMBOL(clear_extent_dirty);
@@ -922,6 +915,62 @@ int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
 }
 EXPORT_SYMBOL(set_range_writeback);
 
+u64 find_lock_delalloc_range(struct extent_map_tree *tree,
+                             u64 start, u64 lock_start, u64 *end,
+                             u64 max_bytes)
+{
+        struct rb_node *node;
+        struct extent_state *state;
+        u64 cur_start = start;
+        u64 found = 0;
+        u64 total_bytes = 0;
+
+        write_lock_irq(&tree->lock);
+        /*
+         * this search will find all the extents that end after
+         * our range starts.
+         */
+search_again:
+        node = tree_search(&tree->state, cur_start);
+        if (!node || IS_ERR(node)) {
+                goto out;
+        }
+
+        while(1) {
+                state = rb_entry(node, struct extent_state, rb_node);
+                if (state->start != cur_start) {
+                        goto out;
+                }
+                if (!(state->state & EXTENT_DELALLOC)) {
+                        goto out;
+                }
+                if (state->start >= lock_start) {
+                        if (state->state & EXTENT_LOCKED) {
+                                DEFINE_WAIT(wait);
+                                atomic_inc(&state->refs);
+                                write_unlock_irq(&tree->lock);
+                                schedule();
+                                write_lock_irq(&tree->lock);
+                                finish_wait(&state->wq, &wait);
+                                free_extent_state(state);
+                                goto search_again;
+                        }
+                        state->state |= EXTENT_LOCKED;
+                }
+                found++;
+                *end = state->end;
+                cur_start = state->end + 1;
+                node = rb_next(node);
+                if (!node)
+                        break;
+                total_bytes = state->end - state->start + 1;
+                if (total_bytes >= max_bytes)
+                        break;
+        }
+out:
+        write_unlock_irq(&tree->lock);
+        return found;
+}
+
 /*
  * helper function to lock both pages and extents in the tree.
  * pages must be locked first.
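Reviewer note (not from the patch): find_lock_delalloc_range() above walks the extent_state tree from start, claiming consecutive delalloc ranges until it hits a gap, a range without the delalloc bit, or roughly max_bytes of data; ranges already locked by another writer make it drop the tree lock, wait, and retry. The small model below shows only the termination rules over a sorted array; the struct and helper names are invented for illustration, and the wait-and-retry path is omitted.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define EXTENT_DELALLOC 0x1
#define EXTENT_LOCKED   0x2

struct state { uint64_t start, end; int bits; }; /* illustrative, not extent_state */

/*
 * Claim the contiguous delalloc run that begins exactly at 'start'.
 * Stops at the first gap, the first non-delalloc range, or once about
 * max_bytes have been covered.  Returns the number of ranges claimed
 * and reports the end of the run through *end_ret.
 */
static uint64_t find_delalloc_run(struct state *s, size_t n, uint64_t start,
                                  uint64_t *end_ret, uint64_t max_bytes)
{
        uint64_t cur = start, found = 0, total = 0;

        for (size_t i = 0; i < n; i++) {
                if (s[i].start != cur)                  /* gap: run is over */
                        break;
                if (!(s[i].bits & EXTENT_DELALLOC))     /* not delayed allocation */
                        break;
                s[i].bits |= EXTENT_LOCKED;             /* claim it for this writeback */
                found++;
                *end_ret = s[i].end;
                cur = s[i].end + 1;
                total += s[i].end - s[i].start + 1;
                if (total >= max_bytes)                 /* cap how much one pass takes */
                        break;
        }
        return found;
}

int main(void)
{
        struct state s[] = {
                { 0,     4095,  EXTENT_DELALLOC },
                { 4096,  8191,  EXTENT_DELALLOC },
                { 12288, 16383, EXTENT_DELALLOC },      /* gap before this one */
        };
        uint64_t end = 0;
        uint64_t n = find_delalloc_run(s, 3, 0, &end, 128 * 1024 * 1024);

        printf("claimed %llu ranges, run ends at byte %llu\n",
               (unsigned long long)n, (unsigned long long)end);
        return 0;
}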
@@ -1285,6 +1334,7 @@ int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
         if (!PagePrivate(page)) {
                 SetPagePrivate(page);
                 set_page_private(page, 1);
+                WARN_ON(!page->mapping->a_ops->invalidatepage);
                 page_cache_get(page);
         }
@@ -1384,7 +1434,10 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
         size_t blocksize;
         loff_t i_size = i_size_read(inode);
         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
+        u64 nr_delalloc;
+        u64 delalloc_end;
 
+        WARN_ON(!PageLocked(page));
         if (page->index > end_index) {
                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
                 unlock_page(page);
@@ -1400,11 +1453,34 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
         if (!PagePrivate(page)) {
                 SetPagePrivate(page);
                 set_page_private(page, 1);
+                WARN_ON(!page->mapping->a_ops->invalidatepage);
                 page_cache_get(page);
         }
-        end = page_end;
         lock_extent(tree, start, page_end, GFP_NOFS);
+        nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
+                                               &delalloc_end,
+                                               128 * 1024 * 1024);
+        if (nr_delalloc) {
+                tree->fill_delalloc(inode, start, delalloc_end);
+                if (delalloc_end >= page_end + 1) {
+                        clear_extent_bit(tree, page_end + 1, delalloc_end,
+                                         EXTENT_LOCKED | EXTENT_DELALLOC,
+                                         1, 0, GFP_NOFS);
+                }
+                clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
+                                 0, 0, GFP_NOFS);
+                if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
+                        printk("found delalloc bits after clear extent_bit\n");
+                }
+        } else if (test_range_bit(tree, start, page_end,
+                                  EXTENT_DELALLOC, 0)) {
+                printk("found delalloc bits after find_delalloc_range returns 0\n");
+        }
+        end = page_end;
+        if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
+                printk("found delalloc bits after lock_extent\n");
+        }
         if (last_byte <= start) {
                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
@@ -1419,7 +1495,7 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
                         clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
                         break;
                 }
-                em = get_extent(inode, page, page_offset, cur, end, 1);
+                em = get_extent(inode, page, page_offset, cur, end, 0);
                 if (IS_ERR(em) || !em) {
                         SetPageError(page);
                         break;
@@ -1507,6 +1583,7 @@ int extent_commit_write(struct extent_map_tree *tree,
         if (!PagePrivate(page)) {
                 SetPagePrivate(page);
                 set_page_private(page, 1);
+                WARN_ON(!page->mapping->a_ops->invalidatepage);
                 page_cache_get(page);
         }
@@ -1543,6 +1620,7 @@ int extent_prepare_write(struct extent_map_tree *tree,
         if (!PagePrivate(page)) {
                 SetPagePrivate(page);
                 set_page_private(page, 1);
+                WARN_ON(!page->mapping->a_ops->invalidatepage);
                 page_cache_get(page);
         }
         block_start = (page_start + from) & ~((u64)blocksize - 1);
@@ -1628,29 +1706,28 @@ int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
         u64 start = page->index << PAGE_CACHE_SHIFT;
         u64 end = start + PAGE_CACHE_SIZE - 1;
         u64 orig_start = start;
+        int ret = 1;
 
         while (start <= end) {
                 em = lookup_extent_mapping(tree, start, end);
                 if (!em || IS_ERR(em))
                         break;
-                if (test_range_bit(tree, em->start, em->end, EXTENT_LOCKED, 0)) {
-                        printk("range still locked %Lu %Lu\n",
-                               em->start, em->end);
-                        break;
+                if (!test_range_bit(tree, em->start, em->end,
+                                    EXTENT_LOCKED, 0)) {
+                        remove_extent_mapping(tree, em);
+                        /* once for the rb tree */
+                        free_extent_map(em);
                 }
-                remove_extent_mapping(tree, em);
                 start = em->end + 1;
-                /* once for the rb tree */
-                free_extent_map(em);
                 /* once for us */
                 free_extent_map(em);
         }
         WARN_ON(test_range_bit(tree, orig_start, end, EXTENT_WRITEBACK, 0));
-        clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE, 1, 1, GFP_NOFS);
-        return 1;
+        if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
+                ret = 0;
+        else
+                clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
+                                 1, 1, GFP_NOFS);
+        return ret;
 }
 EXPORT_SYMBOL(try_release_extent_mapping);
fs/btrfs/extent_map.h
@@ -11,6 +11,7 @@ struct extent_map_tree {
         struct rb_root state;
         struct address_space *mapping;
         rwlock_t lock;
+        int (*fill_delalloc)(struct inode *inode, u64 start, u64 end);
 };
 
 /* note, this must start with the same fields as fs/extent_map.c:tree_entry */
@@ -74,6 +75,8 @@ int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
                    gfp_t mask);
 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
                      gfp_t mask);
+int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
+                        gfp_t mask);
 int extent_invalidatepage(struct extent_map_tree *tree,
                           struct page *page, unsigned long offset);
 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
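Note (not from the patch): the header now exports set_extent_delalloc() and gives struct extent_map_tree a fill_delalloc callback, which fs/btrfs/inode.c points at run_delalloc_range for regular files. Below is a minimal stand-alone sketch of that wiring, with invented demo_* names standing in for the kernel types.

#include <stdio.h>
#include <stdint.h>

struct demo_inode { int dummy; };

struct demo_extent_tree {
        /* mirrors: int (*fill_delalloc)(struct inode *inode, u64 start, u64 end); */
        int (*fill_delalloc)(struct demo_inode *inode, uint64_t start, uint64_t end);
};

static int my_fill_delalloc(struct demo_inode *inode, uint64_t start, uint64_t end)
{
        (void)inode;
        printf("allocate on disk for delalloc bytes %llu..%llu\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}

int main(void)
{
        struct demo_inode inode = { 0 };
        struct demo_extent_tree tree = { 0 };

        /* the filesystem opts in per tree, as btrfs does for regular files */
        tree.fill_delalloc = my_fill_delalloc;

        /* generic writepage code calls back once it has found a delalloc run */
        if (tree.fill_delalloc)
                tree.fill_delalloc(&inode, 0, 128 * 1024 - 1);
        return 0;
}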
fs/btrfs/file.c
@@ -143,7 +143,6 @@ static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
         struct inode *inode = file->f_path.dentry->d_inode;
         struct extent_map *em;
         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-        struct btrfs_key ins;
         u64 hint_block;
         u64 num_blocks;
         u64 start_pos;
@@ -162,6 +161,7 @@ static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
                 inode->i_blkbits;
         end_of_last_block = start_pos + (num_blocks << inode->i_blkbits) - 1;
+        lock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
         mutex_lock(&root->fs_info->fs_mutex);
         trans = btrfs_start_transaction(root, 1);
         if (!trans) {
@@ -179,16 +179,6 @@ static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 
         /* FIXME...EIEIO, ENOSPC and more */
 
-        /* step one, delete the existing extents in this range */
-        /* FIXME blocksize != pagesize */
-        if (start_pos < inode->i_size) {
-                err = btrfs_drop_extents(trans, root, inode, start_pos,
-                         (pos + write_bytes + root->blocksize - 1) &
-                         ~((u64)root->blocksize - 1), &hint_block);
-                if (err)
-                        goto failed;
-        }
-
         /* insert any holes we need to create */
         if (inode->i_size < start_pos) {
                 u64 last_pos_in_file;
@@ -213,29 +203,28 @@ static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
          */
         if (isize >= PAGE_CACHE_SIZE || pos + write_bytes < inode->i_size ||
             pos + write_bytes - start_pos > BTRFS_MAX_INLINE_DATA_SIZE(root)) {
-                err = btrfs_alloc_extent(trans, root, inode->i_ino,
-                                         num_blocks, 0, hint_block, (u64)-1,
-                                         &ins, 1);
-                BUG_ON(err);
-                err = btrfs_insert_file_extent(trans, root, inode->i_ino,
-                                               start_pos, ins.objectid,
-                                               ins.offset, ins.offset);
-                BUG_ON(err);
-                em->start = start_pos;
-                em->end = end_of_last_block;
-                em->block_start = ins.objectid << inode->i_blkbits;
-                em->block_end = em->block_start +
-                        (ins.offset << inode->i_blkbits) - 1;
-                set_extent_dirty(em_tree, start_pos, end_of_last_block,
-                                 GFP_NOFS);
-                err = add_extent_mapping(em_tree, em);
+                u64 last_end;
                 for (i = 0; i < num_pages; i++) {
                         struct page *p = pages[i];
                         SetPageUptodate(p);
-                        __set_page_dirty_nobuffers(p);
+                        set_page_dirty(p);
                 }
+                last_end = pages[num_pages - 1]->index << PAGE_CACHE_SHIFT;
+                last_end += PAGE_CACHE_SIZE - 1;
+                set_extent_delalloc(em_tree, start_pos, end_of_last_block,
+                                    GFP_NOFS);
         } else {
                 struct page *p = pages[0];
+                /* step one, delete the existing extents in this range */
+                /* FIXME blocksize != pagesize */
+                if (start_pos < inode->i_size) {
+                        err = btrfs_drop_extents(trans, root, inode, start_pos,
+                                 (pos + write_bytes + root->blocksize - 1) &
+                                 ~((u64)root->blocksize - 1), &hint_block);
+                        if (err)
+                                goto failed;
+                }
                 err = insert_inline_extent(trans, root, inode, start_pos,
                                            end_pos - start_pos, p, 0);
                 BUG_ON(err);
@@ -253,6 +242,7 @@ static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
         err = btrfs_end_transaction(trans, root);
 out_unlock:
         mutex_unlock(&root->fs_info->fs_mutex);
+        unlock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
         free_extent_map(em);
         return err;
 }
@@ -531,7 +521,7 @@ static int prepare_pages(struct btrfs_root *root,
         u64 num_blocks;
         u64 start_pos;
 
-        start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
+        start_pos = pos & ~((u64)root->blocksize - 1);
         num_blocks = (write_bytes + pos - start_pos +
                       root->blocksize - 1) >> inode->i_blkbits;
@@ -548,8 +538,10 @@ static int prepare_pages(struct btrfs_root *root,
                 if (!PagePrivate(pages[i])) {
                         SetPagePrivate(pages[i]);
                         set_page_private(pages[i], 1);
+                        WARN_ON(!pages[i]->mapping->a_ops->invalidatepage);
                         page_cache_get(pages[i]);
                 }
+                WARN_ON(!PageLocked(pages[i]));
         }
         return 0;
 }
fs/btrfs/inode.c
@@ -70,6 +70,40 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
         [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
 };
 
+static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
+{
+        struct btrfs_root *root = BTRFS_I(inode)->root;
+        struct btrfs_trans_handle *trans;
+        struct btrfs_key ins;
+        u64 alloc_hint = 0;
+        u64 num_blocks;
+        int ret;
+        u64 blocksize = 1 << inode->i_blkbits;
+
+        mutex_lock(&root->fs_info->fs_mutex);
+        trans = btrfs_start_transaction(root, 1);
+        btrfs_set_trans_block_group(trans, inode);
+        BUG_ON(!trans);
+        num_blocks = (end - start + blocksize) & ~(blocksize - 1);
+        ret = btrfs_drop_extents(trans, root, inode,
+                                 start, start + num_blocks, &alloc_hint);
+        num_blocks = num_blocks >> inode->i_blkbits;
+        ret = btrfs_alloc_extent(trans, root, inode->i_ino, num_blocks, 0,
+                                 alloc_hint, (u64)-1, &ins, 1);
+        if (ret) {
+                WARN_ON(1);
+                goto out;
+        }
+        ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
+                                       start, ins.objectid, ins.offset,
+                                       ins.offset);
+out:
+        btrfs_end_transaction(trans, root);
+        mutex_unlock(&root->fs_info->fs_mutex);
+        return ret;
+}
+
 void btrfs_read_locked_inode(struct inode *inode)
 {
         struct btrfs_path *path;
@@ -121,8 +155,7 @@ void btrfs_read_locked_inode(struct inode *inode)
         switch (inode->i_mode & S_IFMT) {
         case S_IFREG:
                 inode->i_mapping->a_ops = &btrfs_aops;
-                extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
-                                     inode->i_mapping, GFP_NOFS);
+                BTRFS_I(inode)->extent_tree.fill_delalloc = run_delalloc_range;
                 inode->i_fop = &btrfs_file_operations;
                 inode->i_op = &btrfs_file_inode_operations;
                 break;
@@ -450,11 +483,17 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
                         BUG_ON(ret);
                 }
                 if (item_end < inode->i_size) {
-                        if (found_type) {
-                                btrfs_set_key_type(&key, found_type - 1);
-                                continue;
+                        if (found_type == BTRFS_DIR_ITEM_KEY) {
+                                found_type = BTRFS_INODE_ITEM_KEY;
+                        } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
+                                found_type = BTRFS_CSUM_ITEM_KEY;
+                        } else if (found_type) {
+                                found_type--;
+                        } else {
+                                break;
                         }
-                        break;
+                        btrfs_set_key_type(&key, found_type - 1);
+                        continue;
                 }
                 if (btrfs_disk_key_offset(found_key) >= inode->i_size)
                         del_item = 1;
@@ -514,47 +553,34 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
         return ret;
 }
 
-static int btrfs_cow_one_page(struct btrfs_trans_handle *trans,
-                              struct inode *inode, struct page *page,
+static int btrfs_cow_one_page(struct inode *inode, struct page *page,
                               size_t zero_start)
 {
         char *kaddr;
         int ret = 0;
-        struct btrfs_root *root = BTRFS_I(inode)->root;
-        u64 alloc_hint = 0;
+        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
         u64 page_start = page->index << PAGE_CACHE_SHIFT;
-        struct btrfs_key ins;
+        u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
 
         if (!PagePrivate(page)) {
                 SetPagePrivate(page);
                 set_page_private(page, 1);
+                WARN_ON(!page->mapping->a_ops->invalidatepage);
                 page_cache_get(page);
         }
 
-        btrfs_set_trans_block_group(trans, inode);
-
-        ret = btrfs_drop_extents(trans, root, inode,
-                                 page_start, page_start + PAGE_CACHE_SIZE,
-                                 &alloc_hint);
-        if (ret)
-                goto out;
-        ret = btrfs_alloc_extent(trans, root, inode->i_ino, 1, 0,
-                                 alloc_hint, (u64)-1, &ins, 1);
-        if (ret)
-                goto out;
-        ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
-                                       page_start, ins.objectid, 1, 1);
-        if (ret)
-                goto out;
-        SetPageChecked(page);
-        kaddr = kmap(page);
+        lock_extent(em_tree, page_start, page_end, GFP_NOFS);
+        set_extent_delalloc(&BTRFS_I(inode)->extent_tree, page_start,
+                            page_end, GFP_NOFS);
         if (zero_start != PAGE_CACHE_SIZE) {
+                kaddr = kmap(page);
                 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
                 flush_dcache_page(page);
+                kunmap(page);
         }
-        kunmap(page);
         set_page_dirty(page);
-out:
+        unlock_extent(em_tree, page_start, page_end, GFP_NOFS);
         return ret;
 }
@@ -565,8 +591,6 @@ static int btrfs_cow_one_page(struct btrfs_trans_handle *trans,
 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
 {
         struct inode *inode = mapping->host;
-        struct btrfs_root *root = BTRFS_I(inode)->root;
-        struct btrfs_trans_handle *trans;
         unsigned blocksize = 1 << inode->i_blkbits;
         pgoff_t index = from >> PAGE_CACHE_SHIFT;
         unsigned offset = from & (PAGE_CACHE_SIZE - 1);
@@ -591,21 +615,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
         }
         page_start = page->index << PAGE_CACHE_SHIFT;
 
-        mutex_lock(&root->fs_info->fs_mutex);
-        trans = btrfs_start_transaction(root, 1);
-        ret = btrfs_cow_one_page(trans, inode, page, offset);
-        if (!ret) {
-                char *kaddr = kmap(page);
-                ret = btrfs_csum_file_block(trans, root, inode->i_ino,
-                                            page_start, kaddr,
-                                            PAGE_CACHE_SIZE);
-                kunmap(page);
-        }
-        set_extent_dirty(&BTRFS_I(inode)->extent_tree, page_start,
-                         page_start + PAGE_CACHE_SIZE - 1, GFP_NOFS);
-        set_page_dirty(page);
-        btrfs_end_transaction(trans, root);
-        mutex_unlock(&root->fs_info->fs_mutex);
+        ret = btrfs_cow_one_page(inode, page, offset);
 
         unlock_page(page);
         page_cache_release(page);
@@ -757,6 +767,8 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
         struct btrfs_iget_args *args = p;
         inode->i_ino = args->ino;
         BTRFS_I(inode)->root = args->root;
+        extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
+                             inode->i_mapping, GFP_NOFS);
         return 0;
 }
@@ -968,7 +980,10 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
         if (!inode)
                 return ERR_PTR(-ENOMEM);
 
+        extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
+                             inode->i_mapping, GFP_NOFS);
+
         BTRFS_I(inode)->root = root;
         if (mode & S_IFDIR)
                 owner = 0;
         else
@@ -1128,6 +1143,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                 inode->i_op = &btrfs_file_inode_operations;
                 extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
                                      inode->i_mapping, GFP_NOFS);
+                BTRFS_I(inode)->extent_tree.fill_delalloc = run_delalloc_range;
         }
         dir->i_sb->s_dirt = 1;
         btrfs_update_inode_block_group(trans, inode);
@@ -1344,9 +1360,11 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
                 extent_end = extent_start +
                         (btrfs_file_extent_num_blocks(item) << inode->i_blkbits);
                 err = 0;
-                if (start < extent_start || start > extent_end) {
+                if (start < extent_start || start >= extent_end) {
                         em->start = start;
                         if (start < extent_start) {
+                                if (end < extent_start)
+                                        goto not_found;
                                 em->end = extent_end - 1;
                         } else {
                                 em->end = end;
@@ -1375,9 +1393,11 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
                 size = btrfs_file_extent_inline_len(leaf->items +
                                                     path->slots[0]);
                 extent_end = extent_start + size;
-                if (start < extent_start || start > extent_end) {
+                if (start < extent_start || start >= extent_end) {
                         em->start = start;
                         if (start < extent_start) {
+                                if (end < extent_start)
+                                        goto not_found;
                                 em->end = extent_end - 1;
                         } else {
                                 em->end = end;
@@ -1412,8 +1432,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 insert:
         btrfs_release_path(root, path);
         if (em->start > start || em->end < start) {
-                printk("bad extent! %Lu %Lu start %Lu end %Lu\n", em->start, em->end, start, end);
-                WARN_ON(1);
+                printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->end, start, end);
                 err = -EIO;
                 goto out;
         }
@@ -1648,6 +1667,13 @@ int btrfs_readpage(struct file *file, struct page *page)
 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
 {
         struct extent_map_tree *tree;
+
+        if (current->flags & PF_MEMALLOC) {
+                redirty_page_for_writepage(wbc, page);
+                unlock_page(page);
+                return 0;
+        }
         tree = &BTRFS_I(page->mapping->host)->extent_tree;
         return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
 }
@@ -1698,8 +1724,6 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
 {
         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
-        struct btrfs_root *root = BTRFS_I(inode)->root;
-        struct btrfs_trans_handle *trans;
         unsigned long end;
         loff_t size;
         int ret = -EINVAL;
@@ -1722,15 +1746,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
         else
                 end = PAGE_CACHE_SIZE;
 
-        mutex_lock(&root->fs_info->fs_mutex);
-        trans = btrfs_start_transaction(root, 1);
-        ret = btrfs_cow_one_page(trans, inode, page, end);
-        btrfs_end_transaction(trans, root);
-        mutex_unlock(&root->fs_info->fs_mutex);
-        set_extent_dirty(&BTRFS_I(inode)->extent_tree, page_start,
-                         page_start + PAGE_CACHE_SIZE - 1, GFP_NOFS);
-        set_page_dirty(page);
+        ret = btrfs_cow_one_page(inode, page, end);
 
 out_unlock:
         unlock_page(page);
@@ -2264,6 +2280,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                 inode->i_op = &btrfs_file_inode_operations;
                 extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
                                      inode->i_mapping, GFP_NOFS);
+                BTRFS_I(inode)->extent_tree.fill_delalloc = run_delalloc_range;
         }
         dir->i_sb->s_dirt = 1;
         btrfs_update_inode_block_group(trans, inode);
fs/btrfs/super.c
@@ -83,6 +83,7 @@ static int btrfs_fill_super(struct super_block * sb, void * data, int silent)
         bi->location.offset = 0;
         bi->location.flags = 0;
         bi->root = tree_root;
+        btrfs_set_key_type(&bi->location, BTRFS_INODE_ITEM_KEY);
 
         if (!inode) {