Commit e597d4aa authored Dec 04, 2002 by Nathan Scott
[XFS] Cleanup after initially investigating unwritten extents.
SGI Modid: 2.5.x-xfs:slinx:134059a
parent 44e70659
Showing 9 changed files with 159 additions and 218 deletions (+159 −218)
fs/xfs/linux/xfs_aops.c      +10 −11
fs/xfs/linux/xfs_ioctl.c     +49 −34
fs/xfs/linux/xfs_iomap.c     +36 −31
fs/xfs/linux/xfs_lrw.c       +4  −10
fs/xfs/pagebuf/page_buf.c    +1  −50
fs/xfs/xfs_bmap.c            +56 −77
fs/xfs/xfs_bmap_btree.h      +1  −1
fs/xfs/xfs_buf.h             +2  −2
fs/xfs/xfs_buf_item.c        +0  −2
fs/xfs/linux/xfs_aops.c
@@ -101,7 +101,6 @@ map_buffer_at_offset(
 	ASSERT(!(mp->pbm_flags & PBMF_HOLE));
 	ASSERT(!(mp->pbm_flags & PBMF_DELAY));
-	ASSERT(!(mp->pbm_flags & PBMF_UNWRITTEN));
 	ASSERT(mp->pbm_bn != PAGE_BUF_DADDR_NULL);

 	delta = page->index;
@@ -348,14 +347,14 @@ cluster_write(
  * page ready for freeing it's buffers. When called with startio set then
  * we are coming from writepage.
  *
- * When called with startio e.g. from
- * write page it is important that we write WHOLE page if possible. The
- * bh->b_state's can not know of any of the blocks or which block for
- * that matter are dirty due to map writes, and therefore bh uptodate is
- * only vaild if the page i itself isn't completely uptodate.  Some layers
- * may clear the page dirty flag prior to calling write page under the
- * assumption the entire page will be written out, by not writing out the
- * whole page the page can be reused before all vaild dirty data is
+ * When called with startio set it is important that we write the WHOLE
+ * page if possible.
+ * The bh->b_state's cannot know if any of the blocks or which block for
+ * that matter are dirty due to mmap writes, and therefore bh uptodate is
+ * only vaild if the page itself isn't completely uptodate.  Some layers
+ * may clear the page dirty flag prior to calling write page, under the
+ * assumption the entire page will be written out; by not writing out the
+ * whole page the page can be reused before all valid dirty data is
  * written out. Note: in the case of a page that has been dirty'd by
  * mapwrite and but partially setup by block_prepare_write the
  * bh->b_states's will not agree and only ones setup by BPW/BCW will have

fs/xfs/linux/xfs_ioctl.c
@@ -521,39 +521,46 @@ xfs_attrmulti_by_handle(
  * their own functions. Functions are defined after their use
  * so gcc doesn't get fancy and inline them with -03 */
-int xfs_ioc_space(
+STATIC int
+xfs_ioc_space(
 	bhv_desc_t		*bdp,
 	vnode_t			*vp,
 	struct file		*filp,
 	unsigned int		cmd,
 	unsigned long		arg);
-int xfs_ioc_bulkstat(
+STATIC int
+xfs_ioc_bulkstat(
 	xfs_mount_t		*mp,
 	unsigned int		cmd,
 	unsigned long		arg);
-int xfs_ioc_fsgeometry_v1(
+STATIC int
+xfs_ioc_fsgeometry_v1(
 	xfs_mount_t		*mp,
 	unsigned long		arg);
-int xfs_ioc_fsgeometry(
+STATIC int
+xfs_ioc_fsgeometry(
 	xfs_mount_t		*mp,
 	unsigned long		arg);
-int xfs_ioc_xattr(
+STATIC int
+xfs_ioc_xattr(
 	vnode_t			*vp,
 	struct file		*filp,
 	unsigned int		cmd,
 	unsigned long		arg);
-int xfs_ioc_getbmap(
+STATIC int
+xfs_ioc_getbmap(
 	bhv_desc_t		*bdp,
 	struct file		*filp,
 	unsigned int		cmd,
 	unsigned long		arg);
-int xfs_ioc_getbmapx(
+STATIC int
+xfs_ioc_getbmapx(
 	bhv_desc_t		*bdp,
 	unsigned long		arg);
@@ -800,7 +807,8 @@ xfs_ioctl(
 	}
 }

-int xfs_ioc_space(
+STATIC int
+xfs_ioc_space(
 	bhv_desc_t		*bdp,
 	vnode_t			*vp,
 	struct file		*filp,
@@ -833,7 +841,8 @@ int xfs_ioc_space(
 	return -error;
 }

-int xfs_ioc_bulkstat(
+STATIC int
+xfs_ioc_bulkstat(
 	xfs_mount_t		*mp,
 	unsigned int		cmd,
 	unsigned long		arg)
@@ -843,6 +852,7 @@ int xfs_ioc_bulkstat(
 	xfs_ino_t		inlast;	/* last inode number */
 	int			done;
 	int			error;
 	/* done = 1 if there are more stats to get and if bulkstat */
 	/* should be called again (unused here, but used in dmapi) */
@@ -901,7 +911,8 @@ int xfs_ioc_bulkstat(
 	return 0;
 }

-int xfs_ioc_fsgeometry_v1(
+STATIC int
+xfs_ioc_fsgeometry_v1(
 	xfs_mount_t		*mp,
 	unsigned long		arg)
 {
@@ -917,7 +928,8 @@ int xfs_ioc_fsgeometry_v1(
 	return 0;
 }

-int xfs_ioc_fsgeometry(
+STATIC int
+xfs_ioc_fsgeometry(
 	xfs_mount_t		*mp,
 	unsigned long		arg)
 {
@@ -933,7 +945,8 @@ int xfs_ioc_fsgeometry(
 	return 0;
 }

-int xfs_ioc_xattr(
+STATIC int
+xfs_ioc_xattr(
 	vnode_t			*vp,
 	struct file		*filp,
 	unsigned int		cmd,
@@ -998,7 +1011,8 @@ int xfs_ioc_xattr(
 	}
 }

-int xfs_ioc_getbmap(
+STATIC int
+xfs_ioc_getbmap(
 	bhv_desc_t		*bdp,
 	struct file		*filp,
 	unsigned int		cmd,
@@ -1027,7 +1041,8 @@ int xfs_ioc_getbmap(
 	return 0;
 }

-int xfs_ioc_getbmapx(
+STATIC int
+xfs_ioc_getbmapx(
 	bhv_desc_t		*bdp,
 	unsigned long		arg)
 {

fs/xfs/linux/xfs_iomap.c
@@ -56,7 +56,8 @@ STATIC int _xfs_imap_to_bmap(xfs_iocore_t *, xfs_off_t, xfs_bmbt_irec_t *,
 int
 xfs_strategy(xfs_inode_t *ip,
 	xfs_off_t	offset,
 	ssize_t		count,
 	int		flags,
@@ -74,13 +75,13 @@ xfs_strategy(xfs_inode_t *ip,
 	xfs_bmap_free_t	free_list;
 	xfs_filblks_t	count_fsb;
 	int		committed, i, loops, nimaps;
-	int		is_xfs = 1;	/* This will be a variable at some point */
+	int		is_xfs;
 	xfs_bmbt_irec_t	imap[XFS_MAX_RW_NBMAPS];
 	xfs_trans_t	*tp;

-	io = &ip->i_iocore;
 	mp = ip->i_mount;
-	/* is_xfs = IO_IS_XFS(io); */
+	io = &ip->i_iocore;
+	is_xfs = IO_IS_XFS(io);

 	ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
 	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
 	       ((io->io_flags & XFS_IOCORE_RT) != 0));
@@ -238,14 +239,19 @@ xfs_strategy(xfs_inode_t *ip,
 	 */
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 	for (i = 0; i < nimaps; i++) {
 		int	maps;
-		if (offset_fsb >= imap[i].br_startoff &&
-		    (offset_fsb < (imap[i].br_startoff + imap[i].br_blockcount))) {
+		if ((offset_fsb >= imap[i].br_startoff) &&
+		    (offset_fsb < (imap[i].br_startoff +
+				   imap[i].br_blockcount))) {
 			XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
 			maps = min(nimaps, *npbmaps);
-			*npbmaps = _xfs_imap_to_bmap(io, offset, &imap[i],
-					pbmapp, maps, *npbmaps);
+			*npbmaps = _xfs_imap_to_bmap(io, offset,
+					&imap[i], pbmapp, maps,
+					*npbmaps);
 			XFS_STATS_INC(xfsstats.xs_xstrat_quick);
 			return 0;
 		}
@@ -260,9 +266,11 @@ xfs_strategy(xfs_inode_t *ip,
 	nimaps--;		/* Index of last entry */
 	ASSERT(nimaps >= 0);
 	ASSERT(offset_fsb >= imap[nimaps].br_startoff + imap[nimaps].br_blockcount);
 	ASSERT(count_fsb);
 	offset_fsb = imap[nimaps].br_startoff + imap[nimaps].br_blockcount;
 	map_start_fsb = offset_fsb;
 	XFS_STATS_INC(xfsstats.xs_xstrat_split);
 	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
@@ -397,10 +405,9 @@ _xfs_imap_to_bmap(
 	if (io->io_new_size > nisize)
 		nisize = io->io_new_size;

-	for (im = 0, pbm = 0; im < imaps && pbm < pbmaps;
-			im++, pbmapp++, imap++, pbm++) {
+	for (im = pbm = 0; im < imaps && pbm < pbmaps;
+			im++, pbmapp++, imap++, pbm++) {
 		pbmapp->pbm_target = io->io_flags & XFS_IOCORE_RT ?
 				mp->m_rtdev_targp : mp->m_ddev_targp;
 		pbmapp->pbm_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
 		pbmapp->pbm_delta = offset - pbmapp->pbm_offset;
 		pbmapp->pbm_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
@@ -415,9 +422,10 @@ _xfs_imap_to_bmap(
 			pbmapp->pbm_flags = PBMF_DELAY;
 		} else {
 			pbmapp->pbm_bn = XFS_FSB_TO_DB_IO(io, start_block);
-			if (imap->br_state == XFS_EXT_UNWRITTEN)
+			if (ISUNWRITTEN(imap)) {
 				pbmapp->pbm_flags |= PBMF_UNWRITTEN;
+			}
 		}

 		if ((pbmapp->pbm_offset + pbmapp->pbm_bsize) >= nisize) {
 			pbmapp->pbm_flags |= PBMF_EOF;
@@ -425,7 +433,7 @@ _xfs_imap_to_bmap(
 		offset += pbmapp->pbm_bsize - pbmapp->pbm_delta;
 	}

-	return (pbm);	/* Return the number filled */
+	return pbm;	/* Return the number filled */
 }

 STATIC int
@@ -475,7 +483,7 @@ xfs_iomap_read(
  * 2 must allocate.
  * There are 3 cases when we allocate:
  *	delay allocation (doesn't really allocate or use transactions)
- *	direct allocation (no previous delay allocation
+ *	direct allocation (no previous delay allocation)
  *	convert delay to real allocations
  */
@@ -716,17 +724,14 @@ xfs_iomap_write_direct(
 	if (io->io_new_size > isize)
 		isize = io->io_new_size;

-	if ((offset + count) > isize) {
-		aeof = 1;
-	} else {
-		aeof = 0;
-	}
+	aeof = ((offset + count) > isize) ? 1 : 0;

 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
 	count_fsb = last_fsb - offset_fsb;
 	if (found && (pbmapp->pbm_flags & PBMF_HOLE)) {
 		xfs_fileoff_t	map_last_fsb;

 		map_last_fsb = XFS_B_TO_FSB(mp,
 			(pbmapp->pbm_bsize + pbmapp->pbm_offset));
@@ -744,13 +749,13 @@ xfs_iomap_write_direct(
 	if (!found && mp->m_dalign && (isize >= 524288) && aeof) {
 		int		eof;
 		xfs_fileoff_t	new_last_fsb;

 		new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
 		printk("xfs_iomap_write_direct: about to XFS_BMAP_EOF %Ld\n",
 			new_last_fsb);
 		error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
-		if (error) {
+		if (error)
 			goto error_out;
-		}
 		if (eof)
 			last_fsb = new_last_fsb;
 	}
@@ -867,13 +872,14 @@ xfs_iomap_write_direct(
 	maps = min(nimaps, maps);
 	*npbmaps = _xfs_imap_to_bmap(io, offset, &imap[0], pbmapp, maps,
 								*npbmaps);
 	if (*npbmaps) {
 		/*
 		 * this is new since xfs_iomap_read
 		 * didn't find it.
 		 */
 		if (*npbmaps != 1) {
-			printk("NEED MORE WORK FOR MULTIPLE BMAPS (which are new)\n");
+			/* NEED MORE WORK FOR MULTIPLE BMAPS (which are new) */
+			BUG();
 		}
 	}
 	goto out;
@@ -889,4 +895,3 @@ xfs_iomap_write_direct(
 out:	/* Just return error and any tracing at end of routine */
 	return XFS_ERROR(error);
 }

fs/xfs/linux/xfs_lrw.c
@@ -321,10 +321,9 @@ xfs_zero_last_block(
 		return 0;
 	}
 	/*
-	 * Get a pagebuf for the last block, zero the part beyond the
-	 * EOF, and write it out sync.  We need to drop the ilock
-	 * while we do this so we don't deadlock when the buffer cache
-	 * calls back to us.
+	 * Zero the part of the last block beyond the EOF, and write it
+	 * out sync.  We need to drop the ilock while we do this so we
+	 * don't deadlock when the buffer cache calls back to us.
 	 */
 	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
 	loff = XFS_FSB_TO_B(mp, last_fsb);
@@ -401,7 +400,6 @@ xfs_zero_eof(
 	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
 	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
 	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
 	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
 	if (last_fsb == end_zero_fsb) {
 		/*
@@ -414,10 +412,6 @@ xfs_zero_eof(
 	ASSERT(start_zero_fsb <= end_zero_fsb);
 	prev_zero_fsb = NULLFILEOFF;
 	prev_zero_count = 0;
-	/*
-	 * Maybe change this loop to do the bmapi call and
-	 * loop while we split the mappings into pagebufs?
-	 */
 	while (start_zero_fsb <= end_zero_fsb) {
 		nimaps = 1;
 		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
@@ -792,7 +786,6 @@ xfs_write(
 	return (ret);
 }

 /*
  * All xfs metadata buffers except log state machine buffers
  * get this attached as their b_bdstrat callback function.
@@ -822,6 +815,7 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
 		return (xfs_bioerror(bp));
 	}
 }

 /*
  * Wrapper around bdstrat so that we can stop data
  * from going to disk in case we are shutting down the filesystem.

fs/xfs/pagebuf/page_buf.c
@@ -113,45 +113,6 @@ pb_trace_func(
 }
 #endif	/* PAGEBUF_TRACE */

-#ifdef PAGEBUF_TRACKING
-#define MAX_PB 10000
-page_buf_t	*pb_array[MAX_PB];
-EXPORT_SYMBOL(pb_array);
-
-void
-pb_tracking_get(page_buf_t *pb)
-{
-	int	i;
-
-	for (i = 0; (pb_array[i] != 0) && (i < MAX_PB); i++) { }
-	if (i == MAX_PB)
-		printk("pb 0x%p not recorded in pb_array\n", pb);
-	else {
-		//printk("pb_get 0x%p in pb_array[%d]\n", pb, i);
-		pb_array[i] = pb;
-	}
-}
-
-void
-pb_tracking_free(page_buf_t *pb)
-{
-	int	i;
-
-	for (i = 0; (pb_array[i] != pb) && (i < MAX_PB); i++) { }
-	if (i < MAX_PB) {
-		//printk("pb_free 0x%p from pb_array[%d]\n", pb, i);
-		pb_array[i] = NULL;
-	} else
-		printk("Freed unmonitored pagebuf 0x%p\n", pb);
-}
-#else
-#define pb_tracking_get(pb)	do { } while (0)
-#define pb_tracking_free(pb)	do { } while (0)
-#endif	/* PAGEBUF_TRACKING */
-
 /*
  * File wide globals
  */
@@ -314,8 +275,6 @@ _pagebuf_initialize(
 	 */
 	flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);

-	pb_tracking_get(pb);
 	memset(pb, 0, sizeof(page_buf_private_t));
 	atomic_set(&pb->pb_hold, 1);
 	init_MUTEX_LOCKED(&pb->pb_iodonesema);
@@ -444,7 +403,6 @@ _pagebuf_free_object(
 		}
 	}

-	pb_tracking_free(pb);
 	pagebuf_deallocate(pb);
 }
@@ -1743,8 +1701,7 @@ pagebuf_daemon(
 		spin_unlock(&pb_daemon->pb_delwrite_lock);

 		while (!list_empty(&tmp)) {
 			pb = list_entry(tmp.next,
 					page_buf_t, pb_list);
 			list_del_init(&pb->pb_list);
 			pb->pb_flags &= ~PBF_DELWRI;
 			pb->pb_flags |= PBF_WRITE;
@@ -2029,14 +1986,8 @@ pagebuf_init(void)
 	}

 #ifdef PAGEBUF_TRACE
-# if 1
 	pb_trace.buf = (pagebuf_trace_t *)kmalloc(
 			PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t), GFP_KERNEL);
-# else
-	/* Alternatively, for really really long trace bufs */
-	pb_trace.buf = (pagebuf_trace_t *)vmalloc(
-			PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t));
-# endif
 	memset(pb_trace.buf, 0, PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t));
 	pb_trace.start = 0;
 	pb_trace.end = PB_TRACE_BUFSIZE - 1;

fs/xfs/xfs_bmap.c
@@ -5526,14 +5526,13 @@ xfs_getbmap(
 	int			bmapi_flags;	/* flags for xfs_bmapi */
 	__int32_t		oflags;		/* getbmapx bmv_oflags field */

-	ip = XFS_BHVTOI(bdp);
 	vp = BHV_TO_VNODE(bdp);
+	ip = XFS_BHVTOI(bdp);
+	mp = ip->i_mount;

 	whichfork = interface & BMV_IF_ATTRFORK ?
 				XFS_ATTR_FORK : XFS_DATA_FORK;
 	sh_unwritten = (interface & BMV_IF_PREALLOC) != 0;

 	/* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not
 	 * generate a DMAPI read event.  Otherwise, if the DM_EVENT_READ
 	 * bit is set for the file, generate a read event in order
@@ -5575,8 +5574,6 @@ xfs_getbmap(
 	    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
 		return XFS_ERROR(EINVAL);
-	mp = ip->i_mount;
-
 	if (whichfork == XFS_DATA_FORK) {
 		if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) {
 			prealloced = 1;
@@ -5600,18 +5597,15 @@ xfs_getbmap(
 		bmv->bmv_entries = 0;
 		return 0;
 	}
 	nex = bmv->bmv_count - 1;
 	if (nex <= 0)
 		return XFS_ERROR(EINVAL);
 	bmvend = bmv->bmv_offset + bmv->bmv_length;

 	xfs_ilock(ip, XFS_IOLOCK_SHARED);

 	if (whichfork == XFS_DATA_FORK && ip->i_delayed_blks) {
-		/* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
 		VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
 	}
@@ -5629,11 +5623,10 @@ xfs_getbmap(
 	bmapi_flags = XFS_BMAPI_AFLAG(whichfork) |
 			((sh_unwritten) ? 0 : XFS_BMAPI_IGSTATE);

-	subnex = 16;	/* XXXjtk - need a #define? */
-
 	/*
 	 * Allocate enough space to handle "subnex" maps at a time.
 	 */
+	subnex = 16;
 	map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP);

 	bmv->bmv_entries = 0;
@@ -5646,75 +5639,61 @@ xfs_getbmap(
 	nexleft = nex;
 	do {
-		if (nexleft > subnex)
-			nmap = subnex;
-		else
-			nmap = nexleft;
+		nmap = (nexleft > subnex) ? subnex : nexleft;
 		error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
 				  XFS_BB_TO_FSB(mp, bmv->bmv_length),
 				  bmapi_flags, NULL, 0,
 				  map, &nmap, NULL);
-		ASSERT(nmap <= subnex);
 		if (error)
 			goto unlock_and_return;
+		ASSERT(nmap <= subnex);

-		for (error = i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
+		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
 			nexleft--;
-			oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ?
-					BMV_OF_PREALLOC : 0;
+			oflags = 0;
 			out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff);
 			out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
 			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
-			if (prealloced &&
-				map[i].br_startblock == HOLESTARTBLOCK &&
-				out.bmv_offset + out.bmv_length == bmvend) {
+			if (prealloced
+			    && map[i].br_startblock == HOLESTARTBLOCK
+			    && out.bmv_offset + out.bmv_length == bmvend) {
 				/*
 				 * came to hole at end of file
 				 */
 				goto unlock_and_return;
 			} else {
-				if (map[i].br_startblock == HOLESTARTBLOCK)
-					out.bmv_block = -1;
-				else
-					out.bmv_block =
+				out.bmv_block = (map[i].br_startblock == HOLESTARTBLOCK) ? -1 :
 						XFS_FSB_TO_DB(ip, map[i].br_startblock);
-				/* return either a getbmap or a getbmapx structure. */
+				/* return either getbmap/getbmapx structure. */
 				if (interface & BMV_IF_EXTENDED) {
 					struct getbmapx	outx;

 					GETBMAP_CONVERT(out, outx);
 					outx.bmv_oflags = oflags;
 					outx.bmv_unused1 = outx.bmv_unused2 = 0;
 					if (copy_to_user(ap, &outx,
 							sizeof(outx))) {
 						error = XFS_ERROR(EFAULT);
 						goto unlock_and_return;
 					}
 				} else {
 					if (copy_to_user(ap, &out, sizeof(out))) {
 						error = XFS_ERROR(EFAULT);
 						goto unlock_and_return;
 					}
 				}
 				bmv->bmv_offset =
 					out.bmv_offset + out.bmv_length;
-				bmv->bmv_length = MAX( (__int64_t)0,
-					(__int64_t)(bmvend - bmv->bmv_offset) );
+				bmv->bmv_length = MAX((__int64_t)0,
+					(__int64_t)(bmvend - bmv->bmv_offset));
 				bmv->bmv_entries++;
-				ap = (interface & BMV_IF_EXTENDED) ?
-					(void *)((struct getbmapx *)ap + 1) :
-					(void *)((struct getbmap *)ap + 1);
+				if (interface & BMV_IF_EXTENDED)
+					ap = (void *)((struct getbmapx *)ap + 1);
+				else
+					ap = (void *)((struct getbmap *)ap + 1);
 			}
 		}
 	} while (nmap && nexleft && bmv->bmv_length);

fs/xfs/xfs_bmap_btree.h
@@ -167,7 +167,7 @@ xfs_exntfmt_t xfs_extfmt_inode(struct xfs_inode *ip);
 	 (XFS_SB_VERSION_HASEXTFLGBIT(&((x)->i_mount->m_sb)) ? \
 		XFS_EXTFMT_HASSTATE : XFS_EXTFMT_NOSTATE)
 #endif
-#define ISUNWRITTEN(x)	((x) == XFS_EXT_UNWRITTEN)
+#define ISUNWRITTEN(x)	((x)->br_state == XFS_EXT_UNWRITTEN)

 /*
  * Incore version of above.

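The ISUNWRITTEN() change above shifts the macro from taking a raw extent state to taking a pointer to the whole extent record, which is how the new xfs_iomap.c call site (if (ISUNWRITTEN(imap))) uses it. A minimal sketch of the difference, using locally declared stand-in types rather than the real XFS headers:

/* Illustration only -- hypothetical stand-in types, not the kernel's definitions. */
typedef enum { XFS_EXT_NORM, XFS_EXT_UNWRITTEN } xfs_exntst_t;
typedef struct xfs_bmbt_irec {
	xfs_exntst_t	br_state;	/* extent state */
} xfs_bmbt_irec_t;

/* Old form: caller had to dereference and pass imap->br_state. */
#define ISUNWRITTEN_OLD(x)	((x) == XFS_EXT_UNWRITTEN)
/* New form after this commit: caller passes the extent record itself. */
#define ISUNWRITTEN(x)		((x)->br_state == XFS_EXT_UNWRITTEN)

static int is_unwritten(xfs_bmbt_irec_t *imap)
{
	return ISUNWRITTEN(imap);	/* was: ISUNWRITTEN_OLD(imap->br_state) */
}
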
fs/xfs/xfs_buf.h
@@ -107,8 +107,8 @@ static inline void xfs_buf_undelay(page_buf_t *pb)
 #define XFS_BUF_UNWRITE(x)	((x)->pb_flags &= ~PBF_WRITE)
 #define XFS_BUF_ISWRITE(x)	((x)->pb_flags & PBF_WRITE)

-#define XFS_BUF_ISUNINITIAL(x)	((x)->pb_flags & PBF_UNINITIAL)
-#define XFS_BUF_UNUNINITIAL(x)	((x)->pb_flags &= ~PBF_UNINITIAL)
+#define XFS_BUF_ISUNINITIAL(x)	(0)
+#define XFS_BUF_UNUNINITIAL(x)	(0)

 #define XFS_BUF_BP_ISMAPPED(bp)	1

fs/xfs/xfs_buf_item.c
@@ -902,9 +902,7 @@ xfs_buf_item_relse(
 	XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
 	if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
 	    (XFS_BUF_IODONE_FUNC(bp) != NULL)) {
-/**
 		ASSERT((XFS_BUF_ISUNINITIAL(bp)) == 0);
-***/
 		XFS_BUF_CLR_IODONE_FUNC(bp);
 	}
