Commit c2e95c3f, authored Dec 20, 2002 by Linus Torvalds

    Merge master.kernel.org:/home/hch/BK/xfs/linux-2.5
    into home.transmeta.com:/home/torvalds/v2.5/linux

Parents: 028d2f3e, b33cc8f7

25 changed files with 814 additions and 974 deletions (+814 -974)
 fs/buffer.c                 |   +3    -2
 fs/xfs/Makefile             |  +11   -12
 fs/xfs/linux/xfs_aops.c     |  +66   -52
 fs/xfs/linux/xfs_iomap.c    | +496  -659
 fs/xfs/linux/xfs_iops.c     |   +8    -6
 fs/xfs/linux/xfs_lrw.c      |  +19    -0
 fs/xfs/linux/xfs_lrw.h      |  +12    -0
 fs/xfs/linux/xfs_super.c    |   +9    -2
 fs/xfs/linux/xfs_super.h    |   +9    -7
 fs/xfs/pagebuf/page_buf.h   |   +7   -12
 fs/xfs/xfs_alloc_btree.h    |   +2    -2
 fs/xfs/xfs_buf.h            |  +19   -13
 fs/xfs/xfs_dmapi.h          |   +0   -17
 fs/xfs/xfs_iget.c           |   +1    -3
 fs/xfs/xfs_inode.c          |   +1    -1
 fs/xfs/xfs_iocore.c         |  +11    -2
 fs/xfs/xfs_mount.c          |  +39   -17
 fs/xfs/xfs_mount.h          |  +67   -21
 fs/xfs/xfs_rtalloc.c        |   +1    -1
 fs/xfs/xfs_rw.c             |   +1    -1
 fs/xfs/xfs_trans_buf.c      |   +1    -1
 fs/xfs/xfs_vfsops.c         |   +8    -7
 fs/xfs/xfs_vnodeops.c       |  +19  -134
 fs/xfs/xfsidbg.c            |   +2    -2
 include/linux/buffer_head.h |   +2    -0
fs/buffer.c

@@ -1453,6 +1453,7 @@ static inline void discard_buffer(struct buffer_head * bh)
 	clear_buffer_mapped(bh);
 	clear_buffer_req(bh);
 	clear_buffer_new(bh);
+	clear_buffer_delay(bh);
 	unlock_buffer(bh);
 }
@@ -1871,7 +1872,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 			set_buffer_uptodate(bh);
 			continue;
 		}
-		if (!buffer_uptodate(bh) &&
+		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		     (block_start < from || block_end > to)) {
 			ll_rw_block(READ, 1, &bh);
 			*wait_bh++=bh;
@@ -2457,7 +2458,7 @@ int block_truncate_page(struct address_space *mapping,
 	if (PageUptodate(page))
 		set_buffer_uptodate(bh);

-	if (!buffer_uptodate(bh)) {
+	if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
 		err = -EIO;
 		ll_rw_block(READ, 1, &bh);
 		wait_on_buffer(bh);
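Note: the buffer_delay()/clear_buffer_delay() helpers used above come from the BUFFER_FNS() macro pattern; this commit moves the BH_Delay bit out of fs/xfs/pagebuf/page_buf.h and into include/linux/buffer_head.h (the +2 there). A self-contained mini version of the pattern, with simplified bit operations standing in for the kernel's set_bit/clear_bit/test_bit, is sketched below; it is an illustration, not the kernel header itself.

    #include <stdio.h>

    /* Standalone sketch of the BUFFER_FNS() pattern: one macro stamps out
     * set_/clear_/test helpers per buffer-state bit. */
    struct buffer_head { unsigned long b_state; };

    enum bh_state_bits { BH_Uptodate, BH_Dirty, BH_Delay };

    #define BUFFER_FNS(bit, name)                                       \
    static inline void set_buffer_##name(struct buffer_head *bh)       \
    { bh->b_state |= (1UL << BH_##bit); }                               \
    static inline void clear_buffer_##name(struct buffer_head *bh)     \
    { bh->b_state &= ~(1UL << BH_##bit); }                              \
    static inline int buffer_##name(const struct buffer_head *bh)      \
    { return (bh->b_state >> BH_##bit) & 1; }

    BUFFER_FNS(Delay, delay)   /* generates the helpers seen in the hunks above */

    int main(void)
    {
        struct buffer_head bh = { 0 };
        set_buffer_delay(&bh);
        printf("delay=%d\n", buffer_delay(&bh));   /* 1 */
        clear_buffer_delay(&bh);
        printf("delay=%d\n", buffer_delay(&bh));   /* 0 */
        return 0;
    }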
fs/xfs/Makefile

@@ -51,23 +51,22 @@ export-objs := pagebuf/page_buf.o support/ktrace.o \
 obj-$(CONFIG_XFS_FS)		+= xfs.o

-xfs-obj-$(CONFIG_XFS_RT)	+= xfs_rtalloc.o
+xfs-$(CONFIG_XFS_RT)		+= xfs_rtalloc.o

-xfs-obj-$(CONFIG_XFS_QUOTA)	+= xfs_dquot.o \
+xfs-$(CONFIG_XFS_QUOTA)		+= xfs_dquot.o \
				   xfs_dquot_item.o \
				   xfs_trans_dquot.o \
				   xfs_qm_syscalls.o \
				   xfs_qm.o

-xfs-obj-$(CONFIG_XFS_POSIX_ACL)	+= xfs_acl.o
-xfs-obj-$(CONFIG_FS_POSIX_CAP)	+= xfs_cap.o
-xfs-obj-$(CONFIG_FS_POSIX_MAC)	+= xfs_mac.o
-xfs-obj-$(CONFIG_PROC_FS)	+= linux/xfs_stats.o
-xfs-obj-$(CONFIG_SYSCTL)	+= linux/xfs_sysctl.o
+xfs-$(CONFIG_XFS_POSIX_ACL)	+= xfs_acl.o
+xfs-$(CONFIG_FS_POSIX_CAP)	+= xfs_cap.o
+xfs-$(CONFIG_FS_POSIX_MAC)	+= xfs_mac.o
+xfs-$(CONFIG_PROC_FS)		+= linux/xfs_stats.o
+xfs-$(CONFIG_SYSCTL)		+= linux/xfs_sysctl.o

-xfs-objs += $(xfs-obj-y) \
-	    xfs_alloc.o \
+xfs-y += xfs_alloc.o \
	    xfs_alloc_btree.o \
	    xfs_attr.o \
	    xfs_attr_fetch.o \
@@ -115,12 +114,12 @@ xfs-objs += $(xfs-obj-y) \
	    xfs_rw.o

 # Objects in pagebuf/
-xfs-objs += $(addprefix pagebuf/, \
+xfs-y += $(addprefix pagebuf/, \
	    page_buf.o \
	    page_buf_locking.o)

 # Objects in linux/
-xfs-objs += $(addprefix linux/, \
+xfs-y += $(addprefix linux/, \
	    xfs_aops.o \
	    xfs_behavior.o \
	    xfs_file.o \
@@ -134,7 +133,7 @@ xfs-objs += $(addprefix linux/, \
	    xfs_vnode.o)

 # Objects in support/
-xfs-objs += $(addprefix support/, \
+xfs-y += $(addprefix support/, \
	    debug.o \
	    kmem.o \
	    ktrace.o \
fs/xfs/linux/xfs_aops.c

@@ -48,6 +48,9 @@ map_blocks(
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

+	if (((flags & (PBF_DIRECT|PBF_SYNC)) == PBF_DIRECT) &&
+	    (offset >= inode->i_size))
+		count = max(count, XFS_WRITE_IO_LOG);
 retry:
	VOP_BMAP(vp, offset, count, flags, pbmapp, &nmaps, error);
	if (flags & PBF_WRITE) {
@@ -145,9 +148,8 @@ probe_unmapped_page(
		struct buffer_head	*bh, *head;

		bh = head = page_buffers(page);
		do {
-			if (buffer_mapped(bh) || !buffer_uptodate(bh)) {
-				break;
-			}
+			if (buffer_mapped(bh) || !buffer_uptodate(bh))
+				break;
			ret += bh->b_size;
			if (ret >= pg_offset)
				break;
@@ -289,7 +291,7 @@ convert_page(
	bh = head = page_buffers(page);
	do {
		offset = i << bbits;
-		if (!buffer_uptodate(bh))
+		if (!(PageUptodate(page) || buffer_uptodate(bh)))
			continue;
		if (buffer_mapped(bh) && !buffer_delay(bh) && all_bh) {
			if (startio && (offset < end)) {
@@ -372,7 +374,7 @@ delalloc_convert(
	page_buf_bmap_t		*mp, map;
	unsigned long		p_offset = 0, end_index;
	loff_t			offset, end_offset;
-	int			len, err, i, cnt = 0;
+	int			len, err, i, cnt = 0, uptodate = 1;

	/* Are we off the end of the file ? */
	end_index = inode->i_size >> PAGE_CACHE_SHIFT;
@@ -396,7 +398,7 @@ delalloc_convert(
	len = bh->b_size;
	do {
-		if (!buffer_uptodate(bh) && !startio) {
+		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			goto next_bh;
		}
@@ -423,20 +425,22 @@ delalloc_convert(
					unlock_buffer(bh);
				}
			}
-		} else if (!buffer_mapped(bh) &&
-			   (buffer_uptodate(bh) || PageUptodate(page)) &&
-			   (allocate_space || startio)) {
+		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
+			   (allocate_space || startio)) {
+			if (!buffer_mapped(bh)) {
				int	size;

-				/* Getting here implies an unmapped buffer was found,
-				 * and we are in a path where we need to write the
-				 * whole page out.
+				/*
+				 * Getting here implies an unmapped buffer
+				 * was found, and we are in a path where we
+				 * need to write the whole page out.
				 */
				if (!mp) {
-					size = probe_unmapped_cluster(inode, page, bh, head);
-					err = map_blocks(inode, offset, size, &map,
-							PBF_WRITE|PBF_DIRECT);
+					size = probe_unmapped_cluster(inode,
+							page, bh, head);
+					err = map_blocks(inode, offset, size,
+							&map, PBF_WRITE|PBF_DIRECT);
					if (err) {
						goto error;
					}
@@ -444,7 +448,8 @@ delalloc_convert(
						p_offset);
			}
			if (mp) {
-				map_buffer_at_offset(page, bh, p_offset,
+				map_buffer_at_offset(page, bh,
+						p_offset,
						inode->i_blkbits, mp);
				if (startio) {
					bh_arr[cnt++] = bh;
@@ -453,18 +458,24 @@ delalloc_convert(
				}
			}
		} else if (startio && buffer_mapped(bh)) {
			if (buffer_uptodate(bh) && allocate_space) {
				lock_buffer(bh);
				bh_arr[cnt++] = bh;
			}
		}

 next_bh:
+		if (!buffer_uptodate(bh))
+			uptodate = 0;
		offset += len;
		p_offset += len;
		bh = bh->b_this_page;
	} while (offset < end_offset);

+	if (uptodate)
+		SetPageUptodate(page);
	if (startio) {
		submit_page(page, bh_arr, cnt);
	}
@@ -509,17 +520,15 @@ linvfs_get_block_core(
	ssize_t			size;
	loff_t			offset = (loff_t)iblock << inode->i_blkbits;

-	if (blocks) {
-		size = blocks << inode->i_blkbits;
-	} else {
-		/* If we are doing writes at the end of the file,
-		 * allocate in chunks
-		 */
-		if (create && (offset >= inode->i_size) && !(flags & PBF_SYNC))
+	if (blocks)
+		size = blocks << inode->i_blkbits;
+	else if (create && (offset >= inode->i_size))
		size = 1 << XFS_WRITE_IO_LOG;
	else
		size = 1 << inode->i_blkbits;
-	}

	VOP_BMAP(vp, offset, size, create ? flags : PBF_READ,
@@ -534,6 +543,10 @@ linvfs_get_block_core(
		page_buf_daddr_t	bn;
		loff_t			delta;

+		/* For unwritten extents do not report a disk address on
+		 * the read case.
+		 */
+		if (create || ((pbmap.pbm_flags & PBMF_UNWRITTEN) == 0)) {
			delta = offset - pbmap.pbm_offset;
			delta >>= inode->i_blkbits;
@@ -544,6 +557,7 @@ linvfs_get_block_core(
		bh_result->b_bdev = pbmap.pbm_target->pbr_bdev;
		set_buffer_mapped(bh_result);
+		}
	}

	/* If we previously allocated a block out beyond eof and
	 * we are now coming back to use it then we will need to
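Note: the delalloc_convert() changes above add a page-wide uptodate flag that is cleared whenever any buffer in the page's ring is stale, so SetPageUptodate() fires only when every buffer is good. A standalone analogue of that walk, with simplified stand-in types rather than the kernel's struct page/buffer_head, is sketched below.

    #include <stdio.h>

    /* Simplified stand-in for a page's circular buffer ring. */
    struct buf {
        int        uptodate;
        struct buf *b_this_page;   /* circular list, as in the kernel */
    };

    static int page_all_uptodate(struct buf *head)
    {
        struct buf *bh = head;
        int uptodate = 1;

        do {
            if (!bh->uptodate)
                uptodate = 0;      /* one stale buffer spoils the page */
            bh = bh->b_this_page;
        } while (bh != head);      /* ring: stop when back at head */
        return uptodate;
    }

    int main(void)
    {
        struct buf a = { 1, NULL }, b = { 1, NULL };
        a.b_this_page = &b;
        b.b_this_page = &a;
        printf("page uptodate: %d\n", page_all_uptodate(&a));
        return 0;
    }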
fs/xfs/linux/xfs_iomap.c

@@ -29,362 +29,13 @@
  *
  * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
  */
-/*
- * fs/xfs/linux/xfs_lrw.c (Linux Read Write stuff)
- *
- */
 #include <xfs.h>
 #include <linux/pagemap.h>
 #include <linux/capability.h>

-#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
-					 << mp->m_writeio_log)
-#define XFS_STRAT_WRITE_IMAPS	2
-
-STATIC int xfs_iomap_read(xfs_iocore_t *, loff_t, size_t, int,
-			  page_buf_bmap_t *, int *);
-STATIC int xfs_iomap_write(xfs_iocore_t *, loff_t, size_t,
-			   page_buf_bmap_t *, int *, int);
-STATIC int xfs_iomap_write_delay(xfs_iocore_t *, loff_t, size_t,
-				 page_buf_bmap_t *, int *, int, int);
-STATIC int xfs_iomap_write_direct(xfs_iocore_t *, loff_t, size_t,
-				  page_buf_bmap_t *, int *, int, int);
 STATIC int _xfs_imap_to_bmap(xfs_iocore_t *, xfs_off_t, xfs_bmbt_irec_t *,
			      page_buf_bmap_t *, int, int);

-int
-xfs_strategy(
-	xfs_inode_t	*ip,
-	xfs_off_t	offset,
-	ssize_t		count,
-	int		flags,
-	page_buf_bmap_t	*pbmapp,
-	int		*npbmaps)
-{
-	xfs_iocore_t	*io;
-	xfs_mount_t	*mp;
-	int		error;
-	xfs_fileoff_t	offset_fsb;
-	xfs_fileoff_t	end_fsb;
-	xfs_fileoff_t	map_start_fsb;
-	xfs_fileoff_t	last_block;
-	xfs_fsblock_t	first_block;
-	xfs_bmap_free_t	free_list;
-	xfs_filblks_t	count_fsb;
-	int		committed, i, loops, nimaps;
-	int		is_xfs;
-	xfs_bmbt_irec_t	imap[XFS_MAX_RW_NBMAPS];
-	xfs_trans_t	*tp;
-
-	mp = ip->i_mount;
-	io = &ip->i_iocore;
-	is_xfs = IO_IS_XFS(io);
-	ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
-	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
-	       ((io->io_flags & XFS_IOCORE_RT) != 0));
-
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return XFS_ERROR(EIO);
-
-	offset_fsb = XFS_B_TO_FSBT(mp, offset);
-	nimaps = min(XFS_MAX_RW_NBMAPS, *npbmaps);
-	end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
-	first_block = NULLFSBLOCK;
-
-	XFS_ILOCK(mp, io, XFS_ILOCK_SHARED | XFS_EXTSIZE_RD);
-	error = XFS_BMAPI(mp, NULL, io, offset_fsb,
-			  (xfs_filblks_t)(end_fsb - offset_fsb),
-			  XFS_BMAPI_ENTIRE, &first_block, 0, imap,
-			  &nimaps, NULL);
-	XFS_IUNLOCK(mp, io, XFS_ILOCK_SHARED | XFS_EXTSIZE_RD);
-	if (error) {
-		return XFS_ERROR(error);
-	}
-
-	if (nimaps && !ISNULLSTARTBLOCK(imap[0].br_startblock)) {
-		*npbmaps = _xfs_imap_to_bmap(&ip->i_iocore, offset, imap,
-					     pbmapp, nimaps, *npbmaps);
-		return 0;
-	}
-
-	/*
-	 * Make sure that the dquots are there.
-	 */
-	if (XFS_IS_QUOTA_ON(mp)) {
-		if (XFS_NOT_DQATTACHED(mp, ip)) {
-			if ((error = xfs_qm_dqattach(ip, 0))) {
-				return XFS_ERROR(error);
-			}
-		}
-	}
-
-	XFS_STATS_ADD(xfsstats.xs_xstrat_bytes,
-		      XFS_FSB_TO_B(mp, imap[0].br_blockcount));
-	offset_fsb = imap[0].br_startoff;
-	count_fsb = imap[0].br_blockcount;
-	map_start_fsb = offset_fsb;
-
-	while (count_fsb != 0) {
-		/*
-		 * Set up a transaction with which to allocate the
-		 * backing store for the file.  Do allocations in a
-		 * loop until we get some space in the range we are
-		 * interested in.  The other space that might be allocated
-		 * is in the delayed allocation extent on which we sit
-		 * but before our buffer starts.
-		 */
-		nimaps = 0;
-		loops = 0;
-		while (nimaps == 0) {
-			if (is_xfs) {
-				tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
-				error = xfs_trans_reserve(tp, 0,
-						XFS_WRITE_LOG_RES(mp),
-						0, XFS_TRANS_PERM_LOG_RES,
-						XFS_WRITE_LOG_COUNT);
-				if (error) {
-					xfs_trans_cancel(tp, 0);
-					goto error0;
-				}
-				xfs_ilock(ip, XFS_ILOCK_EXCL);
-				xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-				xfs_trans_ihold(tp, ip);
-			} else {
-				tp = NULL;
-				XFS_ILOCK(mp, io, XFS_ILOCK_EXCL |
-					  XFS_EXTSIZE_WR);
-			}
-
-			/*
-			 * Allocate the backing store for the file.
-			 */
-			XFS_BMAP_INIT(&(free_list), &(first_block));
-			nimaps = XFS_STRAT_WRITE_IMAPS;
-
-			/*
-			 * Ensure we don't go beyond eof - it is possible
-			 * the extents changed since we did the read call,
-			 * we dropped the ilock in the interim.
-			 */
-			end_fsb = XFS_B_TO_FSB(mp, XFS_SIZE(mp, io));
-			xfs_bmap_last_offset(NULL, ip, &last_block,
-					     XFS_DATA_FORK);
-			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
-			if ((map_start_fsb + count_fsb) > last_block) {
-				count_fsb = last_block - map_start_fsb;
-				if (count_fsb == 0) {
-					if (is_xfs) {
-						xfs_bmap_cancel(&free_list);
-						xfs_trans_cancel(tp,
-						    (XFS_TRANS_RELEASE_LOG_RES |
-						     XFS_TRANS_ABORT));
-					}
-					XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL |
-						    XFS_EXTSIZE_WR);
-					return XFS_ERROR(EAGAIN);
-				}
-			}
-
-			error = XFS_BMAPI(mp, tp, io, map_start_fsb, count_fsb,
-					  XFS_BMAPI_WRITE, &first_block, 1,
-					  imap, &nimaps, &free_list);
-			if (error) {
-				xfs_bmap_cancel(&free_list);
-				xfs_trans_cancel(tp,
-					(XFS_TRANS_RELEASE_LOG_RES |
-					 XFS_TRANS_ABORT));
-				XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL |
-					    XFS_EXTSIZE_WR);
-				goto error0;
-			}
-			if (is_xfs) {
-				error = xfs_bmap_finish(&(tp), &(free_list),
-						first_block, &committed);
-				if (error) {
-					xfs_bmap_cancel(&free_list);
-					xfs_trans_cancel(tp,
-						(XFS_TRANS_RELEASE_LOG_RES |
-						 XFS_TRANS_ABORT));
-					xfs_iunlock(ip, XFS_ILOCK_EXCL);
-					goto error0;
-				}
-
-				error = xfs_trans_commit(tp,
-						XFS_TRANS_RELEASE_LOG_RES,
-						NULL);
-				if (error) {
-					xfs_iunlock(ip, XFS_ILOCK_EXCL);
-					goto error0;
-				}
-			}
-			if (nimaps == 0) {
-				XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL |
-					    XFS_EXTSIZE_WR);
-			} /* else hold 'till we maybe loop again below */
-		}
-
-		/*
-		 * See if we were able to allocate an extent that
-		 * covers at least part of the user's requested size.
-		 */
-		offset_fsb = XFS_B_TO_FSBT(mp, offset);
-		for (i = 0; i < nimaps; i++) {
-			int	maps;
-
-			if ((offset_fsb >= imap[i].br_startoff) &&
-			    (offset_fsb < (imap[i].br_startoff +
-					   imap[i].br_blockcount))) {
-				XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL |
-					    XFS_EXTSIZE_WR);
-				maps = min(nimaps, *npbmaps);
-				*npbmaps = _xfs_imap_to_bmap(io, offset,
-							     &imap[i], pbmapp,
-							     maps, *npbmaps);
-				XFS_STATS_INC(xfsstats.xs_xstrat_quick);
-				return 0;
-			}
-			count_fsb -= imap[i].br_blockcount; /* for next bmapi,
-							       if needed. */
-		}
-
-		/*
-		 * We didn't get an extent the caller can write into so
-		 * loop around and try starting after the last imap we got back.
-		 */
-		nimaps--;	/* Index of last entry */
-		ASSERT(nimaps >= 0);
-		ASSERT(offset_fsb >= imap[nimaps].br_startoff +
-		       imap[nimaps].br_blockcount);
-		ASSERT(count_fsb);
-		offset_fsb = imap[nimaps].br_startoff +
-			     imap[nimaps].br_blockcount;
-		map_start_fsb = offset_fsb;
-		XFS_STATS_INC(xfsstats.xs_xstrat_split);
-		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL | XFS_EXTSIZE_WR);
-	}
-
-	ASSERT(0);	/* Should never get here */
-
- error0:
-	if (error) {
-		ASSERT(count_fsb != 0);
-		ASSERT(is_xfs || XFS_FORCED_SHUTDOWN(mp));
-	}
-	return XFS_ERROR(error);
-}
-/*
- * xfs_bmap() is the same as the irix xfs_bmap from xfs_rw.c
- * execpt for slight changes to the params
- */
-int
-xfs_bmap(
-	bhv_desc_t	*bdp,
-	xfs_off_t	offset,
-	ssize_t		count,
-	int		flags,
-	page_buf_bmap_t	*pbmapp,
-	int		*npbmaps)
-{
-	xfs_inode_t	*ip;
-	int		error;
-	int		lockmode;
-	int		fsynced = 0;
-	vnode_t		*vp;
-
-	ip = XFS_BHVTOI(bdp);
-	ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
-	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
-	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
-
-	if (XFS_FORCED_SHUTDOWN(ip->i_iocore.io_mount))
-		return XFS_ERROR(EIO);
-
-	if (flags & PBF_READ) {
-		lockmode = xfs_ilock_map_shared(ip);
-		error = xfs_iomap_read(&ip->i_iocore, offset, count,
-				       XFS_BMAPI_ENTIRE, pbmapp, npbmaps);
-		xfs_iunlock_map_shared(ip, lockmode);
-	} else if (flags & PBF_FILE_ALLOCATE) {
-		error = xfs_strategy(ip, offset, count, flags,
-				     pbmapp, npbmaps);
-	} else {	/* PBF_WRITE */
-		ASSERT(flags & PBF_WRITE);
-		vp = BHV_TO_VNODE(bdp);
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-
-		/*
-		 * Make sure that the dquots are there. This doesn't hold
-		 * the ilock across a disk read.
-		 */
-		if (XFS_IS_QUOTA_ON(ip->i_mount)) {
-			if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
-				if ((error = xfs_qm_dqattach(ip,
-						XFS_QMOPT_ILOCKED))) {
-					xfs_iunlock(ip, XFS_ILOCK_EXCL);
-					return XFS_ERROR(error);
-				}
-			}
-		}
- retry:
-		error = xfs_iomap_write(&ip->i_iocore, offset, count,
-					pbmapp, npbmaps, flags);
-		/* xfs_iomap_write unlocks/locks/unlocks */
-
-		if (error == ENOSPC) {
-			switch (fsynced) {
-			case 0:
-				if (ip->i_delayed_blks) {
-					filemap_fdatawrite(
-						LINVFS_GET_IP(vp)->i_mapping);
-					fsynced = 1;
-				} else {
-					fsynced = 2;
-					flags |= PBF_SYNC;
-				}
-				error = 0;
-				xfs_ilock(ip, XFS_ILOCK_EXCL);
-				goto retry;
-			case 1:
-				fsynced = 2;
-				if (!(flags & PBF_SYNC)) {
-					flags |= PBF_SYNC;
-					error = 0;
-					xfs_ilock(ip, XFS_ILOCK_EXCL);
-					goto retry;
-				}
-			case 2:
-				sync_blockdev(vp->v_vfsp->vfs_super->s_bdev);
-				xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
-					      XFS_LOG_FORCE | XFS_LOG_SYNC);
-				error = 0;
-/**
-				delay(HZ);
-**/
-				fsynced++;
-				xfs_ilock(ip, XFS_ILOCK_EXCL);
-				goto retry;
-			}
-		}
-	}
-
-	return XFS_ERROR(error);
-}
-
 #define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP

 STATIC int
 _xfs_imap_to_bmap(
@@ -397,7 +48,7 @@ _xfs_imap_to_bmap(
 {
	xfs_mount_t	*mp;
	xfs_fsize_t	nisize;
-	int		im, pbm;
+	int		pbm;
	xfs_fsblock_t	start_block;

	mp = io->io_mount;
@@ -405,7 +56,7 @@ _xfs_imap_to_bmap(
	if (io->io_new_size > nisize)
		nisize = io->io_new_size;

-	for (im = pbm = 0; im < imaps && pbm < pbmaps;
-	     im++, pbmapp++, imap++, pbm++) {
+	for (pbm = 0; imaps && pbm < pbmaps;
+	     imaps--, pbmapp++, imap++, pbm++) {
		pbmapp->pbm_target = io->io_flags & XFS_IOCORE_RT ?
				     mp->m_rtdev_targp : mp->m_ddev_targp;
		pbmapp->pbm_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
@@ -422,10 +73,9 @@ _xfs_imap_to_bmap(
			pbmapp->pbm_flags = PBMF_DELAY;
		} else {
			pbmapp->pbm_bn = XFS_FSB_TO_DB_IO(io, start_block);
-			if (ISUNWRITTEN(imap)) {
+			if (ISUNWRITTEN(imap))
				pbmapp->pbm_flags |= PBMF_UNWRITTEN;
-			}
		}

		if ((pbmapp->pbm_offset + pbmapp->pbm_bsize) >= nisize) {
			pbmapp->pbm_flags |= PBMF_EOF;
@@ -436,149 +86,344 @@ _xfs_imap_to_bmap(
	return pbm;	/* Return the number filled */
 }

-STATIC int
-xfs_iomap_read(
+int
+xfs_iomap(
	xfs_iocore_t	*io,
-	loff_t		offset,
-	size_t		count,
+	xfs_off_t	offset,
+	ssize_t		count,
	int		flags,
	page_buf_bmap_t	*pbmapp,
	int		*npbmaps)
 {
-	xfs_fileoff_t	offset_fsb;
-	xfs_fileoff_t	end_fsb;
-	int		nimaps;
+	xfs_mount_t	*mp = io->io_mount;
+	xfs_fileoff_t	offset_fsb, end_fsb;
	int		error;
-	xfs_mount_t	*mp;
-	xfs_bmbt_irec_t	imap[XFS_MAX_RW_NBMAPS];
+	int		lockmode = 0;
+	xfs_bmbt_irec_t	imap;
+	int		nimaps = 1;
+	int		bmap_flags = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

-	ASSERT(ismrlocked(io->io_lock, MR_UPDATE | MR_ACCESS) != 0);
+	switch (flags &
+		(PBF_READ | PBF_WRITE | PBF_FILE_ALLOCATE |
+		 PBF_FILE_UNWRITTEN)) {
+	case PBF_READ:
+		lockmode = XFS_LCK_MAP_SHARED(mp, io);
+		bmap_flags = XFS_BMAPI_ENTIRE;
+		break;
+	case PBF_WRITE:
+		lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
+		bmap_flags = 0;
+		XFS_ILOCK(mp, io, lockmode);
+		break;
+	case PBF_FILE_ALLOCATE:
+		lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD;
+		bmap_flags = XFS_BMAPI_ENTIRE;
+		XFS_ILOCK(mp, io, lockmode);
+		break;
+	case PBF_FILE_UNWRITTEN:
+		lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
+		bmap_flags = XFS_BMAPI_ENTIRE|XFS_BMAPI_IGSTATE;
+		XFS_ILOCK(mp, io, lockmode);
+		break;
+	default:
+		BUG();
+	}

-	mp = io->io_mount;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
-	nimaps = sizeof(imap) / sizeof(imap[0]);
-	nimaps = min(nimaps, *npbmaps);	/* Don't ask for more than caller has */
	end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));

	error = XFS_BMAPI(mp, NULL, io, offset_fsb,
-			  (xfs_filblks_t)(end_fsb - offset_fsb),
-			  flags, NULL, 0, imap, &nimaps, NULL);
+			  (xfs_filblks_t)(end_fsb - offset_fsb),
+			  bmap_flags, NULL, 0, &imap, &nimaps, NULL);

-	if (error) {
-		return XFS_ERROR(error);
-	}
+	if (error)
+		goto out;

+	switch (flags & (PBF_WRITE | PBF_FILE_ALLOCATE)) {
+	case PBF_WRITE:
+		/* If we found an extent, return it */
+		if (nimaps && (imap.br_startblock != HOLESTARTBLOCK))
+			break;
+
+		if (flags & PBF_DIRECT) {
+			error = XFS_IOMAP_WRITE_DIRECT(mp, io, offset,
+					count, flags, &imap, &nimaps, nimaps);
+		} else {
+			error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count,
+					flags, &imap, &nimaps);
+		}
+		break;
+	case PBF_FILE_ALLOCATE:
+		/* If we found an extent, return it */
+		XFS_IUNLOCK(mp, io, lockmode);
+		lockmode = 0;
+
+		if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock))
+			break;
+
+		error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, &imap, &nimaps);
+		break;
+	}

-	if (nimaps) {
-		*npbmaps = _xfs_imap_to_bmap(io, offset, imap, pbmapp,
-					     nimaps, *npbmaps);
-	}
+	if (nimaps) {
+		*npbmaps = _xfs_imap_to_bmap(io, offset, &imap, pbmapp,
+					     nimaps, *npbmaps);
+	} else {
+		*npbmaps = 0;
+	}
+
+out:
+	if (lockmode)
+		XFS_IUNLOCK(mp, io, lockmode);
	return XFS_ERROR(error);
 }
-/*
- * xfs_iomap_write: return pagebuf_bmap_t's telling higher layers
- * where to write.
- * There are 2 main cases:
- *	1 the extents already exist
- *	2 must allocate.
- *	There are 3 cases when we allocate:
- *	delay allocation (doesn't really allocate or use transactions)
- *	direct allocation (no previous delay allocation)
- *	convert delay to real allocations
- */
 STATIC int
-xfs_iomap_write(
-	xfs_iocore_t	*io,
+xfs_flush_space(
+	xfs_inode_t	*ip,
+	int		*fsynced,
+	int		*ioflags)
+{
+	vnode_t		*vp = XFS_ITOV(ip);
+
+	switch (*fsynced) {
+	case 0:
+		if (ip->i_delayed_blks) {
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+			filemap_fdatawrite(LINVFS_GET_IP(vp)->i_mapping);
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+			*fsynced = 1;
+		} else {
+			*ioflags |= PBF_SYNC;
+			*fsynced = 2;
+		}
+		return 0;
+	case 1:
+		*fsynced = 2;
+		*ioflags |= PBF_SYNC;
+		return 0;
+	case 2:
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		sync_blockdev(vp->v_vfsp->vfs_super->s_bdev);
+		xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
+			      XFS_LOG_FORCE|XFS_LOG_SYNC);
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		*fsynced = 3;
+		return 0;
+	}
+	return 1;
+}
 int
 xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	loff_t		offset,
	size_t		count,
-	page_buf_bmap_t	*pbmapp,
-	int		*npbmaps,
-	int		ioflag)
+	int		ioflag,
+	xfs_bmbt_irec_t	*ret_imap,
+	int		*nmaps,
+	int		found)
 {
-	int		maps;
-	int		error = 0;
-	int		found;
-	int		flags = 0;
+	xfs_mount_t	*mp = ip->i_mount;
+	xfs_iocore_t	*io = &ip->i_iocore;
+	xfs_fileoff_t	offset_fsb;
+	xfs_fileoff_t	last_fsb;
+	xfs_filblks_t	count_fsb;
+	xfs_fsize_t	isize;
+	xfs_fsblock_t	firstfsb;
+	int		nimaps, maps;
+	int		error;
+	int		bmapi_flag;
+	int		rt;
+	xfs_trans_t	*tp;
+	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS], *imapp;
+	xfs_bmap_free_t	free_list;
+	int		aeof;
+	xfs_filblks_t	datablocks;
+	int		committed;
+	int		numrtextents;
+	uint		resblks;

-	maps = *npbmaps;
-	if (!maps)
-		goto out;
-
	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
		if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
			return XFS_ERROR(error);
		}
	}

+	maps = min(XFS_WRITE_IMAPS, *nmaps);
+	nimaps = maps;
+
+	isize = ip->i_d.di_size;
+	aeof = (offset + count) > isize;
+
+	if (io->io_new_size > isize)
+		isize = io->io_new_size;
+
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
+	count_fsb = last_fsb - offset_fsb;
+	if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) {
+		xfs_fileoff_t	map_last_fsb;
+
+		map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff;
+
+		if (map_last_fsb < last_fsb) {
+			last_fsb = map_last_fsb;
+			count_fsb = last_fsb - offset_fsb;
+		}
+		ASSERT(count_fsb > 0);
+	}
+
+	/*
+	 * determine if reserving space on
+	 * the data or realtime partition.
+	 */
+	if ((rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
+		int	sbrtextsize, iprtextsize;
+
+		sbrtextsize = mp->m_sb.sb_rextsize;
+		iprtextsize =
+			ip->i_d.di_extsize ? ip->i_d.di_extsize : sbrtextsize;
+		numrtextents = (count_fsb + iprtextsize - 1);
+		do_div(numrtextents, sbrtextsize);
+		datablocks = 0;
+	} else {
+		datablocks = count_fsb;
+		numrtextents = 0;
+	}

	/*
-	 * If we have extents that are allocated for this range,
-	 * return them.
+	 * allocate and setup the transaction
	 */
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+	resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
+
+	error = xfs_trans_reserve(tp, resblks,
+			XFS_WRITE_LOG_RES(mp), numrtextents,
+			XFS_TRANS_PERM_LOG_RES,
+			XFS_WRITE_LOG_COUNT);

-	found = 0;
-	error = xfs_iomap_read(io, offset, count, flags, pbmapp, npbmaps);
-	if (error)
-		goto out;
+	/*
+	 * check for running out of space
+	 */
+	if (error)
+		/*
+		 * Free the transaction structure.
+		 */
+		xfs_trans_cancel(tp, 0);
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	if (error)
+		goto error_out; /* Don't return in above if .. trans ..,
+					need lock to return */
+
+	if (XFS_IS_QUOTA_ON(mp)) {
+		if (xfs_trans_reserve_blkquota(tp, ip, resblks)) {
+			error = (EDQUOT);
+			goto error1;
+		}
+	}
+	nimaps = 1;
+
+	bmapi_flag = XFS_BMAPI_WRITE;
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(tp, ip);
+
+	if (offset < ip->i_d.di_size || rt)
+		bmapi_flag |= XFS_BMAPI_PREALLOC;

	/*
-	 * If we found mappings and they can just have data written
-	 * without conversion,
-	 * let the caller write these and call us again.
-	 *
-	 * If we have a HOLE or UNWRITTEN, proceed down lower to
-	 * get the space or to convert to written.
+	 * issue the bmapi() call to allocate the blocks
	 */
+	XFS_BMAP_INIT(&free_list, &firstfsb);
+	imapp = &imap[0];
+	error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
+		bmapi_flag, &firstfsb, 0, imapp, &nimaps, &free_list);
+	if (error) {
+		goto error0;
+	}

-	if (*npbmaps) {
-		if (!(pbmapp->pbm_flags & PBMF_HOLE)) {
-			*npbmaps = 1;	/* Only checked the first one. */
-					/* We could check more, ... */
-			goto out;
-		}
-	}
-	found = *npbmaps;
-	*npbmaps = maps;	/* Restore to original requested */
+	/*
+	 * complete the transaction
+	 */
+	error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
+	if (error) {
+		goto error0;
+	}
+
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	if (error) {
+		goto error_out;
+	}

-	if (ioflag & PBF_DIRECT) {
-		error = xfs_iomap_write_direct(io, offset, count, pbmapp,
-					       npbmaps, ioflag, found);
-	} else {
-		error = xfs_iomap_write_delay(io, offset, count, pbmapp,
-					      npbmaps, ioflag, found);
-	}
+	/* copy any maps to caller's array and return any error. */
+	if (nimaps == 0) {
+		error = (ENOSPC);
+		goto error_out;
+	}

-out:
-	XFS_IUNLOCK(io->io_mount, io, XFS_ILOCK_EXCL);
+	*ret_imap = imap[0];
+	*nmaps = 1;
+	return 0;
+
+error0:	/* Cancel bmap, unlock inode, and cancel trans */
+	xfs_bmap_cancel(&free_list);
+
+error1:	/* Just cancel transaction */
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+	*nmaps = 0;	/* nothing set-up here */
+
+error_out:
	return XFS_ERROR(error);
 }
-STATIC int
+int
 xfs_iomap_write_delay(
-	xfs_iocore_t	*io,
+	xfs_inode_t	*ip,
	loff_t		offset,
	size_t		count,
-	page_buf_bmap_t	*pbmapp,
-	int		*npbmaps,
	int		ioflag,
-	int		found)
+	xfs_bmbt_irec_t	*ret_imap,
+	int		*nmaps)
 {
+	xfs_mount_t	*mp = ip->i_mount;
+	xfs_iocore_t	*io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb;
-	xfs_fileoff_t	ioalign;
	xfs_fileoff_t	last_fsb;
-	xfs_fileoff_t	start_fsb;
	xfs_filblks_t	count_fsb;
-	xfs_off_t	aligned_offset;
	xfs_fsize_t	isize;
	xfs_fsblock_t	firstblock;
	int		nimaps;
	int		error;
-	int		n;
-	unsigned int	iosize;
-	xfs_mount_t	*mp;
-#define XFS_WRITE_IMAPS	XFS_BMAP_MAX_NMAP
	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS];
	int		aeof;
+	int		fsynced = 0;

-	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);

-	mp = io->io_mount;
-
	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
-	isize = XFS_SIZE(mp, io);
	if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
		if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
			return XFS_ERROR(error);
		}
	}

+retry:
+	isize = ip->i_d.di_size;
	if (io->io_new_size > isize) {
		isize = io->io_new_size;
	}
@@ -591,50 +436,22 @@ xfs_iomap_write_delay(
	 * then extend the allocation (and the buffer used for the write)
	 * out to the file system's write iosize.  We clean up any extra
	 * space left over when the file is closed in xfs_inactive().
-	 * We can only do this if we are sure that we will create buffers
-	 * over all of the space we allocate beyond the end of the file.
-	 * Not doing so would allow us to create delalloc blocks with
-	 * no pages in memory covering them.  So, we need to check that
-	 * there are not any real blocks in the area beyond the end of
-	 * the file which we are optimistically going to preallocate. If
-	 * there are then our buffers will stop when they encounter them
-	 * and we may accidentally create delalloc blocks beyond them
-	 * that we never cover with a buffer.  All of this is because
-	 * we are not actually going to write the extra blocks preallocated
-	 * at this point.
	 *
	 * We don't bother with this for sync writes, because we need
	 * to minimize the amount we write for good performance.
	 */
-	if (!(ioflag & PBF_SYNC) && ((offset + count) > XFS_SIZE(mp, io))) {
-		start_fsb = XFS_B_TO_FSBT(mp,
-				((xfs_ufsize_t)(offset + count - 1)));
-		count_fsb = mp->m_writeio_blocks;
-		while (count_fsb > 0) {
-			nimaps = XFS_WRITE_IMAPS;
-			error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
-					  0, NULL, 0, imap, &nimaps, NULL);
-			if (error) {
-				return error;
-			}
-			for (n = 0; n < nimaps; n++) {
-				if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
-				    (imap[n].br_startblock != DELAYSTARTBLOCK)) {
-					goto write_map;
-				}
-				start_fsb += imap[n].br_blockcount;
-				count_fsb -= imap[n].br_blockcount;
-				ASSERT(count_fsb < 0xffff000);
-			}
-		}
-	}
-write_map:
+	if (!(ioflag & PBF_SYNC) && ((offset + count) > ip->i_d.di_size)) {
+		xfs_off_t	aligned_offset;
+		unsigned int	iosize;
+		xfs_fileoff_t	ioalign;
+
+		iosize = mp->m_writeio_blocks;
+		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
+		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
+		last_fsb = ioalign + iosize;
+		aeof = 1;
+	}
	nimaps = XFS_WRITE_IMAPS;
	firstblock = NULLFSBLOCK;
@@ -642,11 +459,11 @@ xfs_iomap_write_delay(
	 * roundup the allocation request to m_dalign boundary if file size
	 * is greater that 512K and we are allocating past the allocation eof
	 */
-	if (mp->m_dalign && (XFS_SIZE(mp, io) >= mp->m_dalign) && aeof) {
+	if (mp->m_dalign && (isize >= mp->m_dalign) && aeof) {
		int		eof;
		xfs_fileoff_t	new_last_fsb;

		new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
-		error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
+		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error) {
			return error;
		}
@@ -655,7 +472,7 @@ xfs_iomap_write_delay(
		}
	}

-	error = XFS_BMAPI(mp, NULL, io, offset_fsb,
+	error = xfs_bmapi(NULL, ip, offset_fsb,
			  (xfs_filblks_t)(last_fsb - offset_fsb),
			  XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
			  XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
@@ -663,235 +480,255 @@ xfs_iomap_write_delay(
	/*
	 * This can be EDQUOT, if nimaps == 0
	 */
-	if (error) {
+	if (error && (error != ENOSPC)) {
		return XFS_ERROR(error);
	}

+	/*
+	 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
+	 * then we must have run out of space.
+	 */
+	if (nimaps == 0) {
+		if (xfs_flush_space(ip, &fsynced, &ioflag))
+			return XFS_ERROR(ENOSPC);
+
+		error = 0;
+		goto retry;
+	}

-	/*
-	 * Now map our desired I/O size and alignment over the
-	 * extents returned by xfs_bmapi().
-	 */
-	*npbmaps = _xfs_imap_to_bmap(io, offset, imap,
-				     pbmapp, nimaps, *npbmaps);
+	*ret_imap = imap[0];
+	*nmaps = 1;
	return 0;
 }

-STATIC int
-xfs_iomap_write_direct(
-	xfs_iocore_t	*io,
-	loff_t		offset,
-	size_t		count,
-	page_buf_bmap_t	*pbmapp,
-	int		*npbmaps,
-	int		ioflag,
-	int		found)
-{
-	xfs_inode_t	*ip = XFS_IO_INODE(io);
-	xfs_mount_t	*mp;
-	xfs_fileoff_t	offset_fsb;
-	xfs_fileoff_t	last_fsb;
-	xfs_filblks_t	count_fsb;
-	xfs_fsize_t	isize;
-	xfs_fsblock_t	firstfsb;
-	int		nimaps, maps;
-	int		error;
-	xfs_trans_t	*tp;
-#define XFS_WRITE_IMAPS	XFS_BMAP_MAX_NMAP
-	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS], *imapp;
-	xfs_bmap_free_t	free_list;
-	int		aeof;
-	int		bmapi_flags;
-	xfs_filblks_t	datablocks;
-	int		rt;
-	int		committed;
-	int		numrtextents;
-	uint		resblks;
-	int		rtextsize;
-
-	maps = min(XFS_WRITE_IMAPS, *npbmaps);
-	nimaps = maps;
-
-	/*
-	 * Make sure that the dquots are there.
-	 */
-	mp = io->io_mount;
-	isize = XFS_SIZE(mp, io);
-	if (io->io_new_size > isize)
-		isize = io->io_new_size;
-
-	aeof = ((offset + count) > isize) ? 1 : 0;
-	offset_fsb = XFS_B_TO_FSBT(mp, offset);
-	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
-	count_fsb = last_fsb - offset_fsb;
-
-	if (found && (pbmapp->pbm_flags & PBMF_HOLE)) {
-		xfs_fileoff_t	map_last_fsb;
-
-		map_last_fsb = XFS_B_TO_FSB(mp,
-				(pbmapp->pbm_bsize + pbmapp->pbm_offset));
-		if (map_last_fsb < last_fsb) {
-			last_fsb = map_last_fsb;
-			count_fsb = last_fsb - offset_fsb;
-		}
-		ASSERT(count_fsb > 0);
-	}
-
-	/*
-	 * roundup the allocation request to m_dalign boundary if file size
-	 * is greater that 512K and we are allocating past the allocation eof
-	 */
-	if (!found && mp->m_dalign && (isize >= 524288) && aeof) {
-		int		eof;
-		xfs_fileoff_t	new_last_fsb;
-
-		new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
-		printk("xfs_iomap_write_direct: about to XFS_BMAP_EOF %Ld\n",
-			new_last_fsb);
-		error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
-		if (error)
-			goto error_out;
-		if (eof)
-			last_fsb = new_last_fsb;
-	}
-
-	bmapi_flags = XFS_BMAPI_WRITE|XFS_BMAPI_DIRECT_IO|XFS_BMAPI_ENTIRE;
-	bmapi_flags &= ~XFS_BMAPI_DIRECT_IO;
-
-	/*
-	 * determine if this is a realtime file
-	 */
-	if ((rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) != 0) {
-		rtextsize = mp->m_sb.sb_rextsize;
-	} else
-		rtextsize = 0;
-
-	error = 0;
-
-	/*
-	 * allocate file space for the bmapp entries passed in.
-	 */
-
-	/*
-	 * determine if reserving space on
-	 * the data or realtime partition.
-	 */
-	if (rt) {
-		numrtextents = (count_fsb + rtextsize - 1);
-		do_div(numrtextents, rtextsize);
-		datablocks = 0;
-	} else {
-		datablocks = count_fsb;
-		numrtextents = 0;
-	}
-
-	/*
-	 * allocate and setup the transaction
-	 */
-	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-	resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
-	error = xfs_trans_reserve(tp, resblks, XFS_WRITE_LOG_RES(mp),
-				  numrtextents, XFS_TRANS_PERM_LOG_RES,
-				  XFS_WRITE_LOG_COUNT);
-
-	/*
-	 * check for running out of space
-	 */
-	if (error) {
-		/*
-		 * Free the transaction structure.
-		 */
-		xfs_trans_cancel(tp, 0);
-	}
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
-	if (error) {
-		goto error_out;	/* Don't return in above if .. trans ..,
-					need lock to return */
-	}
-	if (XFS_IS_QUOTA_ON(mp)) {
-		if (xfs_trans_reserve_quota(tp, ip->i_udquot, ip->i_gdquot,
-					    resblks, 0, 0)) {
-			error = (EDQUOT);
-			goto error1;
-		}
-		nimaps = 1;
-	} else {
-		nimaps = 2;
-	}
-	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-	xfs_trans_ihold(tp, ip);
-
-	/*
-	 * issue the bmapi() call to allocate the blocks
-	 */
-	XFS_BMAP_INIT(&free_list, &firstfsb);
-	imapp = &imap[0];
-	error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb,
-			  bmapi_flags, &firstfsb, 1, imapp, &nimaps,
-			  &free_list);
-	if (error) {
-		goto error0;
-	}
-
-	/*
-	 * complete the transaction
-	 */
-	error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
-	if (error) {
-		goto error0;
-	}
-	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
-	if (error) {
-		goto error_out;
-	}
-
-	/* copy any maps to caller's array and return any error. */
-	if (nimaps == 0) {
-		error = (ENOSPC);
-		goto error_out;
-	}
-
-	maps = min(nimaps, maps);
-	*npbmaps = _xfs_imap_to_bmap(io, offset, &imap[0], pbmapp, maps,
-				     *npbmaps);
-	if (*npbmaps) {
-		/*
-		 * this is new since xfs_iomap_read didn't find it.
-		 */
-		if (*npbmaps != 1) {
-			/* NEED MORE WORK FOR MULTIPLE BMAPS (which are new) */
-			BUG();
-		}
-	}
-	goto out;
-
- error0:	/* Cancel bmap, unlock inode, and cancel trans */
-	xfs_bmap_cancel(&free_list);
-
- error1:	/* Just cancel transaction */
-	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-	*npbmaps = 0;	/* nothing set-up here */
-
-error_out:
-out:	/* Just return error and any tracing at end of routine */
-	return XFS_ERROR(error);
-}
+/*
+ * Pass in a delayed allocate extent, convert it to real extents;
+ * return to the caller the extent we create which maps on top of
+ * the originating callers request.
+ *
+ * Called without a lock on the inode.
+ */
+int
+xfs_iomap_write_allocate(
+	xfs_inode_t	*ip,
+	xfs_bmbt_irec_t	*map,
+	int		*retmap)
+{
+	xfs_mount_t	*mp = ip->i_mount;
+	xfs_fileoff_t	offset_fsb, last_block;
+	xfs_fileoff_t	end_fsb, map_start_fsb;
+	xfs_fsblock_t	first_block;
+	xfs_bmap_free_t	free_list;
+	xfs_filblks_t	count_fsb;
+	xfs_bmbt_irec_t	imap[XFS_STRAT_WRITE_IMAPS];
+	xfs_trans_t	*tp;
+	int		i, nimaps, committed;
+	int		error = 0;
+
+	*retmap = 0;
+
+	/*
+	 * Make sure that the dquots are there.
+	 */
+	if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
+		if ((error = xfs_qm_dqattach(ip, 0))) {
+			return XFS_ERROR(error);
+		}
+	}
+
+	offset_fsb = map->br_startoff;
+	count_fsb = map->br_blockcount;
+	map_start_fsb = offset_fsb;
+
+	XFS_STATS_ADD(xfsstats.xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
+
+	while (count_fsb != 0) {
+		/*
+		 * Set up a transaction with which to allocate the
+		 * backing store for the file.  Do allocations in a
+		 * loop until we get some space in the range we are
+		 * interested in.  The other space that might be allocated
+		 * is in the delayed allocation extent on which we sit
+		 * but before our buffer starts.
+		 */
+		nimaps = 0;
+		while (nimaps == 0) {
+			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
+			error = xfs_trans_reserve(tp, 0, XFS_WRITE_LOG_RES(mp),
+					0, XFS_TRANS_PERM_LOG_RES,
+					XFS_WRITE_LOG_COUNT);
+			if (error) {
+				xfs_trans_cancel(tp, 0);
+				return XFS_ERROR(error);
+			}
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+			xfs_trans_ihold(tp, ip);
+
+			XFS_BMAP_INIT(&free_list, &first_block);
+			nimaps = XFS_STRAT_WRITE_IMAPS;
+
+			/*
+			 * Ensure we don't go beyond eof - it is possible
+			 * the extents changed since we did the read call,
+			 * we dropped the ilock in the interim.
+			 */
+			end_fsb = XFS_B_TO_FSB(mp, ip->i_d.di_size);
+			xfs_bmap_last_offset(NULL, ip, &last_block,
+					     XFS_DATA_FORK);
+			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
+			if ((map_start_fsb + count_fsb) > last_block) {
+				count_fsb = last_block - map_start_fsb;
+				if (count_fsb == 0) {
+					error = EAGAIN;
+					goto trans_cancel;
+				}
+			}
+
+			/* Go get the actual blocks */
+			error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
+					  XFS_BMAPI_WRITE, &first_block, 1,
+					  imap, &nimaps, &free_list);
+			if (error)
+				goto trans_cancel;
+
+			error = xfs_bmap_finish(&tp, &free_list,
+						first_block, &committed);
+			if (error)
+				goto trans_cancel;
+
+			error = xfs_trans_commit(tp,
+					XFS_TRANS_RELEASE_LOG_RES, NULL);
+			if (error)
+				goto error0;
+
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		}
+
+		/*
+		 * See if we were able to allocate an extent that
+		 * covers at least part of the callers request
+		 */
+		for (i = 0; i < nimaps; i++) {
+			if ((map->br_startoff >= imap[i].br_startoff) &&
+			    (map->br_startoff < (imap[i].br_startoff +
+						 imap[i].br_blockcount))) {
+				*map = imap[i];
+				*retmap = 1;
+				XFS_STATS_INC(xfsstats.xs_xstrat_quick);
+				return 0;
+			}
+			count_fsb -= imap[i].br_blockcount;
+		}
+
+		/* So far we have not mapped the requested part of the
+		 * file, just surrounding data, try again.
+		 */
+		nimaps--;
+		offset_fsb = imap[nimaps].br_startoff +
+			     imap[nimaps].br_blockcount;
+		map_start_fsb = offset_fsb;
+	}
+
+trans_cancel:
+	xfs_bmap_cancel(&free_list);
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error0:
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return XFS_ERROR(error);
+}
+
+int
+xfs_iomap_write_unwritten(
+	xfs_inode_t	*ip,
+	loff_t		offset,
+	size_t		count)
+{
+	xfs_mount_t	*mp = ip->i_mount;
+	xfs_trans_t	*tp;
+	xfs_fileoff_t	offset_fsb;
+	xfs_filblks_t	count_fsb;
+	xfs_filblks_t	numblks_fsb;
+	xfs_bmbt_irec_t	imap;
+	int		committed;
+	int		error;
+	int		nres;
+	int		nimaps;
+	xfs_fsblock_t	firstfsb;
+	xfs_bmap_free_t	free_list;
+
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	count_fsb = XFS_B_TO_FSB(mp, count);
+
+	do {
+		nres = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+
+		/*
+		 * set up a transaction to convert the range of extents
+		 * from unwritten to real. Do allocations in a loop until
+		 * we have covered the range passed in.
+		 */
+
+		tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
+		error = xfs_trans_reserve(tp, nres,
+				XFS_WRITE_LOG_RES(mp), 0,
+				XFS_TRANS_PERM_LOG_RES,
+				XFS_WRITE_LOG_COUNT);
+		if (error) {
+			/*
+			 * Free the transaction structure.
+			 */
+			xfs_trans_cancel(tp, 0);
+			goto error0;
+		}
+
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+		xfs_trans_ihold(tp, ip);
+
+		/*
+		 * Modify the unwritten extent state of the buffer.
+		 */
+		XFS_BMAP_INIT(&free_list, &firstfsb);
+		nimaps = 1;
+		error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
+				  XFS_BMAPI_WRITE, &firstfsb, 1, &imap,
+				  &nimaps, &free_list);
+		if (error)
+			goto error_on_bmapi_transaction;
+
+		error = xfs_bmap_finish(&(tp), &(free_list),
+					firstfsb, &committed);
+		if (error)
+			goto error_on_bmapi_transaction;
+
+		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		if (error)
+			goto error0;
+
+		if ((numblks_fsb = imap.br_blockcount) == 0) {
+			/*
+			 * The numblks_fsb value should always get
+			 * smaller, otherwise the loop is stuck.
+			 */
+			ASSERT(imap.br_blockcount);
+			break;
+		}
+		offset_fsb += numblks_fsb;
+		count_fsb -= numblks_fsb;
+	} while (count_fsb > 0);
+
+	return 0;
+
+error_on_bmapi_transaction:
+	xfs_bmap_cancel(&free_list);
+	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+error0:
+	return XFS_ERROR(error);
+}
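Note: every allocation path in this file follows the same transaction skeleton: allocate, reserve log space, lock and join the inode, do the bmap work, then either finish-and-commit or cancel with abort. A condensed sketch of that skeleton, using only calls that appear in the diff above, is shown below; it is a fragment that assumes kernel context and the surrounding declarations, not a compilable standalone function.

    /* Condensed transaction lifecycle, as used by the functions above. */
    tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
    error = xfs_trans_reserve(tp, 0, XFS_WRITE_LOG_RES(mp), 0,
                              XFS_TRANS_PERM_LOG_RES, XFS_WRITE_LOG_COUNT);
    if (error) {
        xfs_trans_cancel(tp, 0);        /* nothing joined yet: plain cancel */
        return XFS_ERROR(error);
    }

    xfs_ilock(ip, XFS_ILOCK_EXCL);
    xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);  /* inode now rides the trans */
    xfs_trans_ihold(tp, ip);                  /* keep it across commit */

    XFS_BMAP_INIT(&free_list, &first_block);
    error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, XFS_BMAPI_WRITE,
                      &first_block, 1, imap, &nimaps, &free_list);
    if (error)
        goto cancel;

    error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
    if (error)
        goto cancel;
    return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);

    cancel:
    xfs_bmap_cancel(&free_list);
    xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);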
fs/xfs/linux/xfs_iops.c

@@ -575,7 +575,7 @@ STATIC int
 linvfs_setxattr(
	struct dentry	*dentry,
	const char	*name,
-	void		*data,
+	const void	*data,
	size_t		size,
	int		flags)
 {
@@ -593,13 +593,15 @@ linvfs_setxattr(
		error = -ENOATTR;
		p += xfs_namespaces[SYSTEM_NAMES].namelen;
		if (strcmp(p, POSIXACL_ACCESS) == 0) {
-			error = xfs_acl_vset(vp, data, size, _ACL_TYPE_ACCESS);
+			error = xfs_acl_vset(vp, (void *) data, size,
+					     _ACL_TYPE_ACCESS);
		}
		else if (strcmp(p, POSIXACL_DEFAULT) == 0) {
-			error = xfs_acl_vset(vp, data, size, _ACL_TYPE_DEFAULT);
+			error = xfs_acl_vset(vp, (void *) data, size,
+					     _ACL_TYPE_DEFAULT);
		}
		else if (strcmp(p, POSIXCAP) == 0) {
-			error = xfs_cap_vset(vp, data, size);
+			error = xfs_cap_vset(vp, (void *) data, size);
		}
		if (!error) {
			error = vn_revalidate(vp);
@@ -619,7 +621,7 @@ linvfs_setxattr(
			return -EPERM;
		xflags |= ATTR_ROOT;
		p += xfs_namespaces[ROOT_NAMES].namelen;
-		VOP_ATTR_SET(vp, p, data, size, xflags, NULL, error);
+		VOP_ATTR_SET(vp, p, (void *) data, size, xflags, NULL, error);
		return -error;
	}
	if (strncmp(name, xfs_namespaces[USER_NAMES].name,
@@ -627,7 +629,7 @@ linvfs_setxattr(
		if (!capable_user_xattr(inode))
			return -EPERM;
		p += xfs_namespaces[USER_NAMES].namelen;
-		VOP_ATTR_SET(vp, p, data, size, xflags, NULL, error);
+		VOP_ATTR_SET(vp, p, (void *) data, size, xflags, NULL, error);
		return -error;
	}
	return -ENOATTR;
fs/xfs/linux/xfs_lrw.c

@@ -808,6 +808,25 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
	}
 }

+int
+xfs_bmap(
+	bhv_desc_t	*bdp,
+	xfs_off_t	offset,
+	ssize_t		count,
+	int		flags,
+	page_buf_bmap_t	*pbmapp,
+	int		*npbmaps)
+{
+	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
+	xfs_iocore_t	*io = &ip->i_iocore;
+
+	ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
+	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
+	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
+
+	return xfs_iomap(io, offset, count, flags, pbmapp, npbmaps);
+}
+
 /*
  * Wrapper around bdstrat so that we can stop data
  * from going to disk in case we are shutting down the filesystem.
fs/xfs/linux/xfs_lrw.h

@@ -36,6 +36,8 @@ struct vnode;
 struct bhv_desc;
 struct xfs_mount;
 struct xfs_iocore;
+struct xfs_inode;
+struct xfs_bmbt_irec;
 struct page_buf_s;
 struct page_buf_bmap_s;
@@ -62,6 +64,16 @@ extern ssize_t xfs_sendfile (struct bhv_desc *, struct file *,
				loff_t *, size_t, read_actor_t,
				void *, struct cred *);

+extern int xfs_iomap (struct xfs_iocore *, xfs_off_t, ssize_t, int,
+		      struct page_buf_bmap_s *, int *);
+extern int xfs_iomap_write_direct (struct xfs_inode *, loff_t, size_t,
+				   int, struct xfs_bmbt_irec *, int *, int);
+extern int xfs_iomap_write_delay (struct xfs_inode *, loff_t, size_t, int,
+				  struct xfs_bmbt_irec *, int *);
+extern int xfs_iomap_write_allocate (struct xfs_inode *,
+				     struct xfs_bmbt_irec *, int *);
+extern int xfs_iomap_write_unwritten (struct xfs_inode *, loff_t, size_t);
+
 extern int xfs_dev_is_read_only (struct xfs_mount *, char *);
 extern void XFS_log_write_unmount_ro (struct bhv_desc *);
fs/xfs/linux/xfs_super.c

@@ -507,8 +507,15 @@ xfs_relse_buftarg(
	truncate_inode_pages(btp->pbr_mapping, 0LL);
 }

+unsigned int
+xfs_getsize_buftarg(
+	xfs_buftarg_t		*btp)
+{
+	return block_size(btp->pbr_bdev);
+}
+
 void
-xfs_size_buftarg(
+xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
@@ -535,7 +542,7 @@ xfs_alloc_buftarg(
	btp->pbr_dev =  bdev->bd_dev;
	btp->pbr_bdev = bdev;
	btp->pbr_mapping = bdev->bd_inode->i_mapping;
-	xfs_size_buftarg(btp, PAGE_CACHE_SIZE, bdev_hardsect_size(bdev));
+	xfs_setsize_buftarg(btp, PAGE_CACHE_SIZE, bdev_hardsect_size(bdev));

	return btp;
 }
fs/xfs/linux/xfs_super.h

@@ -82,15 +82,17 @@ struct xfs_mount;
 struct pb_target;
 struct block_device;

-extern void xfs_initialize_vnode (bhv_desc_t *, vnode_t *,
-				  bhv_desc_t *, int);
+extern void xfs_initialize_vnode(bhv_desc_t *, vnode_t *,
+				 bhv_desc_t *, int);

-extern int  xfs_blkdev_get (struct xfs_mount *, const char *,
-			    struct block_device **);
-extern void xfs_blkdev_put (struct block_device *);
+extern int  xfs_blkdev_get(struct xfs_mount *, const char *,
+			   struct block_device **);
+extern void xfs_blkdev_put(struct block_device *);

-extern struct pb_target *xfs_alloc_buftarg (struct block_device *);
-extern void xfs_size_buftarg (struct pb_target *,
-			      unsigned int, unsigned int);
-extern void xfs_relse_buftarg (struct pb_target *);
-extern void xfs_free_buftarg (struct pb_target *);
+extern struct pb_target *xfs_alloc_buftarg(struct block_device *);
+extern void xfs_relse_buftarg(struct pb_target *);
+extern void xfs_free_buftarg(struct pb_target *);
+extern void xfs_setsize_buftarg(struct pb_target *,
+				unsigned int, unsigned int);
+extern unsigned int xfs_getsize_buftarg(struct pb_target *);

 #endif	/* __XFS_SUPER_H__ */
fs/xfs/pagebuf/page_buf.h

@@ -48,9 +48,6 @@
 #include <linux/buffer_head.h>
 #include <linux/uio.h>

-enum xfs_buffer_state { BH_Delay = BH_PrivateStart };
-BUFFER_FNS(Delay, delay);
-
 /*
  * Turn this on to get pagebuf lock ownership
 #define PAGEBUF_LOCK_TRACKING
@@ -83,7 +80,7 @@ typedef enum { /* pbm_flags values */
	PBMF_HOLE = 0x02,		/* mapping covers a hole	*/
	PBMF_DELAY = 0x04,		/* mapping covers delalloc region */
	PBMF_UNWRITTEN = 0x20		/* mapping covers allocated	*/
-					/* but uninitialized XFS data	*/
+					/* but uninitialized file data	*/
 } bmap_flags_t;

 typedef enum page_buf_flags_e {		/* pb_flags values */
@@ -106,15 +103,13 @@ typedef enum page_buf_flags_e { /* pb_flags values */
	PBF_FILE_ALLOCATE = (1 << 15),	/* allocate all file space	*/
	PBF_DONT_BLOCK = (1 << 16),	/* do not block in current thread */
	PBF_DIRECT = (1 << 17),		/* direct I/O desired		*/
+	PBF_FILE_UNWRITTEN = (1 << 18),	/* convert unwritten extent space */

	/* flags used only internally */
	_PBF_LOCKABLE = (1 << 19),	/* page_buf_t may be locked	*/
-	_PBF_ALL_PAGES_MAPPED = (1 << 21), /* all pages in rage are mapped */
-	_PBF_ADDR_ALLOCATED = (1 << 22), /* pb_addr space was allocated */
-	_PBF_MEM_ALLOCATED = (1 << 23),	/* pb_mem and underlying pages allocated */
+	_PBF_ALL_PAGES_MAPPED = (1 << 21), /* all pages in range mapped	*/
+	_PBF_ADDR_ALLOCATED = (1 << 22), /* pb_addr space was allocated */
+	_PBF_MEM_ALLOCATED = (1 << 23),	/* pb_mem+underlying pages alloc'd */

	PBF_FORCEIO = (1 << 24),
	PBF_FLUSH = (1 << 25),		/* flush disk write cache	*/
fs/xfs/xfs_alloc_btree.h
fs/xfs/xfs_buf.h

@@ -40,6 +40,7 @@
 #define XFS_B_READ		PBF_READ
 #define XFS_B_WRITE		PBF_WRITE
 #define XFS_B_STALE		PBF_STALE
+#define XFS_BUF_TRYLOCK		PBF_TRYLOCK
 #define XFS_INCORE_TRYLOCK	PBF_TRYLOCK
 #define XFS_BUF_LOCK		PBF_LOCK
@@ -47,16 +48,21 @@
 #define BUF_BUSY		PBF_DONT_BLOCK

-#define XFS_BUF_BFLAGS(x)	((x)->pb_flags)
+/* debugging routines might need this */
+#define XFS_BUF_BFLAGS(x)	((x)->pb_flags)
 #define XFS_BUF_ZEROFLAGS(x)	\
	((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_SYNC|PBF_DELWRI))
+
 #define XFS_BUF_STALE(x)	((x)->pb_flags |= XFS_B_STALE)
 #define XFS_BUF_UNSTALE(x)	((x)->pb_flags &= ~XFS_B_STALE)
 #define XFS_BUF_ISSTALE(x)	((x)->pb_flags & XFS_B_STALE)
-#define XFS_BUF_SUPER_STALE(x)	(x)->pb_flags |= XFS_B_STALE;\
-				xfs_buf_undelay(x);\
-				(x)->pb_flags &= ~(PBF_PARTIAL|PBF_NONE)
+#define XFS_BUF_SUPER_STALE(x)	do {				\
+					XFS_BUF_STALE(x);	\
+					xfs_buf_undelay(x);	\
+					XFS_BUF_DONE(x);	\
+				} while (0)
+
+#define XFS_BUF_MANAGE		PBF_FS_MANAGED
+#define XFS_BUF_UNMANAGE(x)	((x)->pb_flags &= ~PBF_FS_MANAGED)

 static inline void xfs_buf_undelay(page_buf_t *pb)
 {
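Note: the XFS_BUF_SUPER_STALE rewrite wraps the multi-statement macro in do { ... } while (0), which makes it expand to a single statement. A minimal standalone illustration of why the unwrapped form is unsafe (hypothetical RESET_* names, not from the patch):

    #include <stdio.h>

    struct thing { int a, b; };

    /* Without do/while(0): expands to TWO statements. */
    #define RESET_BAD(t)	(t)->a = 0; (t)->b = 0

    /* With do/while(0): one statement that still takes a trailing ';'. */
    #define RESET_OK(t)		do { (t)->a = 0; (t)->b = 0; } while (0)

    int main(void)
    {
        struct thing t = { 1, 2 };
        int cond = 0;

        /* With RESET_BAD the 'if' would guard only "(t)->a = 0;" and a
         * following 'else' would not even parse; RESET_OK is safe here. */
        if (cond)
            RESET_OK(&t);
        else
            printf("untouched: a=%d b=%d\n", t.a, t.b);
        return 0;
    }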
fs/xfs/xfs_dmapi.h

@@ -164,13 +164,6 @@ xfs_dm_send_data_event(
	int		flags,
	vrwlock_t	*locktype);

-extern int
-xfs_dm_send_create_event(
-	bhv_desc_t	*dir_bdp,
-	char		*name,
-	mode_t		new_mode,
-	int		*good_event_sent);
-
 extern int
 xfs_dm_send_mmap_event(
	struct vm_area_struct *vma,
@@ -249,16 +242,6 @@ typedef enum {
  * Stubs for XFS DMAPI utility routines.
  */

-static __inline int
-xfs_dm_send_create_event(
-	bhv_desc_t	*dir_bdp,
-	char		*name,
-	mode_t		new_mode,
-	int		*good_event_sent)
-{
-	return 0;
-}
-
 static __inline int
 xfs_dm_send_data_event(
	dm_eventtype_t	event,
fs/xfs/xfs_iget.c

@@ -246,9 +246,7 @@ xfs_iget_core(
	/*
	 * Read the disk inode attributes into a new inode structure and get
-	 * a new vnode for it. Initialize the inode lock so we can idestroy
-	 * it soon if it's a dup.  This should also initialize i_ino, i_bno,
-	 * i_mount, and i_index.
+	 * a new vnode for it. This should also initialize i_ino and i_mount.
	 */
	error = xfs_iread(mp, tp, ino, &ip, bno);
	if (error) {
fs/xfs/xfs_inode.c

@@ -142,7 +142,7 @@ xfs_inobp_bwcheck(xfs_buf_t *bp)
	}
	if (INT_ISZERO(dip->di_next_unlinked, ARCH_CONVERT))  {
		cmn_err(CE_WARN,
-			"Bad next_unlinked field (0) in XFS inode buffer 0x%x, starting blockno %Ld, offset 0x%x",
+			"Bad next_unlinked field (0) in XFS inode buffer 0x%p, starting blockno %Ld, offset 0x%x",
			(__uint64_t)(__psunsigned_t) bp,
			(__int64_t) XFS_BUF_ADDR(bp),
			xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
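Note: this hunk (and the matching one in fs/xfs/xfs_rw.c below) swaps %x for %p when printing a pointer. %x reads its argument as unsigned int, which is narrower than a pointer on 64-bit ABIs, so pointer values get truncated or misprinted; %p is the only portable conversion for pointers. A minimal standalone illustration:

    #include <stdio.h>

    int main(void)
    {
        int x = 42;

        /* %p takes a void * and prints the full pointer value;
         * %x would consume only an unsigned int's worth of the argument. */
        printf("address of x = %p\n", (void *)&x);
        return 0;
    }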
fs/xfs/xfs_iocore.c

@@ -41,14 +41,24 @@ xfs_size_fn(
 }

 xfs_ioops_t	xfs_iocore_xfs = {
	.xfs_ioinit		= (xfs_ioinit_t) fs_noerr,
	.xfs_bmapi_func		= (xfs_bmapi_t) xfs_bmapi,
	.xfs_bmap_eof_func	= (xfs_bmap_eof_t) xfs_bmap_eof,
+	.xfs_iomap_write_direct =
+			(xfs_iomap_write_direct_t) xfs_iomap_write_direct,
+	.xfs_iomap_write_delay =
+			(xfs_iomap_write_delay_t) xfs_iomap_write_delay,
+	.xfs_iomap_write_allocate =
+			(xfs_iomap_write_allocate_t) xfs_iomap_write_allocate,
+	.xfs_iomap_write_unwritten =
+			(xfs_iomap_write_unwritten_t) xfs_iomap_write_unwritten,
	.xfs_ilock		= (xfs_lock_t) xfs_ilock,
+	.xfs_lck_map_shared	= (xfs_lck_map_shared_t) xfs_ilock_map_shared,
	.xfs_ilock_demote	= (xfs_lock_demote_t) xfs_ilock_demote,
	.xfs_ilock_nowait	= (xfs_lock_nowait_t) xfs_ilock_nowait,
	.xfs_unlock		= (xfs_unlk_t) xfs_iunlock,
	.xfs_size_func		= (xfs_size_t) xfs_size_fn,
-	.xfs_lastbyte		= (xfs_lastbyte_t) xfs_file_last_byte,
+	.xfs_iodone		= (xfs_iodone_t) fs_noerr,
 };

 void
@@ -83,4 +93,3 @@ xfs_iocore_inode_init(
	xfs_iocore_inode_reinit(ip);
 }
fs/xfs/xfs_mount.c

@@ -419,42 +419,64 @@ xfs_xlatesb(
 int
 xfs_readsb(xfs_mount_t *mp)
 {
+	unsigned int	sector_size;
+	unsigned int	extra_flags;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;
-	int		error = 0;
+	int		error;

-	ASSERT(mp->m_sb_bp == 0);
+	ASSERT(mp->m_sb_bp == NULL);
+	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * Allocate a (locked) buffer to hold the superblock.
-	 * This will be kept around at all time to optimize
+	 * This will be kept around at all times to optimize
	 * access to the superblock.
	 */
-	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR, 1,
-				PBF_LOCK | PBF_READ | PBF_MAPPED |
-				PBF_MAPPABLE | PBF_FS_MANAGED);
-	ASSERT(bp != NULL);
-	ASSERT(XFS_BUF_ISBUSY(bp) && XFS_BUF_VALUSEMA(bp) <= 0);
+	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
+	extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
+
+	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
+				BTOBB(sector_size), extra_flags);
+	ASSERT(bp);
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

	/*
	 * Initialize the mount structure from the superblock.
	 * But first do some basic consistency checking.
	 */
	sbp = XFS_BUF_TO_SBP(bp);
-	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, ARCH_CONVERT,
-		    XFS_SB_ALL_BITS);
-	if ((error = xfs_mount_validate_sb(mp, &(mp->m_sb)))) {
+	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, ARCH_CONVERT,
+		    XFS_SB_ALL_BITS);
+
+	error = xfs_mount_validate_sb(mp, &(mp->m_sb));
+	if (error) {
		cmn_err(CE_WARN, "XFS: SB validate failed");
-		goto err;
+		XFS_BUF_UNMANAGE(bp);
+		xfs_buf_relse(bp);
+		return error;
	}

+	/*
+	 * Re-read the superblock so that our buffer is correctly sized.
+	 * We only need to do this if sector size on-disk is different.
+	 */
+	if (sector_size != mp->m_sb.sb_sectsize) {
+		XFS_BUF_UNMANAGE(bp);
+		xfs_buf_relse(bp);
+		sector_size = mp->m_sb.sb_sectsize;
+		bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
+					BTOBB(sector_size), extra_flags);
+		ASSERT(bp);
+		ASSERT(XFS_BUF_ISBUSY(bp));
+		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+	}
+
	mp->m_sb_bp = bp;
	xfs_buf_relse(bp);
	ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
	return 0;
-
- err:
-	bp->pb_flags &= ~PBF_FS_MANAGED;
-	xfs_buf_relse(bp);
-	return error;
 }
@@ -1531,10 +1553,10 @@ xfs_freesb(
	/*
	 * Use xfs_getsb() so that the buffer will be locked
-	 * when we call nfreerbuf().
+	 * when we call xfs_buf_relse().
	 */
	bp = xfs_getsb(mp, 0);
-	bp->pb_flags &= ~PBF_FS_MANAGED;
+	XFS_BUF_UNMANAGE(bp);
	xfs_buf_relse(bp);
	mp->m_sb_bp = NULL;
 }
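Note: the xfs_readsb() rework reads the superblock twice when needed: first with the block device's idea of the sector size, then, after validation, again at the sector size the superblock itself records on disk. A generic userspace sketch of that probe/validate/re-read pattern (hypothetical names, not the XFS API):

    #include <stdio.h>
    #include <unistd.h>

    struct sb { unsigned int sect_size; char data[508]; };

    static int sb_valid(const struct sb *s)
    {
        return s->sect_size >= 512;   /* stand-in for real validation */
    }

    int read_sb(int fd, struct sb *out, unsigned int dev_sect_size)
    {
        /* First read: sized by the device's guess at the sector size. */
        if (pread(fd, out, sizeof(*out), 0) != (ssize_t)sizeof(*out))
            return -1;
        if (!sb_valid(out))           /* validate before trusting fields */
            return -1;
        if (out->sect_size != dev_sect_size) {
            /* Buffer was sized wrong: discard and read again using the
             * size recorded on disk, the same dance xfs_readsb() does. */
            if (pread(fd, out, sizeof(*out), 0) != (ssize_t)sizeof(*out))
                return -1;
        }
        return 0;
    }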
fs/xfs/xfs_mount.h
View file @
c2e95c3f
...
...
@@ -87,41 +87,60 @@ struct xfs_bmap_free;
#define AIL_LOCK(mp,s) s=mutex_spinlock(&(mp)->m_ail_lock)
#define AIL_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_ail_lock, s)
/* Prototypes and functions for I/O core modularization, a vector
* of functions is used to indirect from xfs/cxfs independent code
* to the xfs/cxfs dependent code.
* The vector is placed in the mount structure so that we can
* minimize the number of memory indirections involved.
/*
* Prototypes and functions for I/O core modularization.
*/
struct flid;
struct buf;

typedef int		(*xfs_ioinit_t)(struct vfs *,
				struct xfs_mount_args *, int *);
typedef int		(*xfs_bmapi_t)(struct xfs_trans *, void *,
				xfs_fileoff_t, xfs_filblks_t, int,
				xfs_fsblock_t *, xfs_extlen_t,
				struct xfs_bmbt_irec *, int *,
				struct xfs_bmap_free *);
typedef int		(*xfs_bmap_eof_t)(void *, xfs_fileoff_t, int, int *);
typedef int		(*xfs_iomap_write_direct_t)(void *, loff_t, size_t,
				int, struct xfs_bmbt_irec *, int *, int);
typedef int		(*xfs_iomap_write_delay_t)(void *, loff_t, size_t,
				int, struct xfs_bmbt_irec *, int *);
typedef int		(*xfs_iomap_write_allocate_t)(void *,
				struct xfs_bmbt_irec *, int *);
typedef int		(*xfs_iomap_write_unwritten_t)(void *, loff_t, size_t);
typedef uint		(*xfs_lck_map_shared_t)(void *);
typedef void		(*xfs_lock_t)(void *, uint);
typedef void		(*xfs_lock_demote_t)(void *, uint);
typedef int		(*xfs_lock_nowait_t)(void *, uint);
typedef void		(*xfs_unlk_t)(void *, unsigned int);
typedef void		(*xfs_chgtime_t)(void *, int);
typedef xfs_fsize_t	(*xfs_size_t)(void *);
typedef xfs_fsize_t	(*xfs_lastbyte_t)(void *);
typedef xfs_fsize_t	(*xfs_iodone_t)(struct vfs *);

typedef struct xfs_ioops {
	xfs_ioinit_t			xfs_ioinit;
	xfs_bmapi_t			xfs_bmapi_func;
	xfs_bmap_eof_t			xfs_bmap_eof_func;
	xfs_iomap_write_direct_t	xfs_iomap_write_direct;
	xfs_iomap_write_delay_t		xfs_iomap_write_delay;
	xfs_iomap_write_allocate_t	xfs_iomap_write_allocate;
	xfs_iomap_write_unwritten_t	xfs_iomap_write_unwritten;
	xfs_lock_t			xfs_ilock;
	xfs_lck_map_shared_t		xfs_lck_map_shared;
	xfs_lock_demote_t		xfs_ilock_demote;
	xfs_lock_nowait_t		xfs_ilock_nowait;
	xfs_unlk_t			xfs_unlock;
	xfs_chgtime_t			xfs_chgtime;
	xfs_size_t			xfs_size_func;
	xfs_lastbyte_t			xfs_lastbyte;
	xfs_iodone_t			xfs_iodone;
} xfs_ioops_t;
#define XFS_IOINIT(vfsp, args, flags) \
(*(mp)->m_io_ops.xfs_ioinit)(vfsp, args, flags)
#define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist) \
(*(mp)->m_io_ops.xfs_bmapi_func) \
(trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist)
...
...
@@ -130,9 +149,31 @@ typedef struct xfs_ioops {
(*(mp)->m_io_ops.xfs_bmap_eof_func) \
((io)->io_obj, endoff, whichfork, eof)
#define XFS_IOMAP_WRITE_DIRECT(mp, io, offset, count, flags, mval, nmap, found)\
(*(mp)->m_io_ops.xfs_iomap_write_direct) \
((io)->io_obj, offset, count, flags, mval, nmap, found)
#define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \
(*(mp)->m_io_ops.xfs_iomap_write_delay) \
((io)->io_obj, offset, count, flags, mval, nmap)
#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, mval, nmap) \
(*(mp)->m_io_ops.xfs_iomap_write_allocate) \
((io)->io_obj, mval, nmap)
#define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \
(*(mp)->m_io_ops.xfs_iomap_write_unwritten) \
((io)->io_obj, offset, count)
#define XFS_LCK_MAP_SHARED(mp, io) \
(*(mp)->m_io_ops.xfs_lck_map_shared)((io)->io_obj)
#define XFS_ILOCK(mp, io, mode) \
(*(mp)->m_io_ops.xfs_ilock)((io)->io_obj, mode)
#define XFS_ILOCK_NOWAIT(mp, io, mode) \
(*(mp)->m_io_ops.xfs_ilock_nowait)((io)->io_obj, mode)
#define XFS_IUNLOCK(mp, io, mode) \
(*(mp)->m_io_ops.xfs_unlock)((io)->io_obj, mode)
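The typedefs, the xfs_ioops_t structure, and the dispatch macros above all implement one pattern: an ops vector embedded in the mount structure, called through the opaque (io)->io_obj handle so that xfs/cxfs-independent code never touches the dependent types. As a minimal self-contained sketch of that pattern (all names here are illustrative, not from the commit):

#include <stdio.h>

/* Hypothetical opaque per-file I/O handle; the real code passes (io)->io_obj. */
struct demo_io {
	void	*io_obj;
};

/* Hypothetical ops vector, mirroring the xfs_ioops_t shape. */
struct demo_ioops {
	int	(*demo_write)(void *obj, long offset, unsigned long count);
};

/* Hypothetical mount structure embedding the vector, like xfs_mount. */
struct demo_mount {
	struct demo_ioops	m_io_ops;
};

/* Dispatch macro in the style of XFS_IOMAP_WRITE_DIRECT() above. */
#define DEMO_WRITE(mp, io, offset, count) \
	(*(mp)->m_io_ops.demo_write)((io)->io_obj, offset, count)

/* One concrete implementation, bound into the vector at mount time. */
static int plain_write(void *obj, long offset, unsigned long count)
{
	printf("write obj=%p off=%ld len=%lu\n", obj, offset, count);
	return 0;
}

int main(void)
{
	struct demo_mount mp = { { plain_write } };
	struct demo_io io;

	io.io_obj = &io;
	return DEMO_WRITE(&mp, &io, 0L, 4096UL);
}

Embedding the vector in each mount keeps dispatch to one pointer load plus an indirect call, which is what the old comment means by minimizing the number of memory indirections.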
...
...
@@ -142,8 +183,13 @@ typedef struct xfs_ioops {
#define XFS_SIZE(mp, io) \
(*(mp)->m_io_ops.xfs_size_func)((io)->io_obj)
#define XFS_LASTBYTE(mp, io) \
(*(mp)->m_io_ops.xfs_lastbyte)((io)->io_obj)
#define XFS_IODONE(vfsp) \
(*(mp)->m_io_ops.xfs_iodone)(vfsp)
/*
* Prototypes and functions for the XFS realtime subsystem.
*/
typedef struct xfs_mount {
...
...
@@ -303,8 +349,8 @@ typedef struct xfs_mount {
/*
* Default minimum read and write sizes.
*/
#define XFS_READIO_LOG_LARGE	12
#define XFS_WRITEIO_LOG_LARGE	12
#define XFS_READIO_LOG_LARGE	16
#define XFS_WRITEIO_LOG_LARGE	16
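/* log2 values: the default large I/O size grows from 2^12 = 4K to 2^16 = 64K */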
/*
* Default allocation size
*/
...
...
fs/xfs/xfs_rtalloc.c
View file @
c2e95c3f
...
...
@@ -2287,7 +2287,7 @@ xfs_rtmount_init(
		return XFS_ERROR(E2BIG);
	}
	error = xfs_read_buf(mp, mp->m_rtdev_targp,
				XFS_FSB_TO_BB(mp, d - 1),
				d - XFS_FSB_TO_BB(mp, 1),
				XFS_FSB_TO_BB(mp, 1), 0, &bp);
	if (error) {
		cmn_err(CE_WARN,
...
...
fs/xfs/xfs_rw.c
View file @
c2e95c3f
...
...
@@ -97,7 +97,7 @@ xfs_do_force_shutdown(
	if (!(flags & XFS_FORCE_UMOUNT)) {
		cmn_err(CE_NOTE,
	"xfs_force_shutdown(%s,0x%x) called from line %d of file %s. Return address = 0x%x",
	"xfs_force_shutdown(%s,0x%x) called from line %d of file %s. Return address = 0x%p",
			mp->m_fsname, flags, lnnum, fname, __return_address);
	}
/*
...
...
fs/xfs/xfs_trans_buf.c
View file @
c2e95c3f
...
...
@@ -472,7 +472,7 @@ xfs_trans_read_buf(
*/
#if defined(DEBUG)
	if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%x", bp);
		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
#endif
	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
					(XFS_B_STALE|XFS_B_DELWRI));
...
...
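The two format-string fixes above (fs/xfs/xfs_rw.c and fs/xfs/xfs_trans_buf.c) are the same class of change: a pointer was being printed through 0x%x, which expects an int and truncates the value on 64-bit platforms, so both messages now use %p, the conversion defined for pointer arguments.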
fs/xfs/xfs_vfsops.c
View file @
c2e95c3f
...
...
@@ -451,17 +451,18 @@ xfs_mount(
		goto error;
	}

	xfs_size_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
	xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
			    mp->m_sb.sb_sectsize);

	if (logdev && logdev != ddev) {
		unsigned int	ss = BBSIZE;
		unsigned int	log_sector_size = BBSIZE;

		if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb))
			ss = mp->m_sb.sb_logsectsize;
		xfs_size_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize, ss);
			log_sector_size = mp->m_sb.sb_logsectsize;
		xfs_setsize_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize,
				    log_sector_size);
	}
	if (rtdev)
		xfs_size_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_blocksize,
		xfs_setsize_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_blocksize,
				    mp->m_sb.sb_blocksize);

	error = xfs_mountfs(vfsp, mp, ddev->bd_dev, flags);
...
...
fs/xfs/xfs_vnodeops.c
View file @
c2e95c3f
...
...
@@ -44,15 +44,6 @@ extern int xfs_ioctl(bhv_desc_t *, struct inode *, struct file *,
		unsigned int, unsigned long);

#ifdef XFS_RW_TRACE
STATIC void
xfs_ctrunc_trace(int tag, xfs_inode_t *ip);
#else
#define	xfs_ctrunc_trace(tag, ip)
#endif /* DEBUG */
/*
* For xfs, we check that the file isn't too big to be opened by this kernel.
* No other open action is required for regular files. Devices are handled
...
...
@@ -1880,7 +1871,6 @@ xfs_lookup(
	cred_t			*credp)
{
	xfs_inode_t		*dp, *ip;
	struct vnode		*vp;
	xfs_ino_t		e_inum;
	int			error;
	uint			lock_mode;
...
...
@@ -1896,58 +1886,19 @@ xfs_lookup(
	lock_mode = xfs_ilock_map_shared(dp);
	error = xfs_dir_lookup_int(dir_bdp, lock_mode, dentry, &e_inum, &ip);
	if (error) {
		xfs_iunlock_map_shared(dp, lock_mode);
		return error;
	}
	vp = XFS_ITOV(ip);
	if (!error) {
		*vpp = XFS_ITOV(ip);
		ITRACE(ip);
	}
	xfs_iunlock_map_shared(dp, lock_mode);
	*vpp = vp;
	return 0;
	return error;
}
#ifdef XFS_RW_TRACE
STATIC void
xfs_ctrunc_trace(
	int		tag,
	xfs_inode_t	*ip)
{
	if (ip->i_rwtrace == NULL) {
		return;
	}

	ktrace_enter(ip->i_rwtrace,
		     (void *)((long)tag),
		     (void *)ip,
		     (void *)((long)private.p_cpuid),
		     (void *)0, (void *)0, (void *)0, (void *)0,
		     (void *)0, (void *)0, (void *)0, (void *)0,
		     (void *)0, (void *)0, (void *)0, (void *)0,
		     (void *)0);
}
#endif /* XFS_RW_TRACE */
#define XFS_CREATE_NEW_MAXTRIES 10000
/*
* xfs_create (create a new file).
* It might still find that the name exists out there, though.
* But vpp doesn't point at a vnode.
*/
STATIC int
xfs_create(
...
@@ -1968,7 +1919,6 @@ xfs_create(
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	boolean_t		dp_joined_to_trans;
	int			dm_event_sent = 0;
	uint			cancel_flags;
	int			committed;
	xfs_prid_t		prid;
...
...
@@ -1989,8 +1939,10 @@ xfs_create(
		return XFS_ERROR(ENAMETOOLONG);

	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
		error = xfs_dm_send_create_event(dir_bdp, name,
					dm_di_mode, &dm_event_sent);
		error = dm_send_namesp_event(DM_EVENT_CREATE,
					dir_bdp, DM_RIGHT_NULL, NULL,
					DM_RIGHT_NULL, name, NULL,
					dm_di_mode, 0, 0);
		if (error)
			return error;
	}
...
...
@@ -2161,7 +2113,7 @@ xfs_create(
/* Fallthrough to std_return with error = 0 */
std_return:
	if ((error != 0 && dm_event_sent != 0) &&
	if ((error != 0) &&
			DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
					 DM_EVENT_POSTCREATE)) {
		(void) dm_send_namesp_event(DM_EVENT_POSTCREATE,
...
...
@@ -2227,16 +2179,7 @@ int xfs_rm_attempts;
* vnode ref count will still include that from the .. entry in
* this case.
*
* The inode passed in will have been looked up using xfs_get_dir_entry().
* Since that lookup the directory lock will have been dropped, so
* we need to validate that the inode given is still pointed to by the
* directory. We use the directory inode's in-memory generation count
* as an optimization to tell if a new lookup is necessary. If the
* directory no longer points to the given inode with the given name,
* then we drop the directory lock, set the entry_changed parameter to 1,
* and return. It is up to the caller to drop the reference to the inode.
*
* There is a dealock we need to worry about. If the locked directory is
* There is a deadlock we need to worry about. If the locked directory is
* in the AIL, it might be blocking up the log. The next inode we lock
* could be already locked by another thread waiting for log space (e.g
* a permanent log reservation with a long running transaction (see
...
...
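The comment above is cut off by the diff context, but the concern it describes is the classic two-lock deadlock: we already hold the directory's lock and must take the entry's lock next. The usual defence, and the shape of the attempts/again: retry loop visible in the hunks below, is to impose a fixed lock order and fall back to a nonblocking attempt with backoff. A simplified pthreads sketch of that pattern (hypothetical names, not the kernel's locking API):

#include <pthread.h>
#include <sched.h>

/* Hypothetical stand-in for an inode: a lock plus an ordering key. */
struct node {
	pthread_mutex_t	lock;
	unsigned long	ino;		/* number used as the lock order */
};

/*
 * Acquire two distinct nodes without deadlocking: block on the
 * lower-numbered one, trylock the higher one, and on failure drop
 * everything and retry, much like the again: loop that follows.
 */
static void lock_pair(struct node *a, struct node *b)
{
	struct node *lo = (a->ino < b->ino) ? a : b;
	struct node *hi = (a->ino < b->ino) ? b : a;

	for (;;) {
		pthread_mutex_lock(&lo->lock);
		if (pthread_mutex_trylock(&hi->lock) == 0)
			return;			/* both locks held */
		pthread_mutex_unlock(&lo->lock);
		sched_yield();			/* back off, then retry */
	}
}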
@@ -2249,8 +2192,7 @@ STATIC int
xfs_lock_dir_and_entry(
	xfs_inode_t	*dp,
	vname_t		*dentry,
	xfs_inode_t	*ip,	/* inode of entry 'name' */
	int		*entry_changed)
	xfs_inode_t	*ip)	/* inode of entry 'name' */
{
	int		attempts;
	xfs_ino_t	e_inum;
...
...
@@ -2263,7 +2205,6 @@ xfs_lock_dir_and_entry(
	attempts = 0;
again:
	*entry_changed = 0;
	xfs_ilock(dp, XFS_ILOCK_EXCL);
	e_inum = ip->i_ino;
...
...
@@ -2477,7 +2418,6 @@ xfs_remove(
	xfs_fsblock_t		first_block;
	int			cancel_flags;
	int			committed;
	int			entry_changed;
	int			dm_di_mode = 0;
	int			link_zero;
	uint			resblks;
...
...
@@ -2504,7 +2444,6 @@ xfs_remove(
}
/* From this point on, return through std_return */
retry:
	ip = NULL;
/*
...
...
@@ -2571,7 +2510,7 @@ xfs_remove(
		return error;
	}

	error = xfs_lock_dir_and_entry(dp, dentry, ip, &entry_changed);
	error = xfs_lock_dir_and_entry(dp, dentry, ip);
	if (error) {
		REMOVE_DEBUG_TRACE(__LINE__);
		xfs_trans_cancel(tp, cancel_flags);
...
...
@@ -2579,17 +2518,6 @@ xfs_remove(
		goto std_return;
	}
/*
* If the inode we found in the first pass is no longer
* the entry with the given name, then drop our transaction and
* inode reference and start over.
*/
	if (entry_changed) {
		xfs_trans_cancel(tp, cancel_flags);
		IRELE(ip);
		goto retry;
	}
/*
* At this point, we've gotten both the directory and the entry
* inodes locked.
...
...
@@ -2610,28 +2538,6 @@ xfs_remove(
		goto error_return;
	}

	if ((ip->i_d.di_mode & IFMT) == IFDIR) {
		error = XFS_ERROR(EPERM);
		REMOVE_DEBUG_TRACE(__LINE__);
		goto error_return;
	}

	/*
	 * Return error when removing . and ..
	 */
	if (name[0] == '.') {
		if (name[1] == '\0') {
			error = XFS_ERROR(EINVAL);
			REMOVE_DEBUG_TRACE(__LINE__);
			goto error_return;
		} else if (name[1] == '.' && name[2] == '\0') {
			error = XFS_ERROR(EEXIST);
			REMOVE_DEBUG_TRACE(__LINE__);
			goto error_return;
		}
	}
/*
* Entry must exist since we did a lookup in xfs_lock_dir_and_entry.
*/
...
...
@@ -2696,8 +2602,7 @@ xfs_remove(
	IRELE(ip);

	/* Fall through to std_return with error = 0 */
 std_return:
std_return:
	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) {
		(void) dm_send_namesp_event(DM_EVENT_POSTREMOVE,
...
...
@@ -2938,7 +2843,6 @@ xfs_mkdir(
	vnode_t			*dir_vp;
	boolean_t		dp_joined_to_trans;
	boolean_t		created = B_FALSE;
	int			dm_event_sent = 0;
	xfs_prid_t		prid;
	xfs_dquot_t		*udqp, *gdqp;
	uint			resblks;
...
...
@@ -2961,8 +2865,10 @@ xfs_mkdir(
	dm_di_mode = vap->va_mode | VTTOIF(vap->va_type);

	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
		error = xfs_dm_send_create_event(dir_bdp, dir_name,
					dm_di_mode, &dm_event_sent);
		error = dm_send_namesp_event(DM_EVENT_CREATE,
					dir_bdp, DM_RIGHT_NULL, NULL,
					DM_RIGHT_NULL, dir_name, NULL,
					dm_di_mode, 0, 0);
		if (error)
			return error;
	}
...
...
@@ -3127,7 +3033,7 @@ xfs_mkdir(
* xfs_trans_commit. */
std_return:
	if ((created || (error != 0 && dm_event_sent != 0)) &&
	if ((created || (error != 0)) &&
			DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
					 DM_EVENT_POSTCREATE)) {
		(void) dm_send_namesp_event(DM_EVENT_POSTCREATE,
...
...
@@ -3180,7 +3086,6 @@ xfs_rmdir(
	xfs_fsblock_t		first_block;
	int			cancel_flags;
	int			committed;
	int			entry_changed;
	vnode_t			*dir_vp;
	int			dm_di_mode = 0;
	int			last_cdp_link;
...
...
@@ -3209,7 +3114,6 @@ xfs_rmdir(
/* Return through std_return after this point. */
retry:
	cdp = NULL;
/*
...
...
@@ -3281,24 +3185,13 @@ xfs_rmdir(
* that the directory entry for the child directory inode has
* not changed while we were obtaining a log reservation.
*/
	error = xfs_lock_dir_and_entry(dp, dentry, cdp, &entry_changed);
	error = xfs_lock_dir_and_entry(dp, dentry, cdp);
	if (error) {
		xfs_trans_cancel(tp, cancel_flags);
		IRELE(cdp);
		goto std_return;
	}

	/*
	 * If the inode we found in the first pass is no longer
	 * the entry with the given name, then drop our transaction and
	 * inode reference and start over.
	 */
	if (entry_changed) {
		xfs_trans_cancel(tp, cancel_flags);
		IRELE(cdp);
		goto retry;
	}

	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	if (dp != cdp) {
/*
...
...
@@ -3455,20 +3348,12 @@ xfs_readdir(
}
	lock_mode = xfs_ilock_map_shared(dp);

	if ((dp->i_d.di_mode & IFMT) != IFDIR) {
		xfs_iunlock_map_shared(dp, lock_mode);
		return XFS_ERROR(ENOTDIR);
	}

	start_offset = uiop->uio_offset;
	error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp);
	if (start_offset != uiop->uio_offset) {
		xfs_ichgtime(dp, XFS_ICHGTIME_ACC);
	}

	xfs_iunlock_map_shared(dp, lock_mode);
	return error;
}
...
...
fs/xfs/xfsidbg.c
View file @
c2e95c3f
...
...
@@ -1637,9 +1637,9 @@ static void printinode(struct inode *ip)
	if (ip == NULL)
		return;

	kdb_printf(" i_ino = %lu i_count = %u i_dev = 0x%x i_size %Ld\n",
	kdb_printf(" i_ino = %lu i_count = %u i_size %Ld\n",
		   ip->i_ino, atomic_read(&ip->i_count),
		   ip->i_sb->s_dev, ip->i_size);
		   ip->i_size);
	kdb_printf(" i_mode = 0x%x i_nlink = %d i_rdev = 0x%x i_state = 0x%lx\n",
...
...
include/linux/buffer_head.h
View file @
c2e95c3f
...
...
@@ -22,6 +22,7 @@ enum bh_state_bits {
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */

	BH_PrivateStart,/* not a state bit, but the first bit available
...
...
@@ -105,6 +106,7 @@ BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay);
BUFFER_FNS(Boundary, boundary)
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
...
...
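BUFFER_FNS is the accessor-generating macro defined earlier in this same header; from memory of the 2.5-era definition (a sketch, not text from this patch), the new BUFFER_FNS(Delay, delay) line expands to roughly:

/* Approximate expansion; relies on the header's own set_bit/clear_bit/test_bit. */
static inline void set_buffer_delay(struct buffer_head *bh)
{
	set_bit(BH_Delay, &bh->b_state);
}
static inline void clear_buffer_delay(struct buffer_head *bh)
{
	clear_bit(BH_Delay, &bh->b_state);
}
static inline int buffer_delay(struct buffer_head *bh)
{
	return test_bit(BH_Delay, &bh->b_state);
}

That trio is what gives filesystems set/clear/test access to the new delayed-allocation state bit.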