Kirill Smelkov / linux / Commits / 63510d9f

Commit 63510d9f, authored Jan 24, 2023 by Andreas Gruenbacher

    Merge branch 'iomap-for-next' of
    git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git

Parents: 3c006ad7, 471859f5

Showing 4 changed files with 103 additions and 57 deletions (+103 -57):

    fs/gfs2/bmap.c             +26  -12
    fs/iomap/buffered-io.c     +60  -31
    fs/xfs/xfs_iomap.c          +2   -2
    include/linux/iomap.h      +15  -12
fs/gfs2/bmap.c

@@ -956,26 +956,40 @@ static int __gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
                 goto out;
         }
 
-static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
-                                   unsigned len)
+static struct folio *
+gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len)
 {
+        struct inode *inode = iter->inode;
         unsigned int blockmask = i_blocksize(inode) - 1;
         struct gfs2_sbd *sdp = GFS2_SB(inode);
         unsigned int blocks;
+        struct folio *folio;
+        int status;
 
         blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
-        return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
+        status = gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
+        if (status)
+                return ERR_PTR(status);
+
+        folio = iomap_get_folio(iter, pos);
+        if (IS_ERR(folio))
+                gfs2_trans_end(sdp);
+
+        return folio;
 }
 
-static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
-                                 unsigned copied, struct page *page)
+static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos,
+                                 unsigned copied, struct folio *folio)
 {
         struct gfs2_trans *tr = current->journal_info;
         struct gfs2_inode *ip = GFS2_I(inode);
         struct gfs2_sbd *sdp = GFS2_SB(inode);
 
-        if (page && !gfs2_is_stuffed(ip))
-                gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+        if (!gfs2_is_stuffed(ip))
+                gfs2_page_add_databufs(ip, &folio->page, offset_in_page(pos),
+                                       copied);
+
+        folio_unlock(folio);
+        folio_put(folio);
 
         if (tr->tr_num_buf_new)
                 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
@@ -983,9 +997,9 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
                 gfs2_trans_end(sdp);
 }
 
-static const struct iomap_page_ops gfs2_iomap_page_ops = {
-        .page_prepare = gfs2_iomap_page_prepare,
-        .page_done = gfs2_iomap_page_done,
+static const struct iomap_folio_ops gfs2_iomap_folio_ops = {
+        .get_folio = gfs2_iomap_get_folio,
+        .put_folio = gfs2_iomap_put_folio,
 };
 
 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
@@ -1061,7 +1075,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
         }
 
         if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
-                iomap->page_ops = &gfs2_iomap_page_ops;
+                iomap->folio_ops = &gfs2_iomap_folio_ops;
         return 0;
 
 out_trans_end:
@@ -1277,7 +1291,7 @@ int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
 /*
  * NOTE: Never call gfs2_block_zero_range with an open transaction because it
  * uses iomap write to perform its actions, which begin their own transactions
- * (iomap_begin, page_prepare, etc.)
+ * (iomap_begin, get_folio, etc.)
  */
 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
                                  unsigned int length)
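Note: the gfs2 conversion above illustrates the intended pairing of the new hooks: ->get_folio acquires a filesystem resource (here a transaction) before looking up the folio and unwinds it if the lookup fails, while ->put_folio releases both the folio and the resource. A minimal sketch of the same pattern for a hypothetical filesystem follows; the myfs_* names and the myfs_reserve()/myfs_release() helpers are illustrative only and do not exist in the tree.

/* Hypothetical sketch only -- not part of this commit. */
#include <linux/iomap.h>
#include <linux/pagemap.h>

static struct folio *myfs_get_folio(struct iomap_iter *iter, loff_t pos,
                                    unsigned len)
{
        struct folio *folio;
        int error;

        error = myfs_reserve(iter->inode, pos, len);    /* illustrative helper */
        if (error)
                return ERR_PTR(error);

        folio = iomap_get_folio(iter, pos);             /* new iomap helper */
        if (IS_ERR(folio))
                myfs_release(iter->inode);              /* undo on failure */
        return folio;
}

static void myfs_put_folio(struct inode *inode, loff_t pos, unsigned copied,
                           struct folio *folio)
{
        /* put_folio now owns unlocking and dropping the folio reference ... */
        folio_unlock(folio);
        folio_put(folio);
        /* ... and releasing whatever get_folio acquired. */
        myfs_release(inode);
}

static const struct iomap_folio_ops myfs_folio_ops = {
        .get_folio      = myfs_get_folio,
        .put_folio      = myfs_put_folio,
};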
fs/iomap/buffered-io.c

@@ -457,6 +457,33 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 }
 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
 
+/**
+ * iomap_get_folio - get a folio reference for writing
+ * @iter: iteration structure
+ * @pos: start offset of write
+ *
+ * Returns a locked reference to the folio at @pos, or an error pointer if the
+ * folio could not be obtained.
+ */
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
+{
+        unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
+        struct folio *folio;
+
+        if (iter->flags & IOMAP_NOWAIT)
+                fgp |= FGP_NOWAIT;
+
+        folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
+                        fgp, mapping_gfp_mask(iter->inode->i_mapping));
+        if (folio)
+                return folio;
+
+        if (iter->flags & IOMAP_NOWAIT)
+                return ERR_PTR(-EAGAIN);
+        return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(iomap_get_folio);
+
 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
         trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
@@ -575,6 +602,30 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
         return 0;
 }
 
+static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
+                size_t len)
+{
+        const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
+
+        if (folio_ops && folio_ops->get_folio)
+                return folio_ops->get_folio(iter, pos, len);
+        else
+                return iomap_get_folio(iter, pos);
+}
+
+static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
+                struct folio *folio)
+{
+        const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
+
+        if (folio_ops && folio_ops->put_folio) {
+                folio_ops->put_folio(iter->inode, pos, ret, folio);
+        } else {
+                folio_unlock(folio);
+                folio_put(folio);
+        }
+}
+
 static int iomap_write_begin_inline(const struct iomap_iter *iter,
                 struct folio *folio)
 {
@@ -587,15 +638,11 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter,
 static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
                 size_t len, struct folio **foliop)
 {
-        const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
+        const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
         const struct iomap *srcmap = iomap_iter_srcmap(iter);
         struct folio *folio;
-        unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
         int status = 0;
 
-        if (iter->flags & IOMAP_NOWAIT)
-                fgp |= FGP_NOWAIT;
-
         BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
         if (srcmap != &iter->iomap)
                 BUG_ON(pos + len > srcmap->offset + srcmap->length);
@@ -606,18 +653,9 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
         if (!mapping_large_folio_support(iter->inode->i_mapping))
                 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
 
-        if (page_ops && page_ops->page_prepare) {
-                status = page_ops->page_prepare(iter->inode, pos, len);
-                if (status)
-                        return status;
-        }
-
-        folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
-                        fgp, mapping_gfp_mask(iter->inode->i_mapping));
-        if (!folio) {
-                status = (iter->flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOMEM;
-                goto out_no_page;
-        }
+        folio = __iomap_get_folio(iter, pos, len);
+        if (IS_ERR(folio))
+                return PTR_ERR(folio);
 
         /*
          * Now we have a locked folio, before we do anything with it we need to
@@ -629,8 +667,8 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
          * could do the wrong thing here (zero a page range incorrectly or fail
          * to zero) and corrupt data.
          */
-        if (page_ops && page_ops->iomap_valid) {
-                bool iomap_valid = page_ops->iomap_valid(iter->inode,
+        if (folio_ops && folio_ops->iomap_valid) {
+                bool iomap_valid = folio_ops->iomap_valid(iter->inode,
                                                         &iter->iomap);
                 if (!iomap_valid) {
                         iter->iomap.flags |= IOMAP_F_STALE;
@@ -656,13 +694,9 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
         return 0;
 
 out_unlock:
-        folio_unlock(folio);
-        folio_put(folio);
+        __iomap_put_folio(iter, pos, 0, folio);
         iomap_write_failed(iter->inode, pos, len);
 
-out_no_page:
-        if (page_ops && page_ops->page_done)
-                page_ops->page_done(iter->inode, pos, 0, NULL);
         return status;
 }
@@ -712,7 +746,6 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
 static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
                 size_t copied, struct folio *folio)
 {
-        const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
         const struct iomap *srcmap = iomap_iter_srcmap(iter);
         loff_t old_size = iter->inode->i_size;
         size_t ret;
@@ -735,14 +768,10 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
                 i_size_write(iter->inode, pos + ret);
                 iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
         }
-        folio_unlock(folio);
+        __iomap_put_folio(iter, pos, ret, folio);
 
         if (old_size < pos)
                 pagecache_isize_extended(iter->inode, old_size, pos);
-        if (page_ops && page_ops->page_done)
-                page_ops->page_done(iter->inode, pos, ret, &folio->page);
-        folio_put(folio);
-
         if (ret < len)
                 iomap_write_failed(iter->inode, pos + ret, len - ret);
         return ret;
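Note: the newly exported iomap_get_folio() replaces the open-coded __filemap_get_folio() call in iomap_write_begin() and changes the error convention from a NULL return to an ERR_PTR, which lets filesystem ->get_folio implementations propagate their own errors (as gfs2 does with ERR_PTR(status)) instead of relying on the old page_done(..., NULL) convention. A hedged usage sketch, assuming an iomap_iter already set up for a buffered write:

/* Hypothetical caller -- for illustration only. */
struct folio *folio;

folio = iomap_get_folio(iter, pos);
if (IS_ERR(folio))
        return PTR_ERR(folio);  /* -EAGAIN under IOMAP_NOWAIT, -ENOMEM otherwise */

/* The folio comes back locked with an elevated reference count; the caller
 * (or a ->put_folio hook) must folio_unlock() and folio_put() it. */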
fs/xfs/xfs_iomap.c

@@ -83,7 +83,7 @@ xfs_iomap_valid(
         return true;
 }
 
-static const struct iomap_page_ops xfs_iomap_page_ops = {
+static const struct iomap_folio_ops xfs_iomap_folio_ops = {
         .iomap_valid            = xfs_iomap_valid,
 };
@@ -133,7 +133,7 @@ xfs_bmbt_to_iomap(
                 iomap->flags |= IOMAP_F_DIRTY;
 
         iomap->validity_cookie = sequence_cookie;
-        iomap->page_ops = &xfs_iomap_page_ops;
+        iomap->folio_ops = &xfs_iomap_folio_ops;
         return 0;
 }
include/linux/iomap.h

@@ -13,6 +13,7 @@
 struct address_space;
 struct fiemap_extent_info;
 struct inode;
+struct iomap_iter;
 struct iomap_dio;
 struct iomap_writepage_ctx;
 struct iov_iter;
@@ -85,7 +86,7 @@ struct vm_fault;
  */
 #define IOMAP_NULL_ADDR -1ULL   /* addr is not valid */
 
-struct iomap_page_ops;
+struct iomap_folio_ops;
 
 struct iomap {
         u64                     addr; /* disk offset of mapping, bytes */
@@ -97,7 +98,7 @@ struct iomap {
         struct dax_device       *dax_dev; /* dax_dev for dax operations */
         void                    *inline_data;
         void                    *private; /* filesystem private */
-        const struct iomap_page_ops *page_ops;
+        const struct iomap_folio_ops *folio_ops;
         u64                     validity_cookie; /* used with .iomap_valid() */
 };
@@ -125,19 +126,20 @@ static inline bool iomap_inline_data_valid(const struct iomap *iomap)
 }
 
 /*
- * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
- * and page_done will be called for each page written to.  This only applies to
- * buffered writes as unbuffered writes will not typically have pages
+ * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio
+ * and put_folio will be called for each folio written to.  This only applies
+ * to buffered writes as unbuffered writes will not typically have folios
  * associated with them.
  *
- * When page_prepare succeeds, page_done will always be called to do any
- * cleanup work necessary.  In that page_done call, @page will be NULL if the
- * associated page could not be obtained.
+ * When get_folio succeeds, put_folio will always be called to do any
+ * cleanup work necessary.  put_folio is responsible for unlocking and putting
+ * @folio.
  */
-struct iomap_page_ops {
-        int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
-        void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
-                        struct page *page);
+struct iomap_folio_ops {
+        struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
+                        unsigned len);
+        void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
+                        struct folio *folio);
 
         /*
          * Check that the cached iomap still maps correctly to the filesystem's
@@ -260,6 +262,7 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
 void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
 bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos);
 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
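Note: for completeness, a sketch of how a filesystem would advertise the new ops from its ->iomap_begin handler. All three hooks in iomap_folio_ops are optional: xfs sets only .iomap_valid, and when get_folio/put_folio are absent the core falls back to iomap_get_folio() and folio_unlock()/folio_put() (see __iomap_get_folio()/__iomap_put_folio() in the fs/iomap/buffered-io.c changes above). The myfs_* names are hypothetical.

/* Hypothetical wiring -- not part of this commit. */
static const struct iomap_folio_ops myfs_iomap_folio_ops = {
        .get_folio      = myfs_get_folio,       /* optional */
        .put_folio      = myfs_put_folio,       /* optional */
        /* .iomap_valid = myfs_iomap_valid,        optional, see xfs */
};

static int myfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                            unsigned flags, struct iomap *iomap,
                            struct iomap *srcmap)
{
        /* ... map [offset, offset + length) and fill in iomap->type,
         * iomap->addr, iomap->offset and iomap->length ... */
        iomap->folio_ops = &myfs_iomap_folio_ops;
        return 0;
}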