Commit 73c77e2c authored by James Bottomley, committed by James Bottomley

xfs: fix xfs to work with Virtually Indexed architectures

xfs_buf.c includes what is essentially a hand-rolled version of
blk_rq_map_kern().  In order to work properly with the vmalloc buffers
that xfs uses, this hand-rolled routine must also implement the flushing
API for vmap/vmalloc areas.

[style updates from hch@lst.de]
Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent c9334f60
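
For context, the flushing API pairing that the patch wires into xfs_buf.c works as follows on a virtually indexed cache: the vmap alias and the underlying physical pages can hold the same data in different cache lines, so lines dirtied through the alias must be written back before the block layer touches the pages, and alias lines must be dropped once a device read has updated the pages. A minimal sketch of that pattern follows, in kernel-style C; the two helper names are hypothetical, and only flush_kernel_vmap_range()/invalidate_kernel_vmap_range() come from the patch itself.

#include <linux/highmem.h>	/* flush/invalidate_kernel_vmap_range() */

/*
 * Hypothetical submission-side helper: write back any cache lines
 * dirtied through the vmap alias so the device (or a kernel-side
 * copy) sees current data in the underlying pages.
 */
static void my_submit_vmapped_io(void *vaddr, int len)
{
	flush_kernel_vmap_range(vaddr, len);
	/* ... build a bio from the underlying pages and submit it ... */
}

/*
 * Hypothetical read-completion helper: the device has written the
 * physical pages, so any lines still cached under the vmap alias
 * are stale and must be invalidated before the CPU reads them.
 */
static void my_vmapped_read_done(void *vaddr, int len)
{
	invalidate_kernel_vmap_range(vaddr, len);
}

Note the asymmetry in the diff below: the flush is done before every submit_bio(), reads included, because a dirty alias line written back later could clobber data the device has placed in the pages; the invalidate is only needed after a read completes, and only for XBF_READ buffers.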
@@ -76,6 +76,27 @@ struct workqueue_struct *xfsconvertd_workqueue;
 #define xfs_buf_deallocate(bp) \
 	kmem_zone_free(xfs_buf_zone, (bp));

+static inline int
+xfs_buf_is_vmapped(
+	struct xfs_buf	*bp)
+{
+	/*
+	 * Return true if the buffer is vmapped.
+	 *
+	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
+	 * code is clever enough to know it doesn't have to map a single page,
+	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
+	 */
+	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
+}
+
+static inline int
+xfs_buf_vmap_len(
+	struct xfs_buf	*bp)
+{
+	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+}
+
 /*
  * Page Region interfaces.
  *
@@ -314,7 +335,7 @@ xfs_buf_free(
 	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;

-		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+		if (xfs_buf_is_vmapped(bp))
 			free_address(bp->b_addr - bp->b_offset);

 		for (i = 0; i < bp->b_page_count; i++) {
@@ -1107,6 +1128,9 @@ xfs_buf_bio_end_io(

 	xfs_buf_ioerror(bp, -error);

+	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
+
 	do {
 		struct page	*page = bvec->bv_page;

@@ -1216,6 +1240,10 @@ _xfs_buf_ioapply(

 submit_io:
 	if (likely(bio->bi_size)) {
+		if (xfs_buf_is_vmapped(bp)) {
+			flush_kernel_vmap_range(bp->b_addr,
+						xfs_buf_vmap_len(bp));
+		}
 		submit_bio(rw, bio);
 		if (size)
 			goto next_chunk;