Commit c2e95c3f authored by Linus Torvalds

Merge master.kernel.org:/home/hch/BK/xfs/linux-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 028d2f3e b33cc8f7
@@ -1453,6 +1453,7 @@ static inline void discard_buffer(struct buffer_head * bh)
 	clear_buffer_mapped(bh);
 	clear_buffer_req(bh);
 	clear_buffer_new(bh);
+	clear_buffer_delay(bh);
 	unlock_buffer(bh);
 }
@@ -1871,7 +1872,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 			set_buffer_uptodate(bh);
 			continue;
 		}
-		if (!buffer_uptodate(bh) &&
+		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		     (block_start < from || block_end > to)) {
 			ll_rw_block(READ, 1, &bh);
 			*wait_bh++=bh;
@@ -2457,7 +2458,7 @@ int block_truncate_page(struct address_space *mapping,
 	if (PageUptodate(page))
 		set_buffer_uptodate(bh);
-	if (!buffer_uptodate(bh)) {
+	if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
 		err = -EIO;
 		ll_rw_block(READ, 1, &bh);
 		wait_on_buffer(bh);
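Editor's note: the buffer_delay()/clear_buffer_delay() helpers used above come from a BH_Delay state bit and the kernel's BUFFER_FNS accessor-generator (this commit drops XFS's private copy of that bit from page_buf.h further down). A standalone sketch of what those generated accessors amount to, with the bit helpers reduced to plain C and the bit position chosen purely for illustration:

/* Standalone sketch of what BUFFER_FNS(Delay, delay) generates.
 * BH_Delay marks a delayed-allocate buffer: space is reserved but
 * no real disk mapping exists yet, so read paths must not issue
 * I/O against it -- that is what the new !buffer_delay() checks
 * in __block_prepare_write() and block_truncate_page() implement.
 */
enum { BH_Delay_bit = 6 };		/* illustrative bit position only */

struct buffer_head_stub { unsigned long b_state; };

static inline void set_buffer_delay(struct buffer_head_stub *bh)
{
	bh->b_state |= 1UL << BH_Delay_bit;
}

static inline void clear_buffer_delay(struct buffer_head_stub *bh)
{
	bh->b_state &= ~(1UL << BH_Delay_bit);
}

static inline int buffer_delay(const struct buffer_head_stub *bh)
{
	return (bh->b_state >> BH_Delay_bit) & 1;
}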
...
@@ -51,23 +51,22 @@ export-objs := pagebuf/page_buf.o support/ktrace.o \

 obj-$(CONFIG_XFS_FS)		+= xfs.o

-xfs-obj-$(CONFIG_XFS_RT)	+= xfs_rtalloc.o
-xfs-obj-$(CONFIG_XFS_QUOTA)	+= xfs_dquot.o \
+xfs-$(CONFIG_XFS_RT)		+= xfs_rtalloc.o
+xfs-$(CONFIG_XFS_QUOTA)		+= xfs_dquot.o \
				   xfs_dquot_item.o \
				   xfs_trans_dquot.o \
				   xfs_qm_syscalls.o \
				   xfs_qm.o
-xfs-obj-$(CONFIG_XFS_POSIX_ACL)	+= xfs_acl.o
-xfs-obj-$(CONFIG_FS_POSIX_CAP)	+= xfs_cap.o
-xfs-obj-$(CONFIG_FS_POSIX_MAC)	+= xfs_mac.o
-xfs-obj-$(CONFIG_PROC_FS)	+= linux/xfs_stats.o
-xfs-obj-$(CONFIG_SYSCTL)	+= linux/xfs_sysctl.o
+xfs-$(CONFIG_XFS_POSIX_ACL)	+= xfs_acl.o
+xfs-$(CONFIG_FS_POSIX_CAP)	+= xfs_cap.o
+xfs-$(CONFIG_FS_POSIX_MAC)	+= xfs_mac.o
+xfs-$(CONFIG_PROC_FS)		+= linux/xfs_stats.o
+xfs-$(CONFIG_SYSCTL)		+= linux/xfs_sysctl.o

-xfs-objs += $(xfs-obj-y) \
-	xfs_alloc.o \
+xfs-y += xfs_alloc.o \
	xfs_alloc_btree.o \
	xfs_attr.o \
	xfs_attr_fetch.o \
@@ -115,12 +114,12 @@ xfs-objs += $(xfs-obj-y) \
	xfs_rw.o

 # Objects in pagebuf/
-xfs-objs += $(addprefix pagebuf/, \
+xfs-y += $(addprefix pagebuf/, \
	page_buf.o \
	page_buf_locking.o)

 # Objects in linux/
-xfs-objs += $(addprefix linux/, \
+xfs-y += $(addprefix linux/, \
	xfs_aops.o \
	xfs_behavior.o \
	xfs_file.o \
@@ -134,7 +133,7 @@ xfs-objs += $(addprefix linux/, \
	xfs_vnode.o)

 # Objects in support/
-xfs-objs += $(addprefix support/, \
+xfs-y += $(addprefix support/, \
	debug.o \
	kmem.o \
	ktrace.o \
...
@@ -48,6 +48,9 @@ map_blocks(
 	vnode_t			*vp = LINVFS_GET_VP(inode);
 	int			error, nmaps = 1;

+	if (((flags & (PBF_DIRECT|PBF_SYNC)) == PBF_DIRECT) &&
+	    (offset >= inode->i_size))
+		count = max(count, XFS_WRITE_IO_LOG);
 retry:
 	VOP_BMAP(vp, offset, count, flags, pbmapp, &nmaps, error);
 	if (flags & PBF_WRITE) {
@@ -145,9 +148,8 @@ probe_unmapped_page(
 		struct buffer_head	*bh, *head;

 		bh = head = page_buffers(page);
 		do {
-			if (buffer_mapped(bh) || !buffer_uptodate(bh)) {
+			if (buffer_mapped(bh) || !buffer_uptodate(bh))
 				break;
-			}
 			ret += bh->b_size;
 			if (ret >= pg_offset)
 				break;
@@ -289,7 +291,7 @@ convert_page(
 	bh = head = page_buffers(page);
 	do {
 		offset = i << bbits;
-		if (!buffer_uptodate(bh))
+		if (!(PageUptodate(page) || buffer_uptodate(bh)))
 			continue;
 		if (buffer_mapped(bh) && !buffer_delay(bh) && all_bh) {
 			if (startio && (offset < end)) {
@@ -372,7 +374,7 @@ delalloc_convert(
 	page_buf_bmap_t		*mp, map;
 	unsigned long		p_offset = 0, end_index;
 	loff_t			offset, end_offset;
-	int			len, err, i, cnt = 0;
+	int			len, err, i, cnt = 0, uptodate = 1;

 	/* Are we off the end of the file ? */
 	end_index = inode->i_size >> PAGE_CACHE_SHIFT;
@@ -396,7 +398,7 @@ delalloc_convert(
 	len = bh->b_size;
 	do {
-		if (!buffer_uptodate(bh) && !startio) {
+		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
 			goto next_bh;
 		}
@@ -423,48 +425,57 @@ delalloc_convert(
 					unlock_buffer(bh);
 				}
 			}
-		} else if (!buffer_mapped(bh) &&
-			   (buffer_uptodate(bh) || PageUptodate(page))
-			   && (allocate_space || startio)) {
-			int size;
-
-			/* Getting here implies an unmapped buffer was found,
-			 * and we are in a path where we need to write the
-			 * whole page out.
-			 */
-			if (!mp) {
-				size = probe_unmapped_cluster(inode, page,
-								bh, head);
-				err = map_blocks(inode, offset, size, &map,
-						PBF_WRITE|PBF_DIRECT);
-				if (err) {
-					goto error;
-				}
-				mp = match_offset_to_mapping(page, &map,
-								p_offset);
-			}
-			if (mp) {
-				map_buffer_at_offset(page, bh, p_offset,
-					inode->i_blkbits, mp);
-				if (startio) {
-					bh_arr[cnt++] = bh;
-				} else {
-					unlock_buffer(bh);
-				}
-			}
-		} else if (startio && buffer_mapped(bh)) {
-			if(buffer_uptodate(bh) && allocate_space) {
-				lock_buffer(bh);
-				bh_arr[cnt++] = bh;
-			}
+		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
+			   (allocate_space || startio)) {
+			if (!buffer_mapped(bh)) {
+				int size;
+
+				/*
+				 * Getting here implies an unmapped buffer
+				 * was found, and we are in a path where we
+				 * need to write the whole page out.
+				 */
+				if (!mp) {
+					size = probe_unmapped_cluster(
+							inode, page, bh, head);
+					err = map_blocks(inode, offset,
+							size, &map,
+							PBF_WRITE | PBF_DIRECT);
+					if (err) {
+						goto error;
+					}
+					mp = match_offset_to_mapping(page, &map,
+								p_offset);
+				}
+				if (mp) {
+					map_buffer_at_offset(page,
+						bh, p_offset,
+						inode->i_blkbits, mp);
+					if (startio) {
+						bh_arr[cnt++] = bh;
+					} else {
+						unlock_buffer(bh);
+					}
+				}
+			} else if (startio && buffer_mapped(bh)) {
+				if (buffer_uptodate(bh) && allocate_space) {
+					lock_buffer(bh);
+					bh_arr[cnt++] = bh;
+				}
+			}
 		}

 next_bh:
+		if (!buffer_uptodate(bh))
+			uptodate = 0;
 		offset += len;
 		p_offset += len;
 		bh = bh->b_this_page;
 	} while (offset < end_offset);

+	if (uptodate)
+		SetPageUptodate(page);
+
 	if (startio) {
 		submit_page(page, bh_arr, cnt);
 	}
@@ -509,17 +520,15 @@ linvfs_get_block_core(
 	ssize_t			size;
 	loff_t			offset = (loff_t)iblock << inode->i_blkbits;

-	if (blocks) {
+	/* If we are doing writes at the end of the file,
+	 * allocate in chunks
+	 */
+	if (blocks)
 		size = blocks << inode->i_blkbits;
-	} else {
-		/* If we are doing writes at the end of the file,
-		 * allocate in chunks
-		 */
-		if (create && (offset >= inode->i_size) && !(flags & PBF_SYNC))
-			size = 1 << XFS_WRITE_IO_LOG;
-		else
-			size = 1 << inode->i_blkbits;
-	}
+	else if (create && (offset >= inode->i_size))
+		size = 1 << XFS_WRITE_IO_LOG;
+	else
+		size = 1 << inode->i_blkbits;

 	VOP_BMAP(vp, offset, size,
 		 create ? flags : PBF_READ,
@@ -534,15 +543,20 @@ linvfs_get_block_core(
 		page_buf_daddr_t	bn;
 		loff_t			delta;

-		delta = offset - pbmap.pbm_offset;
-		delta >>= inode->i_blkbits;
-
-		bn = pbmap.pbm_bn >> (inode->i_blkbits - 9);
-		bn += delta;
-
-		bh_result->b_blocknr = bn;
-		bh_result->b_bdev = pbmap.pbm_target->pbr_bdev;
-		set_buffer_mapped(bh_result);
+		/* For unwritten extents do not report a disk address on
+		 * the read case.
+		 */
+		if (create || ((pbmap.pbm_flags & PBMF_UNWRITTEN) == 0)) {
+			delta = offset - pbmap.pbm_offset;
+			delta >>= inode->i_blkbits;
+
+			bn = pbmap.pbm_bn >> (inode->i_blkbits - 9);
+			bn += delta;
+
+			bh_result->b_blocknr = bn;
+			bh_result->b_bdev = pbmap.pbm_target->pbr_bdev;
+			set_buffer_mapped(bh_result);
+		}
 	}

 	/* If we previously allocated a block out beyond eof and
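Editor's note: the size selection added to linvfs_get_block_core() decides how much mapping to ask VOP_BMAP for, so that writes extending the file request a large chunk up front and delayed allocation can build big extents. A self-contained sketch of the same decision, with XFS_WRITE_IO_LOG assumed to be 16 (64 KiB) purely for illustration:

#include <stdio.h>

/* Hypothetical value for illustration only; the real XFS_WRITE_IO_LOG
 * is a filesystem tunable. */
#define XFS_WRITE_IO_LOG 16

/* Mirrors the request-size choice in linvfs_get_block_core():
 * - a multi-block request maps exactly what was asked for,
 * - a write that extends the file asks for a big chunk up front,
 * - everything else maps a single filesystem block.
 */
static long map_request_size(long blocks, int create,
			     long long offset, long long i_size,
			     int blkbits)
{
	if (blocks)
		return blocks << blkbits;
	else if (create && offset >= i_size)
		return 1L << XFS_WRITE_IO_LOG;	/* 64 KiB with log = 16 */
	else
		return 1L << blkbits;
}

int main(void)
{
	/* appending at EOF on a 4 KiB-block fs: ask for 65536 bytes */
	printf("%ld\n", map_request_size(0, 1, 8192, 8192, 12));
	/* plain read of one block: ask for 4096 bytes */
	printf("%ld\n", map_request_size(0, 0, 0, 8192, 12));
	return 0;
}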
...
@@ -29,362 +29,13 @@
  *
  * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
  */

-/*
- * fs/xfs/linux/xfs_lrw.c (Linux Read Write stuff)
- *
- */

 #include <xfs.h>
+#include <linux/pagemap.h>
+#include <linux/capability.h>

 #define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
 #define XFS_STRAT_WRITE_IMAPS	2
+#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP

-STATIC int xfs_iomap_read(xfs_iocore_t *, loff_t, size_t, int, page_buf_bmap_t *,
-		int *);
-STATIC int xfs_iomap_write(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
-		int *, int);
-STATIC int xfs_iomap_write_delay(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
-		int *, int, int);
-STATIC int xfs_iomap_write_direct(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
-		int *, int, int);
-STATIC int _xfs_imap_to_bmap(xfs_iocore_t *, xfs_off_t, xfs_bmbt_irec_t *,
-		page_buf_bmap_t *, int, int);
-
-int
-xfs_strategy(
-	xfs_inode_t	*ip,
-	xfs_off_t	offset,
-	ssize_t		count,
-	int		flags,
-	page_buf_bmap_t	*pbmapp,
-	int		*npbmaps)
-{
-	xfs_iocore_t	*io;
-	xfs_mount_t	*mp;
-	int		error;
-	xfs_fileoff_t	offset_fsb;
-	xfs_fileoff_t	end_fsb;
-	xfs_fileoff_t	map_start_fsb;
-	xfs_fileoff_t	last_block;
-	xfs_fsblock_t	first_block;
-	xfs_bmap_free_t	free_list;
-	xfs_filblks_t	count_fsb;
-	int		committed, i, loops, nimaps;
-	int		is_xfs;
-	xfs_bmbt_irec_t	imap[XFS_MAX_RW_NBMAPS];
-	xfs_trans_t	*tp;
-
-	mp = ip->i_mount;
-	io = &ip->i_iocore;
-	is_xfs = IO_IS_XFS(io);
-	ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
-	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
-	       ((io->io_flags & XFS_IOCORE_RT) != 0));
-
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return XFS_ERROR(EIO);
-
-	offset_fsb = XFS_B_TO_FSBT(mp, offset);
-	nimaps = min(XFS_MAX_RW_NBMAPS, *npbmaps);
-	end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
-	first_block = NULLFSBLOCK;
-
-	XFS_ILOCK(mp, io, XFS_ILOCK_SHARED | XFS_EXTSIZE_RD);
-	error = XFS_BMAPI(mp, NULL, io, offset_fsb,
-			  (xfs_filblks_t)(end_fsb - offset_fsb),
-			  XFS_BMAPI_ENTIRE, &first_block, 0, imap,
-			  &nimaps, NULL);
-	XFS_IUNLOCK(mp, io, XFS_ILOCK_SHARED | XFS_EXTSIZE_RD);
-	if (error) {
-		return XFS_ERROR(error);
-	}
-
-	if (nimaps && !ISNULLSTARTBLOCK(imap[0].br_startblock)) {
-		*npbmaps = _xfs_imap_to_bmap(&ip->i_iocore, offset, imap,
-					     pbmapp, nimaps, *npbmaps);
-		return 0;
-	}
-
-	/*
-	 * Make sure that the dquots are there.
-	 */
-	if (XFS_IS_QUOTA_ON(mp)) {
-		if (XFS_NOT_DQATTACHED(mp, ip)) {
-			if ((error = xfs_qm_dqattach(ip, 0))) {
-				return XFS_ERROR(error);
-			}
-		}
-	}
-
-	XFS_STATS_ADD(xfsstats.xs_xstrat_bytes,
-		      XFS_FSB_TO_B(mp, imap[0].br_blockcount));
-
-	offset_fsb = imap[0].br_startoff;
-	count_fsb = imap[0].br_blockcount;
-	map_start_fsb = offset_fsb;
-
-	while (count_fsb != 0) {
-		/*
-		 * Set up a transaction with which to allocate the
-		 * backing store for the file.  Do allocations in a
-		 * loop until we get some space in the range we are
-		 * interested in.  The other space that might be allocated
-		 * is in the delayed allocation extent on which we sit
-		 * but before our buffer starts.
-		 */
-		nimaps = 0;
-		loops = 0;
-		while (nimaps == 0) {
-			if (is_xfs) {
-				tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
-				error = xfs_trans_reserve(tp, 0,
-						XFS_WRITE_LOG_RES(mp),
-						0, XFS_TRANS_PERM_LOG_RES,
-						XFS_WRITE_LOG_COUNT);
-				if (error) {
-					xfs_trans_cancel(tp, 0);
-					goto error0;
-				}
-				xfs_ilock(ip, XFS_ILOCK_EXCL);
-				xfs_trans_ijoin(tp, ip,
-						XFS_ILOCK_EXCL);
-				xfs_trans_ihold(tp, ip);
-			} else {
-				tp = NULL;
-				XFS_ILOCK(mp, io, XFS_ILOCK_EXCL |
-					  XFS_EXTSIZE_WR);
-			}
-
-			/*
-			 * Allocate the backing store for the file.
-			 */
-			XFS_BMAP_INIT(&(free_list),
-				      &(first_block));
-			nimaps = XFS_STRAT_WRITE_IMAPS;
-
-			/*
-			 * Ensure we don't go beyond eof - it is possible
-			 * the extents changed since we did the read call,
-			 * we dropped the ilock in the interim.
-			 */
-			end_fsb = XFS_B_TO_FSB(mp, XFS_SIZE(mp, io));
-			xfs_bmap_last_offset(NULL, ip, &last_block,
-					     XFS_DATA_FORK);
-			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
-			if ((map_start_fsb + count_fsb) > last_block) {
-				count_fsb = last_block - map_start_fsb;
-				if (count_fsb == 0) {
-					if (is_xfs) {
-						xfs_bmap_cancel(&free_list);
-						xfs_trans_cancel(tp,
-							(XFS_TRANS_RELEASE_LOG_RES |
-							 XFS_TRANS_ABORT));
-					}
-					XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL |
-						    XFS_EXTSIZE_WR);
-					return XFS_ERROR(EAGAIN);
-				}
-			}
-
-			error = XFS_BMAPI(mp, tp, io, map_start_fsb, count_fsb,
-					  XFS_BMAPI_WRITE, &first_block, 1,
-					  imap, &nimaps, &free_list);
-			if (error) {
-				xfs_bmap_cancel(&free_list);
-				xfs_trans_cancel(tp,
-					(XFS_TRANS_RELEASE_LOG_RES |
-					 XFS_TRANS_ABORT));
-				XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL |
-					    XFS_EXTSIZE_WR);
-				goto error0;
-			}
-			if (is_xfs) {
-				error = xfs_bmap_finish(&(tp), &(free_list),
-						first_block, &committed);
-				if (error) {
-					xfs_bmap_cancel(&free_list);
-					xfs_trans_cancel(tp,
-						(XFS_TRANS_RELEASE_LOG_RES |
-						 XFS_TRANS_ABORT));
-					xfs_iunlock(ip, XFS_ILOCK_EXCL);
-					goto error0;
-				}
-
-				error = xfs_trans_commit(tp,
-						XFS_TRANS_RELEASE_LOG_RES,
-						NULL);
-				if (error) {
-					xfs_iunlock(ip, XFS_ILOCK_EXCL);
-					goto error0;
-				}
-			}
-			if (nimaps == 0) {
-				XFS_IUNLOCK(mp, io,
-					    XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
-			} /* else hold 'till we maybe loop again below */
-		}
-
-		/*
-		 * See if we were able to allocate an extent that
-		 * covers at least part of the user's requested size.
-		 */
-		offset_fsb = XFS_B_TO_FSBT(mp, offset);
-		for (i = 0; i < nimaps; i++) {
-			int	maps;
-
-			if ((offset_fsb >= imap[i].br_startoff) &&
-			    (offset_fsb <
-			     (imap[i].br_startoff + imap[i].br_blockcount))) {
-				XFS_IUNLOCK(mp, io,
-					    XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
-				maps = min(nimaps, *npbmaps);
-				*npbmaps = _xfs_imap_to_bmap(io, offset,
-							     &imap[i], pbmapp,
-							     maps, *npbmaps);
-				XFS_STATS_INC(xfsstats.xs_xstrat_quick);
-				return 0;
-			}
-			count_fsb -= imap[i].br_blockcount; /* for next bmapi,
-								if needed. */
-		}
-
-		/*
-		 * We didn't get an extent the caller can write into so
-		 * loop around and try starting after the last imap we got back.
-		 */
-		nimaps--; /* Index of last entry */
-		ASSERT(nimaps >= 0);
-		ASSERT(offset_fsb >=
-		       imap[nimaps].br_startoff + imap[nimaps].br_blockcount);
-		ASSERT(count_fsb);
-		offset_fsb =
-			imap[nimaps].br_startoff + imap[nimaps].br_blockcount;
-		map_start_fsb = offset_fsb;
-		XFS_STATS_INC(xfsstats.xs_xstrat_split);
-		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
-	}
-
-	ASSERT(0);	/* Should never get here */
-
-error0:
-	if (error) {
-		ASSERT(count_fsb != 0);
-		ASSERT(is_xfs || XFS_FORCED_SHUTDOWN(mp));
-	}
-
-	return XFS_ERROR(error);
-}
-
-/*
- * xfs_bmap() is the same as the irix xfs_bmap from xfs_rw.c
- * execpt for slight changes to the params
- */
-int
-xfs_bmap(bhv_desc_t	*bdp,
-	xfs_off_t	offset,
-	ssize_t		count,
-	int		flags,
-	page_buf_bmap_t	*pbmapp,
-	int		*npbmaps)
-{
-	xfs_inode_t	*ip;
-	int		error;
-	int		lockmode;
-	int		fsynced = 0;
-	vnode_t		*vp;
-
-	ip = XFS_BHVTOI(bdp);
-	ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
-	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
-	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
-
-	if (XFS_FORCED_SHUTDOWN(ip->i_iocore.io_mount))
-		return XFS_ERROR(EIO);
-
-	if (flags & PBF_READ) {
-		lockmode = xfs_ilock_map_shared(ip);
-		error = xfs_iomap_read(&ip->i_iocore, offset, count,
-				       XFS_BMAPI_ENTIRE, pbmapp, npbmaps);
-		xfs_iunlock_map_shared(ip, lockmode);
-	} else if (flags & PBF_FILE_ALLOCATE) {
-		error = xfs_strategy(ip, offset, count, flags,
-				     pbmapp, npbmaps);
-	} else {	/* PBF_WRITE */
-		ASSERT(flags & PBF_WRITE);
-		vp = BHV_TO_VNODE(bdp);
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-
-		/*
-		 * Make sure that the dquots are there. This doesn't hold
-		 * the ilock across a disk read.
-		 */
-		if (XFS_IS_QUOTA_ON(ip->i_mount)) {
-			if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
-				if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
-					xfs_iunlock(ip, XFS_ILOCK_EXCL);
-					return XFS_ERROR(error);
-				}
-			}
-		}
-
-retry:
-		error = xfs_iomap_write(&ip->i_iocore, offset, count,
-					pbmapp, npbmaps, flags);
-		/* xfs_iomap_write unlocks/locks/unlocks */
-
-		if (error == ENOSPC) {
-			switch (fsynced) {
-			case 0:
-				if (ip->i_delayed_blks) {
-					filemap_fdatawrite(LINVFS_GET_IP(vp)->i_mapping);
-					fsynced = 1;
-				} else {
-					fsynced = 2;
-					flags |= PBF_SYNC;
-				}
-				error = 0;
-				xfs_ilock(ip, XFS_ILOCK_EXCL);
-				goto retry;
-			case 1:
-				fsynced = 2;
-				if (!(flags & PBF_SYNC)) {
-					flags |= PBF_SYNC;
-					error = 0;
-					xfs_ilock(ip, XFS_ILOCK_EXCL);
-					goto retry;
-				}
-			case 2:
-				sync_blockdev(vp->v_vfsp->vfs_super->s_bdev);
-				xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
-					XFS_LOG_FORCE|XFS_LOG_SYNC);
-				error = 0;
-/**
-				delay(HZ);
-**/
-				fsynced++;
-				xfs_ilock(ip, XFS_ILOCK_EXCL);
-				goto retry;
-			}
-		}
-	}
-
-	return XFS_ERROR(error);
-}

 STATIC int
 _xfs_imap_to_bmap(
@@ -397,7 +48,7 @@ _xfs_imap_to_bmap(
 {
 	xfs_mount_t	*mp;
 	xfs_fsize_t	nisize;
-	int		im, pbm;
+	int		pbm;
 	xfs_fsblock_t	start_block;

 	mp = io->io_mount;
@@ -405,7 +56,7 @@ _xfs_imap_to_bmap(
 	if (io->io_new_size > nisize)
 		nisize = io->io_new_size;

-	for (im=pbm=0; im < imaps && pbm < pbmaps; im++,pbmapp++,imap++,pbm++) {
+	for (pbm = 0; imaps && pbm < pbmaps; imaps--, pbmapp++, imap++, pbm++) {
 		pbmapp->pbm_target = io->io_flags & XFS_IOCORE_RT ?
 				     mp->m_rtdev_targp : mp->m_ddev_targp;
 		pbmapp->pbm_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
@@ -422,9 +73,8 @@ _xfs_imap_to_bmap(
 			pbmapp->pbm_flags = PBMF_DELAY;
 		} else {
 			pbmapp->pbm_bn = XFS_FSB_TO_DB_IO(io, start_block);
-			if (ISUNWRITTEN(imap)) {
+			if (ISUNWRITTEN(imap))
 				pbmapp->pbm_flags |= PBMF_UNWRITTEN;
-			}
 		}

 		if ((pbmapp->pbm_offset + pbmapp->pbm_bsize) >= nisize) {
@@ -436,149 +86,344 @@ _xfs_imap_to_bmap(
 	return pbm;	/* Return the number filled */
 }

-STATIC int
-xfs_iomap_read(
-	xfs_iocore_t	*io,
-	loff_t		offset,
-	size_t		count,
-	int		flags,
-	page_buf_bmap_t	*pbmapp,
-	int		*npbmaps)
-{
-	xfs_fileoff_t	offset_fsb;
-	xfs_fileoff_t	end_fsb;
-	int		nimaps;
-	int		error;
-	xfs_mount_t	*mp;
-	xfs_bmbt_irec_t	imap[XFS_MAX_RW_NBMAPS];
-
-	ASSERT(ismrlocked(io->io_lock, MR_UPDATE | MR_ACCESS) != 0);
-
-	mp = io->io_mount;
-	offset_fsb = XFS_B_TO_FSBT(mp, offset);
-	nimaps = sizeof(imap) / sizeof(imap[0]);
-	nimaps = min(nimaps, *npbmaps); /* Don't ask for more than caller has */
-	end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
-	error = XFS_BMAPI(mp, NULL, io, offset_fsb,
-			  (xfs_filblks_t)(end_fsb - offset_fsb),
-			  flags, NULL, 0, imap,
-			  &nimaps, NULL);
-	if (error) {
-		return XFS_ERROR(error);
-	}
-
-	if(nimaps) {
-		*npbmaps = _xfs_imap_to_bmap(io, offset, imap, pbmapp, nimaps,
-					     *npbmaps);
-	} else
-		*npbmaps = 0;
-	return XFS_ERROR(error);
-}
-
-/*
- * xfs_iomap_write: return pagebuf_bmap_t's telling higher layers
- * where to write.
- * There are 2 main cases:
- *	1 the extents already exist
- *	2 must allocate.
- *	There are 3 cases when we allocate:
- *		delay allocation (doesn't really allocate or use transactions)
- *		direct allocation (no previous delay allocation)
- *		convert delay to real allocations
- */
-STATIC int
-xfs_iomap_write(
-	xfs_iocore_t	*io,
-	loff_t		offset,
-	size_t		count,
-	page_buf_bmap_t	*pbmapp,
-	int		*npbmaps,
-	int		ioflag)
-{
-	int		maps;
-	int		error = 0;
-	int		found;
-	int		flags = 0;
-
-	maps = *npbmaps;
-	if (!maps)
-		goto out;
-
-	/*
-	 * If we have extents that are allocated for this range,
-	 * return them.
-	 */
-	found = 0;
-	error = xfs_iomap_read(io, offset, count, flags, pbmapp, npbmaps);
-	if (error)
-		goto out;
-
-	/*
-	 * If we found mappings and they can just have data written
-	 * without conversion,
-	 * let the caller write these and call us again.
-	 *
-	 * If we have a HOLE or UNWRITTEN, proceed down lower to
-	 * get the space or to convert to written.
-	 */
-	if (*npbmaps) {
-		if (!(pbmapp->pbm_flags & PBMF_HOLE)) {
-			*npbmaps = 1; /* Only checked the first one. */
-			/* We could check more, ... */
-			goto out;
-		}
-	}
-	found = *npbmaps;
-	*npbmaps = maps; /* Restore to original requested */
-
-	if (ioflag & PBF_DIRECT) {
-		error = xfs_iomap_write_direct(io, offset, count, pbmapp,
-					       npbmaps, ioflag, found);
-	} else {
-		error = xfs_iomap_write_delay(io, offset, count, pbmapp,
-					      npbmaps, ioflag, found);
-	}
-
-out:
-	XFS_IUNLOCK(io->io_mount, io, XFS_ILOCK_EXCL);
-	return XFS_ERROR(error);
-}
+int
+xfs_iomap(
+	xfs_iocore_t	*io,
+	xfs_off_t	offset,
+	ssize_t		count,
+	int		flags,
+	page_buf_bmap_t	*pbmapp,
+	int		*npbmaps)
+{
+	xfs_mount_t	*mp = io->io_mount;
+	xfs_fileoff_t	offset_fsb, end_fsb;
+	int		error;
+	int		lockmode = 0;
+	xfs_bmbt_irec_t	imap;
+	int		nimaps = 1;
+	int		bmap_flags = 0;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	switch (flags &
+		(PBF_READ|PBF_WRITE|PBF_FILE_ALLOCATE|PBF_FILE_UNWRITTEN)) {
+	case PBF_READ:
+		lockmode = XFS_LCK_MAP_SHARED(mp, io);
+		bmap_flags = XFS_BMAPI_ENTIRE;
+		break;
+	case PBF_WRITE:
+		lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
+		bmap_flags = 0;
+		XFS_ILOCK(mp, io, lockmode);
+		break;
+	case PBF_FILE_ALLOCATE:
+		lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD;
+		bmap_flags = XFS_BMAPI_ENTIRE;
+		XFS_ILOCK(mp, io, lockmode);
+		break;
+	case PBF_FILE_UNWRITTEN:
+		lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
+		bmap_flags = XFS_BMAPI_ENTIRE|XFS_BMAPI_IGSTATE;
+		XFS_ILOCK(mp, io, lockmode);
+		break;
+	default:
+		BUG();
+	}
+
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
+	error = XFS_BMAPI(mp, NULL, io, offset_fsb,
+			  (xfs_filblks_t)(end_fsb - offset_fsb) ,
+			  bmap_flags, NULL, 0, &imap,
+			  &nimaps, NULL);
+
+	if (error)
+		goto out;
+
+	switch (flags & (PBF_WRITE|PBF_FILE_ALLOCATE)) {
+	case PBF_WRITE:
+		/* If we found an extent, return it */
+		if (nimaps && (imap.br_startblock != HOLESTARTBLOCK))
+			break;
+
+		if (flags & PBF_DIRECT) {
+			error = XFS_IOMAP_WRITE_DIRECT(mp, io, offset,
+					count, flags, &imap, &nimaps, nimaps);
+		} else {
+			error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count,
+					flags, &imap, &nimaps);
+		}
+		break;
+	case PBF_FILE_ALLOCATE:
+		/* If we found an extent, return it */
+		XFS_IUNLOCK(mp, io, lockmode);
+		lockmode = 0;
+
+		if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock))
+			break;
+
+		error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, &imap, &nimaps);
+		break;
+	}
+
+	if (nimaps) {
+		*npbmaps = _xfs_imap_to_bmap(io, offset, &imap,
+					     pbmapp, nimaps, *npbmaps);
+	} else {
+		*npbmaps = 0;
+	}
+
+out:
+	if (lockmode)
+		XFS_IUNLOCK(mp, io, lockmode);
+	return XFS_ERROR(error);
+}
+
+STATIC int
+xfs_flush_space(
+	xfs_inode_t	*ip,
+	int		*fsynced,
+	int		*ioflags)
+{
+	vnode_t		*vp = XFS_ITOV(ip);
+
+	switch (*fsynced) {
+	case 0:
+		if (ip->i_delayed_blks) {
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+			filemap_fdatawrite(LINVFS_GET_IP(vp)->i_mapping);
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+			*fsynced = 1;
+		} else {
+			*ioflags |= PBF_SYNC;
+			*fsynced = 2;
+		}
+		return 0;
+	case 1:
+		*fsynced = 2;
+		*ioflags |= PBF_SYNC;
+		return 0;
+	case 2:
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		sync_blockdev(vp->v_vfsp->vfs_super->s_bdev);
+		xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
+					XFS_LOG_FORCE|XFS_LOG_SYNC);
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		*fsynced = 3;
+		return 0;
+	}
+	return 1;
+}
+
+int
+xfs_iomap_write_direct(
+	xfs_inode_t	*ip,
+	loff_t		offset,
+	size_t		count,
+	int		ioflag,
+	xfs_bmbt_irec_t	*ret_imap,
+	int		*nmaps,
+	int		found)
+{
+	xfs_mount_t	*mp = ip->i_mount;
+	xfs_iocore_t	*io = &ip->i_iocore;
+	xfs_fileoff_t	offset_fsb;
+	xfs_fileoff_t	last_fsb;
+	xfs_filblks_t	count_fsb;
+	xfs_fsize_t	isize;
+	xfs_fsblock_t	firstfsb;
+	int		nimaps, maps;
+	int		error;
+	int		bmapi_flag;
+	int		rt;
+	xfs_trans_t	*tp;
+	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS], *imapp;
+	xfs_bmap_free_t	free_list;
+	int		aeof;
+	xfs_filblks_t	datablocks;
+	int		committed;
+	int		numrtextents;
+	uint		resblks;
+
+	/*
+	 * Make sure that the dquots are there. This doesn't hold
+	 * the ilock across a disk read.
+	 */
+	if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
+		if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
+			return XFS_ERROR(error);
+		}
+	}
+
+	maps = min(XFS_WRITE_IMAPS, *nmaps);
+	nimaps = maps;
+
+	isize = ip->i_d.di_size;
+	aeof = (offset + count) > isize;
+	if (io->io_new_size > isize)
+		isize = io->io_new_size;
+
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
+	count_fsb = last_fsb - offset_fsb;
+	if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) {
+		xfs_fileoff_t	map_last_fsb;
+
+		map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff;
+
+		if (map_last_fsb < last_fsb) {
+			last_fsb = map_last_fsb;
+			count_fsb = last_fsb - offset_fsb;
+		}
+		ASSERT(count_fsb > 0);
+	}
+
+	/*
+	 * determine if reserving space on
+	 * the data or realtime partition.
+	 */
+	if ((rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
+		int	sbrtextsize, iprtextsize;
+
+		sbrtextsize = mp->m_sb.sb_rextsize;
+		iprtextsize =
+			ip->i_d.di_extsize ? ip->i_d.di_extsize : sbrtextsize;
+		numrtextents = (count_fsb + iprtextsize - 1);
+		do_div(numrtextents, sbrtextsize);
+		datablocks = 0;
+	} else {
+		datablocks = count_fsb;
+		numrtextents = 0;
+	}
+
+	/*
+	 * allocate and setup the transaction
+	 */
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+	resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
+	error = xfs_trans_reserve(tp, resblks,
+			XFS_WRITE_LOG_RES(mp), numrtextents,
+			XFS_TRANS_PERM_LOG_RES,
+			XFS_WRITE_LOG_COUNT);
+
+	/*
+	 * check for running out of space
+	 */
+	if (error)
+		/*
+		 * Free the transaction structure.
+		 */
+		xfs_trans_cancel(tp, 0);
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	if (error)
+		goto error_out; /* Don't return in above if .. trans ..,
+				   need lock to return */
+
+	if (XFS_IS_QUOTA_ON(mp)) {
+		if (xfs_trans_reserve_blkquota(tp, ip, resblks)) {
+			error = (EDQUOT);
+			goto error1;
+		}
+	}
+	nimaps = 1;
+
+	bmapi_flag = XFS_BMAPI_WRITE;
+	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	xfs_trans_ihold(tp, ip);
+
+	if (offset < ip->i_d.di_size || rt)
+		bmapi_flag |= XFS_BMAPI_PREALLOC;
+
+	/*
+	 * issue the bmapi() call to allocate the blocks
+	 */
+	XFS_BMAP_INIT(&free_list, &firstfsb);
+	imapp = &imap[0];
+	error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
+		bmapi_flag, &firstfsb, 0, imapp, &nimaps, &free_list);
+	if (error) {
+		goto error0;
+	}
+
+	/*
+	 * complete the transaction
+	 */
+	error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
+	if (error) {
+		goto error0;
+	}
+
+	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+	if (error) {
+		goto error_out;
+	}
+
+	/* copy any maps to caller's array and return any error. */
+	if (nimaps == 0) {
+		error = (ENOSPC);
+		goto error_out;
+	}
+
+	*ret_imap = imap[0];
+	*nmaps = 1;
+	return 0;
+
+error0:	/* Cancel bmap, unlock inode, and cancel trans */
+	xfs_bmap_cancel(&free_list);
+
+error1:	/* Just cancel transaction */
+	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+	*nmaps = 0;	/* nothing set-up here */
+
+error_out:
+	return XFS_ERROR(error);
+}

-STATIC int
+int
 xfs_iomap_write_delay(
-	xfs_iocore_t	*io,
+	xfs_inode_t	*ip,
 	loff_t		offset,
 	size_t		count,
-	page_buf_bmap_t	*pbmapp,
-	int		*npbmaps,
 	int		ioflag,
-	int		found)
+	xfs_bmbt_irec_t	*ret_imap,
+	int		*nmaps)
 {
+	xfs_mount_t	*mp = ip->i_mount;
+	xfs_iocore_t	*io = &ip->i_iocore;
 	xfs_fileoff_t	offset_fsb;
-	xfs_fileoff_t	ioalign;
 	xfs_fileoff_t	last_fsb;
-	xfs_fileoff_t	start_fsb;
-	xfs_filblks_t	count_fsb;
-	xfs_off_t	aligned_offset;
 	xfs_fsize_t	isize;
 	xfs_fsblock_t	firstblock;
 	int		nimaps;
 	int		error;
-	int		n;
-	unsigned int	iosize;
-	xfs_mount_t	*mp;
-#define	XFS_WRITE_IMAPS	XFS_BMAP_MAX_NMAP
 	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS];
 	int		aeof;
+	int		fsynced = 0;

-	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
+	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);

-	mp = io->io_mount;
-
-	isize = XFS_SIZE(mp, io);
+	/*
+	 * Make sure that the dquots are there. This doesn't hold
+	 * the ilock across a disk read.
+	 */
+	if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
+		if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
+			return XFS_ERROR(error);
+		}
+	}
+
+retry:
+	isize = ip->i_d.di_size;
 	if (io->io_new_size > isize) {
 		isize = io->io_new_size;
 	}
@@ -591,50 +436,22 @@ xfs_iomap_write_delay(
	 * then extend the allocation (and the buffer used for the write)
	 * out to the file system's write iosize. We clean up any extra
	 * space left over when the file is closed in xfs_inactive().
-	 * We can only do this if we are sure that we will create buffers
-	 * over all of the space we allocate beyond the end of the file.
-	 * Not doing so would allow us to create delalloc blocks with
-	 * no pages in memory covering them.  So, we need to check that
-	 * there are not any real blocks in the area beyond the end of
-	 * the file which we are optimistically going to preallocate. If
-	 * there are then our buffers will stop when they encounter them
-	 * and we may accidentally create delalloc blocks beyond them
-	 * that we never cover with a buffer.  All of this is because
-	 * we are not actually going to write the extra blocks preallocated
-	 * at this point.
	 *
	 * We don't bother with this for sync writes, because we need
	 * to minimize the amount we write for good performance.
	 */
-	if (!(ioflag & PBF_SYNC) && ((offset + count) > XFS_SIZE(mp, io))) {
-		start_fsb = XFS_B_TO_FSBT(mp,
-				((xfs_ufsize_t)(offset + count - 1)));
-		count_fsb = mp->m_writeio_blocks;
-		while (count_fsb > 0) {
-			nimaps = XFS_WRITE_IMAPS;
-			error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
-					  0, NULL, 0, imap, &nimaps,
-					  NULL);
-			if (error) {
-				return error;
-			}
-			for (n = 0; n < nimaps; n++) {
-				if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
-				    (imap[n].br_startblock != DELAYSTARTBLOCK)) {
-					goto write_map;
-				}
-				start_fsb += imap[n].br_blockcount;
-				count_fsb -= imap[n].br_blockcount;
-				ASSERT(count_fsb < 0xffff000);
-			}
-		}
+	if (!(ioflag & PBF_SYNC) && ((offset + count) > ip->i_d.di_size)) {
+		xfs_off_t	aligned_offset;
+		unsigned int	iosize;
+		xfs_fileoff_t	ioalign;
+
 		iosize = mp->m_writeio_blocks;
 		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
 		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
 		last_fsb = ioalign + iosize;
 		aeof = 1;
 	}
-write_map:
+
 	nimaps = XFS_WRITE_IMAPS;
 	firstblock = NULLFSBLOCK;
@@ -642,11 +459,11 @@ xfs_iomap_write_delay(
	 * roundup the allocation request to m_dalign boundary if file size
	 * is greater that 512K and we are allocating past the allocation eof
	 */
-	if (mp->m_dalign && (XFS_SIZE(mp, io) >= mp->m_dalign) && aeof) {
+	if (mp->m_dalign && (isize >= mp->m_dalign) && aeof) {
		int		eof;
		xfs_fileoff_t	new_last_fsb;

		new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
-		error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
+		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error) {
			return error;
		}
@@ -655,7 +472,7 @@ xfs_iomap_write_delay(
		}
	}

-	error = XFS_BMAPI(mp, NULL, io, offset_fsb,
+	error = xfs_bmapi(NULL, ip, offset_fsb,
			  (xfs_filblks_t)(last_fsb - offset_fsb),
			  XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
			  XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
...@@ -663,235 +480,255 @@ xfs_iomap_write_delay( ...@@ -663,235 +480,255 @@ xfs_iomap_write_delay(
/* /*
* This can be EDQUOT, if nimaps == 0 * This can be EDQUOT, if nimaps == 0
*/ */
if (error) { if (error && (error != ENOSPC)) {
return XFS_ERROR(error); return XFS_ERROR(error);
} }
/* /*
* If bmapi returned us nothing, and if we didn't get back EDQUOT, * If bmapi returned us nothing, and if we didn't get back EDQUOT,
* then we must have run out of space. * then we must have run out of space.
*/ */
if (nimaps == 0) { if (nimaps == 0) {
return XFS_ERROR(ENOSPC); if (xfs_flush_space(ip, &fsynced, &ioflag))
return XFS_ERROR(ENOSPC);
error = 0;
goto retry;
} }
/* *ret_imap = imap[0];
* Now map our desired I/O size and alignment over the *nmaps = 1;
* extents returned by xfs_bmapi().
*/
*npbmaps = _xfs_imap_to_bmap(io, offset, imap, pbmapp,
nimaps, *npbmaps);
return 0; return 0;
} }
STATIC int /*
xfs_iomap_write_direct( * Pass in a delayed allocate extent, convert it to real extents;
xfs_iocore_t *io, * return to the caller the extent we create which maps on top of
loff_t offset, * the originating callers request.
size_t count, *
page_buf_bmap_t *pbmapp, * Called without a lock on the inode.
int *npbmaps, */
int ioflag, int
int found) xfs_iomap_write_allocate(
xfs_inode_t *ip,
xfs_bmbt_irec_t *map,
int *retmap)
{ {
xfs_inode_t *ip = XFS_IO_INODE(io); xfs_mount_t *mp = ip->i_mount;
xfs_mount_t *mp; xfs_fileoff_t offset_fsb, last_block;
xfs_fileoff_t offset_fsb; xfs_fileoff_t end_fsb, map_start_fsb;
xfs_fileoff_t last_fsb; xfs_fsblock_t first_block;
xfs_bmap_free_t free_list;
xfs_filblks_t count_fsb; xfs_filblks_t count_fsb;
xfs_fsize_t isize; xfs_bmbt_irec_t imap[XFS_STRAT_WRITE_IMAPS];
xfs_fsblock_t firstfsb;
int nimaps, maps;
int error;
xfs_trans_t *tp; xfs_trans_t *tp;
int i, nimaps, committed;
int error = 0;
#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP *retmap = 0;
xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp;
xfs_bmap_free_t free_list;
int aeof;
int bmapi_flags;
xfs_filblks_t datablocks;
int rt;
int committed;
int numrtextents;
uint resblks;
int rtextsize;
maps = min(XFS_WRITE_IMAPS, *npbmaps); /*
nimaps = maps; * Make sure that the dquots are there.
*/
mp = io->io_mount; if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
isize = XFS_SIZE(mp, io); if ((error = xfs_qm_dqattach(ip, 0))) {
if (io->io_new_size > isize) return XFS_ERROR(error);
isize = io->io_new_size; }
}
aeof = ((offset + count) > isize) ? 1 : 0; offset_fsb = map->br_startoff;
count_fsb = map->br_blockcount;
map_start_fsb = offset_fsb;
offset_fsb = XFS_B_TO_FSBT(mp, offset); XFS_STATS_ADD(xfsstats.xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
count_fsb = last_fsb - offset_fsb;
if (found && (pbmapp->pbm_flags & PBMF_HOLE)) {
xfs_fileoff_t map_last_fsb;
map_last_fsb = XFS_B_TO_FSB(mp, while (count_fsb != 0) {
(pbmapp->pbm_bsize + pbmapp->pbm_offset)); /*
* Set up a transaction with which to allocate the
* backing store for the file. Do allocations in a
* loop until we get some space in the range we are
* interested in. The other space that might be allocated
* is in the delayed allocation extent on which we sit
* but before our buffer starts.
*/
if (map_last_fsb < last_fsb) { nimaps = 0;
last_fsb = map_last_fsb; while (nimaps == 0) {
count_fsb = last_fsb - offset_fsb; tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
} error = xfs_trans_reserve(tp, 0, XFS_WRITE_LOG_RES(mp),
ASSERT(count_fsb > 0); 0, XFS_TRANS_PERM_LOG_RES,
} XFS_WRITE_LOG_COUNT);
if (error) {
xfs_trans_cancel(tp, 0);
return XFS_ERROR(error);
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
/* XFS_BMAP_INIT(&free_list, &first_block);
* roundup the allocation request to m_dalign boundary if file size
* is greater that 512K and we are allocating past the allocation eof
*/
if (!found && mp->m_dalign && (isize >= 524288) && aeof) {
int eof;
xfs_fileoff_t new_last_fsb;
new_last_fsb = roundup_64(last_fsb, mp->m_dalign); nimaps = XFS_STRAT_WRITE_IMAPS;
printk("xfs_iomap_write_direct: about to XFS_BMAP_EOF %Ld\n", /*
new_last_fsb); * Ensure we don't go beyond eof - it is possible
error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof); * the extents changed since we did the read call,
if (error) * we dropped the ilock in the interim.
goto error_out; */
if (eof)
last_fsb = new_last_fsb;
}
bmapi_flags = XFS_BMAPI_WRITE|XFS_BMAPI_DIRECT_IO|XFS_BMAPI_ENTIRE; end_fsb = XFS_B_TO_FSB(mp, ip->i_d.di_size);
bmapi_flags &= ~XFS_BMAPI_DIRECT_IO; xfs_bmap_last_offset(NULL, ip, &last_block,
XFS_DATA_FORK);
last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
if ((map_start_fsb + count_fsb) > last_block) {
count_fsb = last_block - map_start_fsb;
if (count_fsb == 0) {
error = EAGAIN;
goto trans_cancel;
}
}
/* /* Go get the actual blocks */
* determine if this is a realtime file error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
*/ XFS_BMAPI_WRITE, &first_block, 1,
if ((rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) != 0) { imap, &nimaps, &free_list);
rtextsize = mp->m_sb.sb_rextsize;
} else
rtextsize = 0;
error = 0; if (error)
goto trans_cancel;
/* error = xfs_bmap_finish(&tp, &free_list,
* allocate file space for the bmapp entries passed in. first_block, &committed);
*/
/* if (error)
* determine if reserving space on goto trans_cancel;
* the data or realtime partition.
*/
if (rt) {
numrtextents = (count_fsb + rtextsize - 1);
do_div(numrtextents, rtextsize);
datablocks = 0;
} else {
datablocks = count_fsb;
numrtextents = 0;
}
/* error = xfs_trans_commit(tp,
* allocate and setup the transaction XFS_TRANS_RELEASE_LOG_RES, NULL);
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
xfs_iunlock(ip, XFS_ILOCK_EXCL); if (error)
goto error0;
error = xfs_trans_reserve(tp, xfs_iunlock(ip, XFS_ILOCK_EXCL);
resblks, }
XFS_WRITE_LOG_RES(mp),
numrtextents,
XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
/*
* check for running out of space
*/
if (error) {
/* /*
* Free the transaction structure. * See if we were able to allocate an extent that
* covers at least part of the callers request
*/ */
xfs_trans_cancel(tp, 0);
}
xfs_ilock(ip, XFS_ILOCK_EXCL); for (i = 0; i < nimaps; i++) {
if ((map->br_startoff >= imap[i].br_startoff) &&
(map->br_startoff < (imap[i].br_startoff +
imap[i].br_blockcount))) {
*map = imap[i];
*retmap = 1;
XFS_STATS_INC(xfsstats.xs_xstrat_quick);
return 0;
}
count_fsb -= imap[i].br_blockcount;
}
if (error) { /* So far we have not mapped the requested part of the
goto error_out; /* Don't return in above if .. trans .., * file, just surrounding data, try again.
need lock to return */ */
nimaps--;
offset_fsb = imap[nimaps].br_startoff +
imap[nimaps].br_blockcount;
map_start_fsb = offset_fsb;
} }
if (XFS_IS_QUOTA_ON(mp)) { trans_cancel:
if (xfs_trans_reserve_quota(tp, xfs_bmap_cancel(&free_list);
ip->i_udquot, xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
ip->i_gdquot, error0:
resblks, 0, 0)) { xfs_iunlock(ip, XFS_ILOCK_EXCL);
error = (EDQUOT); return XFS_ERROR(error);
goto error1; }
}
nimaps = 1;
} else {
nimaps = 2;
}
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); int
xfs_trans_ihold(tp, ip); xfs_iomap_write_unwritten(
xfs_inode_t *ip,
loff_t offset,
size_t count)
{
xfs_mount_t *mp = ip->i_mount;
xfs_trans_t *tp;
xfs_fileoff_t offset_fsb;
xfs_filblks_t count_fsb;
xfs_filblks_t numblks_fsb;
xfs_bmbt_irec_t imap;
int committed;
int error;
int nres;
int nimaps;
xfs_fsblock_t firstfsb;
xfs_bmap_free_t free_list;
/* offset_fsb = XFS_B_TO_FSBT(mp, offset);
* issue the bmapi() call to allocate the blocks count_fsb = XFS_B_TO_FSB(mp, count);
*/
XFS_BMAP_INIT(&free_list, &firstfsb);
imapp = &imap[0];
error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb,
bmapi_flags, &firstfsb, 1, imapp, &nimaps, &free_list);
if (error) {
goto error0;
}
/* do {
* complete the transaction nres = XFS_DIOSTRAT_SPACE_RES(mp, 0);
*/
error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); /*
if (error) { * set up a transaction to convert the range of extents
goto error0; * from unwritten to real. Do allocations in a loop until
} * we have covered the range passed in.
*/
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
if (error) { error = xfs_trans_reserve(tp, nres,
goto error_out; XFS_WRITE_LOG_RES(mp), 0,
} XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
if (error) {
xfs_trans_cancel(tp, 0);
goto error0;
}
/* copy any maps to caller's array and return any error. */ xfs_ilock(ip, XFS_ILOCK_EXCL);
if (nimaps == 0) { xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
error = (ENOSPC); xfs_trans_ihold(tp, ip);
goto error_out;
}
maps = min(nimaps, maps);
*npbmaps = _xfs_imap_to_bmap(io, offset, &imap[0], pbmapp, maps,
*npbmaps);
if (*npbmaps) {
/* /*
* this is new since xfs_iomap_read * Modify the unwritten extent state of the buffer.
* didn't find it.
*/ */
if (*npbmaps != 1) { XFS_BMAP_INIT(&free_list, &firstfsb);
/* NEED MORE WORK FOR MULTIPLE BMAPS (which are new) */ nimaps = 1;
BUG(); error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
} XFS_BMAPI_WRITE, &firstfsb,
} 1, &imap, &nimaps, &free_list);
goto out; if (error)
goto error_on_bmapi_transaction;
error0: /* Cancel bmap, unlock inode, and cancel trans */ error = xfs_bmap_finish(&(tp), &(free_list),
xfs_bmap_cancel(&free_list); firstfsb, &committed);
if (error)
goto error_on_bmapi_transaction;
error1: /* Just cancel transaction */ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); xfs_iunlock(ip, XFS_ILOCK_EXCL);
*npbmaps = 0; /* nothing set-up here */ if (error)
goto error0;
error_out: if ((numblks_fsb = imap.br_blockcount) == 0) {
out: /* Just return error and any tracing at end of routine */ /*
* The numblks_fsb value should always get
* smaller, otherwise the loop is stuck.
*/
ASSERT(imap.br_blockcount);
break;
}
offset_fsb += numblks_fsb;
count_fsb -= numblks_fsb;
} while (count_fsb > 0);
return 0;
error_on_bmapi_transaction:
xfs_bmap_cancel(&free_list);
xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
xfs_iunlock(ip, XFS_ILOCK_EXCL);
error0:
return XFS_ERROR(error); return XFS_ERROR(error);
} }
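Editor's note: XFS_WRITEIO_ALIGN at the top of this file rounds a byte offset down to the filesystem's write-I/O granularity by shifting the low bits out and back in. A tiny standalone check of the arithmetic; m_writeio_log = 16 (64 KiB) is just an example value, not a fixed XFS constant:

#include <stdio.h>

/* Same shape as the kernel macro, with the mount structure reduced
 * to the one field the macro reads. */
struct mount_stub { int m_writeio_log; };

#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> (mp)->m_writeio_log) \
						<< (mp)->m_writeio_log)

int main(void)
{
	struct mount_stub m = { 16 };	/* 64 KiB write-I/O size */

	/* 100000 rounds down to 65536; 65536 is already aligned */
	printf("%lld\n", XFS_WRITEIO_ALIGN(&m, 100000LL));
	printf("%lld\n", XFS_WRITEIO_ALIGN(&m, 65536LL));
	return 0;
}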
...
@@ -575,7 +575,7 @@ STATIC int
 linvfs_setxattr(
 	struct dentry	*dentry,
 	const char	*name,
-	void		*data,
+	const void	*data,
 	size_t		size,
 	int		flags)
 {
@@ -593,13 +593,15 @@ linvfs_setxattr(
 		error = -ENOATTR;
 		p += xfs_namespaces[SYSTEM_NAMES].namelen;
 		if (strcmp(p, POSIXACL_ACCESS) == 0) {
-			error = xfs_acl_vset(vp, data, size, _ACL_TYPE_ACCESS);
+			error = xfs_acl_vset(vp, (void *) data, size,
+						_ACL_TYPE_ACCESS);
 		}
 		else if (strcmp(p, POSIXACL_DEFAULT) == 0) {
-			error = xfs_acl_vset(vp, data, size, _ACL_TYPE_DEFAULT);
+			error = xfs_acl_vset(vp, (void *) data, size,
+						_ACL_TYPE_DEFAULT);
 		}
 		else if (strcmp(p, POSIXCAP) == 0) {
-			error = xfs_cap_vset(vp, data, size);
+			error = xfs_cap_vset(vp, (void *) data, size);
 		}
 		if (!error) {
 			error = vn_revalidate(vp);
@@ -619,7 +621,7 @@ linvfs_setxattr(
 			return -EPERM;
 		xflags |= ATTR_ROOT;
 		p += xfs_namespaces[ROOT_NAMES].namelen;
-		VOP_ATTR_SET(vp, p, data, size, xflags, NULL, error);
+		VOP_ATTR_SET(vp, p, (void *) data, size, xflags, NULL, error);
 		return -error;
 	}
 	if (strncmp(name, xfs_namespaces[USER_NAMES].name,
@@ -627,7 +629,7 @@ linvfs_setxattr(
 		if (!capable_user_xattr(inode))
 			return -EPERM;
 		p += xfs_namespaces[USER_NAMES].namelen;
-		VOP_ATTR_SET(vp, p, data, size, xflags, NULL, error);
+		VOP_ATTR_SET(vp, p, (void *) data, size, xflags, NULL, error);
 		return -error;
 	}
 	return -ENOATTR;
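Editor's note: the change above makes the xattr data pointer const to match the VFS prototype, then casts constness away at the call sites because the older XFS/VOP interfaces still take a plain void *. A minimal illustration of the pattern; the function names here are placeholders, not the real XFS calls:

/* Legacy callee that was never const-ified; it does not modify buf,
 * but its prototype cannot promise that. */
static int legacy_set(void *buf, unsigned long size)
{
	(void)buf;
	(void)size;
	return 0;
}

/* Const-correct caller, matching the VFS-facing prototype: the cast
 * documents that we deliberately drop the qualifier at the boundary
 * to an older interface rather than mutating the data. */
int setxattr_like(const void *data, unsigned long size)
{
	return legacy_set((void *) data, size);
}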
...
@@ -808,6 +808,25 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
 	}
 }

+int
+xfs_bmap(bhv_desc_t	*bdp,
+	xfs_off_t	offset,
+	ssize_t		count,
+	int		flags,
+	page_buf_bmap_t	*pbmapp,
+	int		*npbmaps)
+{
+	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
+	xfs_iocore_t	*io = &ip->i_iocore;
+
+	ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
+	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
+	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
+
+	return xfs_iomap(io, offset, count, flags, pbmapp, npbmaps);
+}
+
 /*
  * Wrapper around bdstrat so that we can stop data
  * from going to disk in case we are shutting down the filesystem.
...
@@ -36,6 +36,8 @@ struct vnode;
 struct bhv_desc;
 struct xfs_mount;
 struct xfs_iocore;
+struct xfs_inode;
+struct xfs_bmbt_irec;
 struct page_buf_s;
 struct page_buf_bmap_s;
@@ -62,6 +64,16 @@ extern ssize_t xfs_sendfile (struct bhv_desc *, struct file *,
				loff_t *, size_t, read_actor_t,
				void *, struct cred *);

+extern int xfs_iomap (struct xfs_iocore *, xfs_off_t, ssize_t, int,
+			struct page_buf_bmap_s *, int *);
+extern int xfs_iomap_write_direct (struct xfs_inode *, loff_t, size_t,
+			int, struct xfs_bmbt_irec *, int *, int);
+extern int xfs_iomap_write_delay (struct xfs_inode *, loff_t, size_t,
+			int, struct xfs_bmbt_irec *, int *);
+extern int xfs_iomap_write_allocate (struct xfs_inode *,
+			struct xfs_bmbt_irec *, int *);
+extern int xfs_iomap_write_unwritten (struct xfs_inode *, loff_t, size_t);
+
 extern int xfs_dev_is_read_only (struct xfs_mount *, char *);
 extern void XFS_log_write_unmount_ro (struct bhv_desc *);
...
@@ -507,8 +507,15 @@ xfs_relse_buftarg(
 	truncate_inode_pages(btp->pbr_mapping, 0LL);
 }

+unsigned int
+xfs_getsize_buftarg(
+	xfs_buftarg_t		*btp)
+{
+	return block_size(btp->pbr_bdev);
+}
+
 void
-xfs_size_buftarg(
+xfs_setsize_buftarg(
 	xfs_buftarg_t		*btp,
 	unsigned int		blocksize,
 	unsigned int		sectorsize)
@@ -535,7 +542,7 @@ xfs_alloc_buftarg(
 	btp->pbr_dev =  bdev->bd_dev;
 	btp->pbr_bdev = bdev;
 	btp->pbr_mapping = bdev->bd_inode->i_mapping;
-	xfs_size_buftarg(btp, PAGE_CACHE_SIZE, bdev_hardsect_size(bdev));
+	xfs_setsize_buftarg(btp, PAGE_CACHE_SIZE, bdev_hardsect_size(bdev));

 	return btp;
 }
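Editor's note: xfs_getsize_buftarg() feeds xfs_readsb() further down, which converts the byte count to 512-byte basic blocks with BTOBB before reading. A quick standalone check of that conversion; a BBSHIFT of 9 is the standard XFS basic-block shift, the rest is illustrative:

#include <stdio.h>

#define BBSHIFT		9
#define BBSIZE		(1 << BBSHIFT)		/* 512-byte basic block */
/* Round bytes up to whole basic blocks, as the XFS BTOBB macro does. */
#define BTOBB(bytes)	(((bytes) + BBSIZE - 1) >> BBSHIFT)

int main(void)
{
	/* A 512-byte sector is 1 basic block; a 4096-byte one is 8. */
	printf("%lu\n", (unsigned long) BTOBB(512));
	printf("%lu\n", (unsigned long) BTOBB(4096));
	return 0;
}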
...
@@ -82,15 +82,17 @@ struct xfs_mount;
 struct pb_target;
 struct block_device;

-extern void xfs_initialize_vnode (bhv_desc_t *, vnode_t *, bhv_desc_t *, int);
+extern void xfs_initialize_vnode(bhv_desc_t *, vnode_t *, bhv_desc_t *, int);

-extern int  xfs_blkdev_get (struct xfs_mount *, const char *,
-				struct block_device **);
-extern void xfs_blkdev_put (struct block_device *);
+extern int  xfs_blkdev_get(struct xfs_mount *, const char *,
+				struct block_device **);
+extern void xfs_blkdev_put(struct block_device *);

-extern struct pb_target *xfs_alloc_buftarg (struct block_device *);
-extern void xfs_size_buftarg (struct pb_target *, unsigned int, unsigned int);
-extern void xfs_relse_buftarg (struct pb_target *);
-extern void xfs_free_buftarg (struct pb_target *);
+extern struct pb_target *xfs_alloc_buftarg(struct block_device *);
+extern void xfs_relse_buftarg(struct pb_target *);
+extern void xfs_free_buftarg(struct pb_target *);
+
+extern void xfs_setsize_buftarg(struct pb_target *, unsigned int, unsigned int);
+extern unsigned int xfs_getsize_buftarg(struct pb_target *);

 #endif	/* __XFS_SUPER_H__ */
...
@@ -48,9 +48,6 @@
 #include <linux/buffer_head.h>
 #include <linux/uio.h>

-enum xfs_buffer_state { BH_Delay = BH_PrivateStart };
-BUFFER_FNS(Delay, delay);
-
 /*
  * Turn this on to get pagebuf lock ownership
 #define PAGEBUF_LOCK_TRACKING
@@ -83,7 +80,7 @@ typedef enum {			/* pbm_flags values */
 	PBMF_HOLE = 0x02,		/* mapping covers a hole	*/
 	PBMF_DELAY = 0x04,		/* mapping covers delalloc region */
 	PBMF_UNWRITTEN = 0x20		/* mapping covers allocated	*/
-					/* but uninitialized XFS data	*/
+					/* but uninitialized file data	*/
 } bmap_flags_t;

 typedef enum page_buf_flags_e {		/* pb_flags values */
@@ -105,19 +102,17 @@ typedef enum page_buf_flags_e {	/* pb_flags values */
 	PBF_TRYLOCK = (1 << 14),	/* lock requested, but do not wait */
 	PBF_FILE_ALLOCATE = (1 << 15),	/* allocate all file space	*/
 	PBF_DONT_BLOCK = (1 << 16),	/* do not block in current thread */
 	PBF_DIRECT = (1 << 17),		/* direct I/O desired		*/
+	PBF_FILE_UNWRITTEN = (1 << 18),	/* convert unwritten extent space */

 	/* flags used only internally */
 	_PBF_LOCKABLE = (1 << 19),	/* page_buf_t may be locked	*/
-	_PBF_ALL_PAGES_MAPPED = (1 << 21),
-					/* all pages in rage are mapped */
-	_PBF_ADDR_ALLOCATED = (1 << 22),
-					/* pb_addr space was allocated	*/
-	_PBF_MEM_ALLOCATED = (1 << 23),
-					/* pb_mem and underlying pages allocated */
+	_PBF_ALL_PAGES_MAPPED = (1 << 21), /* all pages in range mapped	*/
+	_PBF_ADDR_ALLOCATED = (1 << 22), /* pb_addr space was allocated */
+	_PBF_MEM_ALLOCATED = (1 << 23),	/* pb_mem+underlying pages alloc'd */

 	PBF_FORCEIO = (1 << 24),
 	PBF_FLUSH = (1 << 25),		/* flush disk write cache	*/
 	PBF_READ_AHEAD = (1 << 26),
 } page_buf_flags_t;
...
@@ -104,8 +104,8 @@ int xfs_alloc_block_minrecs(int lev, struct xfs_btree_cur *cur);
 #define XFS_MAX_BLOCKSIZE_LOG	16	/* i.e. 65536 bytes */
 #define XFS_MIN_BLOCKSIZE	(1 << XFS_MIN_BLOCKSIZE_LOG)
 #define XFS_MAX_BLOCKSIZE	(1 << XFS_MAX_BLOCKSIZE_LOG)
-#define	XFS_MIN_SECTORSIZE_LOG	9	/* i.e. 512 bytes */
-#define	XFS_MAX_SECTORSIZE_LOG	15	/* i.e. 32768 bytes */
+#define XFS_MIN_SECTORSIZE_LOG	9	/* i.e. 512 bytes */
+#define XFS_MAX_SECTORSIZE_LOG	15	/* i.e. 32768 bytes */
 #define XFS_MIN_SECTORSIZE	(1 << XFS_MIN_SECTORSIZE_LOG)
 #define XFS_MAX_SECTORSIZE	(1 << XFS_MAX_SECTORSIZE_LOG)
...
@@ -35,28 +35,34 @@
 /* These are just for xfs_syncsub... it sets an internal variable
  * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
  */
 #define XFS_B_ASYNC		PBF_ASYNC
 #define XFS_B_DELWRI		PBF_DELWRI
 #define XFS_B_READ		PBF_READ
 #define XFS_B_WRITE		PBF_WRITE
 #define XFS_B_STALE		PBF_STALE

 #define XFS_BUF_TRYLOCK		PBF_TRYLOCK
 #define XFS_INCORE_TRYLOCK	PBF_TRYLOCK
 #define XFS_BUF_LOCK		PBF_LOCK
 #define XFS_BUF_MAPPED		PBF_MAPPED

 #define BUF_BUSY		PBF_DONT_BLOCK

-#define XFS_BUF_BFLAGS(x)	((x)->pb_flags) /* debugging routines might need this */
+#define XFS_BUF_BFLAGS(x)	((x)->pb_flags)
 #define XFS_BUF_ZEROFLAGS(x)	\
	((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_SYNC|PBF_DELWRI))

 #define XFS_BUF_STALE(x)	((x)->pb_flags |= XFS_B_STALE)
 #define XFS_BUF_UNSTALE(x)	((x)->pb_flags &= ~XFS_B_STALE)
 #define XFS_BUF_ISSTALE(x)	((x)->pb_flags & XFS_B_STALE)
-#define XFS_BUF_SUPER_STALE(x)	(x)->pb_flags |= XFS_B_STALE;\
-				xfs_buf_undelay(x);\
-				(x)->pb_flags &= ~(PBF_PARTIAL|PBF_NONE)
+#define XFS_BUF_SUPER_STALE(x)	do {				\
+					XFS_BUF_STALE(x);	\
+					xfs_buf_undelay(x);	\
+					XFS_BUF_DONE(x);	\
+				} while (0)
+
+#define XFS_BUF_MANAGE		PBF_FS_MANAGED
+#define XFS_BUF_UNMANAGE(x)	((x)->pb_flags &= ~PBF_FS_MANAGED)

 static inline void xfs_buf_undelay(page_buf_t *pb)
 {
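Editor's note: rewrapping XFS_BUF_SUPER_STALE in do { ... } while (0) is the standard fix for multi-statement macros; the old three-statement form breaks silently when used as the body of an if without braces. A small demonstration of the failure mode, using toy macros rather than the real buffer flags:

#include <stdio.h>

/* Toy stand-ins for the real flag operations. */
#define BAD_STALE(x)	(x)->stale = 1; \
			(x)->done = 1

#define GOOD_STALE(x)	do {			\
				(x)->stale = 1;	\
				(x)->done = 1;	\
			} while (0)

struct buf { int stale, done; };

int main(void)
{
	struct buf a = { 0, 0 }, b = { 0, 0 };
	int cond = 0;

	if (cond)
		BAD_STALE(&a);	/* only the first statement is guarded:
				 * a.done becomes 1 even though cond is 0 */
	if (cond)
		GOOD_STALE(&b);	/* whole body guarded; b stays untouched */

	printf("%d %d\n", a.done, b.done);	/* prints "1 0" */
	return 0;
}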
...
@@ -164,13 +164,6 @@ xfs_dm_send_data_event(
	int		flags,
	vrwlock_t	*locktype);

-extern int
-xfs_dm_send_create_event(
-	bhv_desc_t	*dir_bdp,
-	char		*name,
-	mode_t		new_mode,
-	int		*good_event_sent);
-
 extern int
 xfs_dm_send_mmap_event(
	struct vm_area_struct	*vma,
@@ -249,16 +242,6 @@ typedef enum {
  * Stubs for XFS DMAPI utility routines.
  */

-static __inline int
-xfs_dm_send_create_event(
-	bhv_desc_t	*dir_bdp,
-	char		*name,
-	mode_t		new_mode,
-	int		*good_event_sent)
-{
-	return 0;
-}
-
 static __inline int
 xfs_dm_send_data_event(
	dm_eventtype_t	event,
...
@@ -246,9 +246,7 @@ xfs_iget_core(
	/*
	 * Read the disk inode attributes into a new inode structure and get
-	 * a new vnode for it. Initialize the inode lock so we can idestroy
-	 * it soon if it's a dup.  This should also initialize i_ino, i_bno,
-	 * i_mount, and i_index.
+	 * a new vnode for it. This should also initialize i_ino and i_mount.
	 */
	error = xfs_iread(mp, tp, ino, &ip, bno);
	if (error) {
@@ -142,7 +142,7 @@ xfs_inobp_bwcheck(xfs_buf_t *bp)
 	}
 	if (INT_ISZERO(dip->di_next_unlinked, ARCH_CONVERT)) {
 		cmn_err(CE_WARN,
-"Bad next_unlinked field (0) in XFS inode buffer 0x%x, starting blockno %Ld, offset 0x%x",
+"Bad next_unlinked field (0) in XFS inode buffer 0x%p, starting blockno %Ld, offset 0x%x",
 			(__uint64_t)(__psunsigned_t) bp,
 			(__int64_t) XFS_BUF_ADDR(bp),
 			xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
......
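This hunk, and the matching ones in the shutdown and transaction-buffer code further down, switch the pointer format specifier from 0x%x to 0x%p: %x takes an unsigned int, so on a 64-bit kernel it prints only the low 32 bits of the address and trips printk format warnings. A tiny user-space illustration of the difference (not kernel code):

#include <stdio.h>

int main(void)
{
	int x = 42;
	int *p = &x;

	/* %p prints the full pointer value on any word size. */
	printf("full pointer:    %p\n", (void *)p);

	/* Forcing the pointer through unsigned int, as %x implies,
	 * truncates it on LP64 platforms. */
	printf("truncated to 32: 0x%x\n", (unsigned int)(unsigned long)p);
	return 0;
}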
@@ -41,14 +41,24 @@ xfs_size_fn(
 }

 xfs_ioops_t	xfs_iocore_xfs = {
+	.xfs_ioinit		= (xfs_ioinit_t) fs_noerr,
 	.xfs_bmapi_func		= (xfs_bmapi_t) xfs_bmapi,
 	.xfs_bmap_eof_func	= (xfs_bmap_eof_t) xfs_bmap_eof,
+	.xfs_iomap_write_direct =
+			(xfs_iomap_write_direct_t) xfs_iomap_write_direct,
+	.xfs_iomap_write_delay =
+			(xfs_iomap_write_delay_t) xfs_iomap_write_delay,
+	.xfs_iomap_write_allocate =
+			(xfs_iomap_write_allocate_t) xfs_iomap_write_allocate,
+	.xfs_iomap_write_unwritten =
+			(xfs_iomap_write_unwritten_t) xfs_iomap_write_unwritten,
 	.xfs_ilock		= (xfs_lock_t) xfs_ilock,
+	.xfs_lck_map_shared	= (xfs_lck_map_shared_t) xfs_ilock_map_shared,
 	.xfs_ilock_demote	= (xfs_lock_demote_t) xfs_ilock_demote,
 	.xfs_ilock_nowait	= (xfs_lock_nowait_t) xfs_ilock_nowait,
 	.xfs_unlock		= (xfs_unlk_t) xfs_iunlock,
 	.xfs_size_func		= (xfs_size_t) xfs_size_fn,
-	.xfs_lastbyte		= (xfs_lastbyte_t) xfs_file_last_byte,
+	.xfs_iodone		= (xfs_iodone_t) fs_noerr,
 };

 void
@@ -83,4 +93,3 @@ xfs_iocore_inode_init(
 	xfs_iocore_inode_reinit(ip);
 }
-
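xfs_iocore_xfs is the XFS-local instance of the I/O ops vector declared in xfs_mount.h below: a struct of function pointers installed in the mount structure, through which filesystem-independent code dispatches to the xfs (or cxfs) implementation. A self-contained miniature of the pattern, with invented names (io_ops_t, IO_ILOCK and friends are illustrative, not the real XFS types):

#include <stdio.h>

typedef int (*lock_fn_t)(void *obj, unsigned int mode);

/* Miniature ops vector: one slot per overridable operation. */
typedef struct io_ops {
	lock_fn_t	ilock;
	lock_fn_t	iunlock;
} io_ops_t;

static int local_ilock(void *obj, unsigned int mode)
{
	printf("lock   %p mode %u\n", obj, mode);
	return 0;
}

static int local_iunlock(void *obj, unsigned int mode)
{
	printf("unlock %p mode %u\n", obj, mode);
	return 0;
}

/* Dispatch macros in the style of XFS_ILOCK()/XFS_IUNLOCK(). */
#define IO_ILOCK(ops, obj, mode)	(*(ops)->ilock)((obj), (mode))
#define IO_IUNLOCK(ops, obj, mode)	(*(ops)->iunlock)((obj), (mode))

int main(void)
{
	io_ops_t ops = { .ilock = local_ilock, .iunlock = local_iunlock };
	int obj;

	IO_ILOCK(&ops, &obj, 1);
	IO_IUNLOCK(&ops, &obj, 1);
	return 0;
}

Because the vector lives in the mount structure, each call costs one pointer indirection, and a cluster filesystem could install a different vector without touching any caller.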
@@ -419,42 +419,64 @@ xfs_xlatesb(
 int
 xfs_readsb(xfs_mount_t *mp)
 {
+	unsigned int	sector_size;
+	unsigned int	extra_flags;
 	xfs_buf_t	*bp;
 	xfs_sb_t	*sbp;
-	int		error = 0;
+	int		error;

-	ASSERT(mp->m_sb_bp == 0);
+	ASSERT(mp->m_sb_bp == NULL);
+	ASSERT(mp->m_ddev_targp != NULL);

 	/*
 	 * Allocate a (locked) buffer to hold the superblock.
-	 * This will be kept around at all time to optimize
+	 * This will be kept around at all times to optimize
 	 * access to the superblock.
 	 */
-	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR, 1,
-		PBF_LOCK|PBF_READ|PBF_MAPPED|PBF_MAPPABLE|PBF_FS_MANAGED);
-	ASSERT(bp != NULL);
-	ASSERT(XFS_BUF_ISBUSY(bp) && XFS_BUF_VALUSEMA(bp) <= 0);
+	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
+	extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
+
+	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
+				BTOBB(sector_size), extra_flags);
+	ASSERT(bp);
+	ASSERT(XFS_BUF_ISBUSY(bp));
+	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

 	/*
 	 * Initialize the mount structure from the superblock.
 	 * But first do some basic consistency checking.
 	 */
 	sbp = XFS_BUF_TO_SBP(bp);
-	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, ARCH_CONVERT, XFS_SB_ALL_BITS);
-	if ((error = xfs_mount_validate_sb(mp, &(mp->m_sb)))) {
+	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1,
+			ARCH_CONVERT, XFS_SB_ALL_BITS);
+
+	error = xfs_mount_validate_sb(mp, &(mp->m_sb));
+	if (error) {
 		cmn_err(CE_WARN, "XFS: SB validate failed");
-		goto err;
+		XFS_BUF_UNMANAGE(bp);
+		xfs_buf_relse(bp);
+		return error;
+	}
+
+	/*
+	 * Re-read the superblock so that our buffer is correctly sized.
+	 * We only need to do this if sector size on-disk is different.
+	 */
+	if (sector_size != mp->m_sb.sb_sectsize) {
+		XFS_BUF_UNMANAGE(bp);
+		xfs_buf_relse(bp);
+		sector_size = mp->m_sb.sb_sectsize;
+		bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
+					BTOBB(sector_size), extra_flags);
+		ASSERT(bp);
+		ASSERT(XFS_BUF_ISBUSY(bp));
+		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
 	}

 	mp->m_sb_bp = bp;
 	xfs_buf_relse(bp);
 	ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
 	return 0;
-
-err:
-	bp->pb_flags &= ~PBF_FS_MANAGED;
-	xfs_buf_relse(bp);
-	return error;
 }
@@ -1531,10 +1553,10 @@ xfs_freesb(
 	/*
 	 * Use xfs_getsb() so that the buffer will be locked
-	 * when we call nfreerbuf().
+	 * when we call xfs_buf_relse().
 	 */
 	bp = xfs_getsb(mp, 0);
-	bp->pb_flags &= ~PBF_FS_MANAGED;
+	XFS_BUF_UNMANAGE(bp);
 	xfs_buf_relse(bp);
 	mp->m_sb_bp = NULL;
 }
......
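The reworked xfs_readsb() is a read-validate-reread sequence: it first reads the superblock using the buffer target's current idea of the sector size, validates it, and then, if the on-disk sb_sectsize differs from that initial guess, releases the buffer and reads again so the long-lived managed buffer ends up sized to the real sector size. A hedged user-space sketch of just the control flow (all names invented, error handling elided):

#include <stdio.h>
#include <stdlib.h>

struct fake_sb { unsigned int sectsize; };

static const unsigned int on_disk_sectsize = 4096;	/* pretend value */

/* Stand-in for xfs_buf_read_flags(): "read" size bytes of superblock. */
static struct fake_sb *read_sb(unsigned int size)
{
	struct fake_sb *sb = malloc(size);
	sb->sectsize = on_disk_sectsize;
	return sb;
}

int main(void)
{
	unsigned int sector_size = 512;			/* initial guess */
	struct fake_sb *sb = read_sb(sector_size);

	if (sb->sectsize != sector_size) {
		/* Guess was wrong: drop the buffer, re-read at real size. */
		sector_size = sb->sectsize;
		free(sb);
		sb = read_sb(sector_size);
	}
	printf("superblock buffer sized for %u-byte sectors\n", sector_size);
	free(sb);
	return 0;
}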
@@ -87,41 +87,60 @@ struct xfs_bmap_free;
 #define AIL_LOCK(mp,s)		s=mutex_spinlock(&(mp)->m_ail_lock)
 #define AIL_UNLOCK(mp,s)	mutex_spinunlock(&(mp)->m_ail_lock, s)

-/* Prototypes and functions for I/O core modularization, a vector
- * of functions is used to indirect from xfs/cxfs independent code
- * to the xfs/cxfs dependent code.
- * The vector is placed in the mount structure so that we can
- * minimize the number of memory indirections involved.
+/*
+ * Prototypes and functions for I/O core modularization.
  */
+struct flid;
+struct buf;

+typedef int		(*xfs_ioinit_t)(struct vfs *,
+				struct xfs_mount_args *, int *);
 typedef int		(*xfs_bmapi_t)(struct xfs_trans *, void *,
 				xfs_fileoff_t, xfs_filblks_t, int,
 				xfs_fsblock_t *, xfs_extlen_t,
 				struct xfs_bmbt_irec *, int *,
 				struct xfs_bmap_free *);
 typedef int		(*xfs_bmap_eof_t)(void *, xfs_fileoff_t, int, int *);
+typedef int		(*xfs_iomap_write_direct_t)(
+				void *, loff_t, size_t, int,
+				struct xfs_bmbt_irec *, int *, int);
+typedef int		(*xfs_iomap_write_delay_t)(
+				void *, loff_t, size_t, int,
+				struct xfs_bmbt_irec *, int *);
+typedef int		(*xfs_iomap_write_allocate_t)(
+				void *, struct xfs_bmbt_irec *, int *);
+typedef int		(*xfs_iomap_write_unwritten_t)(
+				void *, loff_t, size_t);
+typedef uint		(*xfs_lck_map_shared_t)(void *);
 typedef void		(*xfs_lock_t)(void *, uint);
 typedef void		(*xfs_lock_demote_t)(void *, uint);
 typedef int		(*xfs_lock_nowait_t)(void *, uint);
 typedef void		(*xfs_unlk_t)(void *, unsigned int);
-typedef void		(*xfs_chgtime_t)(void *, int);
 typedef xfs_fsize_t	(*xfs_size_t)(void *);
-typedef xfs_fsize_t	(*xfs_lastbyte_t)(void *);
+typedef xfs_fsize_t	(*xfs_iodone_t)(struct vfs *);

 typedef struct xfs_ioops {
-	xfs_bmapi_t		xfs_bmapi_func;
-	xfs_bmap_eof_t		xfs_bmap_eof_func;
-	xfs_lock_t		xfs_ilock;
-	xfs_lock_demote_t	xfs_ilock_demote;
-	xfs_lock_nowait_t	xfs_ilock_nowait;
-	xfs_unlk_t		xfs_unlock;
-	xfs_chgtime_t		xfs_chgtime;
-	xfs_size_t		xfs_size_func;
-	xfs_lastbyte_t		xfs_lastbyte;
+	xfs_ioinit_t			xfs_ioinit;
+	xfs_bmapi_t			xfs_bmapi_func;
+	xfs_bmap_eof_t			xfs_bmap_eof_func;
+	xfs_iomap_write_direct_t	xfs_iomap_write_direct;
+	xfs_iomap_write_delay_t		xfs_iomap_write_delay;
+	xfs_iomap_write_allocate_t	xfs_iomap_write_allocate;
+	xfs_iomap_write_unwritten_t	xfs_iomap_write_unwritten;
+	xfs_lock_t			xfs_ilock;
+	xfs_lck_map_shared_t		xfs_lck_map_shared;
+	xfs_lock_demote_t		xfs_ilock_demote;
+	xfs_lock_nowait_t		xfs_ilock_nowait;
+	xfs_unlk_t			xfs_unlock;
+	xfs_size_t			xfs_size_func;
+	xfs_iodone_t			xfs_iodone;
 } xfs_ioops_t;

+#define XFS_IOINIT(vfsp, args, flags) \
+	(*(mp)->m_io_ops.xfs_ioinit)(vfsp, args, flags)
 #define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist) \
 	(*(mp)->m_io_ops.xfs_bmapi_func) \
 		(trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist)
@@ -130,9 +149,31 @@ typedef struct xfs_ioops {
 	(*(mp)->m_io_ops.xfs_bmap_eof_func) \
 		((io)->io_obj, endoff, whichfork, eof)

+#define XFS_IOMAP_WRITE_DIRECT(mp, io, offset, count, flags, mval, nmap, found)\
+	(*(mp)->m_io_ops.xfs_iomap_write_direct) \
+		((io)->io_obj, offset, count, flags, mval, nmap, found)
+
+#define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \
+	(*(mp)->m_io_ops.xfs_iomap_write_delay) \
+		((io)->io_obj, offset, count, flags, mval, nmap)
+
+#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, mval, nmap) \
+	(*(mp)->m_io_ops.xfs_iomap_write_allocate) \
+		((io)->io_obj, mval, nmap)
+
+#define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \
+	(*(mp)->m_io_ops.xfs_iomap_write_unwritten) \
+		((io)->io_obj, offset, count)
+
+#define XFS_LCK_MAP_SHARED(mp, io) \
+	(*(mp)->m_io_ops.xfs_lck_map_shared)((io)->io_obj)
+
 #define XFS_ILOCK(mp, io, mode) \
 	(*(mp)->m_io_ops.xfs_ilock)((io)->io_obj, mode)

+#define XFS_ILOCK_NOWAIT(mp, io, mode) \
+	(*(mp)->m_io_ops.xfs_ilock_nowait)((io)->io_obj, mode)
+
 #define XFS_IUNLOCK(mp, io, mode) \
 	(*(mp)->m_io_ops.xfs_unlock)((io)->io_obj, mode)
@@ -142,8 +183,13 @@ typedef struct xfs_ioops {
 #define XFS_SIZE(mp, io) \
 	(*(mp)->m_io_ops.xfs_size_func)((io)->io_obj)

-#define XFS_LASTBYTE(mp, io) \
-	(*(mp)->m_io_ops.xfs_lastbyte)((io)->io_obj)
+#define XFS_IODONE(vfsp) \
+	(*(mp)->m_io_ops.xfs_iodone)(vfsp)
+
+/*
+ * Prototypes and functions for the XFS realtime subsystem.
+ */

 typedef struct xfs_mount {
@@ -303,8 +349,8 @@ typedef struct xfs_mount {
 /*
  * Default minimum read and write sizes.
  */
-#define XFS_READIO_LOG_LARGE	12
-#define XFS_WRITEIO_LOG_LARGE	12
+#define XFS_READIO_LOG_LARGE	16
+#define XFS_WRITEIO_LOG_LARGE	16

 /*
  * Default allocation size
  */
......
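XFS_READIO_LOG_LARGE and XFS_WRITEIO_LOG_LARGE are base-2 logarithms of a byte count, so this hunk raises the default large read/write I/O size from 1 << 12 = 4096 bytes (4 KiB) to 1 << 16 = 65536 bytes (64 KiB). A one-liner to sanity-check the arithmetic:

#include <stdio.h>

int main(void)
{
	/* log2 constants, as used by the *_LOG_LARGE defines above */
	printf("1 << 12 = %d bytes\n", 1 << 12);	/* 4096  (4 KiB)  */
	printf("1 << 16 = %d bytes\n", 1 << 16);	/* 65536 (64 KiB) */
	return 0;
}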
@@ -2287,7 +2287,7 @@ xfs_rtmount_init(
 		return XFS_ERROR(E2BIG);
 	}
 	error = xfs_read_buf(mp, mp->m_rtdev_targp,
-				XFS_FSB_TO_BB(mp, d - 1),
+				d - XFS_FSB_TO_BB(mp, 1),
 				XFS_FSB_TO_BB(mp, 1), 0, &bp);
 	if (error) {
 		cmn_err(CE_WARN,
......
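This one-line change fixes a units mix-up when probing the last block of the realtime device. Assuming d already holds the device size in 512-byte basic blocks (BBs), the address of the last filesystem block is d minus one filesystem block's worth of BBs; the old expression instead treated d - 1 as a count of filesystem blocks and scaled the whole quantity up. A worked example under that assumption (FSB_TO_BB is a stand-in for XFS_FSB_TO_BB with 4 KiB blocks, i.e. 8 BBs per block):

#include <stdio.h>

#define FSB_TO_BB(fsb)	((long)(fsb) << 3)	/* 4096 / 512 = 8 */

int main(void)
{
	long d = 8000;	/* device size in BBs = 1000 filesystem blocks */

	/* Old: rescales the whole device size -- far past the end. */
	printf("FSB_TO_BB(d - 1) = %ld BBs\n", FSB_TO_BB(d - 1));

	/* New: start of the last filesystem block on the device. */
	printf("d - FSB_TO_BB(1) = %ld BBs\n", d - FSB_TO_BB(1));
	return 0;
}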
@@ -97,7 +97,7 @@ xfs_do_force_shutdown(
 	if (!(flags & XFS_FORCE_UMOUNT)) {
 		cmn_err(CE_NOTE,
-"xfs_force_shutdown(%s,0x%x) called from line %d of file %s. Return address = 0x%x",
+"xfs_force_shutdown(%s,0x%x) called from line %d of file %s. Return address = 0x%p",
 			mp->m_fsname,flags,lnnum,fname,__return_address);
 	}
 	/*
......
@@ -472,7 +472,7 @@ xfs_trans_read_buf(
 	 */
 #if defined(DEBUG)
 	if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
-		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%x", bp);
+		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
 #endif
 	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
 		(XFS_B_STALE|XFS_B_DELWRI));
......
@@ -451,18 +451,19 @@ xfs_mount(
 		goto error;
 	}

-	xfs_size_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
-			 mp->m_sb.sb_sectsize);
+	xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
+			    mp->m_sb.sb_sectsize);

 	if (logdev && logdev != ddev) {
-		unsigned int	ss = BBSIZE;
+		unsigned int	log_sector_size = BBSIZE;

 		if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb))
-			ss = mp->m_sb.sb_logsectsize;
-		xfs_size_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize, ss);
+			log_sector_size = mp->m_sb.sb_logsectsize;
+		xfs_setsize_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize,
+				    log_sector_size);
 	}
 	if (rtdev)
-		xfs_size_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_blocksize,
-				 mp->m_sb.sb_blocksize);
+		xfs_setsize_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_blocksize,
+				    mp->m_sb.sb_blocksize);

 	error = xfs_mountfs(vfsp, mp, ddev->bd_dev, flags);
 	if (error)
......
@@ -44,15 +44,6 @@ extern int xfs_ioctl(bhv_desc_t *, struct inode *, struct file *,
 			unsigned int, unsigned long);

-#ifdef XFS_RW_TRACE
-STATIC void
-xfs_ctrunc_trace(
-	int		tag,
-	xfs_inode_t	*ip);
-#else
-#define	xfs_ctrunc_trace(tag, ip)
-#endif /* DEBUG */
-
 /*
  * For xfs, we check that the file isn't too big to be opened by this kernel.
  * No other open action is required for regular files. Devices are handled
@@ -1880,7 +1871,6 @@ xfs_lookup(
 	cred_t			*credp)
 {
 	xfs_inode_t		*dp, *ip;
-	struct vnode		*vp;
 	xfs_ino_t		e_inum;
 	int			error;
 	uint			lock_mode;
@@ -1896,58 +1886,19 @@ xfs_lookup(
 	lock_mode = xfs_ilock_map_shared(dp);
 	error = xfs_dir_lookup_int(dir_bdp, lock_mode, dentry, &e_inum, &ip);
-	if (error) {
-		xfs_iunlock_map_shared(dp, lock_mode);
-		return error;
+	if (!error) {
+		*vpp = XFS_ITOV(ip);
+		ITRACE(ip);
 	}
-
-	vp = XFS_ITOV(ip);
-	ITRACE(ip);
-
 	xfs_iunlock_map_shared(dp, lock_mode);
-
-	*vpp = vp;
-	return 0;
+	return error;
 }

-#ifdef XFS_RW_TRACE
-STATIC void
-xfs_ctrunc_trace(
-	int		tag,
-	xfs_inode_t	*ip)
-{
-	if (ip->i_rwtrace == NULL) {
-		return;
-	}
-
-	ktrace_enter(ip->i_rwtrace,
-		     (void*)((long)tag),
-		     (void*)ip,
-		     (void*)((long)private.p_cpuid),
-		     (void*)0,
-		     (void*)0,
-		     (void*)0,
-		     (void*)0,
-		     (void*)0,
-		     (void*)0,
-		     (void*)0,
-		     (void*)0,
-		     (void*)0,
-		     (void*)0,
-		     (void*)0,
-		     (void*)0,
-		     (void*)0);
-}
-#endif	/* XFS_RW_TRACE */
-
 #define XFS_CREATE_NEW_MAXTRIES 10000

 /*
  * xfs_create (create a new file).
- * It might still find name exists out there, though.
- * But vpp, doens't point at a vnode.
  */
 STATIC int
 xfs_create(
@@ -1968,7 +1919,6 @@ xfs_create(
 	xfs_bmap_free_t		free_list;
 	xfs_fsblock_t		first_block;
 	boolean_t		dp_joined_to_trans;
-	int			dm_event_sent = 0;
 	uint			cancel_flags;
 	int			committed;
 	xfs_prid_t		prid;
@@ -1989,8 +1939,10 @@ xfs_create(
 		return XFS_ERROR(ENAMETOOLONG);

 	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
-		error = xfs_dm_send_create_event(dir_bdp, name,
-				dm_di_mode, &dm_event_sent);
+		error = dm_send_namesp_event(DM_EVENT_CREATE,
+				dir_bdp, DM_RIGHT_NULL, NULL,
+				DM_RIGHT_NULL, name, NULL,
+				dm_di_mode, 0, 0);
 		if (error)
 			return error;
 	}
@@ -2161,7 +2113,7 @@ xfs_create(
 	/* Fallthrough to std_return with error = 0  */

 std_return:
-	if ((error != 0 && dm_event_sent != 0) &&
+	if ((error != 0) &&
 	    DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
 			DM_EVENT_POSTCREATE)) {
 		(void) dm_send_namesp_event(DM_EVENT_POSTCREATE,
@@ -2227,16 +2179,7 @@ int xfs_rm_attempts;
  * vnode ref count will still include that from the .. entry in
  * this case.
  *
- * The inode passed in will have been looked up using xfs_get_dir_entry().
- * Since that lookup the directory lock will have been dropped, so
- * we need to validate that the inode given is still pointed to by the
- * directory. We use the directory inode in memory generation count
- * as an optimization to tell if a new lookup is necessary. If the
- * directory no longer points to the given inode with the given name,
- * then we drop the directory lock, set the entry_changed parameter to 1,
- * and return. It is up to the caller to drop the reference to the inode.
- *
- * There is a dealock we need to worry about. If the locked directory is
+ * There is a deadlock we need to worry about. If the locked directory is
  * in the AIL, it might be blocking up the log. The next inode we lock
  * could be already locked by another thread waiting for log space (e.g
  * a permanent log reservation with a long running transaction (see
@@ -2249,8 +2192,7 @@ STATIC int
 xfs_lock_dir_and_entry(
 	xfs_inode_t	*dp,
 	vname_t		*dentry,
-	xfs_inode_t	*ip,	/* inode of entry 'name' */
-	int		*entry_changed)
+	xfs_inode_t	*ip)	/* inode of entry 'name' */
 {
 	int		attempts;
 	xfs_ino_t	e_inum;
@@ -2263,7 +2205,6 @@ xfs_lock_dir_and_entry(
 	attempts = 0;

 again:
-	*entry_changed = 0;
 	xfs_ilock(dp, XFS_ILOCK_EXCL);

 	e_inum = ip->i_ino;
@@ -2477,7 +2418,6 @@ xfs_remove(
 	xfs_fsblock_t           first_block;
 	int                     cancel_flags;
 	int                     committed;
-	int			entry_changed;
 	int                     dm_di_mode = 0;
 	int                     link_zero;
 	uint                    resblks;
@@ -2504,7 +2444,6 @@ xfs_remove(
 	}

 	/* From this point on, return through std_return */
-retry:
 	ip = NULL;

 	/*
@@ -2571,7 +2510,7 @@ xfs_remove(
 		return error;
 	}

-	error = xfs_lock_dir_and_entry(dp, dentry, ip, &entry_changed);
+	error = xfs_lock_dir_and_entry(dp, dentry, ip);
 	if (error) {
 		REMOVE_DEBUG_TRACE(__LINE__);
 		xfs_trans_cancel(tp, cancel_flags);
@@ -2579,17 +2518,6 @@ xfs_remove(
 		goto std_return;
 	}

-	/*
-	 * If the inode we found in the first pass is no longer
-	 * the entry with the given name, then drop our transaction and
-	 * inode reference and start over.
-	 */
-	if (entry_changed) {
-		xfs_trans_cancel(tp, cancel_flags);
-		IRELE(ip);
-		goto retry;
-	}
-
 	/*
 	 * At this point, we've gotten both the directory and the entry
 	 * inodes locked.
@@ -2610,28 +2538,6 @@ xfs_remove(
 		goto error_return;
 	}

-	if ((ip->i_d.di_mode & IFMT) == IFDIR) {
-		error = XFS_ERROR(EPERM);
-		REMOVE_DEBUG_TRACE(__LINE__);
-		goto error_return;
-	}
-
-	/*
-	 * Return error when removing . and ..
-	 */
-	if (name[0] == '.') {
-		if (name[1] == '\0') {
-			error = XFS_ERROR(EINVAL);
-			REMOVE_DEBUG_TRACE(__LINE__);
-			goto error_return;
-		}
-		else if (name[1] == '.' && name[2] == '\0') {
-			error = XFS_ERROR(EEXIST);
-			REMOVE_DEBUG_TRACE(__LINE__);
-			goto error_return;
-		}
-	}
-
 	/*
 	 * Entry must exist since we did a lookup in xfs_lock_dir_and_entry.
 	 */
@@ -2696,8 +2602,7 @@ xfs_remove(
 	IRELE(ip);

 	/* Fall through to std_return with error = 0 */
-
 std_return:
 	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp,
 			     DM_EVENT_POSTREMOVE)) {
 		(void) dm_send_namesp_event(DM_EVENT_POSTREMOVE,
@@ -2938,7 +2843,6 @@ xfs_mkdir(
 	vnode_t			*dir_vp;
 	boolean_t		dp_joined_to_trans;
 	boolean_t		created = B_FALSE;
-	int			dm_event_sent = 0;
 	xfs_prid_t		prid;
 	xfs_dquot_t		*udqp, *gdqp;
 	uint			resblks;
@@ -2961,8 +2865,10 @@ xfs_mkdir(
 	dm_di_mode = vap->va_mode|VTTOIF(vap->va_type);

 	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
-		error = xfs_dm_send_create_event(dir_bdp, dir_name,
-					dm_di_mode, &dm_event_sent);
+		error = dm_send_namesp_event(DM_EVENT_CREATE,
+					dir_bdp, DM_RIGHT_NULL, NULL,
+					DM_RIGHT_NULL, dir_name, NULL,
+					dm_di_mode, 0, 0);
 		if (error)
 			return error;
 	}
@@ -3127,7 +3033,7 @@ xfs_mkdir(
 	 * xfs_trans_commit. */

 std_return:
-	if ( (created || (error != 0 && dm_event_sent != 0)) &&
+	if ( (created || (error != 0)) &&
 	    DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
 			DM_EVENT_POSTCREATE)) {
 		(void) dm_send_namesp_event(DM_EVENT_POSTCREATE,
@@ -3180,7 +3086,6 @@ xfs_rmdir(
 	xfs_fsblock_t           first_block;
 	int                     cancel_flags;
 	int                     committed;
-	int			entry_changed;
 	vnode_t			*dir_vp;
 	int			dm_di_mode = 0;
 	int			last_cdp_link;
@@ -3209,7 +3114,6 @@ xfs_rmdir(
 	/* Return through std_return after this point. */

-retry:
 	cdp = NULL;

 	/*
@@ -3281,24 +3185,13 @@ xfs_rmdir(
 	 * that the directory entry for the child directory inode has
 	 * not changed while we were obtaining a log reservation.
 	 */
-	error = xfs_lock_dir_and_entry(dp, dentry, cdp, &entry_changed);
+	error = xfs_lock_dir_and_entry(dp, dentry, cdp);
 	if (error) {
 		xfs_trans_cancel(tp, cancel_flags);
 		IRELE(cdp);
 		goto std_return;
 	}

-	/*
-	 * If the inode we found in the first pass is no longer
-	 * the entry with the given name, then drop our transaction and
-	 * inode reference and start over.
-	 */
-	if (entry_changed) {
-		xfs_trans_cancel(tp, cancel_flags);
-		IRELE(cdp);
-		goto retry;
-	}
-
 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);

 	if (dp != cdp) {
 		/*
@@ -3455,20 +3348,12 @@ xfs_readdir(
 	}

 	lock_mode = xfs_ilock_map_shared(dp);
-
-	if ((dp->i_d.di_mode & IFMT) != IFDIR) {
-		xfs_iunlock_map_shared(dp, lock_mode);
-		return XFS_ERROR(ENOTDIR);
-	}
-
 	start_offset = uiop->uio_offset;

 	error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp);
 	if (start_offset != uiop->uio_offset) {
 		xfs_ichgtime(dp, XFS_ICHGTIME_ACC);
 	}

 	xfs_iunlock_map_shared(dp, lock_mode);
-
 	return error;
 }
......
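The xfs_lookup() rewrite above is a small control-flow cleanup: instead of unlocking and returning separately on the error and success paths, both outcomes now fall through to a single xfs_iunlock_map_shared() call and one return. A hedged generic sketch of the same shape (lookup_entry(), lock()/unlock() and the types are invented, not XFS code):

#include <stdio.h>

struct dir { int locked; };

static void lock(struct dir *d)   { d->locked = 1; }
static void unlock(struct dir *d) { d->locked = 0; }

static int lookup_entry(struct dir *d, const char *name, int *out)
{
	(void)d;
	if (name[0] == '\0')
		return -1;	/* pretend ENOENT */
	*out = 42;
	return 0;
}

static int lookup(struct dir *d, const char *name, int *result)
{
	int error;

	lock(d);
	error = lookup_entry(d, name, result);
	if (!error) {
		/* success-only work happens under the lock */
	}
	/* No early return: both outcomes reach the one unlock site. */
	unlock(d);
	return error;
}

int main(void)
{
	struct dir d = { 0 };
	int v = 0;

	printf("lookup: %d, value %d, locked %d\n",
	       lookup(&d, "file", &v), v, d.locked);
	return 0;
}

With a single exit path it is impossible to return with the directory still locked, which is exactly the class of bug the two-exit version invites.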
@@ -1637,9 +1637,9 @@ static void printinode(struct inode *ip)
 	if (ip == NULL)
 		return;

-	kdb_printf(" i_ino = %lu i_count = %u i_dev = 0x%x i_size %Ld\n",
+	kdb_printf(" i_ino = %lu i_count = %u i_size %Ld\n",
 		ip->i_ino, atomic_read(&ip->i_count),
-		ip->i_sb->s_dev, ip->i_size);
+		ip->i_size);
 	kdb_printf(
 		" i_mode = 0x%x  i_nlink = %d  i_rdev = 0x%x i_state = 0x%lx\n",
......
@@ -22,6 +22,7 @@ enum bh_state_bits {
 	BH_New,		/* Disk mapping was newly created by get_block */
 	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
 	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
+	BH_Delay,	/* Buffer is not yet allocated on disk */

 	BH_Boundary,	/* Block is followed by a discontiguity */
 	BH_PrivateStart,/* not a state bit, but the first bit available
@@ -105,6 +106,7 @@ BUFFER_FNS(Mapped, mapped)
 BUFFER_FNS(New, new)
 BUFFER_FNS(Async_Read, async_read)
 BUFFER_FNS(Async_Write, async_write)
+BUFFER_FNS(Delay, delay)
 BUFFER_FNS(Boundary, boundary)

 #define bh_offset(bh)	((unsigned long)(bh)->b_data & ~PAGE_MASK)
......
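BUFFER_FNS(Delay, delay) generates the usual trio of buffer-state helpers, set_buffer_delay(), clear_buffer_delay() and buffer_delay(), over the BH_Delay bit in bh->b_state, so generic code can tell that a delayed-allocation buffer holds data with no disk mapping yet. A runnable user-space approximation of what the macro expands to (the real kernel helpers use atomic set_bit()/clear_bit()/test_bit(); BH_Delay's numeric value here is illustrative):

#include <stdio.h>

enum { BH_Delay = 7 };

struct buffer_head { unsigned long b_state; };

static void set_buffer_delay(struct buffer_head *bh)
{
	bh->b_state |= 1UL << BH_Delay;
}

static void clear_buffer_delay(struct buffer_head *bh)
{
	bh->b_state &= ~(1UL << BH_Delay);
}

static int buffer_delay(const struct buffer_head *bh)
{
	return (bh->b_state >> BH_Delay) & 1;
}

int main(void)
{
	struct buffer_head bh = { 0 };

	set_buffer_delay(&bh);
	printf("delayed? %d\n", buffer_delay(&bh));	/* 1 */
	clear_buffer_delay(&bh);
	printf("delayed? %d\n", buffer_delay(&bh));	/* 0 */
	return 0;
}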