Commit e597d4aa authored by Nathan Scott

[XFS] Cleanup after initially investigating unwritten extents.

SGI Modid: 2.5.x-xfs:slinx:134059a
parent 44e70659
@@ -101,7 +101,6 @@ map_buffer_at_offset(
ASSERT(!(mp->pbm_flags & PBMF_HOLE));
ASSERT(!(mp->pbm_flags & PBMF_DELAY));
-ASSERT(!(mp->pbm_flags & PBMF_UNWRITTEN));
ASSERT(mp->pbm_bn != PAGE_BUF_DADDR_NULL);
delta = page->index;
@@ -348,15 +347,15 @@ cluster_write(
* page ready for freeing its buffers. When called with startio set then
* we are coming from writepage.
*
-* When called with startio e.g. from
-* write page it is important that we write WHOLE page if possible. The
-* bh->b_state's can not know of any of the blocks or which block for
-* that matter are dirty due to map writes, and therefore bh uptodate is
-* only vaild if the pagei itself isn't completely uptodate. Some layers
-* may clear the page dirty flag prior to calling write page under the
-* assumption the entire page will be written out, by not writing out the
-* whole page the page can be reused before all vaild dirty data is
-* written out. Note: in the case of a page that has been dirty'd by
+* When called with startio set it is important that we write the WHOLE
+* page if possible.
+* The bh->b_state's cannot know if any of the blocks or which block for
+* that matter are dirty due to mmap writes, and therefore bh uptodate is
+* only valid if the page itself isn't completely uptodate. Some layers
+* may clear the page dirty flag prior to calling writepage, under the
+* assumption the entire page will be written out; by not writing out the
+* whole page the page can be reused before all valid dirty data is
+* written out. Note: in the case of a page that has been dirty'd by
* mmap write but partially set up by block_prepare_write the
* bh->b_state's will not agree and only ones setup by BPW/BCW will have
* valid state, thus the whole page must be written out.
@@ -388,7 +387,7 @@ delalloc_convert(
end_offset = offset + PAGE_CACHE_SIZE;
if (end_offset > inode->i_size)
end_offset = inode->i_size;
if (startio && !page_has_buffers(page))
create_empty_buffers(page, 1 << inode->i_blkbits, 0);
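When startio is set and the page arrived without buffer_heads attached (mmap-dirtied pages can do this), the hunk above attaches empty buffers before the block walk so the whole-page write-out described in the comment has per-block state to work with. A minimal sketch of that guard, assuming the 2.5-era buffer-head API; the function name and surrounding logic are invented for illustration:

static int
convert_page(struct inode *inode, struct page *page, int startio)
{
	loff_t end_offset;

	end_offset = ((loff_t)page->index << PAGE_CACHE_SHIFT) + PAGE_CACHE_SIZE;
	if (end_offset > inode->i_size)
		end_offset = inode->i_size;	/* don't map past EOF */

	/* writepage path: make sure bh state exists before walking blocks */
	if (startio && !page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/* ... walk each buffer, map or convert its block, then submit ... */
	return 0;
}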
@@ -521,39 +521,46 @@ xfs_attrmulti_by_handle(
* their own functions. Functions are defined after their use
* so gcc doesn't get fancy and inline them with -O3 */
-int xfs_ioc_space(
+STATIC int
+xfs_ioc_space(
bhv_desc_t *bdp,
vnode_t *vp,
struct file *filp,
unsigned int cmd,
unsigned long arg);
-int xfs_ioc_bulkstat(
+STATIC int
+xfs_ioc_bulkstat(
xfs_mount_t *mp,
unsigned int cmd,
unsigned long arg);
-int xfs_ioc_fsgeometry_v1(
+STATIC int
+xfs_ioc_fsgeometry_v1(
xfs_mount_t *mp,
unsigned long arg);
-int xfs_ioc_fsgeometry(
+STATIC int
+xfs_ioc_fsgeometry(
xfs_mount_t *mp,
unsigned long arg);
-int xfs_ioc_xattr(
+STATIC int
+xfs_ioc_xattr(
vnode_t *vp,
struct file *filp,
unsigned int cmd,
unsigned long arg);
-int xfs_ioc_getbmap(
+STATIC int
+xfs_ioc_getbmap(
bhv_desc_t *bdp,
struct file *filp,
unsigned int cmd,
unsigned long arg);
-int xfs_ioc_getbmapx(
+STATIC int
+xfs_ioc_getbmapx(
bhv_desc_t *bdp,
unsigned long arg);
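All seven helpers move from external linkage to STATIC, with forward declarations kept because each definition follows its caller; the comment above notes this also discourages gcc from inlining them at -O3. A stripped-down sketch of the arrangement (not the XFS source; in XFS, STATIC normally expands to static but can be defined away so debug builds keep the symbols visible):

#define STATIC static

STATIC int do_op(unsigned int cmd, unsigned long arg);	/* declared before use */

int
dispatch_ioctl(unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case 1:
		return do_op(cmd, arg);
	default:
		return -1;
	}
}

STATIC int			/* defined after the caller, as in the hunk above */
do_op(unsigned int cmd, unsigned long arg)
{
	return (int)(arg + cmd);
}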
@@ -800,16 +807,17 @@ xfs_ioctl(
}
}
-int xfs_ioc_space(
+STATIC int
+xfs_ioc_space(
bhv_desc_t *bdp,
vnode_t *vp,
struct file *filp,
unsigned int cmd,
unsigned long arg)
{
-xfs_flock64_t bf;
-int attr_flags = 0;
-int error;
+xfs_flock64_t bf;
+int attr_flags = 0;
+int error;
if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM);
@@ -833,16 +841,18 @@ int xfs_ioc_space(
return -error;
}
-int xfs_ioc_bulkstat(
+STATIC int
+xfs_ioc_bulkstat(
xfs_mount_t *mp,
unsigned int cmd,
unsigned long arg)
{
-xfs_fsop_bulkreq_t bulkreq;
-int count; /* # of records returned */
-xfs_ino_t inlast; /* last inode number */
-int done;
-int error;
+xfs_fsop_bulkreq_t bulkreq;
+int count; /* # of records returned */
+xfs_ino_t inlast; /* last inode number */
+int done;
+int error;
/* done = 1 if there are more stats to get and if bulkstat */
/* should be called again (unused here, but used in dmapi) */
@@ -901,7 +911,8 @@ int xfs_ioc_bulkstat(
return 0;
}
-int xfs_ioc_fsgeometry_v1(
+STATIC int
+xfs_ioc_fsgeometry_v1(
xfs_mount_t *mp,
unsigned long arg)
{
@@ -917,12 +928,13 @@ int xfs_ioc_fsgeometry_v1(
return 0;
}
-int xfs_ioc_fsgeometry(
+STATIC int
+xfs_ioc_fsgeometry(
xfs_mount_t *mp,
unsigned long arg)
{
-xfs_fsop_geom_t fsgeo;
-int error;
+xfs_fsop_geom_t fsgeo;
+int error;
error = xfs_fs_geometry(mp, &fsgeo, 4);
if (error)
@@ -933,15 +945,16 @@ int xfs_ioc_fsgeometry(
return 0;
}
-int xfs_ioc_xattr(
+STATIC int
+xfs_ioc_xattr(
vnode_t *vp,
struct file *filp,
unsigned int cmd,
unsigned long arg)
{
-struct fsxattr fa;
-vattr_t va;
-int error;
+struct fsxattr fa;
+vattr_t va;
+int error;
switch (cmd) {
case XFS_IOC_FSGETXATTR: {
@@ -998,15 +1011,16 @@ int xfs_ioc_xattr(
}
}
-int xfs_ioc_getbmap(
+STATIC int
+xfs_ioc_getbmap(
bhv_desc_t *bdp,
struct file *filp,
unsigned int cmd,
unsigned long arg)
{
-struct getbmap bm;
-int iflags;
-int error;
+struct getbmap bm;
+int iflags;
+int error;
if (copy_from_user(&bm, (struct getbmap *)arg, sizeof(bm)))
return -XFS_ERROR(EFAULT);
@@ -1027,14 +1041,15 @@ int xfs_ioc_getbmap(
return 0;
}
-int xfs_ioc_getbmapx(
+STATIC int
+xfs_ioc_getbmapx(
bhv_desc_t *bdp,
unsigned long arg)
{
-struct getbmapx bmx;
-struct getbmap bm;
-int iflags;
-int error;
+struct getbmapx bmx;
+struct getbmap bm;
+int iflags;
+int error;
if (copy_from_user(&bmx, (struct getbmapx *)arg, sizeof(bmx)))
return -XFS_ERROR(EFAULT);
@@ -56,7 +56,8 @@ STATIC int _xfs_imap_to_bmap(xfs_iocore_t *, xfs_off_t, xfs_bmbt_irec_t *,
int
-xfs_strategy(xfs_inode_t *ip,
+xfs_strategy(
+xfs_inode_t *ip,
xfs_off_t offset,
ssize_t count,
int flags,
@@ -74,13 +75,13 @@ xfs_strategy(xfs_inode_t *ip,
xfs_bmap_free_t free_list;
xfs_filblks_t count_fsb;
int committed, i, loops, nimaps;
-int is_xfs = 1; /* This will be a variable at some point */
+int is_xfs;
xfs_bmbt_irec_t imap[XFS_MAX_RW_NBMAPS];
xfs_trans_t *tp;
-io = &ip->i_iocore;
mp = ip->i_mount;
-/* is_xfs = IO_IS_XFS(io); */
+io = &ip->i_iocore;
+is_xfs = IO_IS_XFS(io);
ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
((io->io_flags & XFS_IOCORE_RT) != 0));
@@ -238,14 +239,19 @@ xfs_strategy(xfs_inode_t *ip,
*/
offset_fsb = XFS_B_TO_FSBT(mp, offset);
-for(i = 0; i < nimaps; i++) {
-int maps;
-if (offset_fsb >= imap[i].br_startoff &&
-(offset_fsb < (imap[i].br_startoff + imap[i].br_blockcount))) {
-XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL | XFS_EXTSIZE_WR);
+for (i = 0; i < nimaps; i++) {
+int maps;
+if ((offset_fsb >= imap[i].br_startoff) &&
+(offset_fsb <
+(imap[i].br_startoff + imap[i].br_blockcount))) {
+XFS_IUNLOCK(mp, io,
+XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
maps = min(nimaps, *npbmaps);
-*npbmaps = _xfs_imap_to_bmap(io, offset, &imap[i],
-pbmapp, maps, *npbmaps);
+*npbmaps = _xfs_imap_to_bmap(io, offset,
+&imap[i], pbmapp,
+maps, *npbmaps);
XFS_STATS_INC(xfsstats.xs_xstrat_quick);
return 0;
}
@@ -260,9 +266,11 @@ xfs_strategy(xfs_inode_t *ip,
nimaps--; /* Index of last entry */
ASSERT(nimaps >= 0);
-ASSERT(offset_fsb >= imap[nimaps].br_startoff + imap[nimaps].br_blockcount);
+ASSERT(offset_fsb >=
+imap[nimaps].br_startoff + imap[nimaps].br_blockcount);
ASSERT(count_fsb);
-offset_fsb = imap[nimaps].br_startoff + imap[nimaps].br_blockcount;
+offset_fsb =
+imap[nimaps].br_startoff + imap[nimaps].br_blockcount;
map_start_fsb = offset_fsb;
XFS_STATS_INC(xfsstats.xs_xstrat_split);
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
@@ -397,10 +405,9 @@ _xfs_imap_to_bmap(
if (io->io_new_size > nisize)
nisize = io->io_new_size;
-for (im=0, pbm=0; im < imaps && pbm < pbmaps; im++,pbmapp++,imap++,pbm++) {
+for (im=pbm=0; im < imaps && pbm < pbmaps; im++,pbmapp++,imap++,pbm++) {
pbmapp->pbm_target = io->io_flags & XFS_IOCORE_RT ?
-mp->m_rtdev_targp :
-mp->m_ddev_targp;
+mp->m_rtdev_targp : mp->m_ddev_targp;
pbmapp->pbm_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
pbmapp->pbm_delta = offset - pbmapp->pbm_offset;
pbmapp->pbm_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
@@ -415,8 +422,9 @@ _xfs_imap_to_bmap(
pbmapp->pbm_flags = PBMF_DELAY;
} else {
pbmapp->pbm_bn = XFS_FSB_TO_DB_IO(io, start_block);
-if (imap->br_state == XFS_EXT_UNWRITTEN)
+if (ISUNWRITTEN(imap)) {
pbmapp->pbm_flags |= PBMF_UNWRITTEN;
+}
}
if ((pbmapp->pbm_offset + pbmapp->pbm_bsize) >= nisize) {
@@ -425,7 +433,7 @@ _xfs_imap_to_bmap(
offset += pbmapp->pbm_bsize - pbmapp->pbm_delta;
}
-return(pbm); /* Return the number filled */
+return pbm; /* Return the number filled */
}
STATIC int
@@ -475,7 +483,7 @@ xfs_iomap_read(
* 2 must allocate.
* There are 3 cases when we allocate:
* delay allocation (doesn't really allocate or use transactions)
-* direct allocation (no previous delay allocation
+* direct allocation (no previous delay allocation)
* convert delay to real allocations
*/
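A hedged sketch of the case split in the comment above; the enum and chooser are invented for illustration (the real paths are xfs_iomap_read and the xfs_iomap_write_* routines in this file):

enum iomap_path {
	IOMAP_READ,	/* case 1: mapping exists, just return it */
	IOMAP_DELAY,	/* reserve space only, no transaction */
	IOMAP_DIRECT,	/* allocate real blocks in a transaction */
	IOMAP_CONVERT,	/* turn earlier delalloc into real blocks */
};

static enum iomap_path
choose_iomap_path(int found, int writing, int delalloc, int direct_io)
{
	if (!writing || (found && !delalloc))
		return IOMAP_READ;			/* nothing to allocate */
	if (delalloc)
		return IOMAP_CONVERT;			/* delay blocks under the range */
	return direct_io ? IOMAP_DIRECT : IOMAP_DELAY;	/* must allocate */
}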
@@ -716,17 +724,14 @@ xfs_iomap_write_direct(
if (io->io_new_size > isize)
isize = io->io_new_size;
-if ((offset + count) > isize) {
-aeof = 1;
-} else {
-aeof = 0;
-}
+aeof = ((offset + count) > isize) ? 1 : 0;
offset_fsb = XFS_B_TO_FSBT(mp, offset);
last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
count_fsb = last_fsb - offset_fsb;
if (found && (pbmapp->pbm_flags & PBMF_HOLE)) {
xfs_fileoff_t map_last_fsb;
map_last_fsb = XFS_B_TO_FSB(mp,
(pbmapp->pbm_bsize + pbmapp->pbm_offset));
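The conversion above pairs a truncating macro (XFS_B_TO_FSBT) for the start with a rounding-up macro (XFS_B_TO_FSB) for the end, so the block range covers every byte of the request. A self-contained sketch of that arithmetic, assuming a power-of-two block size (blocklog = log2 of the block size):

static unsigned long long
b_to_fsbt(unsigned long long bytes, int blocklog)
{
	return bytes >> blocklog;				/* round down */
}

static unsigned long long
b_to_fsb(unsigned long long bytes, int blocklog)
{
	return (bytes + (1ULL << blocklog) - 1) >> blocklog;	/* round up */
}

/* With 4096-byte blocks (blocklog 12), offset 6000 and count 100:
 * offset_fsb = 1, last_fsb = 2, so count_fsb = 1 block. */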
@@ -742,15 +747,15 @@ xfs_iomap_write_direct(
* is greater that 512K and we are allocating past the allocation eof
*/
if (!found && mp->m_dalign && (isize >= 524288) && aeof) {
-int eof;
-xfs_fileoff_t new_last_fsb;
+int eof;
+xfs_fileoff_t new_last_fsb;
new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
printk("xfs_iomap_write_direct: about to XFS_BMAP_EOF %Ld\n",
new_last_fsb);
error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
-if (error) {
+if (error)
goto error_out;
-}
if (eof)
last_fsb = new_last_fsb;
}
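The stripe-alignment check above rounds the last block up to a multiple of the stripe unit (m_dalign) and keeps the padded size only if XFS_BMAP_EOF confirms it still lies beyond the current allocation EOF. The roundup itself is ordinary round-to-multiple arithmetic; a plain-C sketch (the kernel's roundup_64 does the same with do_div for 64-bit math):

static unsigned long long
round_up_to(unsigned long long x, unsigned int unit)
{
	return ((x + unit - 1) / unit) * unit;	/* smallest multiple of unit >= x */
}

/* e.g. a stripe unit of 16 blocks pads a request ending at block 100
 * out to block 112, keeping new allocations stripe-aligned. */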
@@ -867,13 +872,14 @@ xfs_iomap_write_direct(
maps = min(nimaps, maps);
*npbmaps = _xfs_imap_to_bmap(io, offset, &imap[0], pbmapp, maps,
*npbmaps);
-if(*npbmaps) {
+if (*npbmaps) {
/*
* this is new since xfs_iomap_read
* didn't find it.
*/
if (*npbmaps != 1) {
printk("NEED MORE WORK FOR MULTIPLE BMAPS (which are new)\n");
/* NEED MORE WORK FOR MULTIPLE BMAPS (which are new) */
BUG();
}
}
goto out;
@@ -889,4 +895,3 @@ xfs_iomap_write_direct(
out: /* Just return error and any tracing at end of routine */
return XFS_ERROR(error);
}
@@ -321,10 +321,9 @@ xfs_zero_last_block(
return 0;
}
/*
-* Get a pagebuf for the last block, zero the part beyond the
-* EOF, and write it out sync. We need to drop the ilock
-* while we do this so we don't deadlock when the buffer cache
-* calls back to us.
+* Zero the part of the last block beyond the EOF, and write it
+* out sync. We need to drop the ilock while we do this so we
+* don't deadlock when the buffer cache calls back to us.
*/
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
loff = XFS_FSB_TO_B(mp, last_fsb);
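A self-contained illustration of the lock-drop the comment describes (names invented; the real code uses XFS_IUNLOCK/XFS_ILOCK on the iocore lock). Holding the inode lock across the buffered write would self-deadlock, because the buffer cache can call back into the filesystem, which re-takes the lock:

struct ilk { int held; };

static void iunlock(struct ilk *l)	{ l->held = 0; }
static void ilock(struct ilk *l)	{ l->held = 1; }

/* may call back into code that takes the ilock */
static int zero_and_write_sync(long long loff) { (void)loff; return 0; }

static int
zero_last_block(struct ilk *l, long long loff)
{
	int error;

	iunlock(l);				/* drop before buffer-cache I/O */
	error = zero_and_write_sync(loff);
	ilock(l);				/* caller expects it held on return */
	return error;
}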
@@ -401,7 +400,6 @@ xfs_zero_eof(
last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
-ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
if (last_fsb == end_zero_fsb) {
/*
@@ -414,10 +412,6 @@ xfs_zero_eof(
ASSERT(start_zero_fsb <= end_zero_fsb);
prev_zero_fsb = NULLFILEOFF;
prev_zero_count = 0;
-/*
-* Maybe change this loop to do the bmapi call and
-* loop while we split the mappings into pagebufs?
-*/
while (start_zero_fsb <= end_zero_fsb) {
nimaps = 1;
zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
@@ -792,7 +786,6 @@ xfs_write(
return(ret);
}
/*
* All xfs metadata buffers except log state machine buffers
* get this attached as their b_bdstrat callback function.
@@ -822,6 +815,7 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
return (xfs_bioerror(bp));
}
}
/*
* Wrapper around bdstrat so that we can stop data
* from going to disk in case we are shutting down the filesystem.
@@ -113,45 +113,6 @@ pb_trace_func(
}
#endif /* PAGEBUF_TRACE */
-#ifdef PAGEBUF_TRACKING
-#define MAX_PB 10000
-page_buf_t *pb_array[MAX_PB];
-EXPORT_SYMBOL(pb_array);
-void
-pb_tracking_get(
-page_buf_t *pb)
-{
-int i;
-for (i = 0; (pb_array[i] != 0) && (i < MAX_PB); i++) { }
-if (i == MAX_PB)
-printk("pb 0x%p not recorded in pb_array\n", pb);
-else {
-//printk("pb_get 0x%p in pb_array[%d]\n", pb, i);
-pb_array[i] = pb;
-}
-}
-void
-pb_tracking_free(
-page_buf_t *pb)
-{
-int i;
-for (i = 0; (pb_array[i] != pb) && (i < MAX_PB); i++) { }
-if (i < MAX_PB) {
-//printk("pb_free 0x%p from pb_array[%d]\n", pb, i);
-pb_array[i] = NULL;
-}
-else
-printk("Freed unmonitored pagebuf 0x%p\n", pb);
-}
-#else
-#define pb_tracking_get(pb) do { } while (0)
-#define pb_tracking_free(pb) do { } while (0)
-#endif /* PAGEBUF_TRACKING */
/*
* File wide globals
*/
@@ -314,8 +275,6 @@ _pagebuf_initialize(
*/
flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
-pb_tracking_get(pb);
memset(pb, 0, sizeof(page_buf_private_t));
atomic_set(&pb->pb_hold, 1);
init_MUTEX_LOCKED(&pb->pb_iodonesema);
@@ -444,7 +403,6 @@ _pagebuf_free_object(
}
}
-pb_tracking_free(pb);
pagebuf_deallocate(pb);
}
@@ -1743,8 +1701,7 @@ pagebuf_daemon(
spin_unlock(&pb_daemon->pb_delwrite_lock);
while (!list_empty(&tmp)) {
-pb = list_entry(tmp.next,
-page_buf_t, pb_list);
+pb = list_entry(tmp.next, page_buf_t, pb_list);
list_del_init(&pb->pb_list);
pb->pb_flags &= ~PBF_DELWRI;
pb->pb_flags |= PBF_WRITE;
@@ -2029,14 +1986,8 @@ pagebuf_init(void)
}
#ifdef PAGEBUF_TRACE
-# if 1
pb_trace.buf = (pagebuf_trace_t *)kmalloc(
PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t), GFP_KERNEL);
-# else
-/* Alternatively, for really really long trace bufs */
-pb_trace.buf = (pagebuf_trace_t *)vmalloc(
-PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t));
-# endif
memset(pb_trace.buf, 0, PB_TRACE_BUFSIZE * sizeof(pagebuf_trace_t));
pb_trace.start = 0;
pb_trace.end = PB_TRACE_BUFSIZE - 1;
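The simplified hunk keeps only the kmalloc path for the trace array and then initializes the ring indices. A userspace sketch of the same setup, with invented entry contents (the real pagebuf_trace_t records pagebuf events):

#include <stdlib.h>
#include <string.h>

#define PB_TRACE_BUFSIZE 1024		/* illustrative size */

typedef struct { void *pb; int event; } trace_ent_t;

struct trace_ring {
	trace_ent_t *buf;
	int start;	/* next slot to fill */
	int end;	/* last valid index */
};

static int
trace_ring_init(struct trace_ring *t)
{
	t->buf = malloc(PB_TRACE_BUFSIZE * sizeof(*t->buf));
	if (!t->buf)
		return -1;
	memset(t->buf, 0, PB_TRACE_BUFSIZE * sizeof(*t->buf));
	t->start = 0;
	t->end = PB_TRACE_BUFSIZE - 1;
	return 0;
}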
@@ -5526,14 +5526,13 @@ xfs_getbmap(
int bmapi_flags; /* flags for xfs_bmapi */
__int32_t oflags; /* getbmapx bmv_oflags field */
-ip = XFS_BHVTOI(bdp);
vp = BHV_TO_VNODE(bdp);
+ip = XFS_BHVTOI(bdp);
+mp = ip->i_mount;
-whichfork = interface & BMV_IF_ATTRFORK ?
-XFS_ATTR_FORK : XFS_DATA_FORK;
+whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
sh_unwritten = (interface & BMV_IF_PREALLOC) != 0;
/* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not
* generate a DMAPI read event. Otherwise, if the DM_EVENT_READ
* bit is set for the file, generate a read event in order
@@ -5575,8 +5574,6 @@ xfs_getbmap(
ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
return XFS_ERROR(EINVAL);
-mp = ip->i_mount;
if (whichfork == XFS_DATA_FORK) {
if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) {
prealloced = 1;
@@ -5600,18 +5597,15 @@ xfs_getbmap(
bmv->bmv_entries = 0;
return 0;
}
nex = bmv->bmv_count - 1;
if (nex <= 0)
return XFS_ERROR(EINVAL);
bmvend = bmv->bmv_offset + bmv->bmv_length;
xfs_ilock(ip, XFS_IOLOCK_SHARED);
if (whichfork == XFS_DATA_FORK && ip->i_delayed_blks) {
-/* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
}
@@ -5629,11 +5623,10 @@ xfs_getbmap(
bmapi_flags = XFS_BMAPI_AFLAG(whichfork) |
((sh_unwritten) ? 0 : XFS_BMAPI_IGSTATE);
-subnex = 16; /* XXXjtk - need a #define? */
/*
* Allocate enough space to handle "subnex" maps at a time.
*/
+subnex = 16;
map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP);
bmv->bmv_entries = 0;
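The loop rewritten in the next hunk fetches at most subnex extents per xfs_bmapi call and iterates until either the caller's entry budget or the mapped range runs out. The chunking pattern, reduced to a sketch with an invented fetch function:

#define SUBNEX 16

/* hypothetical: fill up to *nmap records, update *nmap to the count returned */
static int fetch_extents(int *nmap);

static int
walk_in_chunks(int nexleft)
{
	int nmap, error;

	do {
		nmap = (nexleft > SUBNEX) ? SUBNEX : nexleft;
		error = fetch_extents(&nmap);
		if (error)
			return error;
		nexleft -= nmap;	/* consume the caller's budget */
	} while (nmap && nexleft);
	return 0;
}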
@@ -5646,77 +5639,63 @@ xfs_getbmap(
nexleft = nex;
do {
-if (nexleft > subnex)
-nmap = subnex;
-else
-nmap = nexleft;
-error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
-XFS_BB_TO_FSB(mp, bmv->bmv_length),
-bmapi_flags, NULL, 0,
-map, &nmap, NULL);
-ASSERT(nmap <= subnex);
-if (error)
-goto unlock_and_return;
-for (error = i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
-nexleft--;
-oflags = 0;
-out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff);
-out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
-ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
-if ( prealloced
-&& map[i].br_startblock == HOLESTARTBLOCK
-&& out.bmv_offset + out.bmv_length == bmvend) {
-/*
-* came to hole at end of file
-*/
+nmap = (nexleft > subnex) ? subnex : nexleft;
+error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
+XFS_BB_TO_FSB(mp, bmv->bmv_length),
+bmapi_flags, NULL, 0, map, &nmap, NULL);
+if (error)
+goto unlock_and_return;
-} else {
-if (map[i].br_startblock == HOLESTARTBLOCK)
-out.bmv_block = -1;
-else
-out.bmv_block =
+ASSERT(nmap <= subnex);
+for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
+nexleft--;
+oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ?
+BMV_OF_PREALLOC : 0;
+out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff);
+out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
+ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
+if (prealloced &&
+map[i].br_startblock == HOLESTARTBLOCK &&
+out.bmv_offset + out.bmv_length == bmvend) {
+/*
+* came to hole at end of file
+*/
+goto unlock_and_return;
+} else {
+out.bmv_block =
+(map[i].br_startblock == HOLESTARTBLOCK) ?
+-1 :
+XFS_FSB_TO_DB(ip, map[i].br_startblock);
-/* return either a getbmap or a getbmapx structure. */
-if (interface & BMV_IF_EXTENDED) {
-struct getbmapx outx;
-GETBMAP_CONVERT(out,outx);
-outx.bmv_oflags = oflags;
-outx.bmv_unused1 = outx.bmv_unused2 = 0;
-if (copy_to_user(ap, &outx, sizeof(outx))) {
-error = XFS_ERROR(EFAULT);
-goto unlock_and_return;
-}
-} else {
-if (copy_to_user(ap, &out, sizeof(out))) {
-error = XFS_ERROR(EFAULT);
-goto unlock_and_return;
+/* return either getbmap/getbmapx structure. */
+if (interface & BMV_IF_EXTENDED) {
+struct getbmapx outx;
+GETBMAP_CONVERT(out,outx);
+outx.bmv_oflags = oflags;
+outx.bmv_unused1 = outx.bmv_unused2 = 0;
+if (copy_to_user(ap, &outx,
+sizeof(outx))) {
+error = XFS_ERROR(EFAULT);
+goto unlock_and_return;
+}
+} else {
+if (copy_to_user(ap, &out,
+sizeof(out))) {
+error = XFS_ERROR(EFAULT);
+goto unlock_and_return;
+}
+}
+bmv->bmv_offset =
+out.bmv_offset + out.bmv_length;
+bmv->bmv_length = MAX((__int64_t)0,
+(__int64_t)(bmvend - bmv->bmv_offset));
+bmv->bmv_entries++;
+ap = (interface & BMV_IF_EXTENDED) ?
+(void *)((struct getbmapx *)ap + 1) :
+(void *)((struct getbmap *)ap + 1);
+}
-bmv->bmv_offset = out.bmv_offset + out.bmv_length;
-bmv->bmv_length = MAX( (__int64_t)0,
-(__int64_t)(bmvend - bmv->bmv_offset) );
-bmv->bmv_entries++;
-if (interface & BMV_IF_EXTENDED)
-ap = (void *)((struct getbmapx *)ap + 1);
-else
-ap = (void *)((struct getbmap *)ap + 1);
-}
-}
} while (nmap && nexleft && bmv->bmv_length);
unlock_and_return:
@@ -167,7 +167,7 @@ xfs_exntfmt_t xfs_extfmt_inode(struct xfs_inode *ip);
(XFS_SB_VERSION_HASEXTFLGBIT(&((x)->i_mount->m_sb)) ? \
XFS_EXTFMT_HASSTATE : XFS_EXTFMT_NOSTATE)
#endif
-#define ISUNWRITTEN(x) ((x) == XFS_EXT_UNWRITTEN)
+#define ISUNWRITTEN(x) ((x)->br_state == XFS_EXT_UNWRITTEN)
/*
* Incore version of above.
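ISUNWRITTEN now takes a pointer to the whole in-core extent record instead of a bare state value, so callers like _xfs_imap_to_bmap pass the imap they already hold. A pared-down, self-contained mock of the shape (field names match the real xfs_bmbt_irec_t; the widths are simplified):

typedef enum { XFS_EXT_NORM, XFS_EXT_UNWRITTEN } xfs_exntst_t;

typedef struct xfs_bmbt_irec {
	unsigned long long br_startoff;
	unsigned long long br_startblock;
	unsigned long long br_blockcount;
	xfs_exntst_t	   br_state;
} xfs_bmbt_irec_t;

#define ISUNWRITTEN(x)	((x)->br_state == XFS_EXT_UNWRITTEN)

static int
flag_unwritten(xfs_bmbt_irec_t *imap, int flags)
{
	if (ISUNWRITTEN(imap))	/* as in _xfs_imap_to_bmap above */
		flags |= 0x40;	/* stand-in for PBMF_UNWRITTEN */
	return flags;
}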
@@ -107,8 +107,8 @@ static inline void xfs_buf_undelay(page_buf_t *pb)
#define XFS_BUF_UNWRITE(x) ((x)->pb_flags &= ~PBF_WRITE)
#define XFS_BUF_ISWRITE(x) ((x)->pb_flags & PBF_WRITE)
-#define XFS_BUF_ISUNINITIAL(x) ((x)->pb_flags & PBF_UNINITIAL)
-#define XFS_BUF_UNUNINITIAL(x) ((x)->pb_flags &= ~PBF_UNINITIAL)
+#define XFS_BUF_ISUNINITIAL(x) (0)
+#define XFS_BUF_UNUNINITIAL(x) (0)
#define XFS_BUF_BP_ISMAPPED(bp) 1
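Defining the two PBF_UNINITIAL macros to constant 0 retires the flag without touching call sites: tests become constant-false and the compiler drops the guarded code. A sketch of the idiom:

#define XFS_BUF_ISUNINITIAL(bp)	(0)	/* flag retired, tests compile away */

/* a call site keeps compiling unchanged:
 *	if (XFS_BUF_ISUNINITIAL(bp))
 *		handle_uninitial(bp);	// constant-false, eliminated
 */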
@@ -902,9 +902,7 @@ xfs_buf_item_relse(
XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
(XFS_BUF_IODONE_FUNC(bp) != NULL)) {
-/**
-ASSERT((XFS_BUF_ISUNINITIAL(bp)) == 0);
-***/
XFS_BUF_CLR_IODONE_FUNC(bp);
}