Commit 130b6627 authored by Stephen Lord

Merge ssh://lord@kernel.bkbits.net/xfs-2.5

into jen.americas.sgi.com:/src/lord/bitkeeper/xfs-2.5
parents 4e9779ba 87f3eb7a
@@ -109,14 +109,20 @@ linvfs_mknod(
 	struct inode	*ip;
 	vattr_t		va;
 	vnode_t		*vp = NULL, *dvp = LINVFS_GET_VP(dir);
+	xfs_acl_t	*default_acl = NULL;
 	xattr_exists_t	test_default_acl = _ACL_DEFAULT_EXISTS;
-	int		have_default_acl = 0;
-	int		error = EINVAL;
+	int		error;
 
-	if (test_default_acl)
-		have_default_acl = test_default_acl(dvp);
+	if (test_default_acl && test_default_acl(dvp)) {
+		if (!_ACL_ALLOC(default_acl))
+			return -ENOMEM;
+		if (!_ACL_GET_DEFAULT(dvp, default_acl)) {
+			_ACL_FREE(default_acl);
+			default_acl = NULL;
+		}
+	}
 
-	if (IS_POSIXACL(dir) && !have_default_acl && has_fs_struct(current))
+	if (IS_POSIXACL(dir) && !default_acl && has_fs_struct(current))
 		mode &= ~current->fs->umask;
 
 	memset(&va, 0, sizeof(va));
@@ -140,13 +146,36 @@ linvfs_mknod(
 		break;
 	}
 
+	if (default_acl) {
+		if (!error) {
+			error = _ACL_INHERIT(vp, &va, default_acl);
+			if (!error) {
+				VMODIFY(vp);
+			} else {
+				struct dentry	teardown = {};
+				int		err2;
+				/* Oh, the horror.
+				 * If we can't add the ACL we must back out.
+				 * ENOSPC can hit here, among other things.
+				 */
+				teardown.d_inode = ip = LINVFS_GET_IP(vp);
+				teardown.d_name = dentry->d_name;
+				remove_inode_hash(ip);
+				make_bad_inode(ip);
+				if (S_ISDIR(mode))
+					VOP_RMDIR(dvp, &teardown, NULL, err2);
+				else
+					VOP_REMOVE(dvp, &teardown, NULL, err2);
+				VN_RELE(vp);
+			}
+		}
+		_ACL_FREE(default_acl);
+	}
+
 	if (!error) {
+		ASSERT(vp);
 		ip = LINVFS_GET_IP(vp);
-		if (!ip) {
-			VN_RELE(vp);
-			return -ENOMEM;
-		}
 
 		if (S_ISCHR(mode) || S_ISBLK(mode))
 			ip->i_rdev = to_kdev_t(rdev);
@@ -155,19 +184,6 @@ linvfs_mknod(
 		d_instantiate(dentry, ip);
 		validate_fields(dir);
 	}
 
-	if (!error && have_default_acl) {
-		_ACL_DECL	(pdacl);
-
-		if (!_ACL_ALLOC(pdacl)) {
-			error = -ENOMEM;
-		} else {
-			if (_ACL_GET_DEFAULT(dvp, pdacl))
-				error = _ACL_INHERIT(vp, &va, pdacl);
-			VMODIFY(vp);
-			_ACL_FREE(pdacl);
-		}
-	}
-
 	return -error;
 }
......
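The interesting part of the linvfs_mknod change is the back-out path: the inode is created first, and if the inherited ACL cannot be attached afterwards (ENOSPC, for instance), the half-finished inode is destroyed so the whole operation fails cleanly. A minimal sketch of that pattern in plain C, where create_object(), attach_acl() and destroy_object() are hypothetical stand-ins for the VOP_CREATE, _ACL_INHERIT and VOP_RMDIR/VOP_REMOVE calls above:

struct dir;
struct object;
struct acl;

int  create_object(struct dir *, struct object **);
int  attach_acl(struct object *, struct acl *);
void destroy_object(struct dir *, struct object *);

/* Create first, inherit second; if inheritance fails the new object
 * is torn down so the caller never sees a half-initialized result.
 */
int create_with_acl(struct dir *dp, struct acl *default_acl)
{
	struct object *obj;
	int error;

	error = create_object(dp, &obj);
	if (error)
		return error;

	if (default_acl) {
		error = attach_acl(obj, default_acl);
		if (error) {
			/* ENOSPC can hit here, among other things. */
			destroy_object(dp, obj);
			return error;
		}
	}
	return 0;
}

Fetching the default ACL before the create (first hunk) also means the umask is only applied when no default ACL exists, which matches POSIX ACL semantics.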
@@ -1114,10 +1114,10 @@ _pagebuf_wait_unpin(
 	add_wait_queue(&pb->pb_waiters, &wait);
 	for (;;) {
 		current->state = TASK_UNINTERRUPTIBLE;
-		if (atomic_read(&pb->pb_pin_count) == 0) {
+		if (atomic_read(&pb->pb_pin_count) == 0)
 			break;
-		}
-		pagebuf_run_queues(pb);
+		if (atomic_read(&pb->pb_io_remaining))
+			blk_run_queues();
 		schedule();
 	}
 	remove_wait_queue(&pb->pb_waiters, &wait);
@@ -1224,26 +1224,27 @@ pagebuf_iostart( /* start I/O on a buffer */
 		return status;
 	}
 
-	pb->pb_flags &=
-		~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI|PBF_READ_AHEAD);
-	pb->pb_flags |= flags &
-		(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_SYNC|PBF_READ_AHEAD);
+	pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | \
+			PBF_DELWRI | PBF_READ_AHEAD | PBF_RUN_QUEUES);
+	pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
+			PBF_SYNC | PBF_READ_AHEAD | PBF_RUN_QUEUES);
 
 	BUG_ON(pb->pb_bn == PAGE_BUF_DADDR_NULL);
 
-	/* For writes call internal function which checks for
-	 * filesystem specific callout function and execute it.
+	/* For writes allow an alternate strategy routine to precede
+	 * the actual I/O request (which may not be issued at all in
+	 * a shutdown situation, for example).
 	 */
-	if (flags & PBF_WRITE) {
-		status = __pagebuf_iorequest(pb);
-	} else {
-		status = pagebuf_iorequest(pb);
-	}
+	status = (flags & PBF_WRITE) ?
+		pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);
 
-	/* Wait for I/O if we are not an async request */
-	if ((status == 0) && (flags & PBF_ASYNC) == 0) {
+	/* Wait for I/O if we are not an async request.
+	 * Note: async I/O request completion will release the buffer,
+	 * and that can already be done by this point.  So using the
+	 * buffer pointer from here on, after async I/O, is invalid.
+	 */
+	if (!status && !(flags & PBF_ASYNC))
 		status = pagebuf_iowait(pb);
-	}
 
 	return status;
 }
@@ -1381,8 +1382,6 @@ _pagebuf_ioapply(
 	nr_pages = total_nr_pages;
 	bio = bio_alloc(GFP_NOIO, nr_pages);
-	BUG_ON(bio == NULL);
-
 	bio->bi_bdev = pb->pb_target->pbr_bdev;
 	bio->bi_sector = sector;
 	bio->bi_end_io = bio_end_io_pagebuf;
@@ -1418,6 +1417,12 @@ _pagebuf_ioapply(
 	} else {
 		pagebuf_ioerror(pb, EIO);
 	}
+
+	if (pb->pb_flags & PBF_RUN_QUEUES) {
+		pb->pb_flags &= ~PBF_RUN_QUEUES;
+		if (atomic_read(&pb->pb_io_remaining) > 1)
+			blk_run_queues();
+	}
 }
 
 /*
@@ -1453,6 +1458,8 @@ pagebuf_iorequest( /* start real I/O */
 		_pagebuf_wait_unpin(pb);
 	}
 
+	pagebuf_hold(pb);
+
 	/* Set the count to 1 initially, this will stop an I/O
 	 * completion callout which happens before we have started
 	 * all the I/O from calling pagebuf_iodone too early.
@@ -1460,6 +1467,8 @@ pagebuf_iorequest( /* start real I/O */
 	atomic_set(&pb->pb_io_remaining, 1);
 	_pagebuf_ioapply(pb);
 	_pagebuf_iodone(pb, 0);
+
+	pagebuf_rele(pb);
 
 	return 0;
 }
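The two hunks above pair a pagebuf_hold()/pagebuf_rele() around submission with an io_remaining count that starts at 1; the bias keeps per-bio completions that fire mid-submission from declaring the buffer done too early. A reduced model of the same pattern, with illustrative names (xbuf, issue_all_io, complete_buffer) rather than the real pagebuf API:

#include <stdatomic.h>

struct xbuf {
	atomic_int	io_remaining;	/* outstanding bios, plus the bias */
	atomic_int	refcount;
};

void complete_buffer(struct xbuf *);	/* runs once all I/O has finished */
void issue_all_io(struct xbuf *);	/* queues bios; each calls io_done() */

static void io_done(struct xbuf *b)
{
	/* Only the drop that takes the count to zero completes the buffer. */
	if (atomic_fetch_sub(&b->io_remaining, 1) == 1)
		complete_buffer(b);
}

int iorequest(struct xbuf *b)
{
	atomic_fetch_add(&b->refcount, 1);	/* pagebuf_hold() */
	atomic_store(&b->io_remaining, 1);	/* bias: not done until we say so */
	issue_all_io(b);
	io_done(b);				/* drop the bias */
	atomic_fetch_sub(&b->refcount, 1);	/* pagebuf_rele() */
	return 0;
}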
@@ -1475,7 +1484,8 @@ pagebuf_iowait(
 	page_buf_t		*pb)
 {
 	PB_TRACE(pb, PB_TRACE_REC(iowait), 0);
-	pagebuf_run_queues(pb);
+	if (atomic_read(&pb->pb_io_remaining))
+		blk_run_queues();
 	down(&pb->pb_iodonesema);
 	PB_TRACE(pb, PB_TRACE_REC(iowaited), (int)pb->pb_error);
 	return pb->pb_error;
@@ -1554,6 +1564,7 @@ pagebuf_iomove(
 	}
 }
 
+
 /*
  * Pagebuf delayed write buffer handling
 */
@@ -1683,13 +1694,13 @@ pagebuf_daemon(
 			pb->pb_flags &= ~PBF_DELWRI;
 			pb->pb_flags |= PBF_WRITE;
 
-			__pagebuf_iorequest(pb);
+			pagebuf_iostrategy(pb);
 		}
 
 		if (as_list_len > 0)
 			purge_addresses();
 		if (count)
-			pagebuf_run_queues(NULL);
+			blk_run_queues();
 
 		force_flush = 0;
 	} while (pbd_active == 1);
@@ -1756,9 +1767,9 @@ pagebuf_delwri_flush(
 		pb->pb_flags &= ~PBF_DELWRI;
 		pb->pb_flags |= PBF_WRITE;
 
-		__pagebuf_iorequest(pb);
+		pagebuf_iostrategy(pb);
 
 		if (++flush_cnt > 32) {
-			pagebuf_run_queues(NULL);
+			blk_run_queues();
 			flush_cnt = 0;
 		}
@@ -1767,7 +1778,7 @@ pagebuf_delwri_flush(
 
 	spin_unlock(&pbd_delwrite_lock);
 
-	pagebuf_run_queues(NULL);
+	blk_run_queues();
 
 	if (pinptr)
 		*pinptr = pincount;
......
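Throughout this file the pagebuf_run_queues() helper is replaced by explicit checks plus blk_run_queues(), and writers that used to kick the queues after every submission now set PBF_RUN_QUEUES so _pagebuf_ioapply() unplugs the block queues once, after the last bio has been queued. A standalone sketch of that batching idea, assuming invented names (buffer, apply_io, run_block_queues) in place of the pagebuf ones:

#include <stdatomic.h>

struct buffer {
	unsigned int	flags;
	atomic_int	io_remaining;	/* bios still outstanding */
};

#define BUF_RUN_QUEUES	(1u << 24)	/* caller asked for a queue kick */

void run_block_queues(void);		/* stand-in for blk_run_queues() */

/* Submission side: kick the queues once, after everything for this
 * buffer has been queued, instead of once per submission.
 */
void apply_io(struct buffer *b)
{
	/* ... build and queue the bios for b here ... */

	if (b->flags & BUF_RUN_QUEUES) {
		b->flags &= ~BUF_RUN_QUEUES;
		if (atomic_load(&b->io_remaining) > 1)
			run_block_queues();
	}
}

/* Waiting side: only unplug if this buffer still has I/O in flight. */
void wait_prepare(struct buffer *b)
{
	if (atomic_load(&b->io_remaining))
		run_block_queues();
}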
@@ -128,6 +128,7 @@ typedef enum page_buf_flags_e { /* pb_flags values */
 	PBF_FORCEIO = (1 << 21),
 	PBF_FLUSH = (1 << 22),		/* flush disk write cache	*/
 	PBF_READ_AHEAD = (1 << 23),
+	PBF_RUN_QUEUES = (1 << 24),	/* run block device task queue	*/
 
 } page_buf_flags_t;
@@ -239,10 +240,6 @@ typedef struct page_buf_s {
 } page_buf_t;
 
-/*
- * page_buf module entry points
- */
-
 /* Finding and Reading Buffers */
 extern page_buf_t *pagebuf_find(	/* find buffer for block if	*/
@@ -276,12 +273,11 @@ extern page_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct */
 	size_t len,
 	struct pb_target *);		/* mount point "fake" inode	*/
 
-extern int pagebuf_associate_memory(
+extern int	pagebuf_associate_memory(
 	page_buf_t *,
 	void *,
 	size_t);
 
-
 extern void pagebuf_hold(		/* increment reference count	*/
 	page_buf_t *);			/* buffer to hold		*/
@@ -291,7 +287,7 @@ extern void pagebuf_readahead(		/* read ahead into cache	*/
 	size_t,				/* length of range		*/
 	page_buf_flags_t);		/* additional read flags	*/
 
-/* Writing and Releasing Buffers */
+/* Releasing Buffers */
 extern void pagebuf_free(		/* deallocate a buffer		*/
 	page_buf_t *);			/* buffer to deallocate		*/
@@ -314,11 +310,7 @@ extern int pagebuf_lock(		/* lock buffer			*/
 extern void pagebuf_unlock(		/* unlock buffer		*/
 	page_buf_t *);			/* buffer to unlock		*/
 
-/* Buffer Utility Routines */
-static inline int pagebuf_geterror(page_buf_t *pb)
-{
-	return (pb ? pb->pb_error : ENOMEM);
-}
-
+/* Buffer Read and Write Routines */
+
 extern void pagebuf_iodone(		/* mark buffer I/O complete	*/
 	page_buf_t *,			/* buffer to mark		*/
@@ -339,21 +331,9 @@ extern int pagebuf_iostart(		/* start I/O on a buffer	*/
 extern int pagebuf_iorequest(		/* start real I/O		*/
 	page_buf_t *);			/* buffer to convey to device	*/
 
-/*
- * pagebuf_iorequest is the core I/O request routine.
- * It assumes that the buffer is well-formed and
- * mapped and ready for physical I/O, unlike
- * pagebuf_iostart() and pagebuf_iophysio().  Those
- * routines call the inode pagebuf_ioinitiate routine to start I/O,
- * if it is present, or else call pagebuf_iorequest()
- * directly if the inode pagebuf_ioinitiate routine is not present.
- */
-
 extern int pagebuf_iowait(		/* wait for buffer I/O done	*/
 	page_buf_t *);			/* buffer to wait on		*/
 
-extern caddr_t pagebuf_offset(page_buf_t *, size_t);
-
 extern void pagebuf_iomove(		/* move data in/out of pagebuf	*/
 	page_buf_t *,			/* buffer to manipulate		*/
 	size_t,				/* starting buffer offset	*/
@@ -361,6 +341,22 @@ extern void pagebuf_iomove(		/* move data in/out of pagebuf	*/
 	caddr_t,			/* data pointer			*/
 	page_buf_rw_t);			/* direction			*/
 
+static inline int pagebuf_iostrategy(page_buf_t *pb)
+{
+	return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb);
+}
+
+static inline int pagebuf_geterror(page_buf_t *pb)
+{
+	return pb ? pb->pb_error : ENOMEM;
+}
+
+/* Buffer Utility Routines */
+
+extern caddr_t pagebuf_offset(		/* pointer at offset in buffer	*/
+	page_buf_t *,			/* buffer to offset into	*/
+	size_t);			/* offset			*/
+
 /* Pinning Buffer Storage in Memory */
 extern void pagebuf_pin(		/* pin buffer in memory		*/
@@ -369,33 +365,24 @@ extern void pagebuf_pin(		/* pin buffer in memory		*/
 extern void pagebuf_unpin(		/* unpin buffered data		*/
 	page_buf_t *);			/* buffer to unpin		*/
 
-extern int pagebuf_ispin( page_buf_t *);	/* check if pagebuf is pinned */
-
-/* Reading and writing pages */
+extern int pagebuf_ispin(		/* check if buffer is pinned	*/
+	page_buf_t *);			/* buffer to check		*/
 
-extern void pagebuf_delwri_dequeue(page_buf_t *);
+/* Delayed Write Buffer Routines */
 
 #define PBDF_WAIT    0x01
 #define PBDF_TRYLOCK 0x02
 extern void pagebuf_delwri_flush(
-	struct pb_target *,
+	pb_target_t *,
 	unsigned long,
 	int *);
 
-extern int pagebuf_init(void);
-extern void pagebuf_terminate(void);
-
-static __inline__ int __pagebuf_iorequest(page_buf_t *pb)
-{
-	if (pb->pb_strat)
-		return pb->pb_strat(pb);
-	return pagebuf_iorequest(pb);
-}
+extern void pagebuf_delwri_dequeue(
+	page_buf_t *);
 
 /* Buffer Daemon Setup Routines */
-static __inline__ void pagebuf_run_queues(page_buf_t *pb)
-{
-	if (!pb || atomic_read(&pb->pb_io_remaining))
-		blk_run_queues();
-}
+
+extern int pagebuf_init(void);
+extern void pagebuf_terminate(void);
 
 #endif /* __PAGE_BUF_H__ */
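pagebuf_iostrategy() replaces the old __pagebuf_iorequest() inline: if the buffer carries a filesystem-supplied strategy routine in pb_strat, it is called in place of the generic pagebuf_iorequest(). The shape of that dispatch, reduced to self-contained C with illustrative names:

struct buf;
typedef int (*strategy_fn)(struct buf *);

struct buf {
	strategy_fn	strat;		/* optional filesystem override */
};

int generic_iorequest(struct buf *);	/* stand-in for pagebuf_iorequest() */

/* Call the override when the buffer has one, else the generic path. */
static inline int iostrategy(struct buf *bp)
{
	return bp->strat ? bp->strat(bp) : generic_iorequest(bp);
}

XFS uses the override hook (xfs_bdstrat_cb in xfs_buf.h below) to veto or redirect writes, for example during a filesystem shutdown.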
@@ -113,7 +113,8 @@ pagebuf_lock(
 	ASSERT(pb->pb_flags & _PBF_LOCKABLE);
 
 	PB_TRACE(pb, PB_TRACE_REC(lock), 0);
-	pagebuf_run_queues(pb);
+	if (atomic_read(&pb->pb_io_remaining))
+		blk_run_queues();
 	down(&pb->pb_sema);
 	PB_SET_OWNER(pb);
 	PB_TRACE(pb, PB_TRACE_REC(locked), 0);
......
@@ -1718,6 +1718,7 @@ xfs_attr_node_get(xfs_da_args_t *args)
 	int i;
 
 	state = xfs_da_state_alloc();
+	state->holeok = 1;
 	state->args = args;
 	state->mp = args->dp->i_mount;
 	state->blocksize = state->mp->m_sb.sb_blocksize;
......
@@ -3810,7 +3810,7 @@ xfs_bmap_add_attrfork(
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 	switch (ip->i_d.di_format) {
 	case XFS_DINODE_FMT_DEV:
-		ip->i_d.di_forkoff = roundup(sizeof(dev_t), 8) >> 3;
+		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
 		break;
 	case XFS_DINODE_FMT_UUID:
 		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
......
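The one-word fix above matters for the on-disk format: di_forkoff must be computed from a fixed-size type, not the kernel's dev_t, whose width was changing during 2.5. A small sketch of the arithmetic, assuming a 32-bit on-disk encoding for xfs_dev_t (the typedef below is an illustration, not the real XFS header):

#include <stdint.h>

typedef uint32_t xfs_dev_t;	/* fixed-size on-disk device encoding */

#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

/* di_forkoff is stored in 8-byte units; sizeof(xfs_dev_t) stays 4
 * no matter how wide the kernel's dev_t grows, so the attribute
 * fork lands at the same offset for every kernel.
 */
static inline uint8_t dev_forkoff(void)
{
	return ROUNDUP(sizeof(xfs_dev_t), 8) >> 3;
}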
@@ -215,21 +215,16 @@ extern inline xfs_caddr_t xfs_buf_offset(page_buf_t *bp, size_t offset)
 
 static inline int xfs_bawrite(void *mp, page_buf_t *bp)
 {
-	int ret;
-
 	bp->pb_fspriv3 = mp;
 	bp->pb_strat = xfs_bdstrat_cb;
 	xfs_buf_undelay(bp);
-	if ((ret = pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC)) == 0)
-		pagebuf_run_queues(bp);
-	return ret;
+	return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | PBF_RUN_QUEUES);
 }
 
 static inline void xfs_buf_relse(page_buf_t *bp)
 {
 	if ((bp->pb_flags & _PBF_LOCKABLE) && !bp->pb_relse)
 		pagebuf_unlock(bp);
+
 	pagebuf_rele(bp);
 }
@@ -263,23 +258,19 @@ static inline void xfs_buf_relse(page_buf_t *bp)
 
 static inline int XFS_bwrite(page_buf_t *pb)
 {
-	int sync = (pb->pb_flags & PBF_ASYNC) == 0;
-	int error;
+	int iowait = (pb->pb_flags & PBF_ASYNC) == 0;
+	int error = 0;
 
 	pb->pb_flags |= PBF_SYNC;
+	if (!iowait)
+		pb->pb_flags |= PBF_RUN_QUEUES;
 
 	xfs_buf_undelay(pb);
-	__pagebuf_iorequest(pb);
-	if (sync) {
+	pagebuf_iostrategy(pb);
+	if (iowait) {
 		error = pagebuf_iowait(pb);
 		xfs_buf_relse(pb);
-	} else {
-		pagebuf_run_queues(pb);
-		error = 0;
 	}
 
 	return error;
 }
@@ -320,4 +311,4 @@ static inline int xfs_bdwrite(void *mp, page_buf_t *bp)
 #define xfs_buf_get_noaddr(len, target)	pagebuf_get_no_daddr((len), (target))
 #define xfs_buf_free(bp)		pagebuf_free(bp)
 
-#endif
+#endif	/* __XFS_BUF_H__ */
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
@@ -1141,10 +1141,13 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 	xfs_da_node_entry_t *btree;
 	xfs_dablk_t blkno;
 	int probe, span, max, error, retval;
+	xfs_daddr_t mappedbno;
 	xfs_dahash_t hashval;
 	xfs_da_args_t *args;
 
 	args = state->args;
+	mappedbno = state->holeok ? -2 : -1;
+
 	/*
 	 * Descend thru the B-tree searching each level for the right
 	 * node to use, until the right hashval is found.
@@ -1160,15 +1163,15 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 		 * Read the next node down in the tree.
 		 */
 		blk->blkno = blkno;
-		error = xfs_da_read_buf(state->args->trans, state->args->dp,
-						blkno, -1, &blk->bp,
-						state->args->whichfork);
+		error = xfs_da_read_buf(args->trans, args->dp, blkno,
+					mappedbno, &blk->bp, args->whichfork);
+		if (!error && unlikely(state->holeok && !blk->bp))
+			error = XFS_ERROR(ENOATTR);	/* always attr here */
 		if (error) {
 			blk->blkno = 0;
 			state->path.active--;
 			return(error);
 		}
-		ASSERT(blk->bp != NULL);
 		curr = blk->bp->data;
 		ASSERT(INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC ||
 		       INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) ||
@@ -1187,7 +1190,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 		 */
 		max = INT_GET(node->hdr.count, ARCH_CONVERT);
 		probe = span = max / 2;
-		hashval = state->args->hashval;
+		hashval = args->hashval;
 		for (btree = &node->btree[probe]; span > 4;
 		     btree = &node->btree[probe]) {
 			span /= 2;
@@ -1250,22 +1253,22 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 	for (;;) {
 		if (blk->magic == XFS_DIR_LEAF_MAGIC) {
 			ASSERT(XFS_DIR_IS_V1(state->mp));
-			retval = xfs_dir_leaf_lookup_int(blk->bp, state->args,
+			retval = xfs_dir_leaf_lookup_int(blk->bp, args,
 							 &blk->index);
 		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
 			ASSERT(XFS_DIR_IS_V2(state->mp));
-			retval = xfs_dir2_leafn_lookup_int(blk->bp, state->args,
+			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
 							   &blk->index, state);
 		}
 #ifdef __KERNEL__
 		else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
-			retval = xfs_attr_leaf_lookup_int(blk->bp, state->args);
-			blk->index = state->args->index;
-			state->args->blkno = blk->blkno;
+			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
+			blk->index = args->index;
+			args->blkno = blk->blkno;
 		}
 #endif
 		if (((retval == ENOENT) || (retval == ENOATTR)) &&
-		    (blk->hashval == state->args->hashval)) {
+		    (blk->hashval == args->hashval)) {
 			error = xfs_da_path_shift(state, &state->path, 1, 1,
 							&retval);
 			if (error)
......
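The holeok plumbing changes the meaning of the mappedbno argument to xfs_da_read_buf(): -1 keeps the old behaviour where a missing block is an error inside the read routine, while -2 lets the read succeed with a NULL buffer, which xfs_da_node_lookup_int() then turns into ENOATTR. A sketch of that convention with invented names (da_read_buf, lookup_block; the ENOATTR value below is illustrative):

#define MAPPEDBNO_MUST_EXIST	(-1L)
#define MAPPEDBNO_HOLE_OK	(-2L)
#define ENOATTR_ERR		93	/* illustrative value only */

struct dabuf;
int da_read_buf(long blkno, long mappedbno, struct dabuf **bpp);

int lookup_block(long blkno, int holeok, struct dabuf **bpp)
{
	long mappedbno = holeok ? MAPPEDBNO_HOLE_OK : MAPPEDBNO_MUST_EXIST;
	int error = da_read_buf(blkno, mappedbno, bpp);

	if (!error && holeok && *bpp == NULL)
		error = ENOATTR_ERR;	/* hole: attribute does not exist */
	return error;
}

Only the attribute path sets holeok (xfs_attr_node_get above), so a hole found during an attribute lookup becomes a clean "no such attribute" instead of a shutdown-worthy error.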
@@ -185,14 +185,14 @@ typedef struct xfs_da_args {
 	int		index;		/* index of attr of interest in blk */
 	xfs_dablk_t	rmtblkno;	/* remote attr value starting blkno */
 	int		rmtblkcnt;	/* remote attr value block count */
-	int		rename;		/* T/F: this is an atomic rename op */
 	xfs_dablk_t	blkno2;		/* blkno of 2nd attr leaf of interest */
 	int		index2;		/* index of 2nd attr in blk */
 	xfs_dablk_t	rmtblkno2;	/* remote attr value starting blkno */
 	int		rmtblkcnt2;	/* remote attr value block count */
-	int		justcheck;	/* check for ok with no space */
-	int		addname;	/* T/F: this is an add operation */
-	int		oknoent;	/* T/F: ok to return ENOENT, else die */
+	int		justcheck : 1;	/* T/F: check for ok with no space */
+	int		rename : 1;	/* T/F: this is an atomic rename op */
+	int		addname : 1;	/* T/F: this is an add operation */
+	int		oknoent : 1;	/* T/F: ok to return ENOENT, else die */
 } xfs_da_args_t;
 
 /*
@@ -253,6 +253,7 @@ typedef struct xfs_da_state {
 	xfs_da_state_path_t	path;		/* search/split paths */
 	xfs_da_state_path_t	altpath;	/* alternate path for join */
 	unsigned int		inleaf : 1;	/* insert into 1->lf, 0->splf */
+	unsigned int		holeok : 1;	/* T/F: can deal with a hole */
 	unsigned int		extravalid : 1;	/* T/F: extrablk is in use */
 	unsigned int		extraafter : 1;	/* T/F: extrablk is after new */
 	xfs_da_state_blk_t	extrablk;	/* for double-splits on leafs */
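Packing justcheck, rename, addname and oknoent into 1-bit bitfields shrinks xfs_da_args_t and groups the booleans together, at the usual cost that a bitfield cannot have its address taken; holeok joins the existing bitfield group in xfs_da_state the same way. A compilable illustration of the space saving (exact sizes are implementation-defined):

#include <stdio.h>

/* Before: four full ints for four booleans. */
struct args_old {
	int justcheck;
	int rename;
	int addname;
	int oknoent;
};

/* After: the same flags packed into single bits. */
struct args_new {
	int justcheck : 1;
	int rename : 1;
	int addname : 1;
	int oknoent : 1;
};

int main(void)
{
	/* Typically prints 16 vs 4. */
	printf("%zu vs %zu\n", sizeof(struct args_old), sizeof(struct args_new));
	return 0;
}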
......
@@ -2813,7 +2813,6 @@ xfs_mkdir(
 	xfs_inode_t		*cdp;	/* inode of created dir */
 	vnode_t			*cvp;	/* vnode of created dir */
 	xfs_trans_t		*tp;
-	xfs_dev_t		rdev;
 	xfs_mount_t		*mp;
 	int			cancel_flags;
 	int			error;
@@ -2912,10 +2911,9 @@ xfs_mkdir(
 	/*
 	 * create the directory inode.
 	 */
-	rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0;
 	error = xfs_dir_ialloc(&tp, dp,
 			MAKEIMODE(vap->va_type,vap->va_mode), 2,
-			rdev, credp, prid, resblks > 0,
+			0, credp, prid, resblks > 0,
 			&cdp, NULL);
 	if (error) {
 		if (error == ENOSPC)
@@ -3336,7 +3334,6 @@ xfs_symlink(
 	xfs_inode_t		*ip;
 	int			error;
 	int			pathlen;
-	xfs_dev_t		rdev;
 	xfs_bmap_free_t		free_list;
 	xfs_fsblock_t		first_block;
 	boolean_t		dp_joined_to_trans;
@@ -3479,10 +3476,8 @@ xfs_symlink(
 	/*
 	 * Allocate an inode for the symlink.
 	 */
-	rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0;
-
 	error = xfs_dir_ialloc(&tp, dp, IFLNK | (vap->va_mode&~IFMT),
-			1, rdev, credp, prid, resblks > 0, &ip, NULL);
+			1, 0, credp, prid, resblks > 0, &ip, NULL);
 	if (error) {
 		if (error == ENOSPC)
 			goto error_return;
......