Commit 8916919b authored by Dave Kleikamp, committed by Dave Kleikamp

JFS: cleanup -- Remove excessive typedefs

parent 89b55979
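The diff below applies one mechanical pattern across the JFS headers and sources: each "typedef struct { ... } foo_t;" becomes a plain "struct foo { ... };", and every use of the foo_t name is spelled out as "struct foo", including casts and sizeof expressions. A minimal sketch of the pattern follows, using a hypothetical "example" type standing in for the real JFS structures:

/* Before the cleanup: the structure is anonymous and only the
 * typedef'd name example_t exists.
 */
typedef struct {
	int nfree;		/* 4: free-block count */
	long long mapsize;	/* 8: blocks covered by the map */
} example_t;

/* After the cleanup: the struct tag itself is the name and the
 * typedef is dropped; callers write "struct example" everywhere.
 */
struct example {
	int nfree;		/* 4: free-block count */
	long long mapsize;	/* 8: blocks covered by the map */
};

/* Call sites change in the same mechanical way, e.g. (compare dbMount()
 * and dbFree() in the jfs_dmap.c hunks below):
 *
 *	bmp = kmalloc(sizeof(example_t), GFP_KERNEL);
 * becomes
 *	bmp = kmalloc(sizeof(struct example), GFP_KERNEL);
 * and
 *	dp = (example_t *) mp->data;
 * becomes
 *	dp = (struct example *) mp->data;
 */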
......@@ -25,18 +25,18 @@
*/
/*
* basic btree page - btpage_t
*/
typedef struct {
s64 next; /* 8: right sibling bn */
s64 prev; /* 8: left sibling bn */
* basic btree page - btpage
*
struct btpage {
s64 next; right sibling bn
s64 prev; left sibling bn
u8 flag; /* 1: */
u8 rsrvd[7]; /* 7: type specific */
s64 self; /* 8: self address */
u8 flag;
u8 rsrvd[7]; type specific
s64 self; self address
u8 entry[4064]; /* 4064: */
} btpage_t; /* (4096) */
u8 entry[4064];
}; */
/* btpaget_t flag */
#define BT_TYPE 0x07 /* B+-tree index */
......@@ -68,7 +68,7 @@ typedef struct {
{\
if ((BN) == 0)\
{\
MP = (metapage_t *)&JFS_IP(IP)->bxflag;\
MP = (struct metapage *)&JFS_IP(IP)->bxflag;\
P = (TYPE *)&JFS_IP(IP)->ROOT;\
RC = 0;\
jEVENT(0,("%d BT_GETPAGE returning root\n", __LINE__));\
......@@ -112,18 +112,18 @@ typedef struct {
* top frame record the leaf page/entry selected.
*/
#define MAXTREEHEIGHT 8
typedef struct btframe { /* stack frame */
struct btframe { /* stack frame */
s64 bn; /* 8: */
s16 index; /* 2: */
s16 lastindex; /* 2: */
struct metapage *mp; /* 4: */
} btframe_t; /* (16) */
}; /* (16) */
typedef struct btstack {
btframe_t *top; /* 4: */
int nsplit; /* 4: */
btframe_t stack[MAXTREEHEIGHT];
} btstack_t;
struct btstack {
struct btframe *top;
int nsplit;
struct btframe stack[MAXTREEHEIGHT];
};
#define BT_CLR(btstack)\
(btstack)->top = (btstack)->stack
......
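The btframe/btstack pair above records the path of a top-down B+-tree descent: BT_CLR() resets top to the base of the stack[MAXTREEHEIGHT] array, and each level visited saves the page's block number and the entry chosen within it. A hedged sketch of that idiom, assuming the struct btframe/struct btstack declarations above and the kernel's s64/NULL definitions; the helpers are hypothetical stand-ins for the real BT_* macros in jfs_btree.h:

/* Hypothetical helpers illustrating how the traverse stack is used;
 * the actual code does this through the BT_* macros.
 */
static inline void bt_push(struct btstack *btsp, s64 bn, int index)
{
	btsp->top->bn = bn;		/* page visited at this level */
	btsp->top->index = index;	/* entry selected within that page */
	btsp->top++;			/* next frame for the next level down */
}

static inline struct btframe *bt_pop(struct btstack *btsp)
{
	/* returns NULL once the root frame has been consumed */
	return (btsp->top == btsp->stack) ? NULL : --btsp->top;
}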
......@@ -21,7 +21,7 @@
/*
* defragfs parameter list
*/
typedef struct {
struct defragfs {
uint flag; /* 4: */
u8 dev; /* 1: */
u8 pad[3]; /* 3: */
......@@ -33,7 +33,7 @@ typedef struct {
s64 old_xaddr; /* 8: */
s64 new_xaddr; /* 8: */
s32 xlen; /* 4: */
} defragfs_t; /* (52) */
};
/* plist flag */
#define DEFRAGFS_SYNC 0x80000000
......
......@@ -28,7 +28,7 @@
/*
* on-disk inode (dinode_t): 512 bytes
* on-disk inode : 512 bytes
*
* note: align 64-bit fields on 8-byte boundary.
*/
......@@ -89,7 +89,7 @@ struct dinode {
* If the index is small enough, the table is inline,
* otherwise, an x-tree root overlays this table
*/
dir_table_slot_t _table[12]; /* 96: inline */
struct dir_table_slot _table[12]; /* 96: inline */
dtroot_t _dtroot; /* 288: dtree root */
} _dir; /* (384) */
......@@ -131,9 +131,6 @@ struct dinode {
} u;
};
typedef struct dinode dinode_t;
/* extended mode bits (on-disk inode di_mode) */
#define IFJOURNAL 0x00010000 /* journalled file */
#define ISPARSE 0x00020000 /* sparse file enabled */
......
......@@ -100,30 +100,37 @@ static void DBFreeCK(uint *, s64, s64, s64);
/*
* forward references
*/
static void dbAllocBits(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks);
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
static void dbBackSplit(dmtree_t * tp, int leafno);
static void dbJoin(dmtree_t * tp, int leafno, int newval);
static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
static int dbAdjCtl(bmap_t * bmp, s64 blkno, int newval, int alloc,
static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
int level);
static int dbAllocAny(bmap_t * bmp, s64 nblocks, int l2nb, s64 * results);
static int dbAllocNext(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks);
static int dbAllocNear(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks,
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks,
int l2nb, s64 * results);
static int dbAllocDmap(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks);
static int dbAllocDmapLev(bmap_t * bmp, dmap_t * dp, int nblocks, int l2nb,
static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
int l2nb,
s64 * results);
static int dbAllocAG(bmap_t * bmp, int agno, s64 nblocks, int l2nb,
static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
s64 * results);
static int dbAllocCtl(bmap_t * bmp, s64 nblocks, int l2nb, s64 blkno,
static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
s64 * results);
int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
static int dbFindBits(u32 word, int l2nb);
static int dbFindCtl(bmap_t * bmp, int l2nb, int level, s64 * blkno);
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
static void dbFreeBits(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks);
static int dbFreeDmap(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks);
static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbMaxBud(u8 * cp);
s64 dbMapFileSizeToMapSize(struct inode *ipbmap);
int blkstol2(s64 nb);
......@@ -132,12 +139,12 @@ void fsDirty(void);
int cntlz(u32 value);
int cnttz(u32 word);
static int dbAllocDmapBU(bmap_t * bmp, dmap_t * dp, s64 blkno,
static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbInitDmap(dmap_t * dp, s64 blkno, int nblocks);
static int dbInitDmapTree(dmap_t * dp);
static int dbInitTree(dmaptree_t * dtp);
static int dbInitDmapCtl(dmapctl_t * dcp, int level, int i);
static int dbInitDmap(struct dmap * dp, s64 blkno, int nblocks);
static int dbInitDmapTree(struct dmap * dp);
static int dbInitTree(struct dmaptree * dtp);
static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i);
static int dbGetL2AGSize(s64 nblocks);
/*
......@@ -186,16 +193,16 @@ signed char budtab[256] = {
*/
int dbMount(struct inode *ipbmap)
{
bmap_t *bmp;
dbmap_t *dbmp_le;
metapage_t *mp;
struct bmap *bmp;
struct dbmap *dbmp_le;
struct metapage *mp;
int i;
/*
* allocate/initialize the in-memory bmap descriptor
*/
/* allocate memory for the in-memory bmap descriptor */
bmp = kmalloc(sizeof(bmap_t), GFP_KERNEL);
bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL);
if (bmp == NULL)
return (ENOMEM);
......@@ -209,7 +216,7 @@ int dbMount(struct inode *ipbmap)
}
/* copy the on-disk bmap descriptor to its in-memory version. */
dbmp_le = (dbmap_t *) mp->data;
dbmp_le = (struct dbmap *) mp->data;
bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
......@@ -263,7 +270,7 @@ int dbMount(struct inode *ipbmap)
*/
int dbUnmount(struct inode *ipbmap, int mounterror)
{
bmap_t *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
if (!(mounterror || isReadOnly(ipbmap)))
dbSync(ipbmap);
......@@ -284,9 +291,9 @@ int dbUnmount(struct inode *ipbmap, int mounterror)
*/
int dbSync(struct inode *ipbmap)
{
dbmap_t *dbmp_le;
bmap_t *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
metapage_t *mp;
struct dbmap *dbmp_le;
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
struct metapage *mp;
int i;
/*
......@@ -301,7 +308,7 @@ int dbSync(struct inode *ipbmap)
return (EIO);
}
/* copy the in-memory version of the bmap to the on-disk version */
dbmp_le = (dbmap_t *) mp->data;
dbmp_le = (struct dbmap *) mp->data;
dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize);
dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree);
dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage);
......@@ -355,12 +362,12 @@ int dbSync(struct inode *ipbmap)
*/
int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
{
metapage_t *mp;
dmap_t *dp;
struct metapage *mp;
struct dmap *dp;
int nb, rc;
s64 lblkno, rem;
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
bmap_t *bmp = JFS_SBI(ip->i_sb)->bmap;
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
IREAD_LOCK(ipbmap);
......@@ -384,7 +391,7 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
IREAD_UNLOCK(ipbmap);
return (EIO);
}
dp = (dmap_t *) mp->data;
dp = (struct dmap *) mp->data;
/* determine the number of blocks to be freed from
* this dmap.
......@@ -435,16 +442,16 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
*/
int
dbUpdatePMap(struct inode *ipbmap,
int free, s64 blkno, s64 nblocks, tblock_t * tblk)
int free, s64 blkno, s64 nblocks, struct tblock * tblk)
{
int nblks, dbitno, wbitno, rbits;
int word, nbits, nwords;
bmap_t *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
s64 lblkno, rem, lastlblkno;
u32 mask;
dmap_t *dp;
metapage_t *mp;
log_t *log;
struct dmap *dp;
struct metapage *mp;
struct jfs_log *log;
int lsn, difft, diffp;
/* the blocks better be within the mapsize. */
......@@ -452,7 +459,7 @@ dbUpdatePMap(struct inode *ipbmap,
/* compute delta of transaction lsn from log syncpt */
lsn = tblk->lsn;
log = (log_t *) JFS_SBI(tblk->sb)->log;
log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
logdiff(difft, lsn, log);
/*
......@@ -473,7 +480,7 @@ dbUpdatePMap(struct inode *ipbmap,
if (mp == NULL)
return (EIO);
}
dp = (dmap_t *) mp->data;
dp = (struct dmap *) mp->data;
/* determine the bit number and word within the dmap of
* the starting block. also determine how many blocks
......@@ -619,7 +626,7 @@ int dbNextAG(struct inode *ipbmap)
{
s64 avgfree, inactfree, actfree, rem;
int actags, inactags, l2agsize;
bmap_t *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
BMAP_LOCK(bmp);
......@@ -737,10 +744,10 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
{
int rc, agno;
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
bmap_t *bmp;
metapage_t *mp;
struct bmap *bmp;
struct metapage *mp;
s64 lblkno, blkno;
dmap_t *dp;
struct dmap *dp;
int l2nb;
s64 mapSize;
......@@ -832,7 +839,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
if (mp == NULL)
goto read_unlock;
dp = (dmap_t *) mp->data;
dp = (struct dmap *) mp->data;
/* first, try to satisfy the allocation request with the
* blocks beginning at the hint.
......@@ -933,10 +940,10 @@ int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
{
int rc;
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
bmap_t *bmp = JFS_SBI(ip->i_sb)->bmap;
dmap_t *dp;
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
struct dmap *dp;
s64 lblkno;
metapage_t *mp;
struct metapage *mp;
IREAD_LOCK(ipbmap);
......@@ -965,7 +972,7 @@ int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
IREAD_UNLOCK(ipbmap);
return (EIO);
}
dp = (dmap_t *) mp->data;
dp = (struct dmap *) mp->data;
/* try to allocate the requested extent */
rc = dbAllocNext(bmp, dp, blkno, nblocks);
......@@ -1068,11 +1075,11 @@ int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
s64 lblkno, lastblkno, extblkno;
uint rel_block;
metapage_t *mp;
dmap_t *dp;
struct metapage *mp;
struct dmap *dp;
int rc;
struct inode *ipbmap = sbi->ipbmap;
bmap_t *bmp;
struct bmap *bmp;
/*
* We don't want a non-aligned extent to cross a page boundary
......@@ -1120,7 +1127,7 @@ int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
}
DBALLOCCK(bmp->db_DBmap, bmp->db_mapsize, blkno, nblocks);
dp = (dmap_t *) mp->data;
dp = (struct dmap *) mp->data;
/* try to allocate the blocks immediately following the
* current allocation.
......@@ -1163,7 +1170,8 @@ int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
*
* serialization: IREAD_LOCK(ipbmap) held on entry/exit;
*/
static int dbAllocNext(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks)
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int dbitno, word, rembits, nb, nwords, wbitno, nw;
int l2size;
......@@ -1289,8 +1297,8 @@ static int dbAllocNext(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks)
* serialization: IREAD_LOCK(ipbmap) held on entry/exit;
*/
static int
dbAllocNear(bmap_t * bmp,
dmap_t * dp, s64 blkno, int nblocks, int l2nb, s64 * results)
dbAllocNear(struct bmap * bmp,
struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results)
{
int word, lword, rc;
s8 *leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
......@@ -1391,10 +1399,10 @@ dbAllocNear(bmap_t * bmp,
* note: IWRITE_LOCK(ipmap) held on entry/exit;
*/
static int
dbAllocAG(bmap_t * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
{
metapage_t *mp;
dmapctl_t *dcp;
struct metapage *mp;
struct dmapctl *dcp;
int rc, ti, i, k, m, n, agperlev;
s64 blkno, lblkno;
int budmin;
......@@ -1447,7 +1455,7 @@ dbAllocAG(bmap_t * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return (EIO);
dcp = (dmapctl_t *) mp->data;
dcp = (struct dmapctl *) mp->data;
budmin = dcp->budmin;
/* search the subtree(s) of the dmap control page that describes
......@@ -1566,7 +1574,7 @@ dbAllocAG(bmap_t * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
*
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbAllocAny(bmap_t * bmp, s64 nblocks, int l2nb, s64 * results)
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
{
int rc;
s64 blkno = 0;
......@@ -1616,13 +1624,13 @@ static int dbAllocAny(bmap_t * bmp, s64 nblocks, int l2nb, s64 * results)
*
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbFindCtl(bmap_t * bmp, int l2nb, int level, s64 * blkno)
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
{
int rc, leafidx, lev;
s64 b, lblkno;
dmapctl_t *dcp;
struct dmapctl *dcp;
int budmin;
metapage_t *mp;
struct metapage *mp;
/* starting at the specified dmap control page level and block
* number, search down the dmap control levels for the starting
......@@ -1637,7 +1645,7 @@ static int dbFindCtl(bmap_t * bmp, int l2nb, int level, s64 * blkno)
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return (EIO);
dcp = (dmapctl_t *) mp->data;
dcp = (struct dmapctl *) mp->data;
budmin = dcp->budmin;
/* search the tree within the dmap control page for
......@@ -1724,12 +1732,12 @@ static int dbFindCtl(bmap_t * bmp, int l2nb, int level, s64 * blkno)
* serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int
dbAllocCtl(bmap_t * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
{
int rc, nb;
s64 b, lblkno, n;
metapage_t *mp;
dmap_t *dp;
struct metapage *mp;
struct dmap *dp;
/* check if the allocation request is confined to a single dmap.
*/
......@@ -1740,7 +1748,7 @@ dbAllocCtl(bmap_t * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return (EIO);
dp = (dmap_t *) mp->data;
dp = (struct dmap *) mp->data;
/* try to allocate the blocks.
*/
......@@ -1769,7 +1777,7 @@ dbAllocCtl(bmap_t * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
rc = EIO;
goto backout;
}
dp = (dmap_t *) mp->data;
dp = (struct dmap *) mp->data;
/* the dmap better be all free.
*/
......@@ -1821,7 +1829,7 @@ dbAllocCtl(bmap_t * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
("dbAllocCtl: I/O Error: Block Leakage.\n"));
continue;
}
dp = (dmap_t *) mp->data;
dp = (struct dmap *) mp->data;
/* free the blocks is this dmap.
*/
......@@ -1871,8 +1879,8 @@ dbAllocCtl(bmap_t * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
* IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit;
*/
static int
dbAllocDmapLev(bmap_t * bmp,
dmap_t * dp, int nblocks, int l2nb, s64 * results)
dbAllocDmapLev(struct bmap * bmp,
struct dmap * dp, int nblocks, int l2nb, s64 * results)
{
s64 blkno;
int leafidx, rc;
......@@ -1934,7 +1942,8 @@ dbAllocDmapLev(bmap_t * bmp,
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbAllocDmap(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks)
static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
s8 oldroot;
int rc;
......@@ -1988,7 +1997,8 @@ static int dbAllocDmap(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks)
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int dbFreeDmap(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks)
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
s8 oldroot;
int rc, word;
......@@ -2049,7 +2059,8 @@ static int dbFreeDmap(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks)
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static void dbAllocBits(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks)
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
dmtree_t *tp = (dmtree_t *) & dp->tree;
......@@ -2190,7 +2201,8 @@ static void dbAllocBits(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks)
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static void dbFreeBits(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks)
static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
dmtree_t *tp = (dmtree_t *) & dp->tree;
......@@ -2368,13 +2380,13 @@ static void dbFreeBits(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks)
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
static int
dbAdjCtl(bmap_t * bmp, s64 blkno, int newval, int alloc, int level)
dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
{
metapage_t *mp;
struct metapage *mp;
s8 oldroot;
int oldval;
s64 lblkno;
dmapctl_t *dcp;
struct dmapctl *dcp;
int rc, leafno, ti;
/* get the buffer for the dmap control page for the specified
......@@ -2384,7 +2396,7 @@ dbAdjCtl(bmap_t * bmp, s64 blkno, int newval, int alloc, int level)
mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
if (mp == NULL)
return (EIO);
dcp = (dmapctl_t *) mp->data;
dcp = (struct dmapctl *) mp->data;
/* determine the leaf number corresponding to the block and
* the index within the dmap control tree.
......@@ -3064,12 +3076,12 @@ void fsDirty()
*/
int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
{
metapage_t *mp;
dmap_t *dp;
struct metapage *mp;
struct dmap *dp;
int nb, rc;
s64 lblkno, rem;
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
bmap_t *bmp = JFS_SBI(ip->i_sb)->bmap;
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
IREAD_LOCK(ipbmap);
......@@ -3093,7 +3105,7 @@ int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
IREAD_UNLOCK(ipbmap);
return (EIO);
}
dp = (dmap_t *) mp->data;
dp = (struct dmap *) mp->data;
/* determine the number of blocks to be allocated from
* this dmap.
......@@ -3121,12 +3133,13 @@ int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
}
static int dbAllocDmapBU(bmap_t * bmp, dmap_t * dp, s64 blkno, int nblocks)
static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int rc;
int dbitno, word, rembits, nb, nwords, wbitno, agno;
s8 oldroot, *leaf;
dmaptree_t *tp = (dmaptree_t *) & dp->tree;
struct dmaptree *tp = (struct dmaptree *) & dp->tree;
/* save the current value of the root (i.e. maximum free string)
* of the dmap tree.
......@@ -3251,11 +3264,11 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
int i, i0 = TRUE, j, j0 = TRUE, k, n;
s64 newsize;
s64 p;
metapage_t *mp, *l2mp, *l1mp, *l0mp;
dmapctl_t *l2dcp, *l1dcp, *l0dcp;
dmap_t *dp;
struct metapage *mp, *l2mp, *l1mp, *l0mp;
struct dmapctl *l2dcp, *l1dcp, *l0dcp;
struct dmap *dp;
s8 *l0leaf, *l1leaf, *l2leaf;
bmap_t *bmp = sbi->bmap;
struct bmap *bmp = sbi->bmap;
int agno, l2agsize, oldl2agsize;
s64 ag_rem;
......@@ -3331,7 +3344,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
p = BMAPBLKNO + nbperpage; /* L2 page */
l2mp = read_metapage(ipbmap, p, PSIZE, 0);
assert(l2mp);
l2dcp = (dmapctl_t *) l2mp->data;
l2dcp = (struct dmapctl *) l2mp->data;
/* compute start L1 */
k = blkno >> L2MAXL1SIZE;
......@@ -3348,7 +3361,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
l1mp = read_metapage(ipbmap, p, PSIZE, 0);
if (l1mp == NULL)
goto errout;
l1dcp = (dmapctl_t *) l1mp->data;
l1dcp = (struct dmapctl *) l1mp->data;
/* compute start L0 */
j = (blkno & (MAXL1SIZE - 1)) >> L2MAXL0SIZE;
......@@ -3361,7 +3374,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
if (l1mp == NULL)
goto errout;
l1dcp = (dmapctl_t *) l1mp->data;
l1dcp = (struct dmapctl *) l1mp->data;
/* compute start L0 */
j = 0;
......@@ -3380,7 +3393,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
l0mp = read_metapage(ipbmap, p, PSIZE, 0);
if (l0mp == NULL)
goto errout;
l0dcp = (dmapctl_t *) l0mp->data;
l0dcp = (struct dmapctl *) l0mp->data;
/* compute start dmap */
i = (blkno & (MAXL0SIZE - 1)) >>
......@@ -3395,7 +3408,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
if (l0mp == NULL)
goto errout;
l0dcp = (dmapctl_t *) l0mp->data;
l0dcp = (struct dmapctl *) l0mp->data;
/* compute start dmap */
i = 0;
......@@ -3428,7 +3441,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
n = min(nblocks, (s64)BPERDMAP);
}
dp = (dmap_t *) mp->data;
dp = (struct dmap *) mp->data;
*l0leaf = dbInitDmap(dp, blkno, n);
bmp->db_nfree += n;
......@@ -3510,7 +3523,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
*/
void dbFinalizeBmap(struct inode *ipbmap)
{
bmap_t *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
int actags, inactags, l2nl;
s64 ag_rem, actfree, inactfree, avgfree;
int i, n;
......@@ -3600,7 +3613,7 @@ printk("bmap: agpref:%d aglevel:%d agheigth:%d agwidth:%d\n",
*
* RETURNS: NONE
*/
static int dbInitDmap(dmap_t * dp, s64 Blkno, int nblocks)
static int dbInitDmap(struct dmap * dp, s64 Blkno, int nblocks)
{
int blkno, w, b, r, nw, nb, i;
/*
......@@ -3713,9 +3726,9 @@ printk("sbh_dmap: in dbInitDmap, b:%ld w:%ld mask: %lx\n", b, w, (ONES>>b));
*
* RETURNS: max free string at the root of the tree
*/
static int dbInitDmapTree(dmap_t * dp)
static int dbInitDmapTree(struct dmap * dp)
{
dmaptree_t *tp;
struct dmaptree *tp;
s8 *cp;
int i;
......@@ -3759,7 +3772,7 @@ static int dbInitDmapTree(dmap_t * dp)
*
* RETURNS: max free string at the root of the tree
*/
static int dbInitTree(dmaptree_t * dtp)
static int dbInitTree(struct dmaptree * dtp)
{
int l2max, l2free, bsize, nextb, i;
int child, parent, nparent;
......@@ -3834,7 +3847,7 @@ static int dbInitTree(dmaptree_t * dtp)
*
* function: initialize dmapctl page
*/
static int dbInitDmapCtl(dmapctl_t * dcp, int level, int i)
static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i)
{ /* start leaf index not covered by range */
s8 *cp;
......@@ -3854,7 +3867,7 @@ static int dbInitDmapCtl(dmapctl_t * dcp, int level, int i)
*cp++ = NOFREE;
/* build the dmap's binary buddy summary tree */
return (dbInitTree((dmaptree_t *) dcp));
return (dbInitTree((struct dmaptree *) dcp));
}
......@@ -3969,8 +3982,8 @@ static void DBinitmap(s64 size, struct inode *ipbmap, u32 ** results)
u32 *dbmap, *d;
int n;
s64 lblkno, cur_block;
dmap_t *dp;
metapage_t *mp;
struct dmap *dp;
struct metapage *mp;
npages = size / 32768;
npages += (size % 32768) ? 1 : 0;
......@@ -3993,7 +4006,7 @@ static void DBinitmap(s64 size, struct inode *ipbmap, u32 ** results)
if (mp == NULL) {
assert(0);
}
dp = (dmap_t *) mp->data;
dp = (struct dmap *) mp->data;
for (n = 0; n < LPERDMAP; n++)
d[n] = le32_to_cpu(dp->wmap[n]);
......@@ -4122,7 +4135,7 @@ static void DBFreeCK(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
/*
* dbPrtMap()
*/
static void dbPrtMap(bmap_t * bmp)
static void dbPrtMap(struct bmap * bmp)
{
printk(" mapsize: %d%d\n", bmp->db_mapsize);
printk(" nfree: %d%d\n", bmp->db_nfree);
......@@ -4143,7 +4156,7 @@ static void dbPrtMap(bmap_t * bmp)
/*
* dbPrtCtl()
*/
static void dbPrtCtl(dmapctl_t * dcp)
static void dbPrtCtl(struct dmapctl * dcp)
{
int i, j, n;
......
......@@ -142,9 +142,9 @@ static __inline signed char TREEMAX(signed char *cp)
/*
* dmap summary tree
*
* dmaptree_t must be consistent with dmapctl_t.
* dmaptree must be consistent with dmapctl.
*/
typedef struct {
struct dmaptree {
s32 nleafs; /* 4: number of tree leafs */
s32 l2nleafs; /* 4: l2 number of tree leafs */
s32 leafidx; /* 4: index of first tree leaf */
......@@ -152,27 +152,27 @@ typedef struct {
s8 budmin; /* 1: min l2 tree leaf value to combine */
s8 stree[TREESIZE]; /* TREESIZE: tree */
u8 pad[2]; /* 2: pad to word boundary */
} dmaptree_t; /* - 360 - */
}; /* - 360 - */
/*
* dmap page per 8K blocks bitmap
*/
typedef struct {
struct dmap {
s32 nblocks; /* 4: num blks covered by this dmap */
s32 nfree; /* 4: num of free blks in this dmap */
s64 start; /* 8: starting blkno for this dmap */
dmaptree_t tree; /* 360: dmap tree */
struct dmaptree tree; /* 360: dmap tree */
u8 pad[1672]; /* 1672: pad to 2048 bytes */
u32 wmap[LPERDMAP]; /* 1024: bits of the working map */
u32 pmap[LPERDMAP]; /* 1024: bits of the persistent map */
} dmap_t; /* - 4096 - */
}; /* - 4096 - */
/*
* disk map control page per level.
*
* dmapctl_t must be consistent with dmaptree_t.
* dmapctl must be consistent with dmaptree.
*/
typedef struct {
struct dmapctl {
s32 nleafs; /* 4: number of tree leafs */
s32 l2nleafs; /* 4: l2 number of tree leafs */
s32 leafidx; /* 4: index of the first tree leaf */
......@@ -180,17 +180,17 @@ typedef struct {
s8 budmin; /* 1: minimum l2 tree leaf value */
s8 stree[CTLTREESIZE]; /* CTLTREESIZE: dmapctl tree */
u8 pad[2714]; /* 2714: pad to 4096 */
} dmapctl_t; /* - 4096 - */
}; /* - 4096 - */
/*
* common definition for dmaptree_t within dmap and dmapctl
* common definition for dmaptree within dmap and dmapctl
*/
typedef union {
dmaptree_t t1;
dmapctl_t t2;
typedef union dmtree {
struct dmaptree t1;
struct dmapctl t2;
} dmtree_t;
/* macros for accessing fields within dmtree_t */
/* macros for accessing fields within dmtree */
#define dmt_nleafs t1.nleafs
#define dmt_l2nleafs t1.l2nleafs
#define dmt_leafidx t1.leafidx
......@@ -201,7 +201,7 @@ typedef union {
/*
* on-disk aggregate disk allocation map descriptor.
*/
typedef struct {
struct dbmap {
s64 dn_mapsize; /* 8: number of blocks in aggregate */
s64 dn_nfree; /* 8: num free blks in aggregate map */
s32 dn_l2nbperpage; /* 4: number of blks per page */
......@@ -218,17 +218,17 @@ typedef struct {
s64 dn_agsize; /* 8: num of blks per alloc group */
s8 dn_maxfreebud; /* 1: max free buddy system */
u8 pad[3007]; /* 3007: pad to 4096 */
} dbmap_t; /* - 4096 - */
}; /* - 4096 - */
/*
* in-memory aggregate disk allocation map descriptor.
*/
typedef struct bmap {
dbmap_t db_bmap; /* on-disk aggregate map descriptor */
struct bmap {
struct dbmap db_bmap; /* on-disk aggregate map descriptor */
struct inode *db_ipbmap; /* ptr to aggregate map incore inode */
struct semaphore db_bmaplock; /* aggregate map lock */
u32 *db_DBmap;
} bmap_t;
};
/* macros for accessing fields within in-memory aggregate map descriptor */
#define db_mapsize db_bmap.dn_mapsize
......@@ -279,7 +279,7 @@ extern int dbUnmount(struct inode *ipbmap, int mounterror);
extern int dbFree(struct inode *ipbmap, s64 blkno, s64 nblocks);
extern int dbUpdatePMap(struct inode *ipbmap,
int free, s64 blkno, s64 nblocks, tblock_t * tblk);
int free, s64 blkno, s64 nblocks, struct tblock * tblk);
extern int dbNextAG(struct inode *ipbmap);
......
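Note that dmtree_t is the one typedef deliberately kept in jfs_dmap.h above: it is a genuine union of struct dmaptree and struct dmapctl, and routines such as dbSplit(), dbJoin(), dbAdjTree() and dbFindLeaf() take a dmtree_t * so the same code can walk either page type through the shared dmt_ field macros. A hedged sketch of that access pattern, assuming the declarations from jfs_dmap.h above and the kernel's le32_to_cpu(); the helper itself is hypothetical:

/* Hypothetical helper: scans the leaves of either a dmap summary tree
 * or a dmapctl tree for one describing a free run of at least 2^l2nb
 * blocks; returns its leaf index, or -1 if none is large enough.
 */
static int first_usable_leaf(dmtree_t *tp, int l2nb)
{
	/* dmt_leafidx and dmt_nleafs expand to the t1 (struct dmaptree)
	 * member, so the same code reads a dmap tree or a dmapctl tree.
	 */
	s8 *leaf = tp->t1.stree + le32_to_cpu(tp->dmt_leafidx);
	int i, nleafs = le32_to_cpu(tp->dmt_nleafs);

	for (i = 0; i < nleafs; i++)
		if (leaf[i] >= l2nb)
			return i;
	return -1;
}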
......@@ -110,14 +110,14 @@
#include "jfs_debug.h"
/* dtree split parameter */
typedef struct {
metapage_t *mp;
struct dtsplit {
struct metapage *mp;
s16 index;
s16 nslot;
component_t *key;
struct component_name *key;
ddata_t *data;
pxdlist_t *pxdlist;
} dtsplit_t;
struct pxdlist *pxdlist;
};
#define DT_PAGE(IP, MP) BT_PAGE(IP, MP, dtpage_t, i_dtroot)
......@@ -149,51 +149,53 @@ typedef struct {
* forward references
*/
static int dtSplitUp(tid_t tid, struct inode *ip,
dtsplit_t * split, btstack_t * btstack);
struct dtsplit * split, struct btstack * btstack);
static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
metapage_t ** rmpp, dtpage_t ** rpp, pxd_t * rxdp);
static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rxdp);
static int dtExtendPage(tid_t tid, struct inode *ip,
dtsplit_t * split, btstack_t * btstack);
struct dtsplit * split, struct btstack * btstack);
static int dtSplitRoot(tid_t tid, struct inode *ip,
dtsplit_t * split, metapage_t ** rmpp);
struct dtsplit * split, struct metapage ** rmpp);
static int dtDeleteUp(tid_t tid, struct inode *ip, metapage_t * fmp,
dtpage_t * fp, btstack_t * btstack);
static int dtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
dtpage_t * fp, struct btstack * btstack);
static int dtSearchNode(struct inode *ip,
s64 lmxaddr, pxd_t * kpxd, btstack_t * btstack);
s64 lmxaddr, pxd_t * kpxd, struct btstack * btstack);
static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p);
static int dtReadFirst(struct inode *ip, btstack_t * btstack);
static int dtReadFirst(struct inode *ip, struct btstack * btstack);
static int dtReadNext(struct inode *ip,
loff_t * offset, btstack_t * btstack);
loff_t * offset, struct btstack * btstack);
static int dtCompare(component_t * key, dtpage_t * p, int si);
static int dtCompare(struct component_name * key, dtpage_t * p, int si);
static int ciCompare(component_t * key, dtpage_t * p, int si, int flag);
static int ciCompare(struct component_name * key, dtpage_t * p, int si,
int flag);
static void dtGetKey(dtpage_t * p, int i, component_t * key, int flag);
static void dtGetKey(dtpage_t * p, int i, struct component_name * key,
int flag);
static void ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
int ri, component_t * key, int flag);
int ri, struct component_name * key, int flag);
static void dtInsertEntry(dtpage_t * p, int index, component_t * key,
ddata_t * data, dtlock_t ** dtlock);
static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
ddata_t * data, struct dt_lock **);
static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
dtlock_t ** sdtlock, dtlock_t ** ddtlock,
struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
int do_index);
static void dtDeleteEntry(dtpage_t * p, int fi, dtlock_t ** dtlock);
static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock);
static void dtTruncateEntry(dtpage_t * p, int ti, dtlock_t ** dtlock);
static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock);
static void dtLinelockFreelist(dtpage_t * p, int m, dtlock_t ** dtlock);
static void dtLinelockFreelist(dtpage_t * p, int m, struct dt_lock ** dtlock);
#define ciToUpper(c) UniStrupr((c)->name)
......@@ -205,14 +207,14 @@ static void dtLinelockFreelist(dtpage_t * p, int m, dtlock_t ** dtlock);
*
* mp must be released by caller.
*/
static dir_table_slot_t *find_index(struct inode *ip, u32 index,
metapage_t ** mp)
static struct dir_table_slot *find_index(struct inode *ip, u32 index,
struct metapage ** mp)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
s64 blkno;
s64 offset;
int page_offset;
dir_table_slot_t *slot;
struct dir_table_slot *slot;
static int maxWarnings = 10;
if (index < 2) {
......@@ -236,7 +238,7 @@ static dir_table_slot_t *find_index(struct inode *ip, u32 index,
*mp = 0;
slot = &jfs_ip->i_dirtable[index - 2];
} else {
offset = (index - 2) * sizeof(dir_table_slot_t);
offset = (index - 2) * sizeof(struct dir_table_slot);
page_offset = offset & (PSIZE - 1);
blkno = ((offset + 1) >> L2PSIZE) <<
JFS_SBI(ip->i_sb)->l2nbperpage;
......@@ -254,21 +256,21 @@ static dir_table_slot_t *find_index(struct inode *ip, u32 index,
}
slot =
(dir_table_slot_t *) ((char *) (*mp)->data +
page_offset);
(struct dir_table_slot *) ((char *) (*mp)->data +
page_offset);
}
return slot;
}
static inline void lock_index(tid_t tid, struct inode *ip, metapage_t * mp,
static inline void lock_index(tid_t tid, struct inode *ip, struct metapage * mp,
u32 index)
{
tlock_t *tlck;
linelock_t *llck;
lv_t *lv;
struct tlock *tlck;
struct linelock *llck;
struct lv *lv;
tlck = txLock(tid, ip, mp, tlckDATA);
llck = (linelock_t *) tlck->lock;
llck = (struct linelock *) tlck->lock;
if (llck->index >= llck->maxcnt)
llck = txLinelock(llck);
......@@ -296,15 +298,15 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
u64 blkno;
dir_table_slot_t *dirtab_slot;
struct dir_table_slot *dirtab_slot;
u32 index;
linelock_t *llck;
lv_t *lv;
metapage_t *mp;
struct linelock *llck;
struct lv *lv;
struct metapage *mp;
s64 offset;
uint page_offset;
int rc;
tlock_t *tlck;
struct tlock *tlck;
s64 xaddr;
ASSERT(DO_INDEX(ip));
......@@ -345,7 +347,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
* Save the table, we're going to overwrite it with the
* xtree root
*/
dir_table_slot_t temp_table[12];
struct dir_table_slot temp_table[12];
memcpy(temp_table, &jfs_ip->i_dirtable, sizeof(temp_table));
/*
......@@ -372,7 +374,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
return -1;
}
tlck = txLock(tid, ip, mp, tlckDATA);
llck = (linelock_t *) & tlck->lock;
llck = (struct linelock *) & tlck->lock;
ASSERT(llck->index == 0);
lv = &llck->lv[0];
......@@ -391,7 +393,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
clear_cflag(COMMIT_Dirtable, ip);
}
offset = (index - 2) * sizeof(dir_table_slot_t);
offset = (index - 2) * sizeof(struct dir_table_slot);
page_offset = offset & (PSIZE - 1);
blkno = ((offset + 1) >> L2PSIZE) << sbi->l2nbperpage;
if (page_offset == 0) {
......@@ -424,7 +426,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
lock_index(tid, ip, mp, index);
dirtab_slot =
(dir_table_slot_t *) ((char *) mp->data + page_offset);
(struct dir_table_slot *) ((char *) mp->data + page_offset);
dirtab_slot->flag = DIR_INDEX_VALID;
dirtab_slot->slot = slot;
DTSaddress(dirtab_slot, bn);
......@@ -442,8 +444,8 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
*/
static void free_index(tid_t tid, struct inode *ip, u32 index, u32 next)
{
dir_table_slot_t *dirtab_slot;
metapage_t *mp = 0;
struct dir_table_slot *dirtab_slot;
struct metapage *mp = 0;
dirtab_slot = find_index(ip, index, &mp);
......@@ -468,9 +470,9 @@ static void free_index(tid_t tid, struct inode *ip, u32 index, u32 next)
* Changes an entry in the directory index table
*/
static void modify_index(tid_t tid, struct inode *ip, u32 index, s64 bn,
int slot, metapage_t ** mp)
int slot, struct metapage ** mp)
{
dir_table_slot_t *dirtab_slot;
struct dir_table_slot *dirtab_slot;
dirtab_slot = find_index(ip, index, mp);
......@@ -493,17 +495,17 @@ static void modify_index(tid_t tid, struct inode *ip, u32 index, s64 bn,
* reads a directory table slot
*/
static int get_index(struct inode *ip, u32 index,
dir_table_slot_t * dirtab_slot)
struct dir_table_slot * dirtab_slot)
{
metapage_t *mp = 0;
dir_table_slot_t *slot;
struct metapage *mp = 0;
struct dir_table_slot *slot;
slot = find_index(ip, index, &mp);
if (slot == 0) {
return -EIO;
}
memcpy(dirtab_slot, slot, sizeof(dir_table_slot_t));
memcpy(dirtab_slot, slot, sizeof(struct dir_table_slot));
if (mp)
release_metapage(mp);
......@@ -522,21 +524,21 @@ static int get_index(struct inode *ip, u32 index,
* return: 0 - search result on stack, leaf page pinned;
* errno - I/O error
*/
int dtSearch(struct inode *ip,
component_t * key, ino_t * data, btstack_t * btstack, int flag)
int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
struct btstack * btstack, int flag)
{
int rc = 0;
int cmp = 1; /* init for empty page */
s64 bn;
metapage_t *mp;
struct metapage *mp;
dtpage_t *p;
s8 *stbl;
int base, index, lim;
btframe_t *btsp;
struct btframe *btsp;
pxd_t *pxd;
int psize = 288; /* initial in-line directory */
ino_t inumber;
component_t ciKey;
struct component_name ciKey;
struct super_block *sb = ip->i_sb;
ciKey.name =
......@@ -613,7 +615,7 @@ int dtSearch(struct inode *ip,
*/
if (p->header.flag & BT_LEAF) {
inumber = le32_to_cpu(
((ldtentry_t *) & p->slot[stbl[index]])->inumber);
((struct ldtentry *) & p->slot[stbl[index]])->inumber);
/*
* search for JFS_LOOKUP
......@@ -766,19 +768,19 @@ int dtSearch(struct inode *ip,
* errno - failure;
*/
int dtInsert(tid_t tid, struct inode *ip,
component_t * name, ino_t * fsn, btstack_t * btstack)
struct component_name * name, ino_t * fsn, struct btstack * btstack)
{
int rc = 0;
metapage_t *mp; /* meta-page buffer */
struct metapage *mp; /* meta-page buffer */
dtpage_t *p; /* base B+-tree index page */
s64 bn;
int index;
dtsplit_t split; /* split information */
struct dtsplit split; /* split information */
ddata_t data;
dtlock_t *dtlck;
struct dt_lock *dtlck;
int n;
tlock_t *tlck;
lv_t *lv;
struct tlock *tlck;
struct lv *lv;
/*
* retrieve search result
......@@ -833,9 +835,9 @@ int dtInsert(tid_t tid, struct inode *ip,
* acquire a transaction lock on the leaf page
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
ASSERT(dtlck->index == 0);
lv = (lv_t *) & dtlck->lv[0];
lv = & dtlck->lv[0];
/* linelock header */
lv->offset = 0;
......@@ -847,8 +849,8 @@ int dtInsert(tid_t tid, struct inode *ip,
/* linelock stbl of non-root leaf page */
if (!(p->header.flag & BT_ROOT)) {
if (dtlck->index >= dtlck->maxcnt)
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[dtlck->index];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
n = index >> L2DTSLOTSIZE;
lv->offset = p->header.stblindex + n;
lv->length =
......@@ -875,29 +877,29 @@ int dtInsert(tid_t tid, struct inode *ip,
* leaf page unpinned;
*/
static int dtSplitUp(tid_t tid,
struct inode *ip, dtsplit_t * split, btstack_t * btstack)
struct inode *ip, struct dtsplit * split, struct btstack * btstack)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
int rc = 0;
metapage_t *smp;
struct metapage *smp;
dtpage_t *sp; /* split page */
metapage_t *rmp;
struct metapage *rmp;
dtpage_t *rp; /* new right page split from sp */
pxd_t rpxd; /* new right page extent descriptor */
metapage_t *lmp;
struct metapage *lmp;
dtpage_t *lp; /* left child page */
int skip; /* index of entry of insertion */
btframe_t *parent; /* parent page entry on traverse stack */
struct btframe *parent; /* parent page entry on traverse stack */
s64 xaddr, nxaddr;
int xlen, xsize;
pxdlist_t pxdlist;
struct pxdlist pxdlist;
pxd_t *pxd;
component_t key = { 0, 0 };
struct component_name key = { 0, 0 };
ddata_t *data = split->data;
int n;
dtlock_t *dtlck;
tlock_t *tlck;
lv_t *lv;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
/* get split page */
smp = split->mp;
......@@ -1183,9 +1185,9 @@ static int dtSplitUp(tid_t tid,
* acquire a transaction lock on the parent page
*/
tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
ASSERT(dtlck->index == 0);
lv = (lv_t *) & dtlck->lv[0];
lv = & dtlck->lv[0];
/* linelock header */
lv->offset = 0;
......@@ -1243,32 +1245,32 @@ static int dtSplitUp(tid_t tid,
* errno - failure;
* return split and new page pinned;
*/
static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
metapage_t ** rmpp, dtpage_t ** rpp, pxd_t * rpxdp)
static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rpxdp)
{
struct super_block *sb = ip->i_sb;
int rc = 0;
metapage_t *smp;
struct metapage *smp;
dtpage_t *sp;
metapage_t *rmp;
struct metapage *rmp;
dtpage_t *rp; /* new right page allocated */
s64 rbn; /* new right page block number */
metapage_t *mp;
struct metapage *mp;
dtpage_t *p;
s64 nextbn;
pxdlist_t *pxdlist;
struct pxdlist *pxdlist;
pxd_t *pxd;
int skip, nextindex, half, left, nxt, off, si;
ldtentry_t *ldtentry;
idtentry_t *idtentry;
struct ldtentry *ldtentry;
struct idtentry *idtentry;
u8 *stbl;
dtslot_t *f;
struct dtslot *f;
int fsi, stblsize;
int n;
dtlock_t *sdtlck, *rdtlck;
tlock_t *tlck;
dtlock_t *dtlck;
lv_t *slv, *rlv, *lv;
struct dt_lock *sdtlck, *rdtlck;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *slv, *rlv, *lv;
/* get split page */
smp = split->mp;
......@@ -1293,7 +1295,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
* acquire a transaction lock on the new right page
*/
tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
rdtlck = (dtlock_t *) & tlck->lock;
rdtlck = (struct dt_lock *) & tlck->lock;
rp = (dtpage_t *) rmp->data;
*rpp = rp;
......@@ -1306,11 +1308,11 @@ static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
* action:
*/
tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
sdtlck = (dtlock_t *) & tlck->lock;
sdtlck = (struct dt_lock *) & tlck->lock;
/* linelock header of split page */
ASSERT(sdtlck->index == 0);
slv = (lv_t *) & sdtlck->lv[0];
slv = & sdtlck->lv[0];
slv->offset = 0;
slv->length = 1;
sdtlck->index++;
......@@ -1356,7 +1358,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
*/
if (nextbn == 0 && split->index == sp->header.nextindex) {
/* linelock header + stbl (first slot) of new page */
rlv = (lv_t *) & rdtlck->lv[rdtlck->index];
rlv = & rdtlck->lv[rdtlck->index];
rlv->offset = 0;
rlv->length = 2;
rdtlck->index++;
......@@ -1395,10 +1397,10 @@ static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
jEVENT(0,
("dtSplitPage: tlck = 0x%p, ip = 0x%p, mp=0x%p\n",
tlck, ip, mp));
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header of previous right sibling page */
lv = (lv_t *) & dtlck->lv[dtlck->index];
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
......@@ -1431,7 +1433,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
si = stbl[nxt];
switch (sp->header.flag & BT_TYPE) {
case BT_LEAF:
ldtentry = (ldtentry_t *) & sp->slot[si];
ldtentry = (struct ldtentry *) & sp->slot[si];
if (DO_INDEX(ip))
n = NDTLEAF(ldtentry->namlen);
else
......@@ -1440,7 +1442,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
break;
case BT_INTERNAL:
idtentry = (idtentry_t *) & sp->slot[si];
idtentry = (struct idtentry *) & sp->slot[si];
n = NDTINTERNAL(idtentry->namlen);
break;
......@@ -1467,7 +1469,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
* new/right page moved in entries are linelocked;
*/
/* linelock header + stbl of new right page */
rlv = (lv_t *) & rdtlck->lv[rdtlck->index];
rlv = & rdtlck->lv[rdtlck->index];
rlv->offset = 0;
rlv->length = 5;
rdtlck->index++;
......@@ -1492,7 +1494,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
mp = 0;
stbl = DT_GETSTBL(rp);
for (n = 0; n < rp->header.nextindex; n++) {
ldtentry = (ldtentry_t *) & rp->slot[stbl[n]];
ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
modify_index(tid, ip, le32_to_cpu(ldtentry->index),
rbn, n, &mp);
}
......@@ -1509,8 +1511,8 @@ static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
/* linelock stbl of split page */
if (sdtlck->index >= sdtlck->maxcnt)
sdtlck = (dtlock_t *) txLinelock(sdtlck);
slv = (lv_t *) & sdtlck->lv[sdtlck->index];
sdtlck = (struct dt_lock *) txLinelock(sdtlck);
slv = & sdtlck->lv[sdtlck->index];
n = skip >> L2DTSLOTSIZE;
slv->offset = sp->header.stblindex + n;
slv->length =
......@@ -1551,28 +1553,28 @@ static int dtSplitPage(tid_t tid, struct inode *ip, dtsplit_t * split,
* return extended page pinned;
*/
static int dtExtendPage(tid_t tid,
struct inode *ip, dtsplit_t * split, btstack_t * btstack)
struct inode *ip, struct dtsplit * split, struct btstack * btstack)
{
struct super_block *sb = ip->i_sb;
int rc;
metapage_t *smp, *pmp, *mp;
struct metapage *smp, *pmp, *mp;
dtpage_t *sp, *pp;
pxdlist_t *pxdlist;
struct pxdlist *pxdlist;
pxd_t *pxd, *tpxd;
int xlen, xsize;
int newstblindex, newstblsize;
int oldstblindex, oldstblsize;
int fsi, last;
dtslot_t *f;
btframe_t *parent;
struct dtslot *f;
struct btframe *parent;
int n;
dtlock_t *dtlck;
struct dt_lock *dtlck;
s64 xaddr, txaddr;
tlock_t *tlck;
pxdlock_t *pxdlock;
lv_t *lv;
struct tlock *tlck;
struct pxd_lock *pxdlock;
struct lv *lv;
uint type;
ldtentry_t *ldtentry;
struct ldtentry *ldtentry;
u8 *stbl;
/* get page to extend */
......@@ -1605,7 +1607,7 @@ static int dtExtendPage(tid_t tid,
/* save moved extent descriptor for later free */
tlck = txMaplock(tid, ip, tlckDTREE | tlckRELOCATE);
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = sp->header.self;
pxdlock->index = 1;
......@@ -1618,7 +1620,7 @@ static int dtExtendPage(tid_t tid,
stbl = DT_GETSTBL(sp);
for (n = 0; n < sp->header.nextindex; n++) {
ldtentry =
(ldtentry_t *) & sp->slot[stbl[n]];
(struct ldtentry *) & sp->slot[stbl[n]];
modify_index(tid, ip,
le32_to_cpu(ldtentry->index),
xaddr, n, &mp);
......@@ -1641,8 +1643,8 @@ static int dtExtendPage(tid_t tid,
* acquire a transaction lock on the extended/leaf page
*/
tlck = txLock(tid, ip, smp, tlckDTREE | type);
dtlck = (dtlock_t *) & tlck->lock;
lv = (lv_t *) & dtlck->lv[0];
dtlck = (struct dt_lock *) & tlck->lock;
lv = & dtlck->lv[0];
/* update buffer extent descriptor of extended page */
xlen = lengthPXD(pxd);
......@@ -1750,8 +1752,8 @@ static int dtExtendPage(tid_t tid,
* acquire a transaction lock on the parent/root page
*/
tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
dtlck = (dtlock_t *) & tlck->lock;
lv = (lv_t *) & dtlck->lv[dtlck->index];
dtlck = (struct dt_lock *) & tlck->lock;
lv = & dtlck->lv[dtlck->index];
/* linelock parent entry - 1st slot */
lv->offset = 1;
......@@ -1800,26 +1802,26 @@ static int dtExtendPage(tid_t tid,
* return new page pinned;
*/
static int dtSplitRoot(tid_t tid,
struct inode *ip, dtsplit_t * split, metapage_t ** rmpp)
struct inode *ip, struct dtsplit * split, struct metapage ** rmpp)
{
struct super_block *sb = ip->i_sb;
metapage_t *smp;
struct metapage *smp;
dtroot_t *sp;
metapage_t *rmp;
struct metapage *rmp;
dtpage_t *rp;
s64 rbn;
int xlen;
int xsize;
dtslot_t *f;
struct dtslot *f;
s8 *stbl;
int fsi, stblsize, n;
idtentry_t *s;
struct idtentry *s;
pxd_t *ppxd;
pxdlist_t *pxdlist;
struct pxdlist *pxdlist;
pxd_t *pxd;
dtlock_t *dtlck;
tlock_t *tlck;
lv_t *lv;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
/* get split root page */
smp = split->mp;
......@@ -1845,7 +1847,7 @@ static int dtSplitRoot(tid_t tid,
* acquire a transaction lock on the new right page
*/
tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
rp->header.flag =
(sp->header.flag & BT_LEAF) ? BT_LEAF : BT_INTERNAL;
......@@ -1860,7 +1862,7 @@ static int dtSplitRoot(tid_t tid,
*/
/* linelock header + copied entries + new stbl (1st slot) in new page */
ASSERT(dtlck->index == 0);
lv = (lv_t *) & dtlck->lv[0];
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = 10; /* 1 + 8 + 1 */
dtlck->index++;
......@@ -1909,12 +1911,12 @@ static int dtSplitRoot(tid_t tid,
* Update directory index table for entries now in right page
*/
if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
metapage_t *mp = 0;
ldtentry_t *ldtentry;
struct metapage *mp = 0;
struct ldtentry *ldtentry;
stbl = DT_GETSTBL(rp);
for (n = 0; n < rp->header.nextindex; n++) {
ldtentry = (ldtentry_t *) & rp->slot[stbl[n]];
ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
modify_index(tid, ip, le32_to_cpu(ldtentry->index),
rbn, n, &mp);
}
......@@ -1941,11 +1943,11 @@ static int dtSplitRoot(tid_t tid,
* acquire a transaction lock on the root page (in-memory inode)
*/
tlck = txLock(tid, ip, smp, tlckDTREE | tlckNEW | tlckBTROOT);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock root */
ASSERT(dtlck->index == 0);
lv = (lv_t *) & dtlck->lv[0];
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = DTROOTMAXSLOT;
dtlck->index++;
......@@ -1957,7 +1959,7 @@ static int dtSplitRoot(tid_t tid,
}
/* init the first entry */
s = (idtentry_t *) & sp->slot[DTENTRYSTART];
s = (struct idtentry *) & sp->slot[DTENTRYSTART];
ppxd = (pxd_t *) s;
*ppxd = *pxd;
s->next = -1;
......@@ -1996,22 +1998,22 @@ static int dtSplitRoot(tid_t tid,
* return:
*/
int dtDelete(tid_t tid,
struct inode *ip, component_t * key, ino_t * ino, int flag)
struct inode *ip, struct component_name * key, ino_t * ino, int flag)
{
int rc = 0;
s64 bn;
metapage_t *mp, *imp;
struct metapage *mp, *imp;
dtpage_t *p;
int index;
btstack_t btstack;
dtlock_t *dtlck;
tlock_t *tlck;
lv_t *lv;
struct btstack btstack;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
int i;
ldtentry_t *ldtentry;
struct ldtentry *ldtentry;
u8 *stbl;
u32 table_index, next_index;
metapage_t *nmp;
struct metapage *nmp;
dtpage_t *np;
/*
......@@ -2032,7 +2034,7 @@ int dtDelete(tid_t tid,
*/
if (DO_INDEX(ip)) {
stbl = DT_GETSTBL(p);
ldtentry = (ldtentry_t *) & p->slot[stbl[index]];
ldtentry = (struct ldtentry *) & p->slot[stbl[index]];
table_index = le32_to_cpu(ldtentry->index);
if (index == (p->header.nextindex - 1)) {
/*
......@@ -2050,7 +2052,7 @@ int dtDelete(tid_t tid,
else {
stbl = DT_GETSTBL(np);
ldtentry =
(ldtentry_t *) & np->
(struct ldtentry *) & np->
slot[stbl[0]];
next_index =
le32_to_cpu(ldtentry->index);
......@@ -2059,7 +2061,7 @@ int dtDelete(tid_t tid,
}
} else {
ldtentry =
(ldtentry_t *) & p->slot[stbl[index + 1]];
(struct ldtentry *) & p->slot[stbl[index + 1]];
next_index = le32_to_cpu(ldtentry->index);
}
free_index(tid, ip, table_index, next_index);
......@@ -2082,7 +2084,7 @@ int dtDelete(tid_t tid,
* acquire a transaction lock on the leaf page
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
/*
* Do not assume that dtlck->index will be zero. During a
......@@ -2092,8 +2094,8 @@ int dtDelete(tid_t tid,
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[dtlck->index];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
......@@ -2101,8 +2103,8 @@ int dtDelete(tid_t tid,
/* linelock stbl of non-root leaf page */
if (!(p->header.flag & BT_ROOT)) {
if (dtlck->index >= dtlck->maxcnt)
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[dtlck->index];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
i = index >> L2DTSLOTSIZE;
lv->offset = p->header.stblindex + i;
lv->length =
......@@ -2122,7 +2124,7 @@ int dtDelete(tid_t tid,
stbl = DT_GETSTBL(p);
for (i = index; i < p->header.nextindex; i++) {
ldtentry =
(ldtentry_t *) & p->slot[stbl[i]];
(struct ldtentry *) & p->slot[stbl[i]];
modify_index(tid, ip,
le32_to_cpu(ldtentry->index),
bn, i, &imp);
......@@ -2149,18 +2151,18 @@ int dtDelete(tid_t tid,
* return:
*/
static int dtDeleteUp(tid_t tid, struct inode *ip,
metapage_t * fmp, dtpage_t * fp, btstack_t * btstack)
struct metapage * fmp, dtpage_t * fp, struct btstack * btstack)
{
int rc = 0;
metapage_t *mp;
struct metapage *mp;
dtpage_t *p;
int index, nextindex;
int xlen;
btframe_t *parent;
dtlock_t *dtlck;
tlock_t *tlck;
lv_t *lv;
pxdlock_t *pxdlock;
struct btframe *parent;
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
struct pxd_lock *pxdlock;
int i;
/*
......@@ -2190,7 +2192,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
* the buffer page is freed;
*/
tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = fp->header.self;
pxdlock->index = 1;
......@@ -2262,7 +2264,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
tlck =
txMaplock(tid, ip,
tlckDTREE | tlckFREE);
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = p->header.self;
pxdlock->index = 1;
......@@ -2294,12 +2296,12 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
* action: router entry deletion
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[dtlck->index];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
......@@ -2309,8 +2311,8 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[0];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
i = index >> L2DTSLOTSIZE;
lv->offset = p->header.stblindex + i;
......@@ -2349,25 +2351,26 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
s64 nxaddr)
{
int rc = 0;
metapage_t *mp, *pmp, *lmp, *rmp;
struct metapage *mp, *pmp, *lmp, *rmp;
dtpage_t *p, *pp, *rp = 0, *lp= 0;
s64 bn;
int index;
btstack_t btstack;
struct btstack btstack;
pxd_t *pxd;
s64 oxaddr, nextbn, prevbn;
int xlen, xsize;
tlock_t *tlck;
dtlock_t *dtlck;
pxdlock_t *pxdlock;
struct tlock *tlck;
struct dt_lock *dtlck;
struct pxd_lock *pxdlock;
s8 *stbl;
lv_t *lv;
struct lv *lv;
oxaddr = addressPXD(opxd);
xlen = lengthPXD(opxd);
jEVENT(0, ("dtRelocate: lmxaddr:%Ld xaddr:%Ld:%Ld xlen:%d\n",
(long long)lmxaddr, (long long)oxaddr, (long long)nxaddr, xlen));
(long long)lmxaddr, (long long)oxaddr, (long long)nxaddr,
xlen));
/*
* 1. get the internal parent dtpage covering
......@@ -2426,10 +2429,10 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
*/
if (lmp) {
tlck = txLock(tid, ip, lmp, tlckDTREE | tlckRELINK);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
ASSERT(dtlck->index == 0);
lv = (lv_t *) & dtlck->lv[0];
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
......@@ -2440,10 +2443,10 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
if (rmp) {
tlck = txLock(tid, ip, rmp, tlckDTREE | tlckRELINK);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
ASSERT(dtlck->index == 0);
lv = (lv_t *) & dtlck->lv[0];
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
......@@ -2462,10 +2465,10 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
* the dst extent;
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckNEW);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
ASSERT(dtlck->index == 0);
lv = (lv_t *) & dtlck->lv[0];
lv = & dtlck->lv[0];
/* update the self address in the dtpage header */
pxd = &p->header.self;
......@@ -2501,7 +2504,7 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
* dtpage), and upadte bmap for free of the source dtpage;
*/
tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, oxaddr);
PXDlength(&pxdlock->pxd, xlen);
......@@ -2515,8 +2518,8 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
*/
jEVENT(0, ("dtRelocate: update parent router entry.\n"));
tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
dtlck = (dtlock_t *) & tlck->lock;
lv = (lv_t *) & dtlck->lv[dtlck->index];
dtlck = (struct dt_lock *) & tlck->lock;
lv = & dtlck->lv[dtlck->index];
/* update the PXD with the new address */
stbl = DT_GETSTBL(pp);
......@@ -2545,17 +2548,17 @@ int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
* dtree level, in which the required dtpage resides.
*/
static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd,
btstack_t * btstack)
struct btstack * btstack)
{
int rc = 0;
s64 bn;
metapage_t *mp;
struct metapage *mp;
dtpage_t *p;
int psize = 288; /* initial in-line directory */
s8 *stbl;
int i;
pxd_t *pxd;
btframe_t *btsp;
struct btframe *btsp;
BT_CLR(btstack); /* reset stack */
......@@ -2650,11 +2653,11 @@ static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd,
static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p)
{
int rc;
metapage_t *mp;
struct metapage *mp;
s64 nextbn, prevbn;
tlock_t *tlck;
dtlock_t *dtlck;
lv_t *lv;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *lv;
nextbn = le64_to_cpu(p->header.next);
prevbn = le64_to_cpu(p->header.prev);
......@@ -2675,12 +2678,12 @@ static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p)
jEVENT(0,
("dtRelink nextbn: tlck = 0x%p, ip = 0x%p, mp=0x%p\n",
tlck, ip, mp));
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[dtlck->index];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
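The block above is an instance of the linelock idiom that recurs throughout this change: take the next lv slot, extending the linelock with txLinelock() when all slots are in use. A minimal sketch of that pattern (the helper name next_lv is illustrative only; the dtree code open-codes it):
static struct lv *next_lv(struct dt_lock **dtlock)
{
	struct dt_lock *dtlck = *dtlock;
	/* extend the linelock when every lv slot is already in use */
	if (dtlck->index >= dtlck->maxcnt)
		dtlck = (struct dt_lock *) txLinelock(dtlck);
	*dtlock = dtlck;
	/* caller fills lv->offset/lv->length and bumps dtlck->index */
	return &dtlck->lv[dtlck->index];
}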
......@@ -2705,12 +2708,12 @@ static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p)
jEVENT(0,
("dtRelink prevbn: tlck = 0x%p, ip = 0x%p, mp=0x%p\n",
tlck, ip, mp));
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock header */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[dtlck->index];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
......@@ -2733,10 +2736,10 @@ void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
dtroot_t *p;
int fsi;
dtslot_t *f;
tlock_t *tlck;
dtlock_t *dtlck;
lv_t *lv;
struct dtslot *f;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *lv;
u16 xflag_save;
/*
......@@ -2745,7 +2748,7 @@ void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
*/
if (DO_INDEX(ip)) {
if (jfs_ip->next_index > (MAX_INLINE_DIRTABLE_ENTRY + 1)) {
tblock_t *tblk = tid_to_tblock(tid);
struct tblock *tblk = tid_to_tblock(tid);
/*
* We're playing games with the tid's xflag. If
* we're removing a regular file, the file's xtree
......@@ -2784,13 +2787,13 @@ void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
*
* action: directory initialization;
*/
tlck = txLock(tid, ip, (metapage_t *) & jfs_ip->bxflag,
tlck = txLock(tid, ip, (struct metapage *) & jfs_ip->bxflag,
tlckDTREE | tlckENTRY | tlckBTROOT);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
/* linelock root */
ASSERT(dtlck->index == 0);
lv = (lv_t *) & dtlck->lv[0];
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = DTROOTMAXSLOT;
dtlck->index++;
......@@ -2849,14 +2852,14 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
s32 unused;
} *dtoffset = (struct dtoffset *) &filp->f_pos;
s64 bn;
metapage_t *mp;
struct metapage *mp;
dtpage_t *p;
int index;
s8 *stbl;
btstack_t btstack;
struct btstack btstack;
int i, next;
ldtentry_t *d;
dtslot_t *t;
struct ldtentry *d;
struct dtslot *t;
int d_namleft, d_namlen, len, outlen;
char *d_name, *name_ptr;
int dtlhdrdatalen;
......@@ -2880,7 +2883,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
dir_index = (u32) filp->f_pos;
if (dir_index > 1) {
dir_table_slot_t dirtab_slot;
struct dir_table_slot dirtab_slot;
if (dtEmpty(ip)) {
filp->f_pos = DIREND;
......@@ -3017,7 +3020,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
stbl = DT_GETSTBL(p);
for (i = index; i < p->header.nextindex; i++) {
d = (ldtentry_t *) & p->slot[stbl[i]];
d = (struct ldtentry *) & p->slot[stbl[i]];
d_namleft = d->namlen;
name_ptr = d_name;
......@@ -3036,7 +3039,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
/* copy name in the additional segment(s) */
next = d->next;
while (next >= 0) {
t = (dtslot_t *) & p->slot[next];
t = (struct dtslot *) & p->slot[next];
name_ptr += outlen;
d_namleft -= len;
/* Sanity Check */
......@@ -3110,15 +3113,15 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
*
* function: get the leftmost page of the directory
*/
static int dtReadFirst(struct inode *ip, btstack_t * btstack)
static int dtReadFirst(struct inode *ip, struct btstack * btstack)
{
int rc = 0;
s64 bn;
int psize = 288; /* initial in-line directory */
metapage_t *mp;
struct metapage *mp;
dtpage_t *p;
s8 *stbl;
btframe_t *btsp;
struct btframe *btsp;
pxd_t *xd;
BT_CLR(btstack); /* reset stack */
......@@ -3176,7 +3179,8 @@ static int dtReadFirst(struct inode *ip, btstack_t * btstack)
* note: if index > nextindex of the target leaf page,
* start with 1st entry of next leaf page;
*/
static int dtReadNext(struct inode *ip, loff_t * offset, btstack_t * btstack)
static int dtReadNext(struct inode *ip, loff_t * offset,
struct btstack * btstack)
{
int rc = 0;
struct dtoffset {
......@@ -3185,12 +3189,12 @@ static int dtReadNext(struct inode *ip, loff_t * offset, btstack_t * btstack)
s32 unused;
} *dtoffset = (struct dtoffset *) offset;
s64 bn;
metapage_t *mp;
struct metapage *mp;
dtpage_t *p;
int index;
int pn;
s8 *stbl;
btframe_t *btsp, *parent;
struct btframe *btsp, *parent;
pxd_t *xd;
/*
......@@ -3351,14 +3355,14 @@ static int dtReadNext(struct inode *ip, loff_t * offset, btstack_t * btstack)
* = 0 if k is = record
* > 0 if k is > record
*/
static int dtCompare(component_t * key, /* search key */
static int dtCompare(struct component_name * key, /* search key */
dtpage_t * p, /* directory page */
int si)
{ /* entry slot index */
wchar_t *kname, *name;
int klen, namlen, len, rc;
idtentry_t *ih;
dtslot_t *t;
struct idtentry *ih;
struct dtslot *t;
/*
* force the left-most key on internal pages, at any level of
......@@ -3379,7 +3383,7 @@ static int dtCompare(component_t * key, /* search key */
kname = key->name;
klen = key->namlen;
ih = (idtentry_t *) & p->slot[si];
ih = (struct idtentry *) & p->slot[si];
si = ih->next;
name = ih->name;
namlen = ih->namlen;
......@@ -3397,7 +3401,7 @@ static int dtCompare(component_t * key, /* search key */
kname += len;
while (klen > 0 && namlen > 0) {
/* compare with next name segment */
t = (dtslot_t *) & p->slot[si];
t = (struct dtslot *) & p->slot[si];
len = min(namlen, DTSLOTDATALEN);
len = min(klen, len);
name = t->name;
......@@ -3426,16 +3430,16 @@ static int dtCompare(component_t * key, /* search key */
* = 0 if k is = record
* > 0 if k is > record
*/
static int ciCompare(component_t * key, /* search key */
static int ciCompare(struct component_name * key, /* search key */
dtpage_t * p, /* directory page */
int si, /* entry slot index */
int flag)
{
wchar_t *kname, *name, x;
int klen, namlen, len, rc;
ldtentry_t *lh;
idtentry_t *ih;
dtslot_t *t;
struct ldtentry *lh;
struct idtentry *ih;
struct dtslot *t;
int i;
/*
......@@ -3461,7 +3465,7 @@ static int ciCompare(component_t * key, /* search key */
* leaf page entry
*/
if (p->header.flag & BT_LEAF) {
lh = (ldtentry_t *) & p->slot[si];
lh = (struct ldtentry *) & p->slot[si];
si = lh->next;
name = lh->name;
namlen = lh->namlen;
......@@ -3474,7 +3478,7 @@ static int ciCompare(component_t * key, /* search key */
* internal page entry
*/
else {
ih = (idtentry_t *) & p->slot[si];
ih = (struct idtentry *) & p->slot[si];
si = ih->next;
name = ih->name;
namlen = ih->namlen;
......@@ -3499,7 +3503,7 @@ static int ciCompare(component_t * key, /* search key */
/* compare with additional segment(s) */
while (klen > 0 && namlen > 0) {
/* compare with next name segment */
t = (dtslot_t *) & p->slot[si];
t = (struct dtslot *) & p->slot[si];
len = min(namlen, DTSLOTDATALEN);
len = min(klen, len);
name = t->name;
......@@ -3534,14 +3538,14 @@ static int ciCompare(component_t * key, /* search key */
* Number of prefix bytes needed to distinguish b from a.
*/
static void ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
int ri, component_t * key, int flag)
int ri, struct component_name * key, int flag)
{
int klen, namlen;
wchar_t *pl, *pr, *kname;
wchar_t lname[JFS_NAME_MAX + 1];
component_t lkey = { 0, lname };
struct component_name lkey = { 0, lname };
wchar_t rname[JFS_NAME_MAX + 1];
component_t rkey = { 0, rname };
struct component_name rkey = { 0, rname };
/* get left and right key */
dtGetKey(lp, li, &lkey, flag);
......@@ -3588,13 +3592,13 @@ static void ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
* function: get key of the entry
*/
static void dtGetKey(dtpage_t * p, int i, /* entry index */
component_t * key, int flag)
struct component_name * key, int flag)
{
int si;
s8 *stbl;
ldtentry_t *lh;
idtentry_t *ih;
dtslot_t *t;
struct ldtentry *lh;
struct idtentry *ih;
struct dtslot *t;
int namlen, len;
wchar_t *name, *kname;
......@@ -3602,7 +3606,7 @@ static void dtGetKey(dtpage_t * p, int i, /* entry index */
stbl = DT_GETSTBL(p);
si = stbl[i];
if (p->header.flag & BT_LEAF) {
lh = (ldtentry_t *) & p->slot[si];
lh = (struct ldtentry *) & p->slot[si];
si = lh->next;
namlen = lh->namlen;
name = lh->name;
......@@ -3611,7 +3615,7 @@ static void dtGetKey(dtpage_t * p, int i, /* entry index */
else
len = min(namlen, DTLHDRDATALEN_LEGACY);
} else {
ih = (idtentry_t *) & p->slot[si];
ih = (struct idtentry *) & p->slot[si];
si = ih->next;
namlen = ih->namlen;
name = ih->name;
......@@ -3650,21 +3654,21 @@ static void dtGetKey(dtpage_t * p, int i, /* entry index */
*
* return: entry slot index
*/
static void dtInsertEntry(dtpage_t * p, int index, component_t * key,
ddata_t * data, dtlock_t ** dtlock)
static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
ddata_t * data, struct dt_lock ** dtlock)
{
dtslot_t *h, *t;
ldtentry_t *lh = 0;
idtentry_t *ih = 0;
struct dtslot *h, *t;
struct ldtentry *lh = 0;
struct idtentry *ih = 0;
int hsi, fsi, klen, len, nextindex;
wchar_t *kname, *name;
s8 *stbl;
pxd_t *xd;
dtlock_t *dtlck = *dtlock;
lv_t *lv;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int xsi, n;
s64 bn = 0;
metapage_t *mp = 0;
struct metapage *mp = 0;
klen = key->namlen;
kname = key->name;
......@@ -3677,14 +3681,14 @@ static void dtInsertEntry(dtpage_t * p, int index, component_t * key,
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (dtlock_t *) txLinelock(dtlck);
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[dtlck->index];
lv = & dtlck->lv[dtlck->index];
lv->offset = hsi;
/* write head/only segment */
if (p->header.flag & BT_LEAF) {
lh = (ldtentry_t *) h;
lh = (struct ldtentry *) h;
lh->next = h->next;
lh->inumber = data->leaf.ino; /* little-endian */
lh->namlen = klen;
......@@ -3699,7 +3703,7 @@ static void dtInsertEntry(dtpage_t * p, int index, component_t * key,
} else
len = min(klen, DTLHDRDATALEN_LEGACY);
} else {
ih = (idtentry_t *) h;
ih = (struct idtentry *) h;
ih->next = h->next;
xd = (pxd_t *) ih;
*xd = data->xd;
......@@ -3733,8 +3737,8 @@ static void dtInsertEntry(dtpage_t * p, int index, component_t * key,
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[0];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = fsi;
......@@ -3780,7 +3784,7 @@ static void dtInsertEntry(dtpage_t * p, int index, component_t * key,
*/
mp = 0;
for (n = index + 1; n <= nextindex; n++) {
lh = (ldtentry_t *) & (p->slot[stbl[n]]);
lh = (struct ldtentry *) & (p->slot[stbl[n]]);
modify_index(data->leaf.tid, data->leaf.ip,
le32_to_cpu(lh->index), bn, n,
&mp);
......@@ -3806,7 +3810,7 @@ static void dtInsertEntry(dtpage_t * p, int index, component_t * key,
* are updated.
*/
static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
dtlock_t ** sdtlock, dtlock_t ** ddtlock,
struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
int do_index)
{
int ssi, next; /* src slot index */
......@@ -3814,11 +3818,11 @@ static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
int dsi; /* dst slot index */
s8 *sstbl, *dstbl; /* sorted entry table */
int snamlen, len;
ldtentry_t *slh, *dlh = 0;
idtentry_t *sih, *dih = 0;
dtslot_t *h, *s, *d;
dtlock_t *sdtlck = *sdtlock, *ddtlck = *ddtlock;
lv_t *slv, *dlv;
struct ldtentry *slh, *dlh = 0;
struct idtentry *sih, *dih = 0;
struct dtslot *h, *s, *d;
struct dt_lock *sdtlck = *sdtlock, *ddtlck = *ddtlock;
struct lv *slv, *dlv;
int xssi, ns, nd;
int sfsi;
......@@ -3829,11 +3833,11 @@ static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
sfsi = sp->header.freelist;
/* linelock destination entry slot */
dlv = (lv_t *) & ddtlck->lv[ddtlck->index];
dlv = & ddtlck->lv[ddtlck->index];
dlv->offset = dsi;
/* linelock source entry slot */
slv = (lv_t *) & sdtlck->lv[sdtlck->index];
slv = & sdtlck->lv[sdtlck->index];
slv->offset = sstbl[si];
xssi = slv->offset - 1;
......@@ -3855,8 +3859,8 @@ static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
if (sdtlck->index < sdtlck->maxcnt)
slv++;
else {
sdtlck = (dtlock_t *) txLinelock(sdtlck);
slv = (lv_t *) & sdtlck->lv[0];
sdtlck = (struct dt_lock *) txLinelock(sdtlck);
slv = & sdtlck->lv[0];
}
slv->offset = ssi;
......@@ -3873,8 +3877,8 @@ static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
s = &sp->slot[ssi];
if (sp->header.flag & BT_LEAF) {
/* get source entry */
slh = (ldtentry_t *) s;
dlh = (ldtentry_t *) h;
slh = (struct ldtentry *) s;
dlh = (struct ldtentry *) h;
snamlen = slh->namlen;
if (do_index) {
......@@ -3891,11 +3895,11 @@ static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
dsi++;
dlh->next = dsi;
} else {
sih = (idtentry_t *) s;
sih = (struct idtentry *) s;
snamlen = sih->namlen;
len = min(snamlen, DTIHDRDATALEN);
dih = (idtentry_t *) h;
dih = (struct idtentry *) h;
memcpy(dih, sih, 10 + len * 2);
next = sih->next;
......@@ -3928,9 +3932,9 @@ static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
slv++;
else {
sdtlck =
(dtlock_t *)
(struct dt_lock *)
txLinelock(sdtlck);
slv = (lv_t *) & sdtlck->lv[0];
slv = & sdtlck->lv[0];
}
slv->offset = ssi;
......@@ -4006,14 +4010,14 @@ static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
* the entry logged to avoid applying previous updates
* to the same slots)
*/
static void dtDeleteEntry(dtpage_t * p, int fi, dtlock_t ** dtlock)
static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock)
{
int fsi; /* free entry slot index */
s8 *stbl;
dtslot_t *t;
struct dtslot *t;
int si, freecnt;
dtlock_t *dtlck = *dtlock;
lv_t *lv;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int xsi, n;
/* get free entry slot index */
......@@ -4022,17 +4026,17 @@ static void dtDeleteEntry(dtpage_t * p, int fi, dtlock_t ** dtlock)
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[dtlck->index];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = fsi;
/* get the head/only segment */
t = &p->slot[fsi];
if (p->header.flag & BT_LEAF)
si = ((ldtentry_t *) t)->next;
si = ((struct ldtentry *) t)->next;
else
si = ((idtentry_t *) t)->next;
si = ((struct idtentry *) t)->next;
t->next = si;
t->cnt = 1;
......@@ -4051,8 +4055,8 @@ static void dtDeleteEntry(dtpage_t * p, int fi, dtlock_t ** dtlock)
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[0];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = si;
......@@ -4101,14 +4105,14 @@ static void dtDeleteEntry(dtpage_t * p, int fi, dtlock_t ** dtlock)
* the entry logged to avoid applying previous updates
* to the same slots)
*/
static void dtTruncateEntry(dtpage_t * p, int ti, dtlock_t ** dtlock)
static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock)
{
int tsi; /* truncate entry slot index */
s8 *stbl;
dtslot_t *t;
struct dtslot *t;
int si, freecnt;
dtlock_t *dtlck = *dtlock;
lv_t *lv;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int fsi, xsi, n;
/* get free entry slot index */
......@@ -4117,17 +4121,17 @@ static void dtTruncateEntry(dtpage_t * p, int ti, dtlock_t ** dtlock)
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[dtlck->index];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = tsi;
/* get the head/only segment */
t = &p->slot[tsi];
ASSERT(p->header.flag & BT_INTERNAL);
((idtentry_t *) t)->namlen = 0;
si = ((idtentry_t *) t)->next;
((idtentry_t *) t)->next = -1;
((struct idtentry *) t)->namlen = 0;
si = ((struct idtentry *) t)->next;
((struct idtentry *) t)->next = -1;
n = 1;
freecnt = 0;
......@@ -4146,8 +4150,8 @@ static void dtTruncateEntry(dtpage_t * p, int ti, dtlock_t ** dtlock)
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[0];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = si;
......@@ -4183,13 +4187,13 @@ static void dtTruncateEntry(dtpage_t * p, int ti, dtlock_t ** dtlock)
*/
static void dtLinelockFreelist(dtpage_t * p, /* directory page */
int m, /* max slot index */
dtlock_t ** dtlock)
struct dt_lock ** dtlock)
{
int fsi; /* free entry slot index */
dtslot_t *t;
struct dtslot *t;
int si;
dtlock_t *dtlck = *dtlock;
lv_t *lv;
struct dt_lock *dtlck = *dtlock;
struct lv *lv;
int xsi, n;
/* get free entry slot index */
......@@ -4197,8 +4201,8 @@ static void dtLinelockFreelist(dtpage_t * p, /* directory page */
/* open new linelock */
if (dtlck->index >= dtlck->maxcnt)
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[dtlck->index];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[dtlck->index];
lv->offset = fsi;
......@@ -4220,8 +4224,8 @@ static void dtLinelockFreelist(dtpage_t * p, /* directory page */
if (dtlck->index < dtlck->maxcnt)
lv++;
else {
dtlck = (dtlock_t *) txLinelock(dtlck);
lv = (lv_t *) & dtlck->lv[0];
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = & dtlck->lv[0];
}
lv->offset = si;
......@@ -4262,20 +4266,20 @@ static void dtLinelockFreelist(dtpage_t * p, /* directory page */
* 0 - If successfully modified entry
*/
int dtModify(tid_t tid, struct inode *ip,
component_t * key, ino_t * orig_ino, ino_t new_ino, int flag)
struct component_name * key, ino_t * orig_ino, ino_t new_ino, int flag)
{
int rc;
s64 bn;
metapage_t *mp;
struct metapage *mp;
dtpage_t *p;
int index;
btstack_t btstack;
tlock_t *tlck;
dtlock_t *dtlck;
lv_t *lv;
struct btstack btstack;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *lv;
s8 *stbl;
int entry_si; /* entry slot index */
ldtentry_t *entry;
struct ldtentry *entry;
/*
* search for the entry to modify:
......@@ -4293,7 +4297,7 @@ int dtModify(tid_t tid, struct inode *ip,
* acquire a transaction lock on the leaf page of named entry
*/
tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
/* get slot index of the entry */
stbl = DT_GETSTBL(p);
......@@ -4301,13 +4305,13 @@ int dtModify(tid_t tid, struct inode *ip,
/* linelock entry */
ASSERT(dtlck->index == 0);
lv = (lv_t *) & dtlck->lv[0];
lv = & dtlck->lv[0];
lv->offset = entry_si;
lv->length = 1;
dtlck->index++;
/* get the head/only segment */
entry = (ldtentry_t *) & p->slot[entry_si];
entry = (struct ldtentry *) & p->slot[entry_si];
/* substitute the inode number of the entry */
entry->inumber = cpu_to_le32(new_ino);
......@@ -4327,14 +4331,14 @@ int dtModify(tid_t tid, struct inode *ip,
int dtDisplayTree(struct inode *ip)
{
int rc;
metapage_t *mp;
struct metapage *mp;
dtpage_t *p;
s64 bn, pbn;
int index, lastindex, v, h;
pxd_t *xd;
btstack_t btstack;
btframe_t *btsp;
btframe_t *parent;
struct btstack btstack;
struct btframe *btsp;
struct btframe *parent;
u8 *stbl;
int psize = 256;
......@@ -4465,14 +4469,14 @@ int dtDisplayTree(struct inode *ip)
int dtDisplayPage(struct inode *ip, s64 bn, dtpage_t * p)
{
int rc;
metapage_t *mp;
ldtentry_t *lh;
idtentry_t *ih;
struct metapage *mp;
struct ldtentry *lh;
struct idtentry *ih;
pxd_t *xd;
int i, j;
u8 *stbl;
wchar_t name[JFS_NAME_MAX + 1];
component_t key = { 0, name };
struct component_name key = { 0, name };
int freepage = 0;
if (p == NULL) {
......@@ -4492,11 +4496,11 @@ int dtDisplayPage(struct inode *ip, s64 bn, dtpage_t * p)
dtGetKey(p, i, &key, JFS_SBI(ip->i_sb)->mntflag);
key.name[key.namlen] = '\0';
if (p->header.flag & BT_LEAF) {
lh = (ldtentry_t *) & p->slot[stbl[i]];
lh = (struct ldtentry *) & p->slot[stbl[i]];
printf("\t[%d] %s:%d", i, key.name,
le32_to_cpu(lh->inumber));
} else {
ih = (idtentry_t *) & p->slot[stbl[i]];
ih = (struct idtentry *) & p->slot[stbl[i]];
xd = (pxd_t *) ih;
bn = addressPXD(xd);
printf("\t[%d] %s:0x%Lx", i, key.name, bn);
......
......@@ -44,11 +44,11 @@ typedef union {
/*
* directory page slot
*/
typedef struct {
struct dtslot {
s8 next; /* 1: */
s8 cnt; /* 1: */
wchar_t name[15]; /* 30: */
} dtslot_t; /* (32) */
}; /* (32) */
#define DATASLOTSIZE 16
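A name longer than a head segment spills into chained dtslot continuation segments through the next field, which is how the readdir loop earlier in this change reassembles it. A simplified sketch, assuming the raw wchar_t name is copied as-is (the real code converts the name while copying):
static void copy_name_segments(dtpage_t *p, int si, int namlen, wchar_t *out)
{
	struct dtslot *t;
	int len;
	while (namlen > 0 && si >= 0) {
		t = &p->slot[si];			/* next continuation slot */
		len = min(namlen, DTSLOTDATALEN);	/* up to 15 wchars per slot */
		memcpy(out, t->name, len * sizeof(wchar_t));
		out += len;
		namlen -= len;
		si = t->next;				/* -1 terminates the chain */
	}
}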
......@@ -62,13 +62,13 @@ typedef struct {
/*
* internal node entry head/only segment
*/
typedef struct {
struct idtentry {
pxd_t xd; /* 8: child extent descriptor */
s8 next; /* 1: */
u8 namlen; /* 1: */
wchar_t name[11]; /* 22: 2-byte aligned */
} idtentry_t; /* (32) */
}; /* (32) */
#define DTIHDRSIZE 10
#define DTIHDRDATALEN 11
......@@ -82,13 +82,13 @@ typedef struct {
*
* For legacy filesystems, name contains 13 wchars -- no index field
*/
typedef struct {
struct ldtentry {
u32 inumber; /* 4: 4-byte aligned */
s8 next; /* 1: */
u8 namlen; /* 1: */
wchar_t name[11]; /* 22: 2-byte aligned */
u32 index; /* 4: index into dir_table */
} ldtentry_t; /* (32) */
}; /* (32) */
#define DTLHDRSIZE 6
#define DTLHDRDATALEN_LEGACY 13 /* Old (OS/2) format */
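How many wchars of the name fit in an ldtentry head segment depends on whether the directory index table is in use, as seen in the dtGetKey() and dtInsertEntry() hunks earlier in this change. A hedged sketch of that choice (DTLHDRDATALEN is assumed to be the non-legacy head-segment limit):
static inline int ldtentry_head_namelen(struct inode *ip, int namlen)
{
	if (DO_INDEX(ip))
		return min(namlen, DTLHDRDATALEN);	/* index field present */
	return min(namlen, DTLHDRDATALEN_LEGACY);	/* 13 wchars, no index */
}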
......@@ -108,14 +108,14 @@ typedef struct {
*/
#define MAX_INLINE_DIRTABLE_ENTRY 13
typedef struct dir_table_slot {
struct dir_table_slot {
u8 rsrvd; /* 1: */
u8 flag; /* 1: 0 if free */
u8 slot; /* 1: slot within leaf page of entry */
u8 addr1; /* 1: upper 8 bits of leaf page address */
u32 addr2; /* 4: lower 32 bits of leaf page address -OR-
index of next entry when this entry was deleted */
} dir_table_slot_t; /* (8) */
}; /* (8) */
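The leaf page address of an entry is split across addr1 (upper 8 bits) and addr2 (lower 32 bits, little-endian on disk), so reading it back is a shift-and-or of the two fields. A sketch of that reconstruction (the helper name is illustrative; the driver likely wraps this in a macro):
static inline s64 dir_table_slot_address(struct dir_table_slot *dts)
{
	/* upper 8 bits | lower 32 bits of the leaf page block address */
	return ((s64) dts->addr1 << 32) | le32_to_cpu(dts->addr2);
}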
/*
* flag values
......@@ -144,7 +144,7 @@ typedef struct dir_table_slot {
*/
typedef union {
struct {
dasd_t DASD; /* 16: DASD limit/usage info F226941 */
struct dasd DASD; /* 16: DASD limit/usage info */
u8 flag; /* 1: */
u8 nextindex; /* 1: next free entry in stbl */
......@@ -156,7 +156,7 @@ typedef union {
s8 stbl[8]; /* 8: sorted entry index table */
} header; /* (32) */
dtslot_t slot[9];
struct dtslot slot[9];
} dtroot_t;
#define PARENT(IP) \
......@@ -207,7 +207,7 @@ typedef union {
pxd_t self; /* 8: self pxd */
} header; /* (32) */
dtslot_t slot[128];
struct dtslot slot[128];
} dtpage_t;
#define DTPAGEMAXSLOT 128
......@@ -256,20 +256,20 @@ typedef union {
*/
extern void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot);
extern int dtSearch(struct inode *ip, component_t * key,
ino_t * data, btstack_t * btstack, int flag);
extern int dtSearch(struct inode *ip, struct component_name * key,
ino_t * data, struct btstack * btstack, int flag);
extern int dtInsert(tid_t tid, struct inode *ip,
component_t * key, ino_t * ino, btstack_t * btstack);
extern int dtInsert(tid_t tid, struct inode *ip, struct component_name * key,
ino_t * ino, struct btstack * btstack);
extern int dtDelete(tid_t tid,
struct inode *ip, component_t * key, ino_t * data, int flag);
extern int dtDelete(tid_t tid, struct inode *ip, struct component_name * key,
ino_t * data, int flag);
extern int dtRelocate(tid_t tid,
struct inode *ip, s64 lmxaddr, pxd_t * opxd, s64 nxaddr);
extern int dtModify(tid_t tid, struct inode *ip,
component_t * key, ino_t * orig_ino, ino_t new_ino, int flag);
extern int dtModify(tid_t tid, struct inode *ip, struct component_name * key,
ino_t * orig_ino, ino_t new_ino, int flag);
extern int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir);
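A hedged usage sketch of the struct-based lookup interface declared above: build a component_name key over a wide-character name and search the directory. The JFS_LOOKUP flag value is an assumption based on existing dtSearch() callers.
static int example_lookup(struct inode *dir, wchar_t *name, int namlen,
			  ino_t *inum)
{
	struct component_name key = { namlen, name };	/* { namlen, name } */
	struct btstack btstack;
	/* 0 on success with *inum set; ENOENT if no such entry */
	return dtSearch(dir, &key, inum, &btstack, JFS_LOOKUP);
}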
......
......@@ -358,8 +358,8 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, boolean_t abnr)
int extHint(struct inode *ip, s64 offset, xad_t * xp)
{
struct super_block *sb = ip->i_sb;
xadlist_t xadl;
lxdlist_t lxdl;
struct xadlist xadl;
struct lxdlist lxdl;
lxd_t lxd;
s64 prev;
int rc, nbperpage = JFS_SBI(sb)->nbperpage;
......@@ -516,7 +516,7 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
{
s64 nb, nblks, daddr, max;
int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage;
bmap_t *mp = JFS_SBI(ip->i_sb)->bmap;
struct bmap *mp = JFS_SBI(ip->i_sb)->bmap;
/* get the number of blocks to initially attempt to allocate.
* we'll first try the number of blocks requested unless this
......
......@@ -74,19 +74,19 @@ extern struct address_space_operations jfs_aops;
/*
* forward references
*/
static int diAllocAG(imap_t *, int, boolean_t, struct inode *);
static int diAllocAny(imap_t *, int, boolean_t, struct inode *);
static int diAllocBit(imap_t *, iag_t *, int);
static int diAllocExt(imap_t *, int, struct inode *);
static int diAllocIno(imap_t *, int, struct inode *);
static int diAllocAG(struct inomap *, int, boolean_t, struct inode *);
static int diAllocAny(struct inomap *, int, boolean_t, struct inode *);
static int diAllocBit(struct inomap *, struct iag *, int);
static int diAllocExt(struct inomap *, int, struct inode *);
static int diAllocIno(struct inomap *, int, struct inode *);
static int diFindFree(u32, int);
static int diNewExt(imap_t *, iag_t *, int);
static int diNewIAG(imap_t *, int *, int, metapage_t **);
static int diNewExt(struct inomap *, struct iag *, int);
static int diNewIAG(struct inomap *, int *, int, struct metapage **);
static void duplicateIXtree(struct super_block *, s64, int, s64 *);
static int diIAGRead(imap_t * imap, int, metapage_t **);
static int copy_from_dinode(dinode_t *, struct inode *);
static void copy_to_dinode(dinode_t *, struct inode *);
static int diIAGRead(struct inomap * imap, int, struct metapage **);
static int copy_from_dinode(struct dinode *, struct inode *);
static void copy_to_dinode(struct dinode *, struct inode *);
/*
* debug code for double-checking inode map
......@@ -98,9 +98,9 @@ static void copy_to_dinode(dinode_t *, struct inode *);
#define DBG_DIALLOC(imap, ino) DBGdiAlloc(imap, ino)
#define DBG_DIFREE(imap, ino) DBGdiFree(imap, ino)
static void *DBGdiInit(imap_t * imap);
static void DBGdiAlloc(imap_t * imap, ino_t ino);
static void DBGdiFree(imap_t * imap, ino_t ino);
static void *DBGdiInit(struct inomap * imap);
static void DBGdiAlloc(struct inomap * imap, ino_t ino);
static void DBGdiFree(struct inomap * imap, ino_t ino);
#else
#define DBG_DIINIT(imap)
#define DBG_DIALLOC(imap, ino)
......@@ -113,7 +113,7 @@ static void DBGdiFree(imap_t * imap, ino_t ino);
* FUNCTION: initialize the incore inode map control structures for
* a fileset or aggregate at initialization time.
*
* the inode map's control structure (dinomap_t) is
* the inode map's control structure (dinomap) is
* brought in from disk and placed in virtual memory.
*
* PARAMETERS:
......@@ -126,16 +126,16 @@ static void DBGdiFree(imap_t * imap, ino_t ino);
*/
int diMount(struct inode *ipimap)
{
imap_t *imap;
metapage_t *mp;
struct inomap *imap;
struct metapage *mp;
int index;
dinomap_t *dinom_le;
struct dinomap *dinom_le;
/*
* allocate/initialize the in-memory inode map control structure
*/
/* allocate the in-memory inode map control structure. */
imap = (imap_t *) kmalloc(sizeof(imap_t), GFP_KERNEL);
imap = (struct inomap *) kmalloc(sizeof(struct inomap), GFP_KERNEL);
if (imap == NULL) {
jERROR(1, ("diMount: kmalloc returned NULL!\n"));
return (ENOMEM);
......@@ -152,7 +152,7 @@ int diMount(struct inode *ipimap)
}
/* copy the on-disk version to the in-memory version. */
dinom_le = (dinomap_t *) mp->data;
dinom_le = (struct dinomap *) mp->data;
imap->im_freeiag = le32_to_cpu(dinom_le->in_freeiag);
imap->im_nextiag = le32_to_cpu(dinom_le->in_nextiag);
atomic_set(&imap->im_numinos, le32_to_cpu(dinom_le->in_numinos));
......@@ -212,7 +212,7 @@ int diMount(struct inode *ipimap)
*/
int diUnmount(struct inode *ipimap, int mounterror)
{
imap_t *imap = JFS_IP(ipimap)->i_imap;
struct inomap *imap = JFS_IP(ipimap)->i_imap;
/*
* update the on-disk inode map control structure
......@@ -240,9 +240,9 @@ int diUnmount(struct inode *ipimap, int mounterror)
*/
int diSync(struct inode *ipimap)
{
dinomap_t *dinom_le;
imap_t *imp = JFS_IP(ipimap)->i_imap;
metapage_t *mp;
struct dinomap *dinom_le;
struct inomap *imp = JFS_IP(ipimap)->i_imap;
struct metapage *mp;
int index;
/*
......@@ -258,8 +258,7 @@ int diSync(struct inode *ipimap)
}
/* copy the in-memory version to the on-disk version */
//memcpy(mp->data, &imp->im_imap,sizeof(dinomap_t));
dinom_le = (dinomap_t *) mp->data;
dinom_le = (struct dinomap *) mp->data;
dinom_le->in_freeiag = cpu_to_le32(imp->im_freeiag);
dinom_le->in_nextiag = cpu_to_le32(imp->im_nextiag);
dinom_le->in_numinos = cpu_to_le32(atomic_read(&imp->im_numinos));
......@@ -330,11 +329,11 @@ int diRead(struct inode *ip)
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
int iagno, ino, extno, rc;
struct inode *ipimap;
dinode_t *dp;
iag_t *iagp;
metapage_t *mp;
struct dinode *dp;
struct iag *iagp;
struct metapage *mp;
s64 blkno, agstart;
imap_t *imap;
struct inomap *imap;
int block_offset;
int inodes_left;
uint pageno;
......@@ -358,7 +357,7 @@ int diRead(struct inode *ip)
return (rc);
}
iagp = (iag_t *) mp->data;
iagp = (struct iag *) mp->data;
/* determine inode extent that holds the disk inode */
ino = ip->i_ino & (INOSPERIAG - 1);
......@@ -410,7 +409,7 @@ int diRead(struct inode *ip)
}
/* locate the disk inode requested */
dp = (dinode_t *) mp->data;
dp = (struct dinode *) mp->data;
dp += rel_inode;
if (ip->i_ino != le32_to_cpu(dp->di_number)) {
......@@ -461,9 +460,9 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
uint address;
dinode_t *dp;
struct dinode *dp;
struct inode *ip;
metapage_t *mp;
struct metapage *mp;
ip = new_inode(sb);
if (ip == NULL) {
......@@ -499,7 +498,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
}
/* get the pointer to the disk inode of interest */
dp = (dinode_t *) (mp->data);
dp = (struct dinode *) (mp->data);
dp += inum % 8; /* 8 inodes per 4K page */
/* copy on-disk inode to in-memory inode */
......@@ -544,9 +543,9 @@ void diWriteSpecial(struct inode *ip, int secondary)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
uint address;
dinode_t *dp;
struct dinode *dp;
ino_t inum = ip->i_ino;
metapage_t *mp;
struct metapage *mp;
ip->i_state &= ~I_DIRTY;
......@@ -571,7 +570,7 @@ void diWriteSpecial(struct inode *ip, int secondary)
}
/* get the pointer to the disk inode of interest */
dp = (dinode_t *) (mp->data);
dp = (struct dinode *) (mp->data);
dp += inum % 8; /* 8 inodes per 4K page */
/* copy on-disk inode to in-memory inode */
......@@ -634,20 +633,20 @@ int diWrite(tid_t tid, struct inode *ip)
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
int rc = 0;
s32 ino;
dinode_t *dp;
struct dinode *dp;
s64 blkno;
int block_offset;
int inodes_left;
metapage_t *mp;
struct metapage *mp;
uint pageno;
int rel_inode;
int dioffset;
struct inode *ipimap;
uint type;
lid_t lid;
tlock_t *ditlck, *tlck;
linelock_t *dilinelock, *ilinelock;
lv_t *lv;
struct tlock *ditlck, *tlck;
struct linelock *dilinelock, *ilinelock;
struct lv *lv;
int n;
ipimap = jfs_ip->ipimap;
......@@ -688,7 +687,7 @@ int diWrite(tid_t tid, struct inode *ip)
return (EIO);
/* get the pointer to the disk inode */
dp = (dinode_t *) mp->data;
dp = (struct dinode *) mp->data;
dp += rel_inode;
dioffset = (ino & (INOSPERPAGE - 1)) << L2DISIZE;
......@@ -700,7 +699,7 @@ int diWrite(tid_t tid, struct inode *ip)
if ((ditlck =
txLock(tid, ipimap, mp, tlckINODE | tlckENTRY)) == NULL)
goto retry;
dilinelock = (linelock_t *) & ditlck->lock;
dilinelock = (struct linelock *) & ditlck->lock;
/*
* copy btree root from in-memory inode to on-disk inode
......@@ -728,14 +727,14 @@ int diWrite(tid_t tid, struct inode *ip)
assert(tlck->type & tlckXTREE);
tlck->type |= tlckBTROOT;
tlck->mp = mp;
ilinelock = (linelock_t *) & tlck->lock;
ilinelock = (struct linelock *) & tlck->lock;
/*
* copy xtree root from inode to dinode:
*/
p = &jfs_ip->i_xtroot;
xp = (xtpage_t *) &dp->di_dirtable;
lv = (lv_t *) & ilinelock->lv;
lv = ilinelock->lv;
for (n = 0; n < ilinelock->index; n++, lv++) {
memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
lv->length << L2XTSLOTSIZE);
......@@ -757,7 +756,7 @@ int diWrite(tid_t tid, struct inode *ip)
type = tlck->type;
tlck->type |= tlckBTROOT;
tlck->mp = mp;
ilinelock = (linelock_t *) & tlck->lock;
ilinelock = (struct linelock *) & tlck->lock;
/*
* regular file: 16 byte (XAD slot) granularity
......@@ -771,7 +770,7 @@ int diWrite(tid_t tid, struct inode *ip)
*/
p = &jfs_ip->i_xtroot;
xp = &dp->di_xtroot;
lv = (lv_t *) & ilinelock->lv;
lv = ilinelock->lv;
for (n = 0; n < ilinelock->index; n++, lv++) {
memcpy(&xp->xad[lv->offset], &p->xad[lv->offset],
lv->length << L2XTSLOTSIZE);
......@@ -795,7 +794,7 @@ int diWrite(tid_t tid, struct inode *ip)
*/
p = (dtpage_t *) &jfs_ip->i_dtroot;
xp = (dtpage_t *) & dp->di_dtroot;
lv = (lv_t *) & ilinelock->lv;
lv = ilinelock->lv;
for (n = 0; n < ilinelock->index; n++, lv++) {
memcpy(&xp->slot[lv->offset], &p->slot[lv->offset],
lv->length << L2DTSLOTSIZE);
......@@ -809,7 +808,7 @@ int diWrite(tid_t tid, struct inode *ip)
* copy inline symlink from in-memory inode to on-disk inode
*/
if (S_ISLNK(ip->i_mode) && ip->i_size < IDATASIZE) {
lv = (lv_t *) & dilinelock->lv[dilinelock->index];
lv = & dilinelock->lv[dilinelock->index];
lv->offset = (dioffset + 2 * 128) >> L2INODESLOTSIZE;
lv->length = 2;
memcpy(&dp->di_fastsymlink, jfs_ip->i_inline, IDATASIZE);
......@@ -820,7 +819,7 @@ int diWrite(tid_t tid, struct inode *ip)
* 128 byte slot granularity
*/
if (test_cflag(COMMIT_Inlineea, ip)) {
lv = (lv_t *) & dilinelock->lv[dilinelock->index];
lv = & dilinelock->lv[dilinelock->index];
lv->offset = (dioffset + 3 * 128) >> L2INODESLOTSIZE;
lv->length = 1;
memcpy(&dp->di_inlineea, jfs_ip->i_inline_ea, INODESLOTSIZE);
......@@ -833,7 +832,7 @@ int diWrite(tid_t tid, struct inode *ip)
* lock/copy inode base: 128 byte slot granularity
*/
// baseDinode:
lv = (lv_t *) & dilinelock->lv[dilinelock->index];
lv = & dilinelock->lv[dilinelock->index];
lv->offset = dioffset >> L2INODESLOTSIZE;
copy_to_dinode(dp, ip);
if (test_and_clear_cflag(COMMIT_Dirtable, ip)) {
......@@ -851,7 +850,7 @@ int diWrite(tid_t tid, struct inode *ip)
*/
if (S_ISDIR(ip->i_mode)
&& (ip->i_ipmnt->i_mntflag & JFS_DASD_ENABLED))
bcopy(&ip->i_DASD, &dp->di_DASD, sizeof(dasd_t));
bcopy(&ip->i_DASD, &dp->di_DASD, sizeof(struct dasd));
#endif /* _JFS_FASTDASD */
/* release the buffer holding the updated on-disk inode.
......@@ -905,18 +904,18 @@ int diFree(struct inode *ip)
{
int rc;
ino_t inum = ip->i_ino;
iag_t *iagp, *aiagp, *biagp, *ciagp, *diagp;
metapage_t *mp, *amp, *bmp, *cmp, *dmp;
struct iag *iagp, *aiagp, *biagp, *ciagp, *diagp;
struct metapage *mp, *amp, *bmp, *cmp, *dmp;
int iagno, ino, extno, bitno, sword, agno;
int back, fwd;
u32 bitmap, mask;
struct inode *ipimap = JFS_SBI(ip->i_sb)->ipimap;
imap_t *imap = JFS_IP(ipimap)->i_imap;
struct inomap *imap = JFS_IP(ipimap)->i_imap;
pxd_t freepxd;
tid_t tid;
struct inode *iplist[3];
tlock_t *tlck;
pxdlock_t *pxdlock;
struct tlock *tlck;
struct pxd_lock *pxdlock;
/*
* This is just to suppress compiler warnings. The same logic that
......@@ -960,7 +959,7 @@ int diFree(struct inode *ip)
AG_UNLOCK(imap, agno);
return (rc);
}
iagp = (iag_t *) mp->data;
iagp = (struct iag *) mp->data;
/* get the inode number and extent number of the inode within
* the iag and the inode number within the extent.
......@@ -1017,7 +1016,7 @@ int diFree(struct inode *ip)
release_metapage(mp);
return (rc);
}
aiagp = (iag_t *) amp->data;
aiagp = (struct iag *) amp->data;
/* make current head point back to the iag.
*/
......@@ -1096,7 +1095,7 @@ int diFree(struct inode *ip)
if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
goto error_out;
aiagp = (iag_t *) amp->data;
aiagp = (struct iag *) amp->data;
}
} else {
/* iag has free extents. check if the addition of a free
......@@ -1113,13 +1112,13 @@ int diFree(struct inode *ip)
if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
goto error_out;
aiagp = (iag_t *) amp->data;
aiagp = (struct iag *) amp->data;
}
if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
if ((rc = diIAGRead(imap, back, &bmp)))
goto error_out;
biagp = (iag_t *) bmp->data;
biagp = (struct iag *) bmp->data;
}
}
}
......@@ -1142,30 +1141,30 @@ int diFree(struct inode *ip)
if (inofreefwd >= 0) {
if (inofreefwd == fwd)
ciagp = (iag_t *) amp->data;
ciagp = (struct iag *) amp->data;
else if (inofreefwd == back)
ciagp = (iag_t *) bmp->data;
ciagp = (struct iag *) bmp->data;
else {
if ((rc =
diIAGRead(imap, inofreefwd, &cmp)))
goto error_out;
assert(cmp != NULL);
ciagp = (iag_t *) cmp->data;
ciagp = (struct iag *) cmp->data;
}
assert(ciagp != NULL);
}
if (inofreeback >= 0) {
if (inofreeback == fwd)
diagp = (iag_t *) amp->data;
diagp = (struct iag *) amp->data;
else if (inofreeback == back)
diagp = (iag_t *) bmp->data;
diagp = (struct iag *) bmp->data;
else {
if ((rc =
diIAGRead(imap, inofreeback, &dmp)))
goto error_out;
assert(dmp != NULL);
diagp = (iag_t *) dmp->data;
diagp = (struct iag *) dmp->data;
}
assert(diagp != NULL);
}
......@@ -1297,7 +1296,7 @@ int diFree(struct inode *ip)
* N.B. linelock is overlaid as freed extent descriptor;
*/
tlck = txLock(tid, ipimap, mp, tlckINODE | tlckFREE);
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
pxdlock->pxd = freepxd;
pxdlock->index = 1;
......@@ -1350,7 +1349,7 @@ int diFree(struct inode *ip)
* the inode.
*/
static inline void
diInitInode(struct inode *ip, int iagno, int ino, int extno, iag_t * iagp)
diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
{
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
......@@ -1384,10 +1383,10 @@ int diAlloc(struct inode *pip, boolean_t dir, struct inode *ip)
int nwords, rem, i, agno;
u32 mask, inosmap, extsmap;
struct inode *ipimap;
metapage_t *mp;
struct metapage *mp;
ino_t inum;
iag_t *iagp;
imap_t *imap;
struct iag *iagp;
struct inomap *imap;
/* get the pointers to the inode map inode and the
* corresponding imap control structure.
......@@ -1436,7 +1435,7 @@ int diAlloc(struct inode *pip, boolean_t dir, struct inode *ip)
IREAD_UNLOCK(ipimap);
return (rc);
}
iagp = (iag_t *) mp->data;
iagp = (struct iag *) mp->data;
/* determine if new inode extent is allowed to be added to the iag.
* new inode extent can be added to the iag if the ag
......@@ -1667,7 +1666,7 @@ int diAlloc(struct inode *pip, boolean_t dir, struct inode *ip)
* EIO - i/o error.
*/
static int
diAllocAG(imap_t * imap, int agno, boolean_t dir, struct inode *ip)
diAllocAG(struct inomap * imap, int agno, boolean_t dir, struct inode *ip)
{
int rc, addext, numfree, numinos;
......@@ -1738,7 +1737,7 @@ diAllocAG(imap_t * imap, int agno, boolean_t dir, struct inode *ip)
* EIO - i/o error.
*/
static int
diAllocAny(imap_t * imap, int agno, boolean_t dir, struct inode *ip)
diAllocAny(struct inomap * imap, int agno, boolean_t dir, struct inode *ip)
{
int ag, rc;
int maxag = JFS_SBI(imap->im_ipimap->i_sb)->bmap->db_maxag;
......@@ -1802,11 +1801,11 @@ diAllocAny(imap_t * imap, int agno, boolean_t dir, struct inode *ip)
* ENOSPC - insufficient disk resources.
* EIO - i/o error.
*/
static int diAllocIno(imap_t * imap, int agno, struct inode *ip)
static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
{
int iagno, ino, rc, rem, extno, sword;
metapage_t *mp;
iag_t *iagp;
struct metapage *mp;
struct iag *iagp;
/* check if there are iags on the ag's free inode list.
*/
......@@ -1822,7 +1821,7 @@ static int diAllocIno(imap_t * imap, int agno, struct inode *ip)
IREAD_UNLOCK(imap->im_ipimap);
return (rc);
}
iagp = (iag_t *) mp->data;
iagp = (struct iag *) mp->data;
/* better be free inodes in this iag if it is on the
* list.
......@@ -1913,11 +1912,11 @@ static int diAllocIno(imap_t * imap, int agno, struct inode *ip)
* ENOSPC - insufficient disk resources.
* EIO - i/o error.
*/
static int diAllocExt(imap_t * imap, int agno, struct inode *ip)
static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
{
int rem, iagno, sword, extno, rc;
metapage_t *mp;
iag_t *iagp;
struct metapage *mp;
struct iag *iagp;
/* check if the ag has any iags with free extents. if not,
* allocate a new iag for the ag.
......@@ -1929,7 +1928,7 @@ static int diAllocExt(imap_t * imap, int agno, struct inode *ip)
if ((rc = diNewIAG(imap, &iagno, agno, &mp))) {
return (rc);
}
iagp = (iag_t *) mp->data;
iagp = (struct iag *) mp->data;
/* set the ag number if this a brand new iag
*/
......@@ -1942,7 +1941,7 @@ static int diAllocExt(imap_t * imap, int agno, struct inode *ip)
if ((rc = diIAGRead(imap, iagno, &mp))) {
assert(0);
}
iagp = (iag_t *) mp->data;
iagp = (struct iag *) mp->data;
}
/* using the free extent summary map, find a free extent.
......@@ -2018,11 +2017,11 @@ static int diAllocExt(imap_t * imap, int agno, struct inode *ip)
* ENOSPC - insufficient disk resources.
* EIO - i/o error.
*/
static int diAllocBit(imap_t * imap, iag_t * iagp, int ino)
static int diAllocBit(struct inomap * imap, struct iag * iagp, int ino)
{
int extno, bitno, agno, sword, rc;
metapage_t *amp, *bmp;
iag_t *aiagp = 0, *biagp = 0;
struct metapage *amp, *bmp;
struct iag *aiagp = 0, *biagp = 0;
u32 mask;
/* check if this is the last free inode within the iag.
......@@ -2038,7 +2037,7 @@ static int diAllocBit(imap_t * imap, iag_t * iagp, int ino)
diIAGRead(imap, le32_to_cpu(iagp->inofreefwd),
&amp)))
return (rc);
aiagp = (iag_t *) amp->data;
aiagp = (struct iag *) amp->data;
}
if ((int) le32_to_cpu(iagp->inofreeback) >= 0) {
......@@ -2050,7 +2049,7 @@ static int diAllocBit(imap_t * imap, iag_t * iagp, int ino)
release_metapage(amp);
return (rc);
}
biagp = (iag_t *) bmp->data;
biagp = (struct iag *) bmp->data;
}
}
......@@ -2158,17 +2157,17 @@ static int diAllocBit(imap_t * imap, iag_t * iagp, int ino)
* ENOSPC - insufficient disk resources.
* EIO - i/o error.
*/
static int diNewExt(imap_t * imap, iag_t * iagp, int extno)
static int diNewExt(struct inomap * imap, struct iag * iagp, int extno)
{
int agno, iagno, fwd, back, freei = 0, sword, rc;
iag_t *aiagp = 0, *biagp = 0, *ciagp = 0;
metapage_t *amp, *bmp, *cmp, *dmp;
struct iag *aiagp = 0, *biagp = 0, *ciagp = 0;
struct metapage *amp, *bmp, *cmp, *dmp;
struct inode *ipimap;
s64 blkno, hint;
int i, j;
u32 mask;
ino_t ino;
dinode_t *dp;
struct dinode *dp;
struct jfs_sb_info *sbi;
/* better have free extents.
......@@ -2196,13 +2195,13 @@ static int diNewExt(imap_t * imap, iag_t * iagp, int extno)
if ((fwd = le32_to_cpu(iagp->extfreefwd)) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
return (rc);
aiagp = (iag_t *) amp->data;
aiagp = (struct iag *) amp->data;
}
if ((back = le32_to_cpu(iagp->extfreeback)) >= 0) {
if ((rc = diIAGRead(imap, back, &bmp)))
goto error_out;
biagp = (iag_t *) bmp->data;
biagp = (struct iag *) bmp->data;
}
} else {
/* the iag has free extents. if all extents are free
......@@ -2216,7 +2215,7 @@ static int diNewExt(imap_t * imap, iag_t * iagp, int extno)
if ((fwd = imap->im_agctl[agno].extfree) >= 0) {
if ((rc = diIAGRead(imap, fwd, &amp)))
goto error_out;
aiagp = (iag_t *) amp->data;
aiagp = (struct iag *) amp->data;
}
}
}
......@@ -2239,7 +2238,7 @@ static int diNewExt(imap_t * imap, iag_t * iagp, int extno)
} else {
if ((rc = diIAGRead(imap, freei, &cmp)))
goto error_out;
ciagp = (iag_t *) cmp->data;
ciagp = (struct iag *) cmp->data;
}
assert(ciagp != NULL);
}
......@@ -2272,7 +2271,7 @@ static int diNewExt(imap_t * imap, iag_t * iagp, int extno)
rc = EIO;
goto error_out;
}
dp = (dinode_t *) dmp->data;
dp = (struct dinode *) dmp->data;
/* initialize the inode number, mode, link count and
* inode extent address.
......@@ -2433,15 +2432,15 @@ static int diNewExt(imap_t * imap, iag_t * iagp, int extno)
* new imap size;
*/
static int
diNewIAG(imap_t * imap, int *iagnop, int agno, metapage_t ** mpp)
diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
{
int rc;
int iagno, i, xlen;
struct inode *ipimap;
struct super_block *sb;
struct jfs_sb_info *sbi;
metapage_t *mp;
iag_t *iagp;
struct metapage *mp;
struct iag *iagp;
s64 xaddr = 0;
s64 blkno;
tid_t tid;
......@@ -2521,10 +2520,10 @@ diNewIAG(imap_t * imap, int *iagnop, int agno, metapage_t ** mpp)
rc = EIO;
goto out;
}
iagp = (iag_t *) mp->data;
iagp = (struct iag *) mp->data;
/* init the iag */
memset(iagp, 0, sizeof(iag_t));
memset(iagp, 0, sizeof(struct iag));
iagp->iagnum = cpu_to_le32(iagno);
iagp->inofreefwd = iagp->inofreeback = -1;
iagp->extfreefwd = iagp->extfreeback = -1;
......@@ -2619,7 +2618,7 @@ diNewIAG(imap_t * imap, int *iagnop, int agno, metapage_t ** mpp)
rc = EIO;
goto out;
}
iagp = (iag_t *) mp->data;
iagp = (struct iag *) mp->data;
/* remove the iag from the iag free list */
imap->im_freeiag = le32_to_cpu(iagp->iagfree);
......@@ -2657,7 +2656,7 @@ diNewIAG(imap_t * imap, int *iagnop, int agno, metapage_t ** mpp)
* 0 - success.
* EIO - i/o error.
*/
static int diIAGRead(imap_t * imap, int iagno, metapage_t ** mpp)
static int diIAGRead(struct inomap * imap, int iagno, struct metapage ** mpp)
{
struct inode *ipimap = imap->im_ipimap;
s64 blkno;
......@@ -2719,15 +2718,15 @@ static int diFindFree(u32 word, int start)
*/
int
diUpdatePMap(struct inode *ipimap,
unsigned long inum, boolean_t is_free, tblock_t * tblk)
unsigned long inum, boolean_t is_free, struct tblock * tblk)
{
int rc;
iag_t *iagp;
metapage_t *mp;
struct iag *iagp;
struct metapage *mp;
int iagno, ino, extno, bitno;
imap_t *imap;
struct inomap *imap;
u32 mask;
log_t *log;
struct jfs_log *log;
int lsn, difft, diffp;
imap = JFS_IP(ipimap)->i_imap;
......@@ -2741,7 +2740,7 @@ diUpdatePMap(struct inode *ipimap,
IREAD_UNLOCK(ipimap);
if (rc)
return (rc);
iagp = (iag_t *) mp->data;
iagp = (struct iag *) mp->data;
/* get the inode number and extent number of the inode within
* the iag and the inode number within the extent.
*/
......@@ -2837,10 +2836,10 @@ diUpdatePMap(struct inode *ipimap,
int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
{
int rc, rcx = 0;
imap_t *imap = JFS_IP(ipimap)->i_imap;
iag_t *iagp = 0, *hiagp = 0;
bmap_t *mp = JFS_SBI(ipbmap->i_sb)->bmap;
metapage_t *bp, *hbp;
struct inomap *imap = JFS_IP(ipimap)->i_imap;
struct iag *iagp = 0, *hiagp = 0;
struct bmap *mp = JFS_SBI(ipbmap->i_sb)->bmap;
struct metapage *bp, *hbp;
int i, n, head;
int numinos, xnuminos = 0, xnumfree = 0;
s64 agstart;
......@@ -2866,7 +2865,7 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
}
/*
* process each iag_t page of the map.
* process each iag page of the map.
*
* rebuild AG Free Inode List, AG Free Inode Extent List;
*/
......@@ -2875,7 +2874,7 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
rcx = rc;
continue;
}
iagp = (iag_t *) bp->data;
iagp = (struct iag *) bp->data;
assert(le32_to_cpu(iagp->iagnum) == i);
/* leave free iag in the free iag list */
......@@ -2910,7 +2909,7 @@ printf("diExtendFS: iag:%d agstart:%Ld agno:%d\n", i, agstart, n);
rcx = rc;
goto nextiag;
}
hiagp = (iag_t *) hbp->data;
hiagp = (struct iag *) hbp->data;
hiagp->inofreeback =
le32_to_cpu(iagp->iagnum);
iagp->inofreefwd = cpu_to_le32(head);
......@@ -2936,7 +2935,7 @@ printf("diExtendFS: iag:%d agstart:%Ld agno:%d\n", i, agstart, n);
rcx = rc;
goto nextiag;
}
hiagp = (iag_t *) hbp->data;
hiagp = (struct iag *) hbp->data;
hiagp->extfreeback = iagp->iagnum;
iagp->extfreefwd = cpu_to_le32(head);
iagp->extfreeback = -1;
......@@ -3019,7 +3018,7 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno,
* 0 - success
* ENOMEM - insufficient memory
*/
static int copy_from_dinode(dinode_t * dip, struct inode *ip)
static int copy_from_dinode(struct dinode * dip, struct inode *ip)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
......@@ -3072,7 +3071,7 @@ static int copy_from_dinode(dinode_t * dip, struct inode *ip)
*
* FUNCTION: Copies inode info from in-memory inode to disk inode
*/
static void copy_to_dinode(dinode_t * dip, struct inode *ip)
static void copy_to_dinode(struct dinode * dip, struct inode *ip)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
......@@ -3112,7 +3111,7 @@ static void copy_to_dinode(dinode_t * dip, struct inode *ip)
/*
* DBGdiInit()
*/
static void *DBGdiInit(imap_t * imap)
static void *DBGdiInit(struct inomap * imap)
{
u32 *dimap;
int size;
......@@ -3126,7 +3125,7 @@ static void *DBGdiInit(imap_t * imap)
/*
* DBGdiAlloc()
*/
static void DBGdiAlloc(imap_t * imap, ino_t ino)
static void DBGdiAlloc(struct inomap * imap, ino_t ino)
{
u32 *dimap = imap->im_DBGdimap;
int w, b;
......@@ -3144,7 +3143,7 @@ static void DBGdiAlloc(imap_t * imap, ino_t ino)
/*
* DBGdiFree()
*/
static void DBGdiFree(imap_t * imap, ino_t ino)
static void DBGdiFree(struct inomap * imap, ino_t ino)
{
u32 *dimap = imap->im_DBGdimap;
int w, b;
......@@ -3159,7 +3158,7 @@ static void DBGdiFree(imap_t * imap, ino_t ino)
dimap[w] &= ~m;
}
static void dump_cp(imap_t * ipimap, char *function, int line)
static void dump_cp(struct inomap * ipimap, char *function, int line)
{
printk("\n* ********* *\nControl Page %s %d\n", function, line);
printk("FreeIAG %d\tNextIAG %d\n", ipimap->im_freeiag,
......@@ -3173,7 +3172,7 @@ static void dump_cp(imap_t * ipimap, char *function, int line)
ipimap->im_agctl[0].numinos, ipimap->im_agctl[0].numfree);
}
static void dump_iag(iag_t * iag, char *function, int line)
static void dump_iag(struct iag * iag, char *function, int line)
{
printk("\n* ********* *\nIAG %s %d\n", function, line);
printk("IagNum %d\tIAG Free %d\n", le32_to_cpu(iag->iagnum),
......
......@@ -59,7 +59,7 @@
/*
* inode allocation group page (per 4096 inodes of an AG)
*/
typedef struct {
struct iag {
s64 agstart; /* 8: starting block of ag */
s32 iagnum; /* 4: inode allocation group number */
s32 inofreefwd; /* 4: ag inode free list forward */
......@@ -87,22 +87,22 @@ typedef struct {
u32 wmap[EXTSPERIAG]; /* 512: working allocation map */
u32 pmap[EXTSPERIAG]; /* 512: persistent allocation map */
pxd_t inoext[EXTSPERIAG]; /* 1024: inode extent addresses */
} iag_t; /* (4096) */
}; /* (4096) */
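Each iag covers INOSPERIAG disk inodes grouped into EXTSPERIAG inode extents, whose allocation state lives in the wmap/pmap words above. A sketch of how an inode number decomposes into iag, extent and bit indices, following the diFree() hunks earlier in this change (INOSPEREXT, the inodes per extent, is assumed to equal the 32-bit word width):
static inline void inum_to_iag_coords(ino_t inum, int *iagno, int *extno,
				      int *bitno)
{
	int ino = inum & (INOSPERIAG - 1);	/* inode within the iag */
	*iagno = inum / INOSPERIAG;		/* which iag page */
	*extno = ino / INOSPEREXT;		/* which inode extent / wmap word */
	*bitno = ino & (INOSPEREXT - 1);	/* bit within that word */
}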
/*
* per AG control information (in inode map control page)
*/
typedef struct {
struct iagctl {
s32 inofree; /* 4: free inode list anchor */
s32 extfree; /* 4: free extent list anchor */
s32 numinos; /* 4: number of backed inodes */
s32 numfree; /* 4: number of free inodes */
} iagctl_t; /* (16) */
}; /* (16) */
/*
* per fileset/aggregate inode map control page
*/
typedef struct {
struct dinomap {
s32 in_freeiag; /* 4: free iag list anchor */
s32 in_nextiag; /* 4: next free iag number */
s32 in_numinos; /* 4: num of backed inodes */
......@@ -112,22 +112,22 @@ typedef struct {
s32 in_diskblock; /* 4: for standalone test driver */
s32 in_maxag; /* 4: for standalone test driver */
u8 pad[2016]; /* 2016: pad to 2048 */
iagctl_t in_agctl[MAXAG]; /* 2048: AG control information */
} dinomap_t; /* (4096) */
struct iagctl in_agctl[MAXAG]; /* 2048: AG control information */
}; /* (4096) */
/*
* In-core inode map control page
*/
typedef struct inomap {
dinomap_t im_imap; /* 4096: inode allocation control */
struct inomap {
struct dinomap im_imap; /* 4096: inode allocation control */
struct inode *im_ipimap; /* 4: ptr to inode for imap */
struct semaphore im_freelock; /* 4: iag free list lock */
struct semaphore im_aglock[MAXAG]; /* 512: per AG locks */
u32 *im_DBGdimap;
atomic_t im_numinos; /* num of backed inodes */
atomic_t im_numfree; /* num of free backed inodes */
} imap_t;
};
#define im_freeiag im_imap.in_freeiag
#define im_nextiag im_imap.in_nextiag
......@@ -145,7 +145,7 @@ extern int diAlloc(struct inode *, boolean_t, struct inode *);
extern int diSync(struct inode *);
/* external references */
extern int diUpdatePMap(struct inode *ipimap, unsigned long inum,
boolean_t is_free, tblock_t * tblk);
boolean_t is_free, struct tblock * tblk);
extern int diExtendFS(struct inode *ipimap, struct inode *ipbmap);
extern int diMount(struct inode *);
extern int diUnmount(struct inode *, int);
......
......@@ -75,7 +75,7 @@ struct jfs_inode_info {
struct inomap *_imap; /* 4: inode map header */
} file;
struct {
dir_table_slot_t _table[12]; /* 96: directory index */
struct dir_table_slot _table[12]; /* 96: dir index */
dtroot_t _dtroot; /* 288: dtree root */
} dir;
struct {
......
......@@ -76,7 +76,7 @@
/*
* lbuf's ready to be redriven. Protected by log_redrive_lock (jfsIO thread)
*/
static lbuf_t *log_redrive_list;
static struct lbuf *log_redrive_list;
static spinlock_t log_redrive_lock = SPIN_LOCK_UNLOCKED;
DECLARE_WAIT_QUEUE_HEAD(jfs_IO_thread_wait);
......@@ -160,32 +160,32 @@ do { \
/*
* external references
*/
extern void txLazyUnlock(tblock_t * tblk);
extern void txLazyUnlock(struct tblock * tblk);
extern int jfs_stop_threads;
extern struct completion jfsIOwait;
/*
* forward references
*/
static int lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd,
tlock_t * tlck);
static int lmNextPage(log_t * log);
static int lmLogFileSystem(log_t * log, char *uuid, int activate);
static int lbmLogInit(log_t * log);
static void lbmLogShutdown(log_t * log);
static lbuf_t *lbmAllocate(log_t * log, int);
static void lbmFree(lbuf_t * bp);
static void lbmfree(lbuf_t * bp);
static int lbmRead(log_t * log, int pn, lbuf_t ** bpp);
static void lbmWrite(log_t * log, lbuf_t * bp, int flag, int cant_block);
static void lbmDirectWrite(log_t * log, lbuf_t * bp, int flag);
static int lbmIOWait(lbuf_t * bp, int flag);
static int lmWriteRecord(struct jfs_log * log, struct tblock * tblk,
struct lrd * lrd, struct tlock * tlck);
static int lmNextPage(struct jfs_log * log);
static int lmLogFileSystem(struct jfs_log * log, char *uuid, int activate);
static int lbmLogInit(struct jfs_log * log);
static void lbmLogShutdown(struct jfs_log * log);
static struct lbuf *lbmAllocate(struct jfs_log * log, int);
static void lbmFree(struct lbuf * bp);
static void lbmfree(struct lbuf * bp);
static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp);
static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, int cant_block);
static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag);
static int lbmIOWait(struct lbuf * bp, int flag);
static bio_end_io_t lbmIODone;
void lbmStartIO(lbuf_t * bp);
void lmGCwrite(log_t * log, int cant_block);
void lbmStartIO(struct lbuf * bp);
void lmGCwrite(struct jfs_log * log, int cant_block);
/*
......@@ -212,11 +212,12 @@ struct lmStat {
*
* note: todo: log error handler
*/
int lmLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
{
int lsn;
int diffp, difft;
metapage_t *mp = NULL;
struct metapage *mp = NULL;
jFYI(1, ("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p\n",
log, tblk, lrd, tlck));
......@@ -330,11 +331,12 @@ int lmLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
* serialization: LOG_LOCK() held on entry/exit
*/
static int
lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
{
int lsn = 0; /* end-of-log address */
lbuf_t *bp; /* dst log page buffer */
logpage_t *lp; /* dst log page */
struct lbuf *bp; /* dst log page buffer */
struct logpage *lp; /* dst log page */
caddr_t dst; /* destination address in log page */
int dstoffset; /* end-of-log offset in log page */
int freespace; /* free space in log page */
......@@ -344,16 +346,16 @@ lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
int nbytes; /* number of bytes to move */
int i;
int len;
linelock_t *linelock;
lv_t *lv;
lvd_t *lvd;
struct linelock *linelock;
struct lv *lv;
struct lvd *lvd;
int l2linesize;
len = 0;
/* retrieve destination log page to write */
bp = (lbuf_t *) log->bp;
lp = (logpage_t *) bp->l_ldata;
bp = (struct lbuf *) log->bp;
lp = (struct logpage *) bp->l_ldata;
dstoffset = log->eor;
/* any log data to write ? */
......@@ -366,7 +368,7 @@ lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
/* retrieve source meta-data page to log */
if (tlck->flag & tlckPAGELOCK) {
p = (caddr_t) (tlck->mp->data);
linelock = (linelock_t *) & tlck->lock;
linelock = (struct linelock *) & tlck->lock;
}
/* retrieve source in-memory inode to log */
else if (tlck->flag & tlckINODELOCK) {
......@@ -374,14 +376,14 @@ lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
p = (caddr_t) &JFS_IP(tlck->ip)->i_dtroot;
else
p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot;
linelock = (linelock_t *) & tlck->lock;
linelock = (struct linelock *) & tlck->lock;
}
#ifdef _JFS_WIP
else if (tlck->flag & tlckINLINELOCK) {
inlinelock = (inlinelock_t *) & tlck;
inlinelock = (struct inlinelock *) & tlck;
p = (caddr_t) & inlinelock->pxd;
linelock = (linelock_t *) & tlck;
linelock = (struct linelock *) & tlck;
}
#endif /* _JFS_WIP */
else {
......@@ -393,7 +395,7 @@ lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
moveData:
ASSERT(linelock->index <= linelock->maxcnt);
lv = (lv_t *) & linelock->lv;
lv = linelock->lv;
for (i = 0; i < linelock->index; i++, lv++) {
if (lv->length == 0)
continue;
......@@ -404,7 +406,7 @@ lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
lmNextPage(log);
bp = log->bp;
lp = (logpage_t *) bp->l_ldata;
lp = (struct logpage *) bp->l_ldata;
dstoffset = LOGPHDRSIZE;
}
......@@ -428,8 +430,8 @@ lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
/* page becomes full: move on to next page */
lmNextPage(log);
bp = (lbuf_t *) log->bp;
lp = (logpage_t *) bp->l_ldata;
bp = (struct lbuf *) log->bp;
lp = (struct logpage *) bp->l_ldata;
dstoffset = LOGPHDRSIZE;
srclen -= nbytes;
......@@ -440,7 +442,7 @@ lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
* move log vector descriptor
*/
len += 4;
lvd = (lvd_t *) ((caddr_t) lp + dstoffset);
lvd = (struct lvd *) ((caddr_t) lp + dstoffset);
lvd->offset = cpu_to_le16(lv->offset);
lvd->length = cpu_to_le16(lv->length);
dstoffset += 4;
......@@ -450,7 +452,7 @@ lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
}
if ((i = linelock->next)) {
linelock = (linelock_t *) lid_to_tlock(i);
linelock = (struct linelock *) lid_to_tlock(i);
goto moveData;
}
......@@ -534,8 +536,8 @@ lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
/* page becomes full: move on to next page */
lmNextPage(log);
bp = (lbuf_t *) log->bp;
lp = (logpage_t *) bp->l_ldata;
bp = (struct lbuf *) log->bp;
lp = (struct logpage *) bp->l_ldata;
dstoffset = LOGPHDRSIZE;
src += nbytes;
}
......@@ -555,21 +557,21 @@ lmWriteRecord(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
*
* serialization: LOG_LOCK() held on entry/exit
*/
static int lmNextPage(log_t * log)
static int lmNextPage(struct jfs_log * log)
{
logpage_t *lp;
struct logpage *lp;
int lspn; /* log sequence page number */
int pn; /* current page number */
lbuf_t *bp;
lbuf_t *nextbp;
tblock_t *tblk;
struct lbuf *bp;
struct lbuf *nextbp;
struct tblock *tblk;
jFYI(1, ("lmNextPage\n"));
/* get current log page number and log sequence page number */
pn = log->page;
bp = log->bp;
lp = (logpage_t *) bp->l_ldata;
lp = (struct logpage *) bp->l_ldata;
lspn = le32_to_cpu(lp->h.page);
LOGGC_LOCK(log);
......@@ -637,7 +639,7 @@ static int lmNextPage(log_t * log)
log->bp = nextbp;
/* initialize next log page */
lp = (logpage_t *) nextbp->l_ldata;
lp = (struct logpage *) nextbp->l_ldata;
lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
......@@ -661,7 +663,7 @@ static int lmNextPage(log_t * log)
* transaction blocks on the commit queue.
* N.B. LOG_LOCK is NOT held during lmGroupCommit().
*/
int lmGroupCommit(log_t * log, tblock_t * tblk)
int lmGroupCommit(struct jfs_log * log, struct tblock * tblk)
{
int rc = 0;
......@@ -738,13 +740,13 @@ int lmGroupCommit(log_t * log, tblock_t * tblk)
* LOGGC_LOCK must be held by caller.
* N.B. LOG_LOCK is NOT held during lmGroupCommit().
*/
void lmGCwrite(log_t * log, int cant_write)
void lmGCwrite(struct jfs_log * log, int cant_write)
{
lbuf_t *bp;
logpage_t *lp;
struct lbuf *bp;
struct logpage *lp;
int gcpn; /* group commit page number */
tblock_t *tblk;
tblock_t *xtblk;
struct tblock *tblk;
struct tblock *xtblk;
/*
* build the commit group of a log page
......@@ -768,8 +770,8 @@ void lmGCwrite(log_t * log, int cant_write)
/*
* pageout to commit transactions on the log page.
*/
bp = (lbuf_t *) tblk->bp;
lp = (logpage_t *) bp->l_ldata;
bp = (struct lbuf *) tblk->bp;
lp = (struct logpage *) bp->l_ldata;
/* is page already full ? */
if (tblk->flag & tblkGC_EOP) {
/* mark page to free at end of group commit of the page */
......@@ -806,12 +808,12 @@ void lmGCwrite(log_t * log, int cant_write)
* NOTE:
* This routine is called at interrupt time by lbmIODone
*/
void lmPostGC(lbuf_t * bp)
void lmPostGC(struct lbuf * bp)
{
unsigned long flags;
log_t *log = bp->l_log;
logpage_t *lp;
tblock_t *tblk;
struct jfs_log *log = bp->l_log;
struct logpage *lp;
struct tblock *tblk;
//LOGGC_LOCK(log);
spin_lock_irqsave(&log->gclock, flags);
......@@ -866,7 +868,7 @@ void lmPostGC(lbuf_t * bp)
*/
else if (tblk->flag & tblkGC_EOP) {
/* finalize the page */
lp = (logpage_t *) bp->l_ldata;
lp = (struct logpage *) bp->l_ldata;
bp->l_ceor = bp->l_eor;
lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
jEVENT(0, ("lmPostGC: calling lbmWrite\n"));
......@@ -921,14 +923,14 @@ void lmPostGC(lbuf_t * bp)
*
* serialization: LOG_LOCK() held on entry/exit
*/
int lmLogSync(log_t * log, int nosyncwait)
int lmLogSync(struct jfs_log * log, int nosyncwait)
{
int logsize;
int written; /* written since last syncpt */
int free; /* free space left available */
int delta; /* additional delta to write normally */
int more; /* additional write granted */
lrd_t lrd;
struct lrd lrd;
int lsn;
struct logsyncblk *lp;
......@@ -1056,15 +1058,15 @@ int lmLogSync(log_t * log, int nosyncwait)
*
* serialization:
*/
int lmLogOpen(struct super_block *sb, log_t ** logptr)
int lmLogOpen(struct super_block *sb, struct jfs_log ** logptr)
{
int rc;
struct block_device *bdev;
log_t *log;
struct jfs_log *log;
if (!(log = kmalloc(sizeof(log_t), GFP_KERNEL)))
if (!(log = kmalloc(sizeof(struct jfs_log), GFP_KERNEL)))
return ENOMEM;
memset(log, 0, sizeof(log_t));
memset(log, 0, sizeof(struct jfs_log));
init_waitqueue_head(&log->syncwait);
log->sb = sb; /* This should be a list */
......@@ -1173,14 +1175,14 @@ int lmLogOpen(struct super_block *sb, log_t ** logptr)
*
* serialization: single first open thread
*/
int lmLogInit(log_t * log)
int lmLogInit(struct jfs_log * log)
{
int rc = 0;
lrd_t lrd;
logsuper_t *logsuper;
lbuf_t *bpsuper;
lbuf_t *bp;
logpage_t *lp;
struct lrd lrd;
struct logsuper *logsuper;
struct lbuf *bpsuper;
struct lbuf *bp;
struct logpage *lp;
int lsn;
jFYI(1, ("lmLogInit: log:0x%p\n", log));
......@@ -1206,7 +1208,7 @@ int lmLogInit(log_t * log)
if ((rc = lbmRead(log, 1, &bpsuper)))
goto errout10;
logsuper = (logsuper_t *) bpsuper->l_ldata;
logsuper = (struct logsuper *) bpsuper->l_ldata;
if (logsuper->magic != cpu_to_le32(LOGMAGIC)) {
jERROR(1, ("*** Log Format Error ! ***\n"));
......@@ -1252,7 +1254,7 @@ int lmLogInit(log_t * log)
if ((rc = lbmRead(log, log->page, &bp)))
goto errout20;
lp = (logpage_t *) bp->l_ldata;
lp = (struct logpage *) bp->l_ldata;
jFYI(1, ("lmLogInit: lsn:0x%x page:%d eor:%d:%d\n",
le32_to_cpu(logsuper->end), log->page, log->eor,
......@@ -1291,7 +1293,7 @@ int lmLogInit(log_t * log)
lsn = lmWriteRecord(log, NULL, &lrd, NULL);
bp = log->bp;
bp->l_ceor = bp->l_eor;
lp = (logpage_t *) bp->l_ldata;
lp = (struct logpage *) bp->l_ldata;
lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0);
if ((rc = lbmIOWait(bp, 0)))
......@@ -1363,7 +1365,7 @@ int lmLogInit(log_t * log)
*
* serialization:
*/
int lmLogClose(struct super_block *sb, log_t * log)
int lmLogClose(struct super_block *sb, struct jfs_log * log)
{
struct block_device *bdev = log->bdev;
int rc;
......@@ -1400,7 +1402,7 @@ int lmLogClose(struct super_block *sb, log_t * log)
*
* FUNCTION: wait for all outstanding log records to be written to disk
*/
void lmLogWait(log_t *log)
void lmLogWait(struct jfs_log *log)
{
int i;
......@@ -1438,15 +1440,15 @@ void lmLogWait(log_t *log)
*
* serialization: single last close thread
*/
int lmLogShutdown(log_t * log)
int lmLogShutdown(struct jfs_log * log)
{
int rc;
lrd_t lrd;
struct lrd lrd;
int lsn;
logsuper_t *logsuper;
lbuf_t *bpsuper;
lbuf_t *bp;
logpage_t *lp;
struct logsuper *logsuper;
struct lbuf *bpsuper;
struct lbuf *bp;
struct logpage *lp;
jFYI(1, ("lmLogShutdown: log:0x%p\n", log));
......@@ -1469,7 +1471,7 @@ int lmLogShutdown(log_t * log)
lrd.log.syncpt.sync = 0;
lsn = lmWriteRecord(log, NULL, &lrd, NULL);
bp = log->bp;
lp = (logpage_t *) bp->l_ldata;
lp = (struct logpage *) bp->l_ldata;
lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0);
lbmIOWait(log->bp, lbmFREE);
......@@ -1482,7 +1484,7 @@ int lmLogShutdown(log_t * log)
if ((rc = lbmRead(log, 1, &bpsuper)))
goto out;
logsuper = (logsuper_t *) bpsuper->l_ldata;
logsuper = (struct logsuper *) bpsuper->l_ldata;
logsuper->state = cpu_to_le32(LOGREDONE);
logsuper->end = cpu_to_le32(lsn);
lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
......@@ -1518,12 +1520,12 @@ int lmLogShutdown(log_t * log)
* RETURN: 0 - success
* errors returned by vms_iowait().
*/
static int lmLogFileSystem(log_t * log, char *uuid, int activate)
static int lmLogFileSystem(struct jfs_log * log, char *uuid, int activate)
{
int rc = 0;
int i;
logsuper_t *logsuper;
lbuf_t *bpsuper;
struct logsuper *logsuper;
struct lbuf *bpsuper;
/*
* insert/remove file system device to/from the log's active file system list.
......@@ -1531,7 +1533,7 @@ static int lmLogFileSystem(log_t * log, char *uuid, int activate)
if ((rc = lbmRead(log, 1, &bpsuper)))
return rc;
logsuper = (logsuper_t *) bpsuper->l_ldata;
logsuper = (struct logsuper *) bpsuper->l_ldata;
if (activate) {
for (i = 0; i < MAX_ACTIVE; i++)
if (!memcmp(logsuper->active[i].uuid, NULL_UUID, 16)) {
......@@ -1589,10 +1591,10 @@ static int lmLogFileSystem(log_t * log, char *uuid, int activate)
*
* initialize per log I/O setup at lmLogInit()
*/
static int lbmLogInit(log_t * log)
static int lbmLogInit(struct jfs_log * log)
{ /* log inode */
int i;
lbuf_t *lbuf;
struct lbuf *lbuf;
jFYI(1, ("lbmLogInit: log:0x%p\n", log));
......@@ -1616,7 +1618,7 @@ static int lbmLogInit(log_t * log)
log->lbuf_free = NULL;
for (i = 0; i < LOGPAGES; i++) {
lbuf = kmalloc(sizeof(lbuf_t), GFP_KERNEL);
lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL);
if (lbuf == 0)
goto error;
lbuf->l_ldata = (char *) __get_free_page(GFP_KERNEL);
......@@ -1644,15 +1646,15 @@ static int lbmLogInit(log_t * log)
*
* finalize per log I/O setup at lmLogShutdown()
*/
static void lbmLogShutdown(log_t * log)
static void lbmLogShutdown(struct jfs_log * log)
{
lbuf_t *lbuf;
struct lbuf *lbuf;
jFYI(1, ("lbmLogShutdown: log:0x%p\n", log));
lbuf = log->lbuf_free;
while (lbuf) {
lbuf_t *next = lbuf->l_freelist;
struct lbuf *next = lbuf->l_freelist;
free_page((unsigned long) lbuf->l_ldata);
kfree(lbuf);
lbuf = next;
......@@ -1667,9 +1669,9 @@ static void lbmLogShutdown(log_t * log)
*
* allocate an empty log buffer
*/
static lbuf_t *lbmAllocate(log_t * log, int pn)
static struct lbuf *lbmAllocate(struct jfs_log * log, int pn)
{
lbuf_t *bp;
struct lbuf *bp;
unsigned long flags;
/*
......@@ -1698,7 +1700,7 @@ static lbuf_t *lbmAllocate(log_t * log, int pn)
*
* release a log buffer to freelist
*/
static void lbmFree(lbuf_t * bp)
static void lbmFree(struct lbuf * bp)
{
unsigned long flags;
......@@ -1709,9 +1711,9 @@ static void lbmFree(lbuf_t * bp)
LCACHE_UNLOCK(flags);
}
static void lbmfree(lbuf_t * bp)
static void lbmfree(struct lbuf * bp)
{
log_t *log = bp->l_log;
struct jfs_log *log = bp->l_log;
assert(bp->l_wqnext == NULL);
......@@ -1737,7 +1739,7 @@ static void lbmfree(lbuf_t * bp)
* NOTES:
* Takes log_redrive_lock.
*/
static inline void lbmRedrive(lbuf_t *bp)
static inline void lbmRedrive(struct lbuf *bp)
{
unsigned long flags;
......@@ -1753,10 +1755,10 @@ static inline void lbmRedrive(lbuf_t *bp)
/*
* lbmRead()
*/
static int lbmRead(log_t * log, int pn, lbuf_t ** bpp)
static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
{
struct bio *bio;
lbuf_t *bp;
struct lbuf *bp;
/*
* allocate a log buffer
......@@ -1804,9 +1806,10 @@ static int lbmRead(log_t * log, int pn, lbuf_t ** bpp)
* LOGGC_LOCK() serializes lbmWrite() by lmNextPage() and lmGroupCommit().
* LCACHE_LOCK() serializes xflag between lbmWrite() and lbmIODone()
*/
static void lbmWrite(log_t * log, lbuf_t * bp, int flag, int cant_block)
static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag,
int cant_block)
{
lbuf_t *tail;
struct lbuf *tail;
unsigned long flags;
jFYI(1, ("lbmWrite: bp:0x%p flag:0x%x pn:0x%x\n",
......@@ -1872,7 +1875,7 @@ static void lbmWrite(log_t * log, lbuf_t * bp, int flag, int cant_block)
* initiate pageout bypassing write queue for sidestream
* (e.g., log superblock) write;
*/
static void lbmDirectWrite(log_t * log, lbuf_t * bp, int flag)
static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag)
{
jEVENT(0, ("lbmDirectWrite: bp:0x%p flag:0x%x pn:0x%x\n",
bp, flag, bp->l_pn));
......@@ -1902,10 +1905,10 @@ static void lbmDirectWrite(log_t * log, lbuf_t * bp, int flag)
*
* serialization: LCACHE_LOCK() is NOT held during log i/o;
*/
void lbmStartIO(lbuf_t * bp)
void lbmStartIO(struct lbuf * bp)
{
struct bio *bio;
log_t *log = bp->l_log;
struct jfs_log *log = bp->l_log;
jFYI(1, ("lbmStartIO\n"));
......@@ -1935,7 +1938,7 @@ void lbmStartIO(lbuf_t * bp)
/*
* lbmIOWait()
*/
static int lbmIOWait(lbuf_t * bp, int flag)
static int lbmIOWait(struct lbuf * bp, int flag)
{
unsigned long flags;
int rc = 0;
......@@ -1968,9 +1971,9 @@ static int lbmIOWait(lbuf_t * bp, int flag)
*/
static void lbmIODone(struct bio *bio)
{
lbuf_t *bp = bio->bi_private;
lbuf_t *nextbp, *tail;
log_t *log;
struct lbuf *bp = bio->bi_private;
struct lbuf *nextbp, *tail;
struct jfs_log *log;
unsigned long flags;
/*
......@@ -2109,7 +2112,7 @@ static void lbmIODone(struct bio *bio)
int jfsIOWait(void *arg)
{
lbuf_t *bp;
struct lbuf *bp;
jFYI(1, ("jfsIOWait is here!\n"));
......@@ -2167,16 +2170,16 @@ int jfsIOWait(void *arg)
* XXX: We're synchronously writing one page at a time. This needs to
* be improved by writing multiple pages at once.
*/
int lmLogFormat(log_t *log, s64 logAddress, int logSize)
int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize)
{
int rc = -EIO;
struct jfs_sb_info *sbi = JFS_SBI(log->sb);
logsuper_t *logsuper;
logpage_t *lp;
struct logsuper *logsuper;
struct logpage *lp;
int lspn; /* log sequence page number */
struct lrd *lrd_ptr;
int npages = 0;
lbuf_t *bp;
struct lbuf *bp;
jFYI(0, ("lmLogFormat: logAddress:%Ld logSize:%d\n",
(long long)logAddress, logSize));
......@@ -2198,7 +2201,7 @@ int lmLogFormat(log_t *log, s64 logAddress, int logSize)
/*
* init log superblock: log page 1
*/
logsuper = (logsuper_t *) bp->l_ldata;
logsuper = (struct logsuper *) bp->l_ldata;
logsuper->magic = cpu_to_le32(LOGMAGIC);
logsuper->version = cpu_to_le32(LOGVERSION);
......@@ -2237,7 +2240,7 @@ int lmLogFormat(log_t *log, s64 logAddress, int logSize)
* the succeeding log pages will have ascending order of
* the lspn starting from 0, ... (N-2)
*/
lp = (logpage_t *) bp->l_ldata;
lp = (struct logpage *) bp->l_ldata;
/*
* initialize 1st log page to be written: lpsn = N - 1,
* a SYNCPT log record is written to this page
......
......@@ -61,7 +61,7 @@
#define MAX_ACTIVE 128 /* Max active file systems sharing log */
typedef struct {
struct logsuper {
u32 magic; /* 4: log lv identifier */
s32 version; /* 4: version number */
s32 serial; /* 4: log open/mount counter */
......@@ -78,7 +78,7 @@ typedef struct {
struct {
char uuid[16];
} active[MAX_ACTIVE]; /* 2048: active file systems list */
} logsuper_t;
};
#define NULL_UUID "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
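The active[] table at the end of the log superblock records which file systems currently share this log: lmLogFileSystem() (earlier in this patch) claims the first all-zero slot on activation and zeroes the matching slot on deactivation. A minimal userspace sketch of that slot-management pattern, reusing the MAX_ACTIVE and NULL_UUID values defined here:
#include <string.h>
#define MAX_ACTIVE      128
#define NULL_UUID       "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
struct active_entry { char uuid[16]; };
/* illustrative only: mirror of the insert/remove loop in lmLogFileSystem() */
static int set_active(struct active_entry *active, const char *uuid, int activate)
{
        int i;
        for (i = 0; i < MAX_ACTIVE; i++) {
                if (activate && !memcmp(active[i].uuid, NULL_UUID, 16)) {
                        memcpy(active[i].uuid, uuid, 16);       /* claim free slot */
                        return 0;
                }
                if (!activate && !memcmp(active[i].uuid, uuid, 16)) {
                        memset(active[i].uuid, 0, 16);          /* release slot */
                        return 0;
                }
        }
        return -1;      /* table full, or uuid not found */
}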
......@@ -119,7 +119,7 @@ typedef struct {
* the two and h.eor and t.eor set to 8 (i.e. empty page). if (only)
* h.eor != t.eor they were set to the smaller of their two values.
*/
typedef struct {
struct logpage {
struct { /* header */
s32 page; /* 4: log sequence page number */
s16 rsrvd; /* 2: */
......@@ -133,7 +133,7 @@ typedef struct {
s16 rsrvd; /* 2: */
s16 eor; /* 2: normally the same as h.eor */
} t;
} logpage_t;
};
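Per the comment above, if a crash leaves h.eor and t.eor disagreeing, only the smaller of the two offsets can be trusted, and a freshly formatted page carries LOGPHDRSIZE (8) in both. A hedged sketch of that reconciliation (illustrative only; the real recovery logic lives in logredo, which is not part of this patch):
/* illustrative only: pick the trustworthy end-of-log offset within a page */
static int logpage_usable_eor(struct logpage *lp)
{
        int heor = le16_to_cpu(lp->h.eor);
        int teor = le16_to_cpu(lp->t.eor);
        return (heor < teor) ? heor : teor;
}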
#define LOGPHDRSIZE 8 /* log page header size */
#define LOGPTLRSIZE 8 /* log page trailer size */
......@@ -198,7 +198,7 @@ typedef struct {
#define LOG_FREEPXD 0x0001
typedef struct lrd {
struct lrd {
/*
* type independent area
*/
......@@ -349,23 +349,23 @@ typedef struct lrd {
* no type-dependent information
*/
} log;
} lrd_t; /* (36) */
}; /* (36) */
#define LOGRDSIZE (sizeof(struct lrd))
/*
* line vector descriptor
*/
typedef struct {
struct lvd {
s16 offset;
s16 length;
} lvd_t;
};
/*
* log logical volume
*/
typedef struct jfs_log {
struct jfs_log {
struct super_block *sb; /* 4: This is used to sync metadata
* before writing syncpt. Will
......@@ -416,7 +416,7 @@ typedef struct jfs_log {
struct lbuf *wqueue; /* 4: log pageout queue */
int count; /* 4: count */
char uuid[16]; /* 16: 128-bit uuid of log device */
} log_t;
};
/*
* Log flag
......@@ -428,10 +428,10 @@ typedef struct jfs_log {
/*
* group commit flag
*/
/* log_t */
/* jfs_log */
#define logGC_PAGEOUT 0x00000001
/* tblock_t/lbuf_t */
/* tblock/lbuf */
#define tblkGC_QUEUE 0x0001
#define tblkGC_READY 0x0002
#define tblkGC_COMMIT 0x0004
......@@ -446,8 +446,8 @@ typedef struct jfs_log {
/*
* log cache buffer header
*/
typedef struct lbuf {
log_t *l_log; /* 4: log associated with buffer */
struct lbuf {
struct jfs_log *l_log; /* 4: log associated with buffer */
/*
* data buffer base area
......@@ -466,7 +466,7 @@ typedef struct lbuf {
wait_queue_head_t l_ioevent; /* 4: i/o done event */
struct page *l_page; /* The page itself */
} lbuf_t;
};
/* Reuse l_freelist for redrive list */
#define l_redrive_next l_freelist
......@@ -474,15 +474,15 @@ typedef struct lbuf {
/*
* logsynclist block
*
* common logsyncblk prefix for jbuf_t and tblock_t
* common logsyncblk prefix for jbuf_t and tblock
*/
typedef struct logsyncblk {
struct logsyncblk {
u16 xflag; /* flags */
u16 flag; /* only meaninful in tblock_t */
u16 flag; /* only meaningful in tblock */
lid_t lid; /* lock id */
s32 lsn; /* log sequence number */
struct list_head synclist; /* log sync list link */
} logsyncblk_t;
};
/*
* logsynclist serialization (per log)
......@@ -500,12 +500,12 @@ typedef struct logsyncblk {
diff += (log)->logsize;\
}
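The fragment above is the tail of a wrap-around distance computation on the circular log: a difference that comes out negative is folded back by adding logsize. The same idea as a plain, self-contained function:
/* distance from 'from' to 'to' on a circular log of 'logsize' bytes */
static int log_distance(int from, int to, int logsize)
{
        int diff = to - from;
        if (diff < 0)           /* 'to' has wrapped past the end of the log */
                diff += logsize;
        return diff;
}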
extern int lmLogOpen(struct super_block *sb, log_t ** log);
extern void lmLogWait(log_t * log);
extern int lmLogClose(struct super_block *sb, log_t * log);
extern int lmLogSync(log_t * log, int nosyncwait);
extern int lmLogShutdown(log_t * log);
extern int lmLogInit(log_t * log);
extern int lmLogFormat(log_t *log, s64 logAddress, int logSize);
extern int lmLogOpen(struct super_block *sb, struct jfs_log ** log);
extern void lmLogWait(struct jfs_log * log);
extern int lmLogClose(struct super_block *sb, struct jfs_log * log);
extern int lmLogSync(struct jfs_log * log, int nosyncwait);
extern int lmLogShutdown(struct jfs_log * log);
extern int lmLogInit(struct jfs_log * log);
extern int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize);
#endif /* _H_JFS_LOGMGR */
......@@ -40,7 +40,7 @@ struct {
#define HASH_BITS 10 /* This makes hash_table 1 4K page */
#define HASH_SIZE (1 << HASH_BITS)
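HASH_BITS of 10 gives 1024 buckets; how many pages of pointers that takes depends on sizeof(void *), which is why metapage_init() below sizes the table with a small loop rather than a constant. A sketch of that calculation:
/* smallest page order such that (PAGE_SIZE << order) holds nbuckets pointers */
static unsigned long hash_pages_order(unsigned long nbuckets)
{
        unsigned long order = 0;
        while ((PAGE_SIZE << order) / sizeof(void *) < nbuckets)
                order++;
        return order;
}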
static metapage_t **hash_table = NULL;
static struct metapage **hash_table = NULL;
static unsigned long hash_order;
......@@ -92,7 +92,7 @@ static mempool_t *metapage_mempool;
static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
metapage_t *mp = (metapage_t *)foo;
struct metapage *mp = (struct metapage *)foo;
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR) {
......@@ -107,12 +107,12 @@ static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
}
}
static inline metapage_t *alloc_metapage(int no_wait)
static inline struct metapage *alloc_metapage(int no_wait)
{
return mempool_alloc(metapage_mempool, no_wait ? GFP_ATOMIC : GFP_NOFS);
}
static inline void free_metapage(metapage_t *mp)
static inline void free_metapage(struct metapage *mp)
{
mp->flag = 0;
set_bit(META_free, &mp->flag);
......@@ -134,8 +134,8 @@ int __init metapage_init(void)
/*
* Allocate the metapage structures
*/
metapage_cache = kmem_cache_create("jfs_mp", sizeof(metapage_t), 0, 0,
init_once, NULL);
metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
0, 0, init_once, NULL);
if (metapage_cache == NULL)
return -ENOMEM;
......@@ -153,7 +153,7 @@ int __init metapage_init(void)
((PAGE_SIZE << hash_order) / sizeof(void *)) < HASH_SIZE;
hash_order++);
hash_table =
(metapage_t **) __get_free_pages(GFP_KERNEL, hash_order);
(struct metapage **) __get_free_pages(GFP_KERNEL, hash_order);
assert(hash_table);
memset(hash_table, 0, PAGE_SIZE << hash_order);
......@@ -169,8 +169,8 @@ void metapage_exit(void)
/*
* Basically same hash as in pagemap.h, but using our hash table
*/
static metapage_t **meta_hash(struct address_space *mapping,
unsigned long index)
static struct metapage **meta_hash(struct address_space *mapping,
unsigned long index)
{
#define i (((unsigned long)mapping)/ \
(sizeof(struct inode) & ~(sizeof(struct inode) -1 )))
......@@ -180,11 +180,11 @@ static metapage_t **meta_hash(struct address_space *mapping,
#undef s
}
static metapage_t *search_hash(metapage_t ** hash_ptr,
struct address_space *mapping,
static struct metapage *search_hash(struct metapage ** hash_ptr,
struct address_space *mapping,
unsigned long index)
{
metapage_t *ptr;
struct metapage *ptr;
for (ptr = *hash_ptr; ptr; ptr = ptr->hash_next) {
if ((ptr->mapping == mapping) && (ptr->index == index))
......@@ -194,7 +194,7 @@ static metapage_t *search_hash(metapage_t ** hash_ptr,
return NULL;
}
static void add_to_hash(metapage_t * mp, metapage_t ** hash_ptr)
static void add_to_hash(struct metapage * mp, struct metapage ** hash_ptr)
{
if (*hash_ptr)
(*hash_ptr)->hash_prev = mp;
......@@ -204,7 +204,7 @@ static void add_to_hash(metapage_t * mp, metapage_t ** hash_ptr)
*hash_ptr = mp;
}
static void remove_from_hash(metapage_t * mp, metapage_t ** hash_ptr)
static void remove_from_hash(struct metapage * mp, struct metapage ** hash_ptr)
{
if (mp->hash_prev)
mp->hash_prev->hash_next = mp->hash_next;
......@@ -217,15 +217,15 @@ static void remove_from_hash(metapage_t * mp, metapage_t ** hash_ptr)
mp->hash_next->hash_prev = mp->hash_prev;
}
metapage_t *__get_metapage(struct inode *inode,
unsigned long lblock, unsigned int size,
int absolute, unsigned long new)
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
unsigned int size, int absolute,
unsigned long new)
{
metapage_t **hash_ptr;
struct metapage **hash_ptr;
int l2BlocksPerPage;
int l2bsize;
struct address_space *mapping;
metapage_t *mp;
struct metapage *mp;
unsigned long page_index;
unsigned long page_offset;
......@@ -289,7 +289,7 @@ metapage_t *__get_metapage(struct inode *inode,
}
}
if (!mp) {
metapage_t *mp2;
struct metapage *mp2;
spin_unlock(&meta_lock);
mp = mempool_alloc(metapage_mempool, GFP_NOFS);
......@@ -358,7 +358,7 @@ metapage_t *__get_metapage(struct inode *inode,
return NULL;
}
void hold_metapage(metapage_t * mp, int force)
void hold_metapage(struct metapage * mp, int force)
{
spin_lock(&meta_lock);
......@@ -374,7 +374,7 @@ void hold_metapage(metapage_t * mp, int force)
spin_unlock(&meta_lock);
}
static void __write_metapage(metapage_t * mp)
static void __write_metapage(struct metapage * mp)
{
int l2bsize = mp->mapping->host->i_blkbits;
int l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
......@@ -420,7 +420,7 @@ static void __write_metapage(metapage_t * mp)
jFYI(1, ("__write_metapage done\n"));
}
static inline void sync_metapage(metapage_t *mp)
static inline void sync_metapage(struct metapage *mp)
{
struct page *page = mp->page;
......@@ -435,9 +435,9 @@ static inline void sync_metapage(metapage_t *mp)
page_cache_release(page);
}
void release_metapage(metapage_t * mp)
void release_metapage(struct metapage * mp)
{
log_t *log;
struct jfs_log *log;
jFYI(1,
("release_metapage: mp = 0x%p, flag = 0x%lx\n", mp,
......@@ -502,11 +502,11 @@ void release_metapage(metapage_t * mp)
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
metapage_t **hash_ptr;
struct metapage **hash_ptr;
unsigned long lblock;
int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
struct address_space *mapping = ip->i_mapping;
metapage_t *mp;
struct metapage *mp;
struct page *page;
/*
......@@ -538,11 +538,11 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
void invalidate_inode_metapages(struct inode *inode)
{
struct list_head *ptr;
metapage_t *mp;
struct metapage *mp;
spin_lock(&meta_lock);
list_for_each(ptr, &JFS_IP(inode)->mp_list) {
mp = list_entry(ptr, metapage_t, inode_list);
mp = list_entry(ptr, struct metapage, inode_list);
clear_bit(META_dirty, &mp->flag);
set_bit(META_discard, &mp->flag);
kunmap(mp->page);
......
......@@ -21,7 +21,7 @@
#include <linux/pagemap.h>
typedef struct metapage {
struct metapage {
/* Common logsyncblk prefix (see jfs_logmgr.h) */
u16 xflag;
u16 unused;
......@@ -55,7 +55,7 @@ typedef struct metapage {
int clsn;
atomic_t nohomeok;
struct jfs_log *log;
} metapage_t;
};
/* metapage flag */
#define META_locked 0
......@@ -69,7 +69,7 @@ typedef struct metapage {
#define mark_metapage_dirty(mp) set_bit(META_dirty, &(mp)->flag)
/* function prototypes */
extern metapage_t *__get_metapage(struct inode *inode,
extern struct metapage *__get_metapage(struct inode *inode,
unsigned long lblock, unsigned int size,
int absolute, unsigned long new);
......@@ -79,22 +79,22 @@ extern metapage_t *__get_metapage(struct inode *inode,
#define get_metapage(inode, lblock, size, absolute)\
__get_metapage(inode, lblock, size, absolute, TRUE)
extern void release_metapage(metapage_t *);
extern void hold_metapage(metapage_t *, int);
extern void release_metapage(struct metapage *);
extern void hold_metapage(struct metapage *, int);
static inline void write_metapage(metapage_t *mp)
static inline void write_metapage(struct metapage *mp)
{
set_bit(META_dirty, &mp->flag);
release_metapage(mp);
}
static inline void flush_metapage(metapage_t *mp)
static inline void flush_metapage(struct metapage *mp)
{
set_bit(META_sync, &mp->flag);
write_metapage(mp);
}
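For reference, the caller-side life cycle of these helpers: get_metapage() maps a block for an inode, the caller modifies the buffer behind mp->data, and write_metapage() marks the page dirty and drops the reference. A hedged sketch (error handling and locking omitted):
/* illustrative only: typical get/modify/write cycle on a metapage */
static void touch_metadata_block(struct inode *ip, unsigned long lblock,
                                 unsigned int size)
{
        struct metapage *mp;
        mp = get_metapage(ip, lblock, size, 1 /* absolute */);
        if (!mp)
                return;
        /* ... modify the metadata at mp->data ... */
        write_metapage(mp);     /* sets META_dirty, then release_metapage() */
}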
static inline void discard_metapage(metapage_t *mp)
static inline void discard_metapage(struct metapage *mp)
{
clear_bit(META_dirty, &mp->flag);
set_bit(META_discard, &mp->flag);
......
......@@ -249,7 +249,7 @@ int jfs_mount(struct super_block *sb)
int jfs_mount_rw(struct super_block *sb, int remount)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
log_t *log;
struct jfs_log *log;
int rc;
/*
......@@ -506,8 +506,8 @@ int readSuper(struct super_block *sb, struct buffer_head **bpp)
*/
static int logMOUNT(struct super_block *sb)
{
log_t *log = JFS_SBI(sb)->log;
lrd_t lrd;
struct jfs_log *log = JFS_SBI(sb)->log;
struct lrd lrd;
lrd.logtid = 0;
lrd.backchain = 0;
......
......@@ -61,18 +61,18 @@
*/
static struct {
/* tblock */
int freetid; /* 4: index of a free tid structure */
wait_queue_head_t freewait; /* 4: eventlist of free tblock */
int freetid; /* index of a free tid structure */
wait_queue_head_t freewait; /* eventlist of free tblock */
/* tlock */
int freelock; /* 4: index first free lock word */
wait_queue_head_t freelockwait; /* 4: eventlist of free tlock */
wait_queue_head_t lowlockwait; /* 4: eventlist of ample tlocks */
int tlocksInUse; /* 4: Number of tlocks in use */
spinlock_t LazyLock; /* 4: synchronize sync_queue & unlock_queue */
/* tblock_t *sync_queue; * 4: Transactions waiting for data sync */
tblock_t *unlock_queue; /* 4: Transactions waiting to be released */
tblock_t *unlock_tail; /* 4: Tail of unlock_queue */
int freelock; /* index first free lock word */
wait_queue_head_t freelockwait; /* eventlist of free tlock */
wait_queue_head_t lowlockwait; /* eventlist of ample tlocks */
int tlocksInUse; /* Number of tlocks in use */
spinlock_t LazyLock; /* synchronize sync_queue & unlock_queue */
/* struct tblock *sync_queue; * Transactions waiting for data sync */
struct tblock *unlock_queue; /* Txns waiting to be released */
struct tblock *unlock_tail; /* Tail of unlock_queue */
struct list_head anon_list; /* inodes having anonymous txns */
struct list_head anon_list2; /* inodes having anonymous txns
that couldn't be sync'ed */
......@@ -142,8 +142,8 @@ struct {
/*
* external references
*/
extern int lmGroupCommit(log_t * log, tblock_t * tblk);
extern void lmSync(log_t *);
extern int lmGroupCommit(struct jfs_log * log, struct tblock * tblk);
extern void lmSync(struct jfs_log *);
extern int jfs_commit_inode(struct inode *, int);
extern int jfs_stop_threads;
......@@ -153,22 +153,27 @@ extern struct completion jfsIOwait;
/*
* forward references
*/
int diLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck,
commit_t * cd);
int dataLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck);
void dtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck);
void inlineLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck);
void mapLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck);
void txAbortCommit(commit_t * cd, int exval);
static void txAllocPMap(struct inode *ip, maplock_t * maplock,
tblock_t * tblk);
void txForce(tblock_t * tblk);
static int txLog(log_t * log, tblock_t * tblk, commit_t * cd);
int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck, struct commit * cd);
int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
void inlineLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
void txAbortCommit(struct commit * cd, int exval);
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
struct tblock * tblk);
void txForce(struct tblock * tblk);
static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd);
int txMoreLock(void);
static void txUpdateMap(tblock_t * tblk);
static void txRelease(tblock_t * tblk);
void xtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck);
static void LogSyncRelease(metapage_t * mp);
static void txUpdateMap(struct tblock * tblk);
static void txRelease(struct tblock * tblk);
void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
static void LogSyncRelease(struct metapage * mp);
/*
* transaction block/lock management
......@@ -229,8 +234,8 @@ int txInit(void)
* transaction id (tid) = tblock index
* tid = 0 is reserved.
*/
size = sizeof(tblock_t) * nTxBlock;
TxBlock = (tblock_t *) vmalloc(size);
size = sizeof(struct tblock) * nTxBlock;
TxBlock = (struct tblock *) vmalloc(size);
if (TxBlock == NULL)
return ENOMEM;
......@@ -254,8 +259,8 @@ int txInit(void)
* transaction lock id = tlock index
* tlock id = 0 is reserved.
*/
size = sizeof(tlock_t) * nTxLock;
TxLock = (tlock_t *) vmalloc(size);
size = sizeof(struct tlock) * nTxLock;
TxLock = (struct tlock *) vmalloc(size);
if (TxLock == NULL) {
vfree(TxBlock);
return ENOMEM;
......@@ -308,11 +313,11 @@ void txExit(void)
tid_t txBegin(struct super_block *sb, int flag)
{
tid_t t;
tblock_t *tblk;
log_t *log;
struct tblock *tblk;
struct jfs_log *log;
jFYI(1, ("txBegin: flag = 0x%x\n", flag));
log = (log_t *) JFS_SBI(sb)->log;
log = JFS_SBI(sb)->log;
TXN_LOCK();
......@@ -367,7 +372,7 @@ tid_t txBegin(struct super_block *sb, int flag)
* We can't zero the whole thing or we screw up another thread being
* awakened after sleeping on tblk->waitor
*
* memset(tblk, 0, sizeof(tblock_t));
* memset(tblk, 0, sizeof(struct tblock));
*/
tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;
......@@ -401,9 +406,9 @@ tid_t txBegin(struct super_block *sb, int flag)
*/
void txBeginAnon(struct super_block *sb)
{
log_t *log;
struct jfs_log *log;
log = (log_t *) JFS_SBI(sb)->log;
log = JFS_SBI(sb)->log;
TXN_LOCK();
......@@ -439,8 +444,8 @@ void txBeginAnon(struct super_block *sb)
*/
void txEnd(tid_t tid)
{
tblock_t *tblk = tid_to_tblock(tid);
log_t *log;
struct tblock *tblk = tid_to_tblock(tid);
struct jfs_log *log;
jFYI(1, ("txEnd: tid = %d\n", tid));
TXN_LOCK();
......@@ -451,7 +456,7 @@ void txEnd(tid_t tid)
*/
TXN_WAKEUP(&tblk->waitor);
log = (log_t *) JFS_SBI(tblk->sb)->log;
log = JFS_SBI(tblk->sb)->log;
/*
* Lazy commit thread can't free this guy until we mark it UNLOCKED,
......@@ -525,17 +530,18 @@ void txEnd(tid_t tid)
*
* serialization:
*/
tlock_t *txLock(tid_t tid, struct inode *ip, metapage_t * mp, int type)
struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
int type)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
int dir_xtree = 0;
lid_t lid;
tid_t xtid;
tlock_t *tlck;
xtlock_t *xtlck;
linelock_t *linelock;
struct tlock *tlck;
struct xtlock *xtlck;
struct linelock *linelock;
xtpage_t *p;
tblock_t *tblk;
struct tblock *tblk;
TXN_LOCK();
......@@ -695,7 +701,7 @@ tlock_t *txLock(tid_t tid, struct inode *ip, metapage_t * mp, int type)
}
/* initialize type dependent area for linelock */
linelock = (linelock_t *) & tlck->lock;
linelock = (struct linelock *) & tlck->lock;
linelock->next = 0;
linelock->flag = tlckLINELOCK;
linelock->maxcnt = TLOCKSHORT;
......@@ -709,7 +715,7 @@ tlock_t *txLock(tid_t tid, struct inode *ip, metapage_t * mp, int type)
case tlckXTREE:
linelock->l2linesize = L2XTSLOTSIZE;
xtlck = (xtlock_t *) linelock;
xtlck = (struct xtlock *) linelock;
xtlck->header.offset = 0;
xtlck->header.length = 2;
......@@ -761,10 +767,10 @@ tlock_t *txLock(tid_t tid, struct inode *ip, metapage_t * mp, int type)
if (jfs_ip->fileset != AGGREGATE_I) {
jERROR(1, ("txLock: trying to lock locked page!\n"));
dump_mem("ip", ip, sizeof(struct inode));
dump_mem("mp", mp, sizeof(metapage_t));
dump_mem("mp", mp, sizeof(struct metapage));
dump_mem("Locker's tblk", tid_to_tblock(tid),
sizeof(tblock_t));
dump_mem("Tlock", tlck, sizeof(tlock_t));
sizeof(struct tblock));
dump_mem("Tlock", tlck, sizeof(struct tlock));
BUG();
}
INCREMENT(stattx.waitlock); /* statistics */
......@@ -792,11 +798,11 @@ tlock_t *txLock(tid_t tid, struct inode *ip, metapage_t * mp, int type)
*
* RETURN: Errors from subroutines.
*/
static void txRelease(tblock_t * tblk)
static void txRelease(struct tblock * tblk)
{
metapage_t *mp;
struct metapage *mp;
lid_t lid;
tlock_t *tlck;
struct tlock *tlck;
TXN_LOCK();
......@@ -825,17 +831,17 @@ static void txRelease(tblock_t * tblk)
* FUNCTION: Initiates pageout of pages modified by tid in journalled
* objects and frees their lockwords.
*/
static void txUnlock(tblock_t * tblk)
static void txUnlock(struct tblock * tblk)
{
tlock_t *tlck;
linelock_t *linelock;
struct tlock *tlck;
struct linelock *linelock;
lid_t lid, next, llid, k;
metapage_t *mp;
log_t *log;
struct metapage *mp;
struct jfs_log *log;
int difft, diffp;
jFYI(1, ("txUnlock: tblk = 0x%p\n", tblk));
log = (log_t *) JFS_SBI(tblk->sb)->log;
log = JFS_SBI(tblk->sb)->log;
/*
* mark page under tlock homeok (its log has been written):
......@@ -889,9 +895,9 @@ static void txUnlock(tblock_t * tblk)
*/
TXN_LOCK();
llid = ((linelock_t *) & tlck->lock)->next;
llid = ((struct linelock *) & tlck->lock)->next;
while (llid) {
linelock = (linelock_t *) lid_to_tlock(llid);
linelock = (struct linelock *) lid_to_tlock(llid);
k = linelock->next;
txLockFree(llid);
llid = k;
......@@ -922,13 +928,13 @@ static void txUnlock(tblock_t * tblk)
* function: allocate a transaction lock for freed page/entry;
* for freed page, maplock is used as xtlock/dtlock type;
*/
tlock_t *txMaplock(tid_t tid, struct inode *ip, int type)
struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
lid_t lid;
tblock_t *tblk;
tlock_t *tlck;
maplock_t *maplock;
struct tblock *tblk;
struct tlock *tlck;
struct maplock *maplock;
TXN_LOCK();
......@@ -980,7 +986,7 @@ tlock_t *txMaplock(tid_t tid, struct inode *ip, int type)
TXN_UNLOCK();
/* initialize type dependent area for maplock */
maplock = (maplock_t *) & tlck->lock;
maplock = (struct maplock *) & tlck->lock;
maplock->next = 0;
maplock->maxcnt = 0;
maplock->index = 0;
......@@ -994,11 +1000,11 @@ tlock_t *txMaplock(tid_t tid, struct inode *ip, int type)
*
* function: allocate a transaction lock for log vector list
*/
linelock_t *txLinelock(linelock_t * tlock)
struct linelock *txLinelock(struct linelock * tlock)
{
lid_t lid;
tlock_t *tlck;
linelock_t *linelock;
struct tlock *tlck;
struct linelock *linelock;
TXN_LOCK();
......@@ -1009,7 +1015,7 @@ linelock_t *txLinelock(linelock_t * tlock)
TXN_UNLOCK();
/* initialize linelock */
linelock = (linelock_t *) tlck;
linelock = (struct linelock *) tlck;
linelock->next = 0;
linelock->flag = tlckLINELOCK;
linelock->maxcnt = TLOCKLONG;
......@@ -1067,10 +1073,10 @@ int txCommit(tid_t tid, /* transaction identifier */
int flag)
{
int rc = 0, rc1 = 0;
commit_t cd;
log_t *log;
tblock_t *tblk;
lrd_t *lrd;
struct commit cd;
struct jfs_log *log;
struct tblock *tblk;
struct lrd *lrd;
int lsn;
struct inode *ip;
struct jfs_inode_info *jfs_ip;
......@@ -1095,7 +1101,7 @@ int txCommit(tid_t tid, /* transaction identifier */
/*
* initialize commit structure
*/
log = (log_t *) JFS_SBI(sb)->log;
log = JFS_SBI(sb)->log;
cd.log = log;
/* initialize log record descriptor in commit */
......@@ -1293,13 +1299,13 @@ int txCommit(tid_t tid, /* transaction identifier */
*
* RETURN :
*/
static int txLog(log_t * log, tblock_t * tblk, commit_t * cd)
static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
{
int rc = 0;
struct inode *ip;
lid_t lid;
tlock_t *tlck;
lrd_t *lrd = &cd->lrd;
struct tlock *tlck;
struct lrd *lrd = &cd->lrd;
/*
* write log record(s) for each tlock of transaction,
......@@ -1356,13 +1362,13 @@ static int txLog(log_t * log, tblock_t * tblk, commit_t * cd)
*
* function: log inode tlock and format maplock to update bmap;
*/
int diLog(log_t * log,
tblock_t * tblk, lrd_t * lrd, tlock_t * tlck, commit_t * cd)
int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck, struct commit * cd)
{
int rc = 0;
metapage_t *mp;
struct metapage *mp;
pxd_t *pxd;
pxdlock_t *pxdlock;
struct pxd_lock *pxdlock;
mp = tlck->mp;
......@@ -1418,7 +1424,7 @@ int diLog(log_t * log,
lrd->log.noredoinoext.inoext_idx =
cpu_to_le32((u32) (size_t) cd->iplist[2]);
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
*pxd = pxdlock->pxd;
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
......@@ -1444,7 +1450,7 @@ int diLog(log_t * log,
* alloc of new (and free of old) external EA extent;
*/
lrd->type = cpu_to_le16(LOG_UPDATEMAP);
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
nlock = pxdlock->index;
for (i = 0; i < nlock; i++, pxdlock++) {
if (pxdlock->flag & mlckALLOCPXD)
......@@ -1473,9 +1479,10 @@ int diLog(log_t * log,
*
* function: log data tlock
*/
int dataLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
{
metapage_t *mp;
struct metapage *mp;
pxd_t *pxd;
int rc;
s64 xaddr;
......@@ -1528,11 +1535,12 @@ int dataLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
*
* function: log dtree tlock and format maplock to update bmap;
*/
void dtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
{
struct inode *ip;
metapage_t *mp;
pxdlock_t *pxdlock;
struct metapage *mp;
struct pxd_lock *pxdlock;
pxd_t *pxd;
ip = tlck->ip;
......@@ -1575,7 +1583,7 @@ void dtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
if (tlck->type & tlckBTROOT)
return;
tlck->flag |= tlckUPDATEMAP;
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckALLOCPXD;
pxdlock->pxd = *pxd;
......@@ -1616,7 +1624,7 @@ void dtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
* of the deleted page
*/
lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
*pxd = pxdlock->pxd;
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
......@@ -1634,15 +1642,16 @@ void dtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
*
* function: log xtree tlock and format maplock to update bmap;
*/
void xtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
{
struct inode *ip;
metapage_t *mp;
struct metapage *mp;
xtpage_t *p;
xtlock_t *xtlck;
maplock_t *maplock;
xdlistlock_t *xadlock;
pxdlock_t *pxdlock;
struct xtlock *xtlck;
struct maplock *maplock;
struct xdlistlock *xadlock;
struct pxd_lock *pxdlock;
pxd_t *pxd;
int next, lwm, hwm;
......@@ -1665,10 +1674,10 @@ void xtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
p = (xtpage_t *) mp->data;
next = le16_to_cpu(p->header.nextindex);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
maplock = (maplock_t *) & tlck->lock;
xadlock = (xdlistlock_t *) maplock;
maplock = (struct maplock *) & tlck->lock;
xadlock = (struct xdlistlock *) maplock;
/*
* entry insertion/extension;
......@@ -1786,7 +1795,7 @@ void xtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
*/
lrd->type = cpu_to_le16(LOG_UPDATEMAP);
lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
hwm = xtlck->hwm.offset;
lrd->log.updatemap.nxd =
cpu_to_le16(hwm - XTENTRYSTART + 1);
......@@ -1905,7 +1914,7 @@ void xtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
* entry XAD[next - 1]:
* (xtlck->pxdlock = truncated delta extent);
*/
pxdlock = (pxdlock_t *) & xtlck->pxdlock;
pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
/* assert(pxdlock->type & tlckTRUNCATE); */
lrd->type = cpu_to_le16(LOG_UPDATEMAP);
lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
......@@ -1927,7 +1936,7 @@ void xtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
lrd->type = cpu_to_le16(LOG_UPDATEMAP);
lrd->log.updatemap.type =
cpu_to_le16(LOG_FREEXADLIST);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
hwm = xtlck->hwm.offset;
lrd->log.updatemap.nxd =
cpu_to_le16(hwm - next + 1);
......@@ -1969,7 +1978,7 @@ void xtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
* truncate entry XAD[twm == next - 1]:
*/
if (twm == next - 1) {
pxdlock_t *pxdlock;
struct pxd_lock *pxdlock;
/* format a maplock for txUpdateMap() to update bmap
* to free truncated delta extent of the truncated
......@@ -1977,7 +1986,7 @@ void xtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
* (xtlck->pxdlock = truncated delta extent);
*/
tlck->flag |= tlckUPDATEMAP;
pxdlock = (pxdlock_t *) xadlock;
pxdlock = (struct pxd_lock *) xadlock;
pxdlock->flag = mlckFREEPXD;
pxdlock->count = 1;
pxdlock->pxd = tpxd;
......@@ -2020,9 +2029,10 @@ void xtLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
*
* function: log from maplock of freed data extents;
*/
void mapLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck)
{
pxdlock_t *pxdlock;
struct pxd_lock *pxdlock;
int i, nlock;
pxd_t *pxd;
......@@ -2038,7 +2048,7 @@ void mapLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
* for logredo() to start NoRedoPage filter;
*/
lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
pxd = &lrd->log.redopage.pxd;
*pxd = pxdlock->pxd;
lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));
......@@ -2077,7 +2087,7 @@ void mapLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
* from xtTailgate();
*/
lrd->type = cpu_to_le16(LOG_UPDATEMAP);
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
nlock = pxdlock->index;
for (i = 0; i < nlock; i++, pxdlock++) {
if (pxdlock->flag & mlckALLOCPXD)
......@@ -2109,8 +2119,8 @@ void mapLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck)
*/
void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
{
tlock_t *tlck = NULL;
pxdlock_t *maplock = NULL, *pxdlock = NULL;
struct tlock *tlck = NULL;
struct pxd_lock *maplock = NULL, *pxdlock = NULL;
/*
* format maplock for alloc of new EA extent
......@@ -2122,8 +2132,8 @@ void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
*/
if (newea->flag & DXD_EXTENT) {
tlck = txMaplock(tid, ip, tlckMAP);
maplock = (pxdlock_t *) & tlck->lock;
pxdlock = (pxdlock_t *) maplock;
maplock = (struct pxd_lock *) & tlck->lock;
pxdlock = (struct pxd_lock *) maplock;
pxdlock->flag = mlckALLOCPXD;
PXDaddress(&pxdlock->pxd, addressDXD(newea));
PXDlength(&pxdlock->pxd, lengthDXD(newea));
......@@ -2142,8 +2152,8 @@ void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
if (tlck == NULL) {
tlck = txMaplock(tid, ip, tlckMAP);
maplock = (pxdlock_t *) & tlck->lock;
pxdlock = (pxdlock_t *) maplock;
maplock = (struct pxd_lock *) & tlck->lock;
pxdlock = (struct pxd_lock *) maplock;
maplock->index = 0;
}
pxdlock->flag = mlckFREEPXD;
......@@ -2160,11 +2170,11 @@ void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
* function: synchronously write pages locked by transaction
* after txLog() but before txUpdateMap();
*/
void txForce(tblock_t * tblk)
void txForce(struct tblock * tblk)
{
tlock_t *tlck;
struct tlock *tlck;
lid_t lid, next;
metapage_t *mp;
struct metapage *mp;
/*
* reverse the order of transaction tlocks in
......@@ -2215,17 +2225,17 @@ void txForce(tblock_t * tblk)
*
* parameter:
*/
static void txUpdateMap(tblock_t * tblk)
static void txUpdateMap(struct tblock * tblk)
{
struct inode *ip;
struct inode *ipimap;
lid_t lid;
tlock_t *tlck;
maplock_t *maplock;
pxdlock_t pxdlock;
struct tlock *tlck;
struct maplock *maplock;
struct pxd_lock pxdlock;
int maptype;
int k, nlock;
metapage_t *mp = 0;
struct metapage *mp = 0;
ipimap = JFS_SBI(tblk->sb)->ipimap;
......@@ -2268,7 +2278,7 @@ static void txUpdateMap(tblock_t * tblk)
* . in-line PXD list:
* . out-of-line XAD list:
*/
maplock = (maplock_t *) & tlck->lock;
maplock = (struct maplock *) & tlck->lock;
nlock = maplock->index;
for (k = 0; k < nlock; k++, maplock++) {
......@@ -2339,7 +2349,7 @@ static void txUpdateMap(tblock_t * tblk)
pxdlock.flag = mlckALLOCPXD;
pxdlock.pxd = JFS_IP(ip)->ixpxd;
pxdlock.index = 1;
txAllocPMap(ip, (maplock_t *) & pxdlock, tblk);
txAllocPMap(ip, (struct maplock *) & pxdlock, tblk);
iput(ip);
} else if (tblk->xflag & COMMIT_DELETE) {
ip = tblk->ip;
......@@ -2374,16 +2384,16 @@ static void txUpdateMap(tblock_t * tblk)
*
* lsn - log sequence number;
*/
static void txAllocPMap(struct inode *ip, maplock_t * maplock,
tblock_t * tblk)
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
struct tblock * tblk)
{
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
xdlistlock_t *xadlistlock;
struct xdlistlock *xadlistlock;
xad_t *xad;
s64 xaddr;
int xlen;
pxdlock_t *pxdlock;
xdlistlock_t *pxdlistlock;
struct pxd_lock *pxdlock;
struct xdlistlock *pxdlistlock;
pxd_t *pxd;
int n;
......@@ -2391,7 +2401,7 @@ static void txAllocPMap(struct inode *ip, maplock_t * maplock,
* allocate from persistent map;
*/
if (maplock->flag & mlckALLOCXADLIST) {
xadlistlock = (xdlistlock_t *) maplock;
xadlistlock = (struct xdlistlock *) maplock;
xad = xadlistlock->xdlist;
for (n = 0; n < xadlistlock->count; n++, xad++) {
if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
......@@ -2406,7 +2416,7 @@ static void txAllocPMap(struct inode *ip, maplock_t * maplock,
}
}
} else if (maplock->flag & mlckALLOCPXD) {
pxdlock = (pxdlock_t *) maplock;
pxdlock = (struct pxd_lock *) maplock;
xaddr = addressPXD(&pxdlock->pxd);
xlen = lengthPXD(&pxdlock->pxd);
dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen, tblk);
......@@ -2415,7 +2425,7 @@ static void txAllocPMap(struct inode *ip, maplock_t * maplock,
xlen));
} else { /* (maplock->flag & mlckALLOCPXDLIST) */
pxdlistlock = (xdlistlock_t *) maplock;
pxdlistlock = (struct xdlistlock *) maplock;
pxd = pxdlistlock->xdlist;
for (n = 0; n < pxdlistlock->count; n++, pxd++) {
xaddr = addressPXD(pxd);
......@@ -2438,15 +2448,15 @@ static void txAllocPMap(struct inode *ip, maplock_t * maplock,
* todo: optimization
*/
void txFreeMap(struct inode *ip,
maplock_t * maplock, tblock_t * tblk, int maptype)
struct maplock * maplock, struct tblock * tblk, int maptype)
{
struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
xdlistlock_t *xadlistlock;
struct xdlistlock *xadlistlock;
xad_t *xad;
s64 xaddr;
int xlen;
pxdlock_t *pxdlock;
xdlistlock_t *pxdlistlock;
struct pxd_lock *pxdlock;
struct xdlistlock *pxdlistlock;
pxd_t *pxd;
int n;
......@@ -2459,7 +2469,7 @@ void txFreeMap(struct inode *ip,
*/
if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
if (maplock->flag & mlckFREEXADLIST) {
xadlistlock = (xdlistlock_t *) maplock;
xadlistlock = (struct xdlistlock *) maplock;
xad = xadlistlock->xdlist;
for (n = 0; n < xadlistlock->count; n++, xad++) {
if (!(xad->flag & XAD_NEW)) {
......@@ -2473,7 +2483,7 @@ void txFreeMap(struct inode *ip,
}
}
} else if (maplock->flag & mlckFREEPXD) {
pxdlock = (pxdlock_t *) maplock;
pxdlock = (struct pxd_lock *) maplock;
xaddr = addressPXD(&pxdlock->pxd);
xlen = lengthPXD(&pxdlock->pxd);
dbUpdatePMap(ipbmap, TRUE, xaddr, (s64) xlen,
......@@ -2483,7 +2493,7 @@ void txFreeMap(struct inode *ip,
(ulong) xaddr, xlen));
} else { /* (maplock->flag & mlckALLOCPXDLIST) */
pxdlistlock = (xdlistlock_t *) maplock;
pxdlistlock = (struct xdlistlock *) maplock;
pxd = pxdlistlock->xdlist;
for (n = 0; n < pxdlistlock->count; n++, pxd++) {
xaddr = addressPXD(pxd);
......@@ -2502,7 +2512,7 @@ void txFreeMap(struct inode *ip,
*/
if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
if (maplock->flag & mlckFREEXADLIST) {
xadlistlock = (xdlistlock_t *) maplock;
xadlistlock = (struct xdlistlock *) maplock;
xad = xadlistlock->xdlist;
for (n = 0; n < xadlistlock->count; n++, xad++) {
xaddr = addressXAD(xad);
......@@ -2514,7 +2524,7 @@ void txFreeMap(struct inode *ip,
(ulong) xaddr, xlen));
}
} else if (maplock->flag & mlckFREEPXD) {
pxdlock = (pxdlock_t *) maplock;
pxdlock = (struct pxd_lock *) maplock;
xaddr = addressPXD(&pxdlock->pxd);
xlen = lengthPXD(&pxdlock->pxd);
dbFree(ip, xaddr, (s64) xlen);
......@@ -2523,7 +2533,7 @@ void txFreeMap(struct inode *ip,
(ulong) xaddr, xlen));
} else { /* (maplock->flag & mlckFREEPXDLIST) */
pxdlistlock = (xdlistlock_t *) maplock;
pxdlistlock = (struct xdlistlock *) maplock;
pxd = pxdlistlock->xdlist;
for (n = 0; n < pxdlistlock->count; n++, pxd++) {
xaddr = addressPXD(pxd);
......@@ -2546,13 +2556,13 @@ void txFreeMap(struct inode *ip,
void txFreelock(struct inode *ip)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
tlock_t *xtlck, *tlck;
struct tlock *xtlck, *tlck;
lid_t xlid = 0, lid;
if (!jfs_ip->atlhead)
return;
xtlck = (tlock_t *) &jfs_ip->atlhead;
xtlck = (struct tlock *) &jfs_ip->atlhead;
while ((lid = xtlck->next)) {
tlck = lid_to_tlock(lid);
......@@ -2593,8 +2603,8 @@ void txFreelock(struct inode *ip)
void txAbort(tid_t tid, int dirty)
{
lid_t lid, next;
metapage_t *mp;
tblock_t *tblk = tid_to_tblock(tid);
struct metapage *mp;
struct tblock *tblk = tid_to_tblock(tid);
jEVENT(1, ("txAbort: tid:%d dirty:0x%x\n", tid, dirty));
......@@ -2651,12 +2661,12 @@ void txAbort(tid_t tid, int dirty)
* log age of page-frames in memory for which caller has
* are reset to 0 (to avoid logwrap).
*/
void txAbortCommit(commit_t * cd, int exval)
void txAbortCommit(struct commit * cd, int exval)
{
tblock_t *tblk;
struct tblock *tblk;
tid_t tid;
lid_t lid, next;
metapage_t *mp;
struct metapage *mp;
assert(exval == EIO || exval == ENOMEM);
jEVENT(1, ("txAbortCommit: cd:0x%p\n", cd));
......@@ -2706,9 +2716,9 @@ void txAbortCommit(commit_t * cd, int exval)
* allocation maps are updated in order. For synchronous transactions,
* let the user thread finish processing after txUpdateMap() is called.
*/
void txLazyCommit(tblock_t * tblk)
void txLazyCommit(struct tblock * tblk)
{
log_t *log;
struct jfs_log *log;
while (((tblk->flag & tblkGC_READY) == 0) &&
((tblk->flag & tblkGC_UNLOCKED) == 0)) {
......@@ -2723,7 +2733,7 @@ void txLazyCommit(tblock_t * tblk)
txUpdateMap(tblk);
log = (log_t *) JFS_SBI(tblk->sb)->log;
log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
spin_lock_irq(&log->gclock); // LOGGC_LOCK
......@@ -2756,7 +2766,7 @@ void txLazyCommit(tblock_t * tblk)
int jfs_lazycommit(void *arg)
{
int WorkDone;
tblock_t *tblk;
struct tblock *tblk;
unsigned long flags;
lock_kernel();
......@@ -2831,7 +2841,7 @@ int jfs_lazycommit(void *arg)
return 0;
}
void txLazyUnlock(tblock_t * tblk)
void txLazyUnlock(struct tblock * tblk)
{
unsigned long flags;
......@@ -2847,9 +2857,9 @@ void txLazyUnlock(tblock_t * tblk)
wake_up(&jfs_commit_thread_wait);
}
static void LogSyncRelease(metapage_t * mp)
static void LogSyncRelease(struct metapage * mp)
{
log_t *log = mp->log;
struct jfs_log *log = mp->log;
assert(atomic_read(&mp->nohomeok));
assert(log);
......@@ -2885,7 +2895,7 @@ void txQuiesce(struct super_block *sb)
{
struct inode *ip;
struct jfs_inode_info *jfs_ip;
log_t *log = JFS_SBI(sb)->log;
struct jfs_log *log = JFS_SBI(sb)->log;
int rc;
tid_t tid;
......@@ -2936,7 +2946,7 @@ void txQuiesce(struct super_block *sb)
*/
void txResume(struct super_block *sb)
{
log_t *log = JFS_SBI(sb)->log;
struct jfs_log *log = JFS_SBI(sb)->log;
clear_bit(log_QUIESCE, &log->flag);
TXN_WAKEUP(&log->syncwait);
......
......@@ -30,9 +30,9 @@
/*
* transaction block
*/
typedef struct tblock {
struct tblock {
/*
* tblock_t and jbuf_t common area: struct logsyncblk
* tblock and jbuf_t common area: struct logsyncblk
*
* the following 5 fields are the same as struct logsyncblk
* which is common to tblock and jbuf to form logsynclist
......@@ -44,28 +44,26 @@ typedef struct tblock {
struct list_head synclist; /* logsynclist link */
/* lock management */
struct super_block *sb; /* 4: super block */
lid_t next; /* 2: index of first tlock of tid */
lid_t last; /* 2: index of last tlock of tid */
wait_queue_head_t waitor; /* 4: tids waiting on this tid */
struct super_block *sb; /* super block */
lid_t next; /* index of first tlock of tid */
lid_t last; /* index of last tlock of tid */
wait_queue_head_t waitor; /* tids waiting on this tid */
/* log management */
u32 logtid; /* 4: log transaction id */
/* (32) */
u32 logtid; /* log transaction id */
/* commit management */
struct tblock *cqnext; /* 4: commit queue link */
s32 clsn; /* 4: commit lsn */
struct lbuf *bp; /* 4: */
s32 pn; /* 4: commit record log page number */
s32 eor; /* 4: commit record eor */
wait_queue_head_t gcwait; /* 4: group commit event list:
* ready transactions wait on this
* event for group commit completion.
struct tblock *cqnext; /* commit queue link */
s32 clsn; /* commit lsn */
struct lbuf *bp;
s32 pn; /* commit record log page number */
s32 eor; /* commit record eor */
wait_queue_head_t gcwait; /* group commit event list:
* ready transactions wait on this
* event for group commit completion.
*/
struct inode *ip; /* 4: inode being created or deleted */
s32 rsrvd; /* 4: */
} tblock_t; /* (64) */
struct inode *ip; /* inode being created or deleted */
};
extern struct tblock *TxBlock; /* transaction block table */
......@@ -90,7 +88,7 @@ extern struct tblock *TxBlock; /* transaction block table */
/*
* transaction lock
*/
typedef struct tlock {
struct tlock {
lid_t next; /* index next lockword on tid locklist
* next lockword on freelist
*/
......@@ -104,7 +102,7 @@ typedef struct tlock {
/* (16) */
s16 lock[24]; /* 48: overlay area */
} tlock_t; /* (64) */
}; /* (64) */
extern struct tlock *TxLock; /* transaction lock table */
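The transaction manager code earlier in this patch indexes these tables directly: a tid is an index into TxBlock (tid 0 is reserved) and a lid an index into TxLock (lid 0 is reserved), so the tid_to_tblock()/lid_to_tlock() helpers it calls amount to array lookups, roughly:
/* rough equivalents of the lookup helpers used throughout jfs_txnmgr.c */
static inline struct tblock *tblock_of(tid_t tid)
{
        return &TxBlock[tid];
}
static inline struct tlock *tlock_of(lid_t lid)
{
        return &TxLock[lid];
}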
......@@ -153,18 +151,18 @@ extern struct tlock *TxLock; /* transaction lock table */
/*
* linelock for lmLog()
*
* note: linelock_t and its variations are overlaid
* note: linelock and its variations are overlaid
* at tlock.lock: watch for alignment;
*/
typedef struct {
struct lv {
u8 offset; /* 1: */
u8 length; /* 1: */
} lv_t; /* (2) */
}; /* (2) */
#define TLOCKSHORT 20
#define TLOCKLONG 28
typedef struct {
struct linelock {
u16 next; /* 2: next linelock */
s8 maxcnt; /* 1: */
......@@ -175,13 +173,12 @@ typedef struct {
u8 l2linesize; /* 1: log2 of linesize */
/* (8) */
lv_t lv[20]; /* 40: */
} linelock_t; /* (48) */
struct lv lv[20]; /* 40: */
}; /* (48) */
#define dtlock_t linelock_t
#define itlock_t linelock_t
#define dt_lock linelock
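Each struct lv names a run of lines within the locked object; lmWriteRecord() (earlier in this patch) scales the offset and length by the linelock's l2linesize to obtain the byte range to copy into the log page. Roughly:
/* byte range covered by one line vector, given the linelock's line size */
static void lv_to_byte_range(const struct linelock *llck, const struct lv *lv,
                             int *off, int *len)
{
        *off = lv->offset << llck->l2linesize;
        *len = lv->length << llck->l2linesize;
}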
typedef struct {
struct xtlock {
u16 next; /* 2: */
s8 maxcnt; /* 1: */
......@@ -192,27 +189,27 @@ typedef struct {
u8 l2linesize; /* 1: log2 of linesize */
/* (8) */
lv_t header; /* 2: */
lv_t lwm; /* 2: low water mark */
lv_t hwm; /* 2: high water mark */
lv_t twm; /* 2: */
struct lv header; /* 2: */
struct lv lwm; /* 2: low water mark */
struct lv hwm; /* 2: high water mark */
struct lv twm; /* 2: */
/* (16) */
s32 pxdlock[8]; /* 32: */
} xtlock_t; /* (48) */
}; /* (48) */
/*
* maplock for txUpdateMap()
*
* note: maplock_t and its variations are overlaid
* note: maplock and its variations are overlaid
* at tlock.lock/linelock: watch for alignment;
* N.B. next field may be set by linelock, and should not
* be modified by maplock;
* N.B. index of the first pxdlock specifies index of next
* free maplock (i.e., number of maplock) in the tlock;
*/
typedef struct {
struct maplock {
u16 next; /* 2: */
u8 maxcnt; /* 2: */
......@@ -224,7 +221,7 @@ typedef struct {
/* (8) */
pxd_t pxd; /* 8: */
} maplock_t; /* (16): */
}; /* (16): */
/* maplock flag */
#define mlckALLOC 0x00f0
......@@ -238,9 +235,9 @@ typedef struct {
#define mlckFREEXAD 0x0002
#define mlckFREEPXD 0x0001
#define pxdlock_t maplock_t
#define pxd_lock maplock
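linelock, xtlock and maplock (aliased to pxd_lock above) are all overlaid on the 48-byte tlck->lock area, which is why the code casts &tlck->lock between these types freely. A compile-time guard for that size assumption might look like:
/* each overlay type must fit in tlck->lock (s16 lock[24], i.e. 48 bytes) */
static inline void tlock_overlay_size_check(void)
{
        (void)sizeof(char[sizeof(struct linelock) <= sizeof(((struct tlock *)0)->lock) ? 1 : -1]);
        (void)sizeof(char[sizeof(struct xtlock) <= sizeof(((struct tlock *)0)->lock) ? 1 : -1]);
        (void)sizeof(char[sizeof(struct maplock) <= sizeof(((struct tlock *)0)->lock) ? 1 : -1]);
}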
typedef struct {
struct xdlistlock {
u16 next; /* 2: */
u8 maxcnt; /* 2: */
......@@ -252,14 +249,14 @@ typedef struct {
/* (8) */
/*
* We need xdlistlock_t to be 64 bits (8 bytes), regardless of
* We need xdlist to be 64 bits (8 bytes), regardless of
* whether void * is 32 or 64 bits
*/
union {
void *_xdlist; /* pxd/xad list */
s64 pad; /* 8: Force 64-bit xdlist size */
} union64;
} xdlistlock_t; /* (16): */
}; /* (16): */
#define xdlist union64._xdlist
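
The comment above gives the reason for the union: with only the pointer member, the struct would shrink to 4 bytes less on 32-bit builds. A quick stand-alone check of that sizing trick, with field names mirroring the ones above:

#include <stdint.h>
#include <stdio.h>

union demo_xdlist {
        void   *_xdlist;       /* 4 or 8 bytes depending on the ABI */
        int64_t pad;           /* forces the union to 8 bytes everywhere */
};

int main(void)
{
        /* prints 8 on both 32-bit and 64-bit builds */
        printf("sizeof(union demo_xdlist) = %zu\n", sizeof(union demo_xdlist));
        return 0;
}
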
......@@ -268,26 +265,26 @@ typedef struct {
*
* parameter to the commit manager routines
*/
typedef struct commit {
tid_t tid; /* 4: tid = index of tblock */
int flag; /* 4: flags */
log_t *log; /* 4: log */
struct super_block *sb; /* 4: superblock */
struct commit {
tid_t tid; /* tid = index of tblock */
int flag; /* flags */
struct jfs_log *log; /* log */
struct super_block *sb; /* superblock */
int nip; /* 4: number of entries in iplist */
struct inode **iplist; /* 4: list of pointers to inodes */
/* (32) */
int nip; /* number of entries in iplist */
struct inode **iplist; /* list of pointers to inodes */
/* log record descriptor on 64-bit boundary */
lrd_t lrd; /* : log record descriptor */
} commit_t;
struct lrd lrd; /* : log record descriptor */
};
/*
* external declarations
*/
extern tlock_t *txLock(tid_t tid, struct inode *ip, struct metapage *mp, int flag);
extern struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage *mp,
int flag);
extern tlock_t *txMaplock(tid_t tid, struct inode *ip, int flag);
extern struct tlock *txMaplock(tid_t tid, struct inode *ip, int flag);
extern int txCommit(tid_t tid, int nip, struct inode **iplist, int flag);
......@@ -299,16 +296,17 @@ extern void txEnd(tid_t tid);
extern void txAbort(tid_t tid, int dirty);
extern linelock_t *txLinelock(linelock_t * tlock);
extern struct linelock *txLinelock(struct linelock * tlock);
extern void txFreeMap(struct inode *ip,
maplock_t * maplock, tblock_t * tblk, int maptype);
extern void txFreeMap(struct inode *ip, struct maplock * maplock,
struct tblock * tblk, int maptype);
extern void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea);
extern void txFreelock(struct inode *ip);
extern int lmLog(log_t * log, tblock_t * tblk, lrd_t * lrd, tlock_t * tlck);
extern int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
extern void txQuiesce(struct super_block *sb);
......
......@@ -81,11 +81,11 @@ typedef struct {
( ((s64)((lxd)->off1)) << 32 | (lxd)->off2 )
/* lxd list */
typedef struct {
struct lxdlist {
s16 maxnlxd;
s16 nlxd;
lxd_t *lxd;
} lxdlist_t;
};
/*
* physical xd (pxd)
......@@ -111,11 +111,11 @@ typedef struct {
( ((s64)((pxd)->addr1)) << 32 | __le32_to_cpu((pxd)->addr2))
/* pxd list */
typedef struct {
struct pxdlist {
s16 maxnpxd;
s16 npxd;
pxd_t pxd[8];
} pxdlist_t;
};
/*
......@@ -150,16 +150,16 @@ typedef struct {
/*
* directory entry argument
*/
typedef struct component_name {
struct component_name {
int namlen;
wchar_t *name;
} component_t;
};
/*
* DASD limit information - stored in directory inode
*/
typedef struct dasd {
struct dasd {
u8 thresh; /* Alert Threshold (in percent) */
u8 delta; /* Alert Threshold delta (in percent) */
u8 rsrvd1;
......@@ -168,7 +168,7 @@ typedef struct dasd {
u8 rsrvd2[3];
u8 used_hi; /* DASD usage (in logical blocks) */
u32 used_lo; /* DASD usage (in logical blocks) */
} dasd_t;
};
#define DASDLIMIT(dasdp) \
(((u64)((dasdp)->limit_hi) << 32) + __le32_to_cpu((dasdp)->limit_lo))
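
DASDLIMIT() above rebuilds the 64-bit DASD limit from the split hi/lo fields of struct dasd. A host-endian sketch of that recombination with made-up values (the real macro additionally byte-swaps limit_lo via __le32_to_cpu):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t  limit_hi = 0x01;        /* upper bits of the DASD limit */
        uint32_t limit_lo = 0x00000200u; /* lower 32 bits of the limit   */

        uint64_t limit = ((uint64_t) limit_hi << 32) + limit_lo;

        printf("DASD limit = %llu blocks\n", (unsigned long long) limit);
        return 0;
}
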
......
......@@ -55,7 +55,7 @@ int jfs_umount(struct super_block *sb)
struct inode *ipimap = sbi->ipimap;
struct inode *ipaimap = sbi->ipaimap;
struct inode *ipaimap2 = sbi->ipaimap2;
log_t *log;
struct jfs_log *log;
int rc = 0;
jFYI(1, ("\n UnMount JFS: sb:0x%p\n", sb));
......@@ -143,7 +143,7 @@ int jfs_umount_rw(struct super_block *sb)
{
struct address_space *bdev_mapping = sb->s_bdev->bd_inode->i_mapping;
struct jfs_sb_info *sbi = JFS_SBI(sb);
log_t *log = sbi->log;
struct jfs_log *log = sbi->log;
if (!log)
return 0;
......
......@@ -89,7 +89,7 @@ int jfs_strtoUCS(wchar_t * to,
* FUNCTION: Allocate and translate to unicode string
*
*/
int get_UCSname(component_t * uniName, struct dentry *dentry,
int get_UCSname(struct component_name * uniName, struct dentry *dentry,
struct nls_table *nls_tab)
{
int length = dentry->d_name.len;
......
......@@ -30,7 +30,8 @@ typedef struct {
extern signed char UniUpperTable[512];
extern UNICASERANGE UniUpperRange[];
extern int get_UCSname(component_t *, struct dentry *, struct nls_table *);
extern int get_UCSname(struct component_name *, struct dentry *,
struct nls_table *);
extern int jfs_strfromUCS_le(char *, const wchar_t *, int, struct nls_table *);
#define free_UCSname(COMP) kfree((COMP)->name)
......
......@@ -84,15 +84,15 @@
#define XT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \
BT_GETSEARCH(IP, LEAF, BN, MP, xtpage_t, P, INDEX, i_xtroot)
/* xtree entry parameter descriptor */
typedef struct {
metapage_t *mp;
struct xtsplit {
struct metapage *mp;
s16 index;
u8 flag;
s64 off;
s64 addr;
int len;
pxdlist_t *pxdlist;
} xtsplit_t;
struct pxdlist *pxdlist;
};
/*
......@@ -111,29 +111,25 @@ static struct {
* forward references
*/
static int xtSearch(struct inode *ip,
s64 xoff, int *cmpp, btstack_t * btstack, int flag);
s64 xoff, int *cmpp, struct btstack * btstack, int flag);
static int xtSplitUp(tid_t tid,
struct inode *ip,
xtsplit_t * split, btstack_t * btstack);
struct xtsplit * split, struct btstack * btstack);
static int xtSplitPage(tid_t tid,
struct inode *ip,
xtsplit_t * split, metapage_t ** rmpp, s64 * rbnp);
static int xtSplitPage(tid_t tid, struct inode *ip, struct xtsplit * split,
struct metapage ** rmpp, s64 * rbnp);
static int xtSplitRoot(tid_t tid,
struct inode *ip,
xtsplit_t * split, metapage_t ** rmpp);
static int xtSplitRoot(tid_t tid, struct inode *ip,
struct xtsplit * split, struct metapage ** rmpp);
#ifdef _STILL_TO_PORT
static int xtDeleteUp(tid_t tid,
struct inode *ip,
metapage_t * fmp,
xtpage_t * fp, btstack_t * btstack);
static int xtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
xtpage_t * fp, struct btstack * btstack);
static int xtSearchNode(struct inode *ip,
xad_t * xad,
int *cmpp, btstack_t * btstack, int flag);
int *cmpp, struct btstack * btstack, int flag);
static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp);
#endif /* _STILL_TO_PORT */
......@@ -155,10 +151,10 @@ int xtLookup(struct inode *ip, s64 lstart,
s64 llen, int *pflag, s64 * paddr, s32 * plen, int no_check)
{
int rc = 0;
btstack_t btstack;
struct btstack btstack;
int cmp;
s64 bn;
metapage_t *mp;
struct metapage *mp;
xtpage_t *p;
int index;
xad_t *xad;
......@@ -240,8 +236,8 @@ int xtLookup(struct inode *ip, s64 lstart,
*
* parameter:
* struct inode *ip,
* lxdlist_t *lxdlist, lxd list (in)
* xadlist_t *xadlist, xad list (in/out)
* struct lxdlist *lxdlist, lxd list (in)
* struct xadlist *xadlist, xad list (in/out)
* int flag)
*
* coverage of lxd by xad under assumption of
......@@ -256,15 +252,14 @@ int xtLookup(struct inode *ip, s64 lstart,
* required to cover the last byte;
* the extent backing a page is fully contained within an xad;
*/
int xtLookupList(struct inode *ip, lxdlist_t * lxdlist, /* lxd list (in) */
xadlist_t * xadlist, /* xad list (in/out) */
int flag)
int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
struct xadlist * xadlist, int flag)
{
int rc = 0;
btstack_t btstack;
struct btstack btstack;
int cmp;
s64 bn;
metapage_t *mp;
struct metapage *mp;
xtpage_t *p;
int index;
lxd_t *lxd;
......@@ -509,17 +504,17 @@ int xtLookupList(struct inode *ip, lxdlist_t * lxdlist, /* lxd list (in) */
* the page containing the entry is pinned at exit.
*/
static int xtSearch(struct inode *ip, s64 xoff, /* offset of extent */
int *cmpp, btstack_t * btstack, int flag)
int *cmpp, struct btstack * btstack, int flag)
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
int rc = 0;
int cmp = 1; /* init for empty page */
s64 bn; /* block number */
metapage_t *mp; /* page buffer */
struct metapage *mp; /* page buffer */
xtpage_t *p; /* page */
xad_t *xad;
int base, index, lim, btindex;
btframe_t *btsp;
struct btframe *btsp;
int nsplit = 0; /* number of pages to split */
s64 t64;
......@@ -796,16 +791,16 @@ int xtInsert(tid_t tid, /* transaction id */
{
int rc = 0;
s64 xaddr, hint;
metapage_t *mp; /* meta-page buffer */
struct metapage *mp; /* meta-page buffer */
xtpage_t *p; /* base B+-tree index page */
s64 bn;
int index, nextindex;
btstack_t btstack; /* traverse stack */
xtsplit_t split; /* split information */
struct btstack btstack; /* traverse stack */
struct xtsplit split; /* split information */
xad_t *xad;
int cmp;
tlock_t *tlck;
xtlock_t *xtlck;
struct tlock *tlck;
struct xtlock *xtlck;
jFYI(1,
("xtInsert: nxoff:0x%lx nxlen:0x%x\n", (ulong) xoff, xlen));
......@@ -903,7 +898,7 @@ int xtInsert(tid_t tid, /* transaction id */
/* Don't log it if there are no links to the file */
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset =
(xtlck->lwm.offset) ? min(index,
(int)xtlck->lwm.offset) : index;
......@@ -937,27 +932,27 @@ int xtInsert(tid_t tid, /* transaction id */
*/
static int
xtSplitUp(tid_t tid,
struct inode *ip, xtsplit_t * split, btstack_t * btstack)
struct inode *ip, struct xtsplit * split, struct btstack * btstack)
{
int rc = 0;
metapage_t *smp;
struct metapage *smp;
xtpage_t *sp; /* split page */
metapage_t *rmp;
struct metapage *rmp;
s64 rbn; /* new right page block number */
metapage_t *rcmp;
struct metapage *rcmp;
xtpage_t *rcp; /* right child page */
s64 rcbn; /* right child page block number */
int skip; /* index of entry of insertion */
int nextindex; /* next available entry index of p */
btframe_t *parent; /* parent page entry on traverse stack */
struct btframe *parent; /* parent page entry on traverse stack */
xad_t *xad;
s64 xaddr;
int xlen;
int nsplit; /* number of pages split */
pxdlist_t pxdlist;
struct pxdlist pxdlist;
pxd_t *pxd;
tlock_t *tlck;
xtlock_t *xtlck;
struct tlock *tlck;
struct xtlock *xtlck;
smp = split->mp;
sp = XT_PAGE(ip, smp);
......@@ -995,7 +990,7 @@ xtSplitUp(tid_t tid,
/* Don't log it if there are no links to the file */
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, smp, tlckXTREE | tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset = (xtlck->lwm.offset) ?
min(skip, (int)xtlck->lwm.offset) : skip;
xtlck->lwm.length =
......@@ -1163,7 +1158,7 @@ xtSplitUp(tid_t tid,
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, smp,
tlckXTREE | tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset = (xtlck->lwm.offset) ?
min(skip, (int)xtlck->lwm.offset) : skip;
xtlck->lwm.length =
......@@ -1210,8 +1205,8 @@ xtSplitUp(tid_t tid,
* parameter:
* int tid,
* struct inode *ip,
* xtsplit_t *split,
* metapage_t **rmpp,
* struct xtsplit *split,
* struct metapage **rmpp,
* u64 *rbnp,
*
* return:
......@@ -1219,23 +1214,23 @@ xtSplitUp(tid_t tid,
*/
static int
xtSplitPage(tid_t tid, struct inode *ip,
xtsplit_t * split, metapage_t ** rmpp, s64 * rbnp)
struct xtsplit * split, struct metapage ** rmpp, s64 * rbnp)
{
int rc = 0;
metapage_t *smp;
struct metapage *smp;
xtpage_t *sp;
metapage_t *rmp;
struct metapage *rmp;
xtpage_t *rp; /* new right page allocated */
s64 rbn; /* new right page block number */
metapage_t *mp;
struct metapage *mp;
xtpage_t *p;
s64 nextbn;
int skip, maxentry, middle, righthalf, n;
xad_t *xad;
pxdlist_t *pxdlist;
struct pxdlist *pxdlist;
pxd_t *pxd;
tlock_t *tlck;
xtlock_t *sxtlck = 0, *rxtlck = 0;
struct tlock *tlck;
struct xtlock *sxtlck = 0, *rxtlck = 0;
smp = split->mp;
sp = XT_PAGE(ip, smp);
......@@ -1274,13 +1269,13 @@ xtSplitPage(tid_t tid, struct inode *ip,
* acquire a transaction lock on the new right page;
*/
tlck = txLock(tid, ip, rmp, tlckXTREE | tlckNEW);
rxtlck = (xtlock_t *) & tlck->lock;
rxtlck = (struct xtlock *) & tlck->lock;
rxtlck->lwm.offset = XTENTRYSTART;
/*
* acquire a transaction lock on the split page
*/
tlck = txLock(tid, ip, smp, tlckXTREE | tlckGROW);
sxtlck = (xtlock_t *) & tlck->lock;
sxtlck = (struct xtlock *) & tlck->lock;
}
/*
......@@ -1465,26 +1460,26 @@ xtSplitPage(tid_t tid, struct inode *ip,
* parameter:
* int tid,
* struct inode *ip,
* xtsplit_t *split,
* metapage_t **rmpp)
* struct xtsplit *split,
* struct metapage **rmpp)
*
* return:
* Pointer to page in which to insert or NULL on error.
*/
static int
xtSplitRoot(tid_t tid,
struct inode *ip, xtsplit_t * split, metapage_t ** rmpp)
struct inode *ip, struct xtsplit * split, struct metapage ** rmpp)
{
xtpage_t *sp;
metapage_t *rmp;
struct metapage *rmp;
xtpage_t *rp;
s64 rbn;
int skip, nextindex;
xad_t *xad;
pxd_t *pxd;
pxdlist_t *pxdlist;
tlock_t *tlck;
xtlock_t *xtlck;
struct pxdlist *pxdlist;
struct tlock *tlck;
struct xtlock *xtlck;
sp = &JFS_IP(ip)->i_xtroot;
......@@ -1546,7 +1541,7 @@ xtSplitRoot(tid_t tid,
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, rmp, tlckXTREE | tlckNEW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset = XTENTRYSTART;
xtlck->lwm.length = le16_to_cpu(rp->header.nextindex) -
XTENTRYSTART;
......@@ -1577,7 +1572,7 @@ xtSplitRoot(tid_t tid,
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, split->mp, tlckXTREE | tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset = XTENTRYSTART;
xtlck->lwm.length = 1;
}
......@@ -1608,16 +1603,16 @@ int xtExtend(tid_t tid, /* transaction id */
{
int rc = 0;
int cmp;
metapage_t *mp; /* meta-page buffer */
struct metapage *mp; /* meta-page buffer */
xtpage_t *p; /* base B+-tree index page */
s64 bn;
int index, nextindex, len;
btstack_t btstack; /* traverse stack */
xtsplit_t split; /* split information */
struct btstack btstack; /* traverse stack */
struct xtsplit split; /* split information */
xad_t *xad;
s64 xaddr;
tlock_t *tlck;
xtlock_t *xtlck = 0;
struct tlock *tlck;
struct xtlock *xtlck = 0;
int rootsplit = 0;
jFYI(1,
......@@ -1646,7 +1641,7 @@ int xtExtend(tid_t tid, /* transaction id */
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
}
/* extend will overflow extent ? */
......@@ -1701,7 +1696,7 @@ int xtExtend(tid_t tid, /* transaction id */
tlck = txLock(tid, ip, mp,
tlckXTREE |
tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
}
}
} else
......@@ -1768,17 +1763,17 @@ int xtTailgate(tid_t tid, /* transaction id */
{
int rc = 0;
int cmp;
metapage_t *mp; /* meta-page buffer */
struct metapage *mp; /* meta-page buffer */
xtpage_t *p; /* base B+-tree index page */
s64 bn;
int index, nextindex, llen, rlen;
btstack_t btstack; /* traverse stack */
xtsplit_t split; /* split information */
struct btstack btstack; /* traverse stack */
struct xtsplit split; /* split information */
xad_t *xad;
tlock_t *tlck;
xtlock_t *xtlck = 0;
tlock_t *mtlck;
maplock_t *pxdlock;
struct tlock *tlck;
struct xtlock *xtlck = 0;
struct tlock *mtlck;
struct maplock *pxdlock;
int rootsplit = 0;
/*
......@@ -1804,7 +1799,7 @@ printf("xtTailgate: nxoff:0x%lx nxlen:0x%x nxaddr:0x%lx\n",
*/
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
}
/* completely replace extent ? */
......@@ -1859,7 +1854,7 @@ printf("xtTailgate: xoff:0x%lx xlen:0x%x xaddr:0x%lx\n",
tlck = txLock(tid, ip, mp,
tlckXTREE |
tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
}
}
} else
......@@ -1892,7 +1887,7 @@ printf("xtTailgate: xoff:0x%lx xlen:0x%x xaddr:0x%lx\n",
/* free from PWMAP at commit */
if (!test_cflag(COMMIT_Nolink, ip)) {
mtlck = txMaplock(tid, ip, tlckMAP);
pxdlock = (maplock_t *) & mtlck->lock;
pxdlock = (struct maplock *) & mtlck->lock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, addressXAD(xad) + llen);
PXDlength(&pxdlock->pxd, rlen);
......@@ -1944,19 +1939,19 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
{ /* new XAD */
int rc = 0;
int cmp;
metapage_t *mp; /* meta-page buffer */
struct metapage *mp; /* meta-page buffer */
xtpage_t *p; /* base B+-tree index page */
s64 bn;
int index0, index, newindex, nextindex;
btstack_t btstack; /* traverse stack */
xtsplit_t split; /* split information */
struct btstack btstack; /* traverse stack */
struct xtsplit split; /* split information */
xad_t *xad, *lxad, *rxad;
int xflag;
s64 nxoff, xoff;
int nxlen, xlen, lxlen, rxlen;
s64 nxaddr, xaddr;
tlock_t *tlck;
xtlock_t *xtlck = 0;
struct tlock *tlck;
struct xtlock *xtlck = 0;
int rootsplit = 0, newpage = 0;
/* there must exist extent to be tailgated */
......@@ -1980,7 +1975,7 @@ printf("xtUpdate: nxflag:0x%x nxoff:0x%lx nxlen:0x%x nxaddr:0x%lx\n",
*/
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
}
xad = &p->xad[index0];
......@@ -2192,7 +2187,7 @@ printf("xtUpdate.updateRight.split p:0x%p\n", p);
tlck = txLock(tid, ip, mp,
tlckXTREE |
tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
}
}
} else {
......@@ -2255,7 +2250,7 @@ printf("xtUpdate.updateRight.split p:0x%p\n", p);
BT_MARK_DIRTY(mp, ip);
if (!test_cflag(COMMIT_Nolink, ip)) {
tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
}
index0 = index = newindex;
......@@ -2337,7 +2332,7 @@ printf("xtUpdate.updateLeft.split p:0x%p\n", p);
tlck = txLock(tid, ip, mp,
tlckXTREE |
tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
}
}
} else
......@@ -2397,18 +2392,18 @@ int xtAppend(tid_t tid, /* transaction id */
int flag)
{
int rc = 0;
metapage_t *mp; /* meta-page buffer */
struct metapage *mp; /* meta-page buffer */
xtpage_t *p; /* base B+-tree index page */
s64 bn, xaddr;
int index, nextindex;
btstack_t btstack; /* traverse stack */
xtsplit_t split; /* split information */
struct btstack btstack; /* traverse stack */
struct xtsplit split; /* split information */
xad_t *xad;
int cmp;
tlock_t *tlck;
xtlock_t *xtlck;
struct tlock *tlck;
struct xtlock *xtlck;
int nsplit, nblocks, xlen;
pxdlist_t pxdlist;
struct pxdlist pxdlist;
pxd_t *pxd;
xaddr = *xaddrp;
......@@ -2516,7 +2511,7 @@ int xtAppend(tid_t tid, /* transaction id */
* action: xad insertion/extension;
*/
tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
/* insert the new entry: mark the entry NEW */
xad = &p->xad[index];
......@@ -2561,14 +2556,14 @@ int xtAppend(tid_t tid, /* transaction id */
int xtDelete(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag)
{
int rc = 0;
btstack_t btstack;
struct btstack btstack;
int cmp;
s64 bn;
metapage_t *mp;
struct metapage *mp;
xtpage_t *p;
int index, nextindex;
tlock_t *tlck;
xtlock_t *xtlck;
struct tlock *tlck;
struct xtlock *xtlck;
/*
* find the matching entry; xtSearch() pins the page
......@@ -2603,7 +2598,7 @@ int xtDelete(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag)
* action:xad deletion;
*/
tlck = txLock(tid, ip, mp, tlckXTREE);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset =
(xtlck->lwm.offset) ? min(index, xtlck->lwm.offset) : index;
......@@ -2630,19 +2625,18 @@ int xtDelete(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag)
* return:
*/
static int
xtDeleteUp(tid_t tid,
struct inode *ip,
metapage_t * fmp, xtpage_t * fp, btstack_t * btstack)
xtDeleteUp(tid_t tid, struct inode *ip,
struct metapage * fmp, xtpage_t * fp, struct btstack * btstack)
{
int rc = 0;
metapage_t *mp;
struct metapage *mp;
xtpage_t *p;
int index, nextindex;
s64 xaddr;
int xlen;
btframe_t *parent;
tlock_t *tlck;
xtlock_t *xtlck;
struct btframe *parent;
struct tlock *tlck;
struct xtlock *xtlck;
/*
* keep root leaf page which has become empty
......@@ -2736,7 +2730,7 @@ xtDeleteUp(tid_t tid,
* action:xad deletion;
*/
tlck = txLock(tid, ip, mp, tlckXTREE);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset =
(xtlck->lwm.offset) ? min(index,
xtlck->lwm.
......@@ -2785,10 +2779,10 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
int xtype)
{ /* extent type: XTPAGE or DATAEXT */
int rc = 0;
tblock_t *tblk;
tlock_t *tlck;
xtlock_t *xtlck;
metapage_t *mp, *pmp, *lmp, *rmp; /* meta-page buffer */
struct tblock *tblk;
struct tlock *tlck;
struct xtlock *xtlck;
struct metapage *mp, *pmp, *lmp, *rmp; /* meta-page buffer */
xtpage_t *p, *pp, *rp, *lp; /* base B+-tree index page */
xad_t *xad;
pxd_t *pxd;
......@@ -2801,8 +2795,8 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
s64 bn;
int cmp;
int index;
pxdlock_t *pxdlock;
btstack_t btstack; /* traverse stack */
struct pxd_lock *pxdlock;
struct btstack btstack; /* traverse stack */
xtype = xtype & EXTENT_TYPE;
......@@ -3016,7 +3010,7 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
BT_MARK_DIRTY(mp, ip);
/* tlckNEW init xtlck->lwm.offset = XTENTRYSTART; */
tlck = txLock(tid, ip, mp, tlckXTREE | tlckNEW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
/* update the self address in the xtpage header */
pxd = &p->header.self;
......@@ -3060,7 +3054,7 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
else /* (xtype == XTPAGE) */
tlck = txMaplock(tid, ip, tlckMAP | tlckRELOCATE);
pxdlock = (pxdlock_t *) & tlck->lock;
pxdlock = (struct pxd_lock *) & tlck->lock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, oxaddr);
PXDlength(&pxdlock->pxd, xlen);
......@@ -3076,7 +3070,7 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
jEVENT(0, ("xtRelocate: update parent xad entry.\n"));
BT_MARK_DIRTY(pmp, ip);
tlck = txLock(tid, ip, pmp, tlckXTREE | tlckGROW);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
/* update the XAD with the new destination extent; */
xad = &pp->xad[index];
......@@ -3113,17 +3107,17 @@ xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */
* the page containing the entry is pinned at exit.
*/
static int xtSearchNode(struct inode *ip, xad_t * xad, /* required XAD entry */
int *cmpp, btstack_t * btstack, int flag)
int *cmpp, struct btstack * btstack, int flag)
{
int rc = 0;
s64 xoff, xaddr;
int xlen;
int cmp = 1; /* init for empty page */
s64 bn; /* block number */
metapage_t *mp; /* meta-page buffer */
struct metapage *mp; /* meta-page buffer */
xtpage_t *p; /* page */
int base, index, lim;
btframe_t *btsp;
struct btframe *btsp;
s64 t64;
BT_CLR(btstack);
......@@ -3233,9 +3227,9 @@ static int xtSearchNode(struct inode *ip, xad_t * xad, /* required XAD entry */
static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * p)
{
int rc = 0;
metapage_t *mp;
struct metapage *mp;
s64 nextbn, prevbn;
tlock_t *tlck;
struct tlock *tlck;
nextbn = le64_to_cpu(p->header.next);
prevbn = le64_to_cpu(p->header.prev);
......@@ -3295,14 +3289,14 @@ static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * p)
void xtInitRoot(tid_t tid, struct inode *ip)
{
xtpage_t *p;
tlock_t *tlck;
struct tlock *tlck;
/*
* acquire a transaction lock on the root
*
* action:
*/
tlck = txLock(tid, ip, (metapage_t *) &JFS_IP(ip)->bxflag,
tlck = txLock(tid, ip, (struct metapage *) &JFS_IP(ip)->bxflag,
tlckXTREE | tlckNEW);
p = &JFS_IP(ip)->i_xtroot;
......@@ -3386,20 +3380,20 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
{
int rc = 0;
s64 teof;
metapage_t *mp;
struct metapage *mp;
xtpage_t *p;
s64 bn;
int index, nextindex;
xad_t *xad;
s64 xoff, xaddr;
int xlen, len, freexlen;
btstack_t btstack;
btframe_t *parent;
tblock_t *tblk = 0;
tlock_t *tlck = 0;
xtlock_t *xtlck = 0;
xdlistlock_t xadlock; /* maplock for COMMIT_WMAP */
pxdlock_t *pxdlock; /* maplock for COMMIT_WMAP */
struct btstack btstack;
struct btframe *parent;
struct tblock *tblk = 0;
struct tlock *tlck = 0;
struct xtlock *xtlck = 0;
struct xdlistlock xadlock; /* maplock for COMMIT_WMAP */
struct pxd_lock *pxdlock; /* maplock for COMMIT_WMAP */
s64 nfreed;
int freed, log;
int locked_leaves = 0;
......@@ -3509,7 +3503,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
}
tlck = txLock(tid, ip, mp, tlckXTREE);
tlck->type = tlckXTREE | tlckTRUNCATE;
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->hwm.offset = le16_to_cpu(p->header.nextindex) - 1;
}
BT_MARK_DIRTY(mp, ip);
......@@ -3574,7 +3568,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
xtlck->lwm.length = index + 1 -
xtlck->lwm.offset;
xtlck->twm.offset = index;
pxdlock = (pxdlock_t *) & xtlck->pxdlock;
pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, xaddr);
PXDlength(&pxdlock->pxd, freexlen);
......@@ -3582,7 +3576,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
/* free truncated extent */
else { /* COMMIT_WMAP */
pxdlock = (pxdlock_t *) & xadlock;
pxdlock = (struct pxd_lock *) & xadlock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, xaddr);
PXDlength(&pxdlock->pxd, freexlen);
......@@ -3614,7 +3608,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
xadlock.count =
le16_to_cpu(p->header.nextindex) -
nextindex;
txFreeMap(ip, (maplock_t *) & xadlock, 0,
txFreeMap(ip, (struct maplock *) & xadlock, 0,
COMMIT_WMAP);
}
p->header.nextindex = cpu_to_le16(nextindex);
......@@ -3644,7 +3638,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
xadlock.xdlist = &p->xad[XTENTRYSTART];
xadlock.count =
le16_to_cpu(p->header.nextindex) - XTENTRYSTART;
txFreeMap(ip, (maplock_t *) & xadlock, 0, COMMIT_WMAP);
txFreeMap(ip, (struct maplock *) & xadlock, 0, COMMIT_WMAP);
}
if (p->header.flag & BT_ROOT) {
......@@ -3704,7 +3698,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
* free child extents covered by parent [);
*/
tlck = txLock(tid, ip, mp, tlckXTREE);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
if (!(tlck->type & tlckTRUNCATE)) {
xtlck->hwm.offset =
le16_to_cpu(p->header.
......@@ -3719,7 +3713,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
xadlock.count =
le16_to_cpu(p->header.nextindex) -
index - 1;
txFreeMap(ip, (maplock_t *) & xadlock, 0,
txFreeMap(ip, (struct maplock *) & xadlock, 0,
COMMIT_WMAP);
}
BT_MARK_DIRTY(mp, ip);
......@@ -3751,7 +3745,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
if (log && mp->lid && (tblk->last != mp->lid) &&
lid_to_tlock(mp->lid)->tid) {
lid_t lid = mp->lid;
tlock_t *prev;
struct tlock *prev;
tlck = lid_to_tlock(lid);
......@@ -3780,7 +3774,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
* invalidate parent if COMMIT_PWMAP;
*/
tlck = txLock(tid, ip, mp, tlckXTREE);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->hwm.offset =
le16_to_cpu(p->header.nextindex) - 1;
tlck->type = tlckXTREE | tlckFREE;
......@@ -3791,7 +3785,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
xadlock.count =
le16_to_cpu(p->header.nextindex) -
XTENTRYSTART;
txFreeMap(ip, (maplock_t *) & xadlock, 0,
txFreeMap(ip, (struct maplock *) & xadlock, 0,
COMMIT_WMAP);
}
BT_MARK_DIRTY(mp, ip);
......@@ -3919,20 +3913,20 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
{
s64 bn;
btstack_t btstack;
struct btstack btstack;
int cmp;
int index;
int locked_leaves = 0;
metapage_t *mp;
struct metapage *mp;
xtpage_t *p;
btframe_t *parent;
struct btframe *parent;
int rc;
tblock_t *tblk;
tlock_t *tlck = 0;
struct tblock *tblk;
struct tlock *tlck = 0;
xad_t *xad;
int xlen;
s64 xoff;
xtlock_t *xtlck = 0;
struct xtlock *xtlck = 0;
/* save object truncation type */
tblk = tid_to_tblock(tid);
......@@ -3988,7 +3982,7 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
}
tlck = txLock(tid, ip, mp, tlckXTREE);
tlck->type = tlckXTREE | tlckFREE;
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->hwm.offset = index;
......@@ -4020,7 +4014,7 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
* invalidate parent if COMMIT_PWMAP;
*/
tlck = txLock(tid, ip, mp, tlckXTREE);
xtlck = (xtlock_t *) & tlck->lock;
xtlck = (struct xtlock *) & tlck->lock;
xtlck->hwm.offset =
le16_to_cpu(p->header.nextindex) - 1;
tlck->type = tlckXTREE | tlckFREE;
......@@ -4074,14 +4068,14 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
int xtDisplayTree(struct inode *ip)
{
int rc = 0;
metapage_t *mp;
struct metapage *mp;
xtpage_t *p;
s64 bn, pbn;
int index, lastindex, v, h;
xad_t *xad;
btstack_t btstack;
btframe_t *btsp;
btframe_t *parent;
struct btstack btstack;
struct btframe *btsp;
struct btframe *parent;
printk("display B+-tree.\n");
......@@ -4195,7 +4189,7 @@ int xtDisplayTree(struct inode *ip)
int xtDisplayPage(struct inode *ip, s64 bn, xtpage_t * p)
{
int rc = 0;
metapage_t *mp;
struct metapage *mp;
xad_t *xad;
s64 xaddr, xoff;
int xlen, i, j;
......@@ -4254,7 +4248,7 @@ btree_t *t;
u64 bn;
int index;
btentry_t *e;
btstack_t btstack;
struct btstack btstack;
struct btsf *parent;
/* clear stack */
......
......@@ -64,11 +64,11 @@ typedef struct xad {
#define lengthXAD(xad) __le24_to_cpu((xad)->len)
/* xad list */
typedef struct {
struct xadlist {
s16 maxnxad;
s16 nxad;
xad_t *xad;
} xadlist_t;
};
/* xad_t flags */
#define XAD_NEW 0x01 /* new */
......@@ -110,8 +110,8 @@ typedef union {
*/
extern int xtLookup(struct inode *ip, s64 lstart, s64 llen,
int *pflag, s64 * paddr, int *plen, int flag);
extern int xtLookupList(struct inode *ip, lxdlist_t * lxdlist,
xadlist_t * xadlist, int flag);
extern int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
struct xadlist * xadlist, int flag);
extern void xtInitRoot(tid_t tid, struct inode *ip);
extern int xtInsert(tid_t tid, struct inode *ip,
int xflag, s64 xoff, int xlen, s64 * xaddrp, int flag);
......
......@@ -63,10 +63,10 @@ int jfs_create(struct inode *dip, struct dentry *dentry, int mode)
tid_t tid; /* transaction id */
struct inode *ip = NULL; /* child directory inode */
ino_t ino;
component_t dname; /* child directory name */
btstack_t btstack;
struct component_name dname; /* child directory name */
struct btstack btstack;
struct inode *iplist[2];
tblock_t *tblk;
struct tblock *tblk;
jFYI(1, ("jfs_create: dip:0x%p name:%s\n", dip, dentry->d_name.name));
......@@ -178,10 +178,10 @@ int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
tid_t tid; /* transaction id */
struct inode *ip = NULL; /* child directory inode */
ino_t ino;
component_t dname; /* child directory name */
btstack_t btstack;
struct component_name dname; /* child directory name */
struct btstack btstack;
struct inode *iplist[2];
tblock_t *tblk;
struct tblock *tblk;
jFYI(1, ("jfs_mkdir: dip:0x%p name:%s\n", dip, dentry->d_name.name));
......@@ -306,9 +306,9 @@ int jfs_rmdir(struct inode *dip, struct dentry *dentry)
tid_t tid; /* transaction id */
struct inode *ip = dentry->d_inode;
ino_t ino;
component_t dname;
struct component_name dname;
struct inode *iplist[2];
tblock_t *tblk;
struct tblock *tblk;
jFYI(1, ("jfs_rmdir: dip:0x%p name:%s\n", dip, dentry->d_name.name));
......@@ -429,9 +429,9 @@ int jfs_unlink(struct inode *dip, struct dentry *dentry)
tid_t tid; /* transaction id */
struct inode *ip = dentry->d_inode;
ino_t ino;
component_t dname; /* object name */
struct component_name dname; /* object name */
struct inode *iplist[2];
tblock_t *tblk;
struct tblock *tblk;
s64 new_size = 0;
int commit_flag;
......@@ -580,7 +580,7 @@ int jfs_unlink(struct inode *dip, struct dentry *dentry)
s64 commitZeroLink(tid_t tid, struct inode *ip)
{
int filetype;
tblock_t *tblk;
struct tblock *tblk;
jFYI(1, ("commitZeroLink: tid = %d, ip = 0x%p\n", tid, ip));
......@@ -675,15 +675,15 @@ int freeZeroLink(struct inode *ip)
if (JFS_IP(ip)->ea.flag & DXD_EXTENT) {
s64 xaddr = addressDXD(&JFS_IP(ip)->ea);
int xlen = lengthDXD(&JFS_IP(ip)->ea);
maplock_t maplock; /* maplock for COMMIT_WMAP */
pxdlock_t *pxdlock; /* maplock for COMMIT_WMAP */
struct maplock maplock; /* maplock for COMMIT_WMAP */
struct pxd_lock *pxdlock; /* maplock for COMMIT_WMAP */
/* free EA pages from cache */
invalidate_dxd_metapages(ip, JFS_IP(ip)->ea);
/* free EA extent from working block map */
maplock.index = 1;
pxdlock = (pxdlock_t *) & maplock;
pxdlock = (struct pxd_lock *) & maplock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, xaddr);
PXDlength(&pxdlock->pxd, xlen);
......@@ -696,14 +696,14 @@ int freeZeroLink(struct inode *ip)
if (JFS_IP(ip)->acl.flag & DXD_EXTENT) {
s64 xaddr = addressDXD(&JFS_IP(ip)->acl);
int xlen = lengthDXD(&JFS_IP(ip)->acl);
maplock_t maplock; /* maplock for COMMIT_WMAP */
pxdlock_t *pxdlock; /* maplock for COMMIT_WMAP */
struct maplock maplock; /* maplock for COMMIT_WMAP */
struct pxd_lock *pxdlock; /* maplock for COMMIT_WMAP */
invalidate_dxd_metapages(ip, JFS_IP(ip)->acl);
/* free ACL extent from working block map */
maplock.index = 1;
pxdlock = (pxdlock_t *) & maplock;
pxdlock = (struct pxd_lock *) & maplock;
pxdlock->flag = mlckFREEPXD;
PXDaddress(&pxdlock->pxd, xaddr);
PXDlength(&pxdlock->pxd, xlen);
......@@ -752,8 +752,8 @@ int jfs_link(struct dentry *old_dentry,
tid_t tid;
struct inode *ip = old_dentry->d_inode;
ino_t ino;
component_t dname;
btstack_t btstack;
struct component_name dname;
struct btstack btstack;
struct inode *iplist[2];
jFYI(1,
......@@ -830,17 +830,17 @@ int jfs_symlink(struct inode *dip, struct dentry *dentry, const char *name)
int rc;
tid_t tid;
ino_t ino = 0;
component_t dname;
struct component_name dname;
int ssize; /* source pathname size */
btstack_t btstack;
struct btstack btstack;
struct inode *ip = dentry->d_inode;
unchar *i_fastsymlink;
s64 xlen = 0;
int bmask = 0, xsize;
s64 xaddr;
metapage_t *mp;
struct metapage *mp;
struct super_block *sb;
tblock_t *tblk;
struct tblock *tblk;
struct inode *iplist[2];
......@@ -1030,20 +1030,20 @@ int jfs_symlink(struct inode *dip, struct dentry *dentry, const char *name)
int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
btstack_t btstack;
struct btstack btstack;
ino_t ino;
component_t new_dname;
struct component_name new_dname;
struct inode *new_ip;
component_t old_dname;
struct component_name old_dname;
struct inode *old_ip;
int rc;
tid_t tid;
tlock_t *tlck;
dtlock_t *dtlck;
lv_t *lv;
struct tlock *tlck;
struct dt_lock *dtlck;
struct lv *lv;
int ipcount;
struct inode *iplist[4];
tblock_t *tblk;
struct tblock *tblk;
s64 new_size = 0;
int commit_flag;
......@@ -1194,11 +1194,11 @@ int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
/* Linelock header of dtree */
tlck = txLock(tid, old_ip,
(metapage_t *) & JFS_IP(old_ip)->bxflag,
(struct metapage *) &JFS_IP(old_ip)->bxflag,
tlckDTREE | tlckBTROOT);
dtlck = (dtlock_t *) & tlck->lock;
dtlck = (struct dt_lock *) & tlck->lock;
ASSERT(dtlck->index == 0);
lv = (lv_t *) & dtlck->lv[0];
lv = & dtlck->lv[0];
lv->offset = 0;
lv->length = 1;
dtlck->index++;
......@@ -1301,14 +1301,14 @@ int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
int jfs_mknod(struct inode *dir, struct dentry *dentry, int mode, int rdev)
{
btstack_t btstack;
component_t dname;
struct btstack btstack;
struct component_name dname;
ino_t ino;
struct inode *ip;
struct inode *iplist[2];
int rc;
tid_t tid;
tblock_t *tblk;
struct tblock *tblk;
jFYI(1, ("jfs_mknod: %s\n", dentry->d_name.name));
......@@ -1371,10 +1371,10 @@ int jfs_mknod(struct inode *dir, struct dentry *dentry, int mode, int rdev)
static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry)
{
btstack_t btstack;
struct btstack btstack;
ino_t inum;
struct inode *ip;
component_t key;
struct component_name key;
const char *name = dentry->d_name.name;
int len = dentry->d_name.len;
int rc;
......
......@@ -65,8 +65,8 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
struct inode *ipbmap = sbi->ipbmap;
struct inode *ipbmap2;
struct inode *ipimap = sbi->ipimap;
log_t *log = sbi->log;
bmap_t *bmp = sbi->bmap;
struct jfs_log *log = sbi->log;
struct bmap *bmp = sbi->bmap;
s64 newLogAddress, newFSCKAddress;
int newFSCKSize;
s64 newMapSize = 0, mapSize;
......
......@@ -101,7 +101,7 @@ static int jfs_statfs(struct super_block *sb, struct statfs *buf)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
s64 maxinodes;
imap_t *imap = JFS_IP(sbi->ipimap)->i_imap;
struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;
jFYI(1, ("In jfs_statfs\n"));
buf->f_type = JFS_SUPER_MAGIC;
......@@ -337,7 +337,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
static void jfs_write_super_lockfs(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
log_t *log = sbi->log;
struct jfs_log *log = sbi->log;
if (!(sb->s_flags & MS_RDONLY)) {
txQuiesce(sb);
......@@ -348,7 +348,7 @@ static void jfs_write_super_lockfs(struct super_block *sb)
static void jfs_unlockfs(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
log_t *log = sbi->log;
struct jfs_log *log = sbi->log;
int rc = 0;
if (!(sb->s_flags & MS_RDONLY)) {
......
......@@ -174,7 +174,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
char *cp;
s32 nbytes, nb;
s32 bytes_to_write;
metapage_t *mp;
struct metapage *mp;
/*
* Quick check to see if this is an in-linable EA. Short EAs
......@@ -311,7 +311,7 @@ static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
int i;
int nbytes, nb;
s32 bytes_to_read;
metapage_t *mp;
struct metapage *mp;
/* quick check for in-line EA */
if (ji->ea.flag & DXD_INLINE)
......