Commit ba6c76ed authored by Dave Kleikamp

Merge jfs@jfs.bkbits.net:linux-2.5

into kleikamp.austin.ibm.com:/home/shaggy/bk/jfs-2.5
parents 2d92e3e8 d17e9bb6
......@@ -22,7 +22,6 @@
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <asm/semaphore.h>
#include <asm/smplock.h>
#include "ieee1394_types.h"
#include "ieee1394.h"
......
......@@ -233,6 +233,7 @@ nfs_xdr_readargs(struct rpc_rqst *req, u32 *p, struct nfs_readargs *args)
static int
nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct iovec *iov = req->rq_rvec;
int status, count, recvd, hdrlen;
......@@ -241,25 +242,33 @@ nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
p = xdr_decode_fattr(p, res->fattr);
count = ntohl(*p++);
res->eof = 0;
if (rcvbuf->page_len) {
u32 end = page_offset(rcvbuf->pages[0]) + rcvbuf->page_base + count;
if (end >= res->fattr->size)
res->eof = 1;
}
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len > hdrlen) {
if (iov->iov_len < hdrlen) {
printk(KERN_WARNING "NFS: READ reply header overflowed:"
"length %d > %d\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READ header is short. iovec will be shifted.\n");
xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
}
recvd = req->rq_rlen - hdrlen;
recvd = req->rq_received - hdrlen;
if (count > recvd) {
printk(KERN_WARNING "NFS: server cheating in read reply: "
"count %d > recvd %d\n", count, recvd);
count = recvd;
res->eof = 0;
}
dprintk("RPC: readres OK count %d\n", count);
if (count < res->count) {
if (count < res->count)
res->count = count;
res->eof = 1; /* Silly NFSv3ism which can't be helped */
} else
res->eof = 0;
return count;
}
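The pattern in this hunk repeats through the READ and READDIR hunks below: compute the reply-header length from the decode pointer, shift the iovec when the header came up short, and clamp the decoded count to req->rq_received (by its use here, the number of bytes actually received) instead of req->rq_rlen (the expected reply length), so a short or lying reply can never make the client consume bytes that never arrived. A minimal userspace sketch of the clamp, with illustrative names, not taken from the commit:

#include <stdio.h>

/* Sketch only: models the recvd clamp used by nfs_xdr_readres. */
static int clamp_read_count(int count, int received, int hdrlen)
{
	int recvd = received - hdrlen;   /* payload bytes that arrived */

	if (count > recvd) {
		fprintf(stderr, "server cheating in read reply: "
			"count %d > recvd %d\n", count, recvd);
		count = recvd;           /* trust only what we received */
	}
	return count;
}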
......@@ -384,7 +393,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct iovec *iov = rcvbuf->head;
struct page **page;
int hdrlen;
int hdrlen, recvd;
int status, nr;
unsigned int len, pglen;
u32 *end, *entry;
......@@ -393,17 +402,24 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
return -nfs_stat_to_errno(status);
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len > hdrlen) {
if (iov->iov_len < hdrlen) {
printk(KERN_WARNING "NFS: READDIR reply header overflowed:"
"length %d > %d\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
}
pglen = rcvbuf->page_len;
recvd = req->rq_received - hdrlen;
if (pglen > recvd)
pglen = recvd;
page = rcvbuf->pages;
p = kmap(*page);
end = (u32 *)((char *)p + pglen);
entry = p;
for (nr = 0; *p++; nr++) {
entry = p - 1;
if (p + 2 > end)
goto short_pkt;
p++; /* fileid */
......@@ -416,14 +432,21 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
}
if (p + 2 > end)
goto short_pkt;
entry = p;
}
if (!nr)
goto short_pkt;
out:
kunmap(*page);
return nr;
short_pkt:
printk(KERN_NOTICE "NFS: short packet in readdir reply!\n");
entry[0] = entry[1] = 0;
kunmap(*page);
return nr;
/* truncate listing ? */
if (!nr) {
printk(KERN_NOTICE "NFS: readdir reply truncated!\n");
entry[1] = 1;
}
goto out;
err_unmap:
kunmap(*page);
return -errno_NFSERR_IO;
......
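The reworked short_pkt path above no longer drops the reply; it terminates the entry stream in place. In the XDR readdir encoding each entry begins with a nonzero "value follows" word and the stream ends with a zero word followed by an EOF word, so writing entry[0] = entry[1] = 0 truncates the listing cleanly; setting entry[1] = 1 when nothing at all was decoded plausibly marks EOF so the caller does not re-request forever. A sketch of walking such a stream (names invented, entry layout simplified to two words):

#include <stdint.h>

/* Sketch only: counts entries in a stream terminated by a zero
 * "value follows" word, as the short_pkt path arranges above. */
static int count_entries(const uint32_t *p, const uint32_t *end)
{
	int nr = 0;

	while (p + 2 <= end && p[0] != 0) {
		p += 2;          /* skip the simplified entry body */
		nr++;
	}
	return nr;
}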
......@@ -504,7 +504,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct iovec *iov = rcvbuf->head;
struct page **page;
int hdrlen;
int hdrlen, recvd;
int status, nr;
unsigned int len, pglen;
u32 *entry, *end;
......@@ -523,17 +523,24 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
}
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len > hdrlen) {
if (iov->iov_len < hdrlen) {
printk(KERN_WARNING "NFS: READDIR reply header overflowed:"
"length %d > %d\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
}
pglen = rcvbuf->page_len;
recvd = req->rq_received - hdrlen;
if (pglen > recvd)
pglen = recvd;
page = rcvbuf->pages;
p = kmap(*page);
end = (u32 *)((char *)p + pglen);
entry = p;
for (nr = 0; *p++; nr++) {
entry = p - 1;
if (p + 3 > end)
goto short_pkt;
p += 2; /* inode # */
......@@ -570,15 +577,21 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
if (p + 2 > end)
goto short_pkt;
entry = p;
}
if (!nr)
goto short_pkt;
out:
kunmap(*page);
return nr;
short_pkt:
printk(KERN_NOTICE "NFS: short packet in readdir reply!\n");
/* truncate listing */
entry[0] = entry[1] = 0;
kunmap(*page);
return nr;
/* truncate listing ? */
if (!nr) {
printk(KERN_NOTICE "NFS: readdir reply truncated!\n");
entry[1] = 1;
}
goto out;
err_unmap:
kunmap(*page);
return -errno_NFSERR_IO;
......@@ -793,16 +806,21 @@ nfs3_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
}
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len > hdrlen) {
if (iov->iov_len < hdrlen) {
printk(KERN_WARNING "NFS: READ reply header overflowed:"
"length %d > %d\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READ header is short. iovec will be shifted.\n");
xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
}
recvd = req->rq_rlen - hdrlen;
recvd = req->rq_received - hdrlen;
if (count > recvd) {
printk(KERN_WARNING "NFS: server cheating in read reply: "
"count %d > recvd %d\n", count, recvd);
count = recvd;
res->eof = 0;
}
if (count < res->count)
......
......@@ -424,9 +424,14 @@ nfs_readpage_result(struct rpc_task *task)
memset(p + count, 0, PAGE_CACHE_SIZE - count);
kunmap(page);
count = 0;
} else
if (data->res.eof)
SetPageUptodate(page);
else
SetPageError(page);
} else {
count -= PAGE_CACHE_SIZE;
SetPageUptodate(page);
SetPageUptodate(page);
}
} else
SetPageError(page);
flush_dcache_page(page);
......
......@@ -83,6 +83,12 @@ struct romfs_inode_info {
struct inode vfs_inode;
};
/* instead of private superblock data */
static inline unsigned long romfs_maxsize(struct super_block *sb)
{
return (unsigned long)sb->u.generic_sbp;
}
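romfs's only per-mount state is this one size, so the commit drops struct romfs_sb_info entirely and encodes the value directly in the generic_sbp pointer; the inline above decodes it. A self-contained model of the store/load pair, assuming the size fits in an unsigned long:

#include <assert.h>

struct super_block { union { void *generic_sbp; } u; };

static unsigned long romfs_maxsize(struct super_block *sb)
{
	return (unsigned long)sb->u.generic_sbp;
}

int main(void)
{
	struct super_block s;
	unsigned long sz = 4096;

	s.u.generic_sbp = (void *)sz;     /* what romfs_fill_super now does */
	assert(romfs_maxsize(&s) == sz);
	return 0;
}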
static inline struct romfs_inode_info *ROMFS_I(struct inode *inode)
{
return list_entry(inode, struct romfs_inode_info, vfs_inode);
......@@ -113,7 +119,6 @@ static int romfs_fill_super(struct super_block *s, void *data, int silent)
/* I would parse the options here, but there are none.. :) */
sb_set_blocksize(s, ROMBSIZE);
s->u.generic_sbp = (void *) 0;
s->s_maxbytes = 0xFFFFFFFF;
bh = sb_bread(s, 0);
......@@ -139,7 +144,7 @@ static int romfs_fill_super(struct super_block *s, void *data, int silent)
}
s->s_magic = ROMFS_MAGIC;
s->u.romfs_sb.s_maxsize = sz;
s->u.generic_sbp = (void *)sz;
s->s_flags |= MS_RDONLY;
......@@ -175,7 +180,7 @@ romfs_statfs(struct super_block *sb, struct statfs *buf)
buf->f_type = ROMFS_MAGIC;
buf->f_bsize = ROMBSIZE;
buf->f_bfree = buf->f_bavail = buf->f_ffree;
buf->f_blocks = (sb->u.romfs_sb.s_maxsize+ROMBSIZE-1)>>ROMBSBITS;
buf->f_blocks = (romfs_maxsize(sb)+ROMBSIZE-1)>>ROMBSBITS;
buf->f_namelen = ROMFS_MAXFN;
return 0;
}
......@@ -188,7 +193,7 @@ romfs_strnlen(struct inode *i, unsigned long offset, unsigned long count)
struct buffer_head *bh;
unsigned long avail, maxsize, res;
maxsize = i->i_sb->u.romfs_sb.s_maxsize;
maxsize = romfs_maxsize(i->i_sb);
if (offset >= maxsize)
return -1;
......@@ -230,7 +235,7 @@ romfs_copyfrom(struct inode *i, void *dest, unsigned long offset, unsigned long
struct buffer_head *bh;
unsigned long avail, maxsize, res;
maxsize = i->i_sb->u.romfs_sb.s_maxsize;
maxsize = romfs_maxsize(i->i_sb);
if (offset >= maxsize || count > maxsize || offset+count>maxsize)
return -1;
......@@ -275,8 +280,8 @@ romfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
char fsname[ROMFS_MAXFN]; /* XXX dynamic? */
lock_kernel();
maxoff = i->i_sb->u.romfs_sb.s_maxsize;
maxoff = romfs_maxsize(i->i_sb);
offset = filp->f_pos;
if (!offset) {
......@@ -339,7 +344,7 @@ romfs_lookup(struct inode *dir, struct dentry *dentry)
if (romfs_copyfrom(dir, &ri, offset, ROMFH_SIZE) <= 0)
goto out;
maxoff = dir->i_sb->u.romfs_sb.s_maxsize;
maxoff = romfs_maxsize(dir->i_sb);
offset = ntohl(ri.spec) & ROMFH_MASK;
/* OK, now find the file whose name is in "dentry" in the
......
......@@ -47,7 +47,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
usb1 = ubh_get_usb_first(USPI_UBH);
UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
......@@ -89,7 +89,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
fs32_add(sb, &sb->fs_cs(cgno).cs_nffree, count);
fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
......@@ -100,12 +100,12 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, uspi->s_fpb);
if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, 1);
fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
fs32_add(sb, &sb->fs_cs(cgno).cs_nbfree, 1);
fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
cylno = ufs_cbtocylno (bbase);
fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
......@@ -141,7 +141,7 @@ void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
usb1 = ubh_get_usb_first(USPI_UBH);
UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
......@@ -184,13 +184,13 @@ void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
}
ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, 1);
DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
fs32_add(sb, &sb->fs_cs(cgno).cs_nbfree, 1);
fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
cylno = ufs_cbtocylno(i);
fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
......@@ -247,7 +247,7 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
usb1 = ubh_get_usb_first(USPI_UBH);
*err = -ENOSPC;
......@@ -285,7 +285,7 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
return 0;
}
}
/*
* There is not enough space for the user on the device
*/
......@@ -293,8 +293,8 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
unlock_super (sb);
UFSD(("EXIT (FAILED)\n"))
return 0;
}
}
if (goal >= uspi->s_size)
goal = 0;
if (goal == 0)
......@@ -407,12 +407,12 @@ unsigned ufs_add_fragments (struct inode * inode, unsigned fragment,
UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
usb1 = ubh_get_usb_first (USPI_UBH);
count = newcount - oldcount;
cgno = ufs_dtog(fragment);
if (sb->fs_cs(cgno).cs_nffree < count)
if (UFS_SB(sb)->fs_cs(cgno).cs_nffree < count)
return 0;
if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
return 0;
......@@ -453,7 +453,7 @@ unsigned ufs_add_fragments (struct inode * inode, unsigned fragment,
}
fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, count);
fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
ubh_mark_buffer_dirty (USPI_UBH);
......@@ -470,7 +470,7 @@ unsigned ufs_add_fragments (struct inode * inode, unsigned fragment,
}
#define UFS_TEST_FREE_SPACE_CG \
ucg = (struct ufs_cylinder_group *) sb->u.ufs_sb.s_ucg[cgno]->b_data; \
ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
goto cg_found; \
for (k = count; k < uspi->s_fpb; k++) \
......@@ -490,7 +490,7 @@ unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
usb1 = ubh_get_usb_first(USPI_UBH);
oldcg = cgno;
......@@ -557,7 +557,7 @@ unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
fs32_add(sb, &sb->fs_cs(cgno).cs_nffree, i);
fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
fs32_add(sb, &ucg->cg_frsum[i], 1);
goto succed;
}
......@@ -574,7 +574,7 @@ unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
fs32_sub(sb, &sb->fs_cs(cgno).cs_nffree, count);
fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
if (count != allocsize)
......@@ -606,7 +606,7 @@ unsigned ufs_alloccg_block (struct inode * inode,
UFSD(("ENTER, goal %u\n", goal))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
usb1 = ubh_get_usb_first(USPI_UBH);
ucg = ubh_get_ucg(UCPI_UBH);
......@@ -633,7 +633,7 @@ unsigned ufs_alloccg_block (struct inode * inode,
gotit:
blkno = ufs_fragstoblks(result);
ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, -1);
if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
*err = -EDQUOT;
......@@ -642,7 +642,7 @@ unsigned ufs_alloccg_block (struct inode * inode,
fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
fs32_sub(sb, &sb->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
cylno = ufs_cbtocylno(result);
fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
......@@ -663,7 +663,7 @@ unsigned ufs_bitmap_search (struct super_block * sb,
UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
usb1 = ubh_get_usb_first (USPI_UBH);
ucg = ubh_get_ucg(UCPI_UBH);
......@@ -729,7 +729,7 @@ void ufs_clusteracct(struct super_block * sb,
struct ufs_sb_private_info * uspi;
int i, start, end, forw, back;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
if (uspi->s_contigsumsize <= 0)
return;
......
......@@ -36,26 +36,27 @@
static void ufs_read_cylinder (struct super_block * sb,
unsigned cgno, unsigned bitmap_nr)
{
struct ufs_sb_info * sbi = UFS_SB(sb);
struct ufs_sb_private_info * uspi;
struct ufs_cg_private_info * ucpi;
struct ufs_cylinder_group * ucg;
unsigned i, j;
UFSD(("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr))
uspi = sb->u.ufs_sb.s_uspi;
ucpi = sb->u.ufs_sb.s_ucpi[bitmap_nr];
ucg = (struct ufs_cylinder_group *)sb->u.ufs_sb.s_ucg[cgno]->b_data;
uspi = sbi->s_uspi;
ucpi = sbi->s_ucpi[bitmap_nr];
ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;
UCPI_UBH->fragment = ufs_cgcmin(cgno);
UCPI_UBH->count = uspi->s_cgsize >> sb->s_blocksize_bits;
/*
* We already have the first fragment of the cylinder group block in the buffer
*/
UCPI_UBH->bh[0] = sb->u.ufs_sb.s_ucg[cgno];
UCPI_UBH->bh[0] = sbi->s_ucg[cgno];
for (i = 1; i < UCPI_UBH->count; i++)
if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i)))
goto failed;
sb->u.ufs_sb.s_cgno[bitmap_nr] = cgno;
sbi->s_cgno[bitmap_nr] = cgno;
ucpi->c_cgx = fs32_to_cpu(sb, ucg->cg_cgx);
ucpi->c_ncyl = fs16_to_cpu(sb, ucg->cg_ncyl);
......@@ -77,8 +78,8 @@ static void ufs_read_cylinder (struct super_block * sb,
failed:
for (j = 1; j < i; j++)
brelse (sb->u.ufs_sb.s_ucg[j]);
sb->u.ufs_sb.s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
brelse (sbi->s_ucg[j]);
sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
}
......@@ -88,6 +89,7 @@ static void ufs_read_cylinder (struct super_block * sb,
*/
void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
{
struct ufs_sb_info * sbi = UFS_SB(sb);
struct ufs_sb_private_info * uspi;
struct ufs_cg_private_info * ucpi;
struct ufs_cylinder_group * ucg;
......@@ -95,15 +97,15 @@ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
UFSD(("ENTER, bitmap_nr %u\n", bitmap_nr))
uspi = sb->u.ufs_sb.s_uspi;
if (sb->u.ufs_sb.s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
uspi = sbi->s_uspi;
if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
UFSD(("EXIT\n"))
return;
}
ucpi = sb->u.ufs_sb.s_ucpi[bitmap_nr];
ucpi = sbi->s_ucpi[bitmap_nr];
ucg = ubh_get_ucg(UCPI_UBH);
if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sb->u.ufs_sb.s_cg_loaded) {
if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
ufs_panic (sb, "ufs_put_cylinder", "internal error");
return;
}
......@@ -119,7 +121,7 @@ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
brelse (UCPI_UBH->bh[i]);
}
sb->u.ufs_sb.s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
UFSD(("EXIT\n"))
}
......@@ -132,13 +134,14 @@ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
struct ufs_cg_private_info * ufs_load_cylinder (
struct super_block * sb, unsigned cgno)
{
struct ufs_sb_info * sbi = UFS_SB(sb);
struct ufs_sb_private_info * uspi;
struct ufs_cg_private_info * ucpi;
unsigned cg, i, j;
UFSD(("ENTER, cgno %u\n", cgno))
uspi = sb->u.ufs_sb.s_uspi;
uspi = sbi->s_uspi;
if (cgno >= uspi->s_ncg) {
ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
return NULL;
......@@ -146,61 +149,61 @@ struct ufs_cg_private_info * ufs_load_cylinder (
/*
* Cylinder group number cg is in cache and it was last used
*/
if (sb->u.ufs_sb.s_cgno[0] == cgno) {
if (sbi->s_cgno[0] == cgno) {
UFSD(("EXIT\n"))
return sb->u.ufs_sb.s_ucpi[0];
return sbi->s_ucpi[0];
}
/*
* Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED
*/
if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
if (sb->u.ufs_sb.s_cgno[cgno] != UFS_CGNO_EMPTY) {
if (sb->u.ufs_sb.s_cgno[cgno] != cgno) {
if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
if (sbi->s_cgno[cgno] != cgno) {
ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
UFSD(("EXIT (FAILED)\n"))
return NULL;
}
else {
UFSD(("EXIT\n"))
return sb->u.ufs_sb.s_ucpi[cgno];
return sbi->s_ucpi[cgno];
}
} else {
ufs_read_cylinder (sb, cgno, cgno);
UFSD(("EXIT\n"))
return sb->u.ufs_sb.s_ucpi[cgno];
return sbi->s_ucpi[cgno];
}
}
/*
* Cylinder group number cg is in cache but it was not last used;
* we will move it to the first position
*/
for (i = 0; i < sb->u.ufs_sb.s_cg_loaded && sb->u.ufs_sb.s_cgno[i] != cgno; i++);
if (i < sb->u.ufs_sb.s_cg_loaded && sb->u.ufs_sb.s_cgno[i] == cgno) {
cg = sb->u.ufs_sb.s_cgno[i];
ucpi = sb->u.ufs_sb.s_ucpi[i];
for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
cg = sbi->s_cgno[i];
ucpi = sbi->s_ucpi[i];
for (j = i; j > 0; j--) {
sb->u.ufs_sb.s_cgno[j] = sb->u.ufs_sb.s_cgno[j-1];
sb->u.ufs_sb.s_ucpi[j] = sb->u.ufs_sb.s_ucpi[j-1];
sbi->s_cgno[j] = sbi->s_cgno[j-1];
sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
}
sb->u.ufs_sb.s_cgno[0] = cg;
sb->u.ufs_sb.s_ucpi[0] = ucpi;
sbi->s_cgno[0] = cg;
sbi->s_ucpi[0] = ucpi;
/*
* Cylinder group number cg is not in cache; we will read it from disk
* and put it in the first position
*/
} else {
if (sb->u.ufs_sb.s_cg_loaded < UFS_MAX_GROUP_LOADED)
sb->u.ufs_sb.s_cg_loaded++;
if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
sbi->s_cg_loaded++;
else
ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
ucpi = sb->u.ufs_sb.s_ucpi[sb->u.ufs_sb.s_cg_loaded - 1];
for (j = sb->u.ufs_sb.s_cg_loaded - 1; j > 0; j--) {
sb->u.ufs_sb.s_cgno[j] = sb->u.ufs_sb.s_cgno[j-1];
sb->u.ufs_sb.s_ucpi[j] = sb->u.ufs_sb.s_ucpi[j-1];
ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
sbi->s_cgno[j] = sbi->s_cgno[j-1];
sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
}
sb->u.ufs_sb.s_ucpi[0] = ucpi;
sbi->s_ucpi[0] = ucpi;
ufs_read_cylinder (sb, cgno, 0);
}
UFSD(("EXIT\n"))
return sb->u.ufs_sb.s_ucpi[0];
return sbi->s_ucpi[0];
}
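ufs_load_cylinder keeps up to UFS_MAX_GROUP_LOADED cylinder groups in two parallel arrays ordered most-recently-used first: a hit anywhere in the arrays is rotated to slot 0, and a miss evicts the last slot before the group is read into slot 0. A compact model of the rotation on plain integers, not from the commit:

/* Sketch only: the MRU promotion ufs_load_cylinder performs. */
static void mru_promote(unsigned int cgno[], unsigned int hit)
{
	unsigned int cg = cgno[hit], j;

	for (j = hit; j > 0; j--)
		cgno[j] = cgno[j - 1];   /* shift older entries down one */
	cgno[0] = cg;                    /* the hit becomes most recent */
}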
......@@ -67,7 +67,7 @@ ufs_readdir (struct file * filp, void * dirent, filldir_t filldir)
lock_kernel();
sb = inode->i_sb;
flags = sb->u.ufs_sb.s_flags;
flags = UFS_SB(sb)->s_flags;
UFSD(("ENTER, ino %lu f_pos %lu\n", inode->i_ino, (unsigned long) filp->f_pos))
......@@ -308,8 +308,8 @@ int ufs_check_dir_entry (const char * function, struct inode * dir,
error_msg = "reclen is too small for namlen";
else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
error_msg = "directory entry across blocks";
else if (fs32_to_cpu(sb, de->d_ino) > (sb->u.ufs_sb.s_uspi->s_ipg *
sb->u.ufs_sb.s_uspi->s_ncg))
else if (fs32_to_cpu(sb, de->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
UFS_SB(sb)->s_uspi->s_ncg))
error_msg = "inode out of bounds";
if (error_msg != NULL)
......@@ -386,7 +386,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
UFSD(("ENTER, name %s, namelen %u\n", name, namelen))
sb = dir->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
if (!namelen)
return -EINVAL;
......
......@@ -71,7 +71,7 @@ void ufs_free_inode (struct inode * inode)
UFSD(("ENTER, ino %lu\n", inode->i_ino))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
usb1 = ubh_get_usb_first(USPI_UBH);
ino = inode->i_ino;
......@@ -112,12 +112,12 @@ void ufs_free_inode (struct inode * inode)
ucpi->c_irotor = ino;
fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
fs32_add(sb, &usb1->fs_cstotal.cs_nifree, 1);
fs32_add(sb, &sb->fs_cs(cg).cs_nifree, 1);
fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);
if (is_directory) {
fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
fs32_sub(sb, &usb1->fs_cstotal.cs_ndir, 1);
fs32_sub(sb, &sb->fs_cs(cg).cs_ndir, 1);
fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
}
}
......@@ -146,6 +146,7 @@ void ufs_free_inode (struct inode * inode)
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
struct super_block * sb;
struct ufs_sb_info * sbi;
struct ufs_sb_private_info * uspi;
struct ufs_super_block_first * usb1;
struct ufs_cg_private_info * ucpi;
......@@ -164,7 +165,8 @@ struct inode * ufs_new_inode(struct inode * dir, int mode)
if (!inode)
return ERR_PTR(-ENOMEM);
ufsi = UFS_I(inode);
uspi = sb->u.ufs_sb.s_uspi;
sbi = UFS_SB(sb);
uspi = sbi->s_uspi;
usb1 = ubh_get_usb_first(USPI_UBH);
lock_super (sb);
......@@ -173,7 +175,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode)
* Try to place the inode in its parent directory
*/
i = ufs_inotocg(dir->i_ino);
if (sb->fs_cs(i).cs_nifree) {
if (sbi->fs_cs(i).cs_nifree) {
cg = i;
goto cg_found;
}
......@@ -185,7 +187,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode)
i += j;
if (i >= uspi->s_ncg)
i -= uspi->s_ncg;
if (sb->fs_cs(i).cs_nifree) {
if (sbi->fs_cs(i).cs_nifree) {
cg = i;
goto cg_found;
}
......@@ -199,7 +201,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode)
i++;
if (i >= uspi->s_ncg)
i = 0;
if (sb->fs_cs(i).cs_nifree) {
if (sbi->fs_cs(i).cs_nifree) {
cg = i;
goto cg_found;
}
......@@ -235,12 +237,12 @@ struct inode * ufs_new_inode(struct inode * dir, int mode)
fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1);
fs32_sub(sb, &sb->fs_cs(cg).cs_nifree, 1);
fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
if (S_ISDIR(mode)) {
fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1);
fs32_add(sb, &sb->fs_cs(cg).cs_ndir, 1);
fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
}
ubh_mark_buffer_dirty (USPI_UBH);
......
......@@ -52,7 +52,7 @@
static int ufs_block_to_path(struct inode *inode, long i_block, int offsets[4])
{
struct ufs_sb_private_info *uspi = inode->i_sb->u.ufs_sb.s_uspi;
struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
int ptrs = uspi->s_apb;
int ptrs_bits = uspi->s_apbshift;
const long direct_blocks = UFS_NDADDR,
......@@ -86,7 +86,7 @@ int ufs_frag_map(struct inode *inode, int frag)
{
struct ufs_inode_info *ufsi = UFS_I(inode);
struct super_block *sb = inode->i_sb;
struct ufs_sb_private_info *uspi = sb->u.ufs_sb.s_uspi;
struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
int mask = uspi->s_apbmask>>uspi->s_fpbshift;
int shift = uspi->s_apbshift-uspi->s_fpbshift;
int offsets[4], *p;
......@@ -137,7 +137,7 @@ static struct buffer_head * ufs_inode_getfrag (struct inode *inode,
inode->i_ino, fragment, new_fragment, required))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
block = ufs_fragstoblks (fragment);
blockoff = ufs_fragnum (fragment);
p = ufsi->i_u1.i_data + block;
......@@ -243,7 +243,7 @@ static struct buffer_head * ufs_block_getfrag (struct inode *inode,
u32 * p;
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
block = ufs_fragstoblks (fragment);
blockoff = ufs_fragnum (fragment);
......@@ -313,7 +313,7 @@ static struct buffer_head * ufs_block_getfrag (struct inode *inode,
static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
struct super_block * sb = inode->i_sb;
struct ufs_sb_private_info * uspi = sb->u.ufs_sb.s_uspi;
struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
struct buffer_head * bh;
int ret, err, new;
unsigned long ptr, phys;
......@@ -483,8 +483,8 @@ void ufs_read_inode (struct inode * inode)
UFSD(("ENTER, ino %lu\n", inode->i_ino))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
flags = sb->u.ufs_sb.s_flags;
uspi = UFS_SB(sb)->s_uspi;
flags = UFS_SB(sb)->s_flags;
if (inode->i_ino < UFS_ROOTINO ||
inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
......@@ -579,8 +579,8 @@ static int ufs_update_inode(struct inode * inode, int do_sync)
UFSD(("ENTER, ino %lu\n", inode->i_ino))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
flags = sb->u.ufs_sb.s_flags;
uspi = UFS_SB(sb)->s_uspi;
flags = UFS_SB(sb)->s_flags;
if (inode->i_ino < UFS_ROOTINO ||
inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
......
......@@ -139,7 +139,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
if (IS_ERR(inode))
goto out;
if (l > sb->u.ufs_sb.s_uspi->s_maxsymlinklen) {
if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
/* slow symlink */
inode->i_op = &page_symlink_inode_operations;
inode->i_mapping->a_ops = &ufs_aops;
......
......@@ -25,7 +25,7 @@ enum {
static __inline u64
fs64_to_cpu(struct super_block *sbp, u64 n)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return le64_to_cpu(n);
else
return be64_to_cpu(n);
......@@ -34,7 +34,7 @@ fs64_to_cpu(struct super_block *sbp, u64 n)
static __inline u64
cpu_to_fs64(struct super_block *sbp, u64 n)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return cpu_to_le64(n);
else
return cpu_to_be64(n);
......@@ -43,7 +43,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
static __inline u64
fs64_add(struct super_block *sbp, u64 *n, int d)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return *n = cpu_to_le64(le64_to_cpu(*n)+d);
else
return *n = cpu_to_be64(be64_to_cpu(*n)+d);
......@@ -52,7 +52,7 @@ fs64_add(struct super_block *sbp, u32 *n, int d)
static __inline u64
fs64_sub(struct super_block *sbp, u64 *n, int d)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return *n = cpu_to_le64(le64_to_cpu(*n)-d);
else
return *n = cpu_to_be64(be64_to_cpu(*n)-d);
......@@ -61,7 +61,7 @@ fs64_sub(struct super_block *sbp, u32 *n, int d)
static __inline u32
fs32_to_cpu(struct super_block *sbp, u32 n)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return le32_to_cpu(n);
else
return be32_to_cpu(n);
......@@ -70,7 +70,7 @@ fs32_to_cpu(struct super_block *sbp, u32 n)
static __inline u32
cpu_to_fs32(struct super_block *sbp, u32 n)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return cpu_to_le32(n);
else
return cpu_to_be32(n);
......@@ -79,7 +79,7 @@ cpu_to_fs32(struct super_block *sbp, u32 n)
static __inline u32
fs32_add(struct super_block *sbp, u32 *n, int d)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return *n = cpu_to_le32(le32_to_cpu(*n)+d);
else
return *n = cpu_to_be32(be32_to_cpu(*n)+d);
......@@ -88,7 +88,7 @@ fs32_add(struct super_block *sbp, u32 *n, int d)
static __inline u32
fs32_sub(struct super_block *sbp, u32 *n, int d)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return *n = cpu_to_le32(le32_to_cpu(*n)-d);
else
return *n = cpu_to_be32(be32_to_cpu(*n)-d);
......@@ -97,7 +97,7 @@ fs32_sub(struct super_block *sbp, u32 *n, int d)
static __inline u16
fs16_to_cpu(struct super_block *sbp, u16 n)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return le16_to_cpu(n);
else
return be16_to_cpu(n);
......@@ -106,7 +106,7 @@ fs16_to_cpu(struct super_block *sbp, u16 n)
static __inline u16
cpu_to_fs16(struct super_block *sbp, u16 n)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return cpu_to_le16(n);
else
return cpu_to_be16(n);
......@@ -115,7 +115,7 @@ cpu_to_fs16(struct super_block *sbp, u16 n)
static __inline u16
fs16_add(struct super_block *sbp, u16 *n, int d)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return *n = cpu_to_le16(le16_to_cpu(*n)+d);
else
return *n = cpu_to_be16(be16_to_cpu(*n)+d);
......@@ -124,7 +124,7 @@ fs16_add(struct super_block *sbp, u16 *n, int d)
static __inline u16
fs16_sub(struct super_block *sbp, u16 *n, int d)
{
if (sbp->u.ufs_sb.s_bytesex == BYTESEX_LE)
if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
return *n = cpu_to_le16(le16_to_cpu(*n)-d);
else
return *n = cpu_to_be16(be16_to_cpu(*n)-d);
......
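Every helper in this header dispatches on the byte order recorded at mount time; the commit changes only where that flag lives (UFS_SB(sbp)->s_bytesex instead of the union member). A userspace model of the dispatch, using the glibc <endian.h> conversions:

#include <endian.h>
#include <stdint.h>

enum model_bytesex { MODEL_BYTESEX_LE, MODEL_BYTESEX_BE };

/* Sketch only: per-mount byte-order dispatch, as in fs32_to_cpu. */
static uint32_t model_fs32_to_cpu(enum model_bytesex bx, uint32_t n)
{
	return bx == MODEL_BYTESEX_LE ? le32toh(n) : be32toh(n);
}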
......@@ -82,7 +82,7 @@ static int ufs_trunc_direct (struct inode * inode)
UFSD(("ENTER\n"))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
frag_to_free = 0;
free_count = 0;
......@@ -212,7 +212,7 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, u32 * p)
UFSD(("ENTER\n"))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
frag_to_free = 0;
free_count = 0;
......@@ -306,7 +306,7 @@ static int ufs_trunc_dindirect (struct inode * inode, unsigned offset, u32 * p)
UFSD(("ENTER\n"))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
dindirect_block = (DIRECT_BLOCK > offset)
? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0;
......@@ -374,7 +374,7 @@ static int ufs_trunc_tindirect (struct inode * inode)
UFSD(("ENTER\n"))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
retry = 0;
tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
......@@ -435,7 +435,7 @@ void ufs_truncate (struct inode * inode)
UFSD(("ENTER\n"))
sb = inode->i_sb;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
return;
......
......@@ -31,7 +31,7 @@ static inline s32
ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
struct ufs_super_block_third *usb3)
{
switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) {
switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
case UFS_ST_SUN:
return fs32_to_cpu(sb, usb3->fs_u2.fs_sun.fs_state);
case UFS_ST_SUNx86:
......@@ -46,7 +46,7 @@ static inline void
ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
struct ufs_super_block_third *usb3, s32 value)
{
switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) {
switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
case UFS_ST_SUN:
usb3->fs_u2.fs_sun.fs_state = cpu_to_fs32(sb, value);
break;
......@@ -63,7 +63,7 @@ static inline u32
ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1,
struct ufs_super_block_third *usb3)
{
if ((sb->u.ufs_sb.s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
return fs32_to_cpu(sb, usb3->fs_u2.fs_sunx86.fs_npsect);
else
return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect);
......@@ -74,7 +74,7 @@ ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
u64 tmp;
switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) {
switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
case UFS_ST_SUN:
((u32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qbmask[0];
((u32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qbmask[1];
......@@ -97,7 +97,7 @@ ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
u64 tmp;
switch (sb->u.ufs_sb.s_flags & UFS_ST_MASK) {
switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
case UFS_ST_SUN:
((u32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qfmask[0];
((u32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qfmask[1];
......@@ -118,7 +118,7 @@ ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
static inline u16
ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de)
{
if ((sb->u.ufs_sb.s_flags & UFS_DE_MASK) == UFS_DE_OLD)
if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
return fs16_to_cpu(sb, de->d_u.d_namlen);
else
return de->d_u.d_44.d_namlen; /* XXX this seems wrong */
......@@ -127,7 +127,7 @@ ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de)
static inline void
ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value)
{
if ((sb->u.ufs_sb.s_flags & UFS_DE_MASK) == UFS_DE_OLD)
if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
de->d_u.d_namlen = cpu_to_fs16(sb, value);
else
de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */
......@@ -136,7 +136,7 @@ ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value)
static inline void
ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode)
{
if ((sb->u.ufs_sb.s_flags & UFS_DE_MASK) != UFS_DE_44BSD)
if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD)
return;
/*
......@@ -172,7 +172,7 @@ ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode)
static inline u32
ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode)
{
switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) {
switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
case UFS_UID_EFT:
return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid);
case UFS_UID_44BSD:
......@@ -185,7 +185,7 @@ ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode)
static inline void
ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) {
switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
case UFS_UID_EFT:
inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value);
break;
......@@ -199,7 +199,7 @@ ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value)
static inline u32
ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
{
switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) {
switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
case UFS_UID_EFT:
return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
case UFS_UID_44BSD:
......@@ -212,7 +212,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
static inline void
ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
switch (sb->u.ufs_sb.s_flags & UFS_UID_MASK) {
switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
case UFS_UID_EFT:
inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value);
break;
......@@ -481,7 +481,7 @@ static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap,
struct ufs_sb_private_info * uspi;
unsigned fragsize, pos;
uspi = sb->u.ufs_sb.s_uspi;
uspi = UFS_SB(sb)->s_uspi;
fragsize = 0;
for (pos = 0; pos < uspi->s_fpb; pos++) {
......
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
static __inline__ void release_kernel_lock(struct task_struct *task)
{
if (unlikely(task->lock_depth >= 0))
spin_unlock(&kernel_flag);
}
/*
* Re-acquire the kernel lock
*/
static __inline__ void reacquire_kernel_lock(struct task_struct *task)
{
if (unlikely(task->lock_depth >= 0))
spin_lock(&kernel_flag);
}
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
spin_lock(&kernel_flag);
++current->lock_depth;
#else
if (!++current->lock_depth)
spin_lock(&kernel_flag);
#endif
}
static __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
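All of the per-architecture smplock.h variants that follow implement the same Big Kernel Lock protocol: task->lock_depth starts at -1, the -1 to 0 transition in lock_kernel() takes the single global spinlock, deeper nesting only bumps the counter, and the scheduler drops and retakes the lock around context switches through release_kernel_lock()/reacquire_kernel_lock(). A userspace model of the recursion rule, not kernel code:

/* Sketch only: the lock_depth protocol shared by these headers. */
static int lock_depth = -1;   /* lives in the task struct for real  */
static int bkl_held;          /* stands in for the global spinlock  */

static void model_lock_kernel(void)
{
	if (!++lock_depth)        /* only the -1 -> 0 transition locks   */
		bkl_held = 1;
}

static void model_unlock_kernel(void)
{
	if (--lock_depth < 0)     /* only the 0 -> -1 transition unlocks */
		bkl_held = 0;
}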
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#ifdef CONFIG_PREEMPT
#define kernel_locked() preempt_get_count()
#else
#define kernel_locked() spin_is_locked(&kernel_flag)
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static inline void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
spin_lock(&kernel_flag);
++current->lock_depth;
#else
if (!++current->lock_depth)
spin_lock(&kernel_flag);
#endif
}
static inline void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
#ifndef __ASM_CRIS_SMPLOCK_H
#define __ASM_CRIS_SMPLOCK_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/config.h>
#ifndef CONFIG_SMP
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
#else
#error "We do not support SMP on CRIS"
#endif
#endif
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* i386 SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>
extern spinlock_t kernel_flag;
#define kernel_locked() (current->lock_depth >= 0)
#define get_kernel_lock() spin_lock(&kernel_flag)
#define put_kernel_lock() spin_unlock(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
put_kernel_lock(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
get_kernel_lock(); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
int depth = current->lock_depth+1;
if (!depth)
get_kernel_lock();
current->lock_depth = depth;
}
static __inline__ void unlock_kernel(void)
{
if (current->lock_depth < 0)
BUG();
if (--current->lock_depth < 0)
put_kernel_lock();
}
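The i386 variant above differs from the generic ones in one detail: lock_kernel() computes the new depth in a local and stores it to current->lock_depth only after the spinlock is held. Plausibly this ordering matters under preemption, since the scheduler's release_kernel_lock() keys off lock_depth >= 0 and the task must not look like the BKL owner while it is still spinning for the lock. A sketch of the ordering (struct and names invented):

struct model_task { int lock_depth; };

/* Sketch only: publish lock_depth after acquisition, as i386 does. */
static void model_lock_kernel_i386(struct model_task *t)
{
	int depth = t->lock_depth + 1;

	if (!depth) {
		/* get_kernel_lock() would go here and may spin a while */
	}
	t->lock_depth = depth;   /* published only after acquisition */
}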
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/hardirq.h>
extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
# define kernel_locked() spin_is_locked(&kernel_flag)
#else
# define kernel_locked() (1)
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void
lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
static __inline__ void
unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/* $Id: smplock.h,v 1.2 1999/10/09 00:01:43 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#ifndef _ASM_SMPLOCK_H
#define _ASM_SMPLOCK_H
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
{
if (task->lock_depth >= 0)
spin_unlock(&kernel_flag);
release_irqlock(cpu);
local_irq_enable();
}
/*
* Re-acquire the kernel lock
*/
static __inline__ void reacquire_kernel_lock(struct task_struct *task)
{
if (task->lock_depth >= 0)
spin_lock(&kernel_flag);
}
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
static __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
#endif /* _ASM_SMPLOCK_H */
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* BK Id: %F% %I% %G% %U% %#%
*/
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#ifdef __KERNEL__
#ifndef __ASM_SMPLOCK_H__
#define __ASM_SMPLOCK_H__
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
#define kernel_locked() spin_is_locked(&kernel_flag)
#elif defined(CONFIG_PREEMPT)
#define kernel_locked() preempt_count()
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
spin_lock(&kernel_flag);
++current->lock_depth;
#else
if (!++current->lock_depth)
spin_lock(&kernel_flag);
#endif /* CONFIG_PREEMPT */
}
static __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
#endif /* __ASM_SMPLOCK_H__ */
#endif /* __KERNEL__ */
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
static __inline__ void unlock_kernel(void)
{
if (current->lock_depth < 0)
BUG();
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* include/asm-s390/smplock.h
*
* S390 version
*
* Derived from "include/asm-i386/smplock.h"
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* include/asm-s390/smplock.h
*
* S390 version
*
* Derived from "include/asm-i386/smplock.h"
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
#ifndef __ASM_SH_SMPLOCK_H
#define __ASM_SH_SMPLOCK_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/config.h>
#ifndef CONFIG_SMP
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
#else
#error "We do not support SMP on SH"
#endif /* CONFIG_SMP */
#endif /* __ASM_SH_SMPLOCK_H */
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() \
(spin_is_locked(&kernel_flag) &&\
(current->lock_depth >= 0))
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (unlikely(task->lock_depth >= 0)) { \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
#define lock_kernel() \
do { \
if (!++current->lock_depth) \
spin_lock(&kernel_flag); \
} while(0)
#define unlock_kernel() \
do { \
if (--current->lock_depth < 0) \
spin_unlock(&kernel_flag); \
} while(0)
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
#define kernel_locked() \
(spin_is_locked(&kernel_flag) &&\
(current->lock_depth >= 0))
#else
#ifdef CONFIG_PREEMPT
#define kernel_locked() preempt_get_count()
#else
#define kernel_locked() 1
#endif
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
#define lock_kernel() \
do { \
if (!++current->lock_depth) \
spin_lock(&kernel_flag); \
} while(0)
#define unlock_kernel() \
do { \
if (--current->lock_depth < 0) \
spin_unlock(&kernel_flag); \
} while(0)
/*
* <asm/smplock.h>
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>
extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
#define kernel_locked() spin_is_locked(&kernel_flag)
#define check_irq_holder(cpu) \
if (global_irq_holder == (cpu)) \
BUG();
#else
#ifdef CONFIG_PREEMPT
#define kernel_locked() preempt_get_count()
#define global_irq_holder 0
#define check_irq_holder(cpu) do {} while(0)
#else
#define kernel_locked() 1
#define check_irq_holder(cpu) \
if (global_irq_holder == (cpu)) \
BUG();
#endif
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (unlikely(task->lock_depth >= 0)) { \
spin_unlock(&kernel_flag); \
check_irq_holder(cpu); \
} \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
spin_lock(&kernel_flag);
++current->lock_depth;
#else
#if 1
if (!++current->lock_depth)
spin_lock(&kernel_flag);
#else
__asm__ __volatile__(
"incl %1\n\t"
"jne 9f"
spin_lock_string
"\n9:"
:"=m" (__dummy_lock(&kernel_flag)),
"=m" (current->lock_depth));
#endif
#endif
}
extern __inline__ void unlock_kernel(void)
{
if (current->lock_depth < 0)
BUG();
#if 1
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
#else
__asm__ __volatile__(
"decl %1\n\t"
"jns 9f\n\t"
spin_unlock_string
"\n9:"
:"=m" (__dummy_lock(&kernel_flag)),
"=m" (current->lock_depth));
#endif
}
......@@ -624,8 +624,6 @@ extern void __kill_fasync(struct fasync_struct *, int, int);
#include <linux/ext3_fs_sb.h>
#include <linux/hpfs_fs_sb.h>
#include <linux/ufs_fs_sb.h>
#include <linux/romfs_fs_sb.h>
extern struct list_head super_blocks;
extern spinlock_t sb_lock;
......@@ -670,8 +668,6 @@ struct super_block {
union {
struct ext3_sb_info ext3_sb;
struct hpfs_sb_info hpfs_sb;
struct ufs_sb_info ufs_sb;
struct romfs_sb_info romfs_sb;
void *generic_sbp;
} u;
/*
......
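This hunk is the point of the whole series: struct super_block's union loses its ufs_sb and romfs_sb members, and those filesystems keep their state behind the generic_sbp pointer instead, either encoded in place (romfs) or allocated at mount time. A sketch of the allocation pattern for a hypothetical filesystem "foo" (names invented, error paths trimmed):

#include <linux/slab.h>    /* kmalloc */

struct foo_sb_info { unsigned long s_maxsize; };

static int foo_fill_super(struct super_block *sb)
{
	struct foo_sb_info *sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);

	if (!sbi)
		return -ENOMEM;
	sb->u.generic_sbp = sbi;   /* private data hangs off one pointer */
	return 0;
}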
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H
/*
* include/linux/preempt.h - macros for accessing and manipulating
* preempt_count (used for kernel preemption, interrupt count, etc.)
*/
#include <linux/config.h>
#define preempt_count() (current_thread_info()->preempt_count)
#define preempt_count() (current_thread_info()->preempt_count)
#define inc_preempt_count() \
do { \
......@@ -31,17 +36,16 @@ do { \
barrier(); \
} while (0)
#define preempt_enable() \
#define preempt_check_resched() \
do { \
preempt_enable_no_resched(); \
if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
preempt_schedule(); \
} while (0)
#define preempt_check_resched() \
#define preempt_enable() \
do { \
if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
preempt_schedule(); \
preempt_enable_no_resched(); \
preempt_check_resched(); \
} while (0)
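The hunk above swaps the definition order so that preempt_check_resched() is defined before preempt_enable() uses it, and preempt_enable() becomes the composition of the two smaller macros. Reconstructed from the visible lines, the header now reads:

#define preempt_check_resched() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule(); \
} while (0)

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	preempt_check_resched(); \
} while (0)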
#define inc_preempt_count_non_preempt() do { } while (0)
......@@ -50,7 +54,7 @@ do { \
#else
#define preempt_disable() do { } while (0)
#define preempt_enable_no_resched() do {} while(0)
#define preempt_enable_no_resched() do { } while (0)
#define preempt_enable() do { } while (0)
#define preempt_check_resched() do { } while (0)
......
#ifndef __ROMFS_FS_SB
#define __ROMFS_FS_SB
/* romfs superblock in-core data */
struct romfs_sb_info {
unsigned long s_maxsize;
};
#endif
......@@ -13,7 +13,59 @@
#else
#include <asm/smplock.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>
extern spinlock_t kernel_flag;
#define kernel_locked() (current->lock_depth >= 0)
#define get_kernel_lock() spin_lock(&kernel_flag)
#define put_kernel_lock() spin_unlock(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
put_kernel_lock(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
get_kernel_lock(); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
int depth = current->lock_depth+1;
if (!depth)
get_kernel_lock();
current->lock_depth = depth;
}
static __inline__ void unlock_kernel(void)
{
if (current->lock_depth < 0)
BUG();
if (--current->lock_depth < 0)
put_kernel_lock();
}
#endif /* CONFIG_SMP */
......
......@@ -33,6 +33,9 @@
#include <linux/stat.h>
#include <linux/fs.h>
#include <linux/ufs_fs_i.h>
#include <linux/ufs_fs_sb.h>
#define UFS_BBLOCK 0
#define UFS_BBSIZE 8192
#define UFS_SBLOCK 8192
......@@ -398,7 +401,7 @@ struct ufs_super_block {
* Convert cylinder group to base address of its global summary info.
*/
#define fs_cs(indx) \
u.ufs_sb.s_csp[(indx) >> uspi->s_csshift][(indx) & ~uspi->s_csmask]
s_csp[(indx) >> uspi->s_csshift][(indx) & ~uspi->s_csmask]
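fs_cs() expands to a member access on whatever expression it is applied to, and it silently captures a local uspi at each call site; dropping the u.ufs_sb. prefix from its body is what forces every caller in this commit from sb->fs_cs(cgno) to UFS_SB(sb)->fs_cs(cgno). For example:

/* With the new definition,
 *     UFS_SB(sb)->fs_cs(cgno)
 * expands to
 *     UFS_SB(sb)->s_csp[(cgno) >> uspi->s_csshift]
 *                      [(cgno) & ~uspi->s_csmask]
 * so a local `uspi` must still be in scope at every use. */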
/*
* Cylinder group block for a file system.
......@@ -780,7 +783,10 @@ extern struct inode_operations ufs_fast_symlink_inode_operations;
/* truncate.c */
extern void ufs_truncate (struct inode *);
#include <linux/ufs_fs_i.h>
static inline struct ufs_sb_info *UFS_SB(struct super_block *sb)
{
return sb->u.generic_sbp;
}
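This accessor is the mechanical core of the churn above: it returns the generic_sbp pointer, and because that member is void *, C converts it to struct ufs_sb_info * without a cast. A typical call site before and after:

/* before: uspi = sb->u.ufs_sb.s_uspi;   (fixed union member)         */
/* after:  uspi = UFS_SB(sb)->s_uspi;    (opaque per-fs private data) */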
static inline struct ufs_inode_info *UFS_I(struct inode *inode)
{
......
......@@ -731,7 +731,7 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc
tsk = next_thread(tsk);
} while (tsk != current);
read_unlock(&tasklist_lock);
if (flag) {
if (flag || !list_empty(&current->ptrace_children)) {
retval = 0;
if (options & WNOHANG)
goto end_wait4;
......
......@@ -28,7 +28,6 @@
#include <asm/pgalloc.h>
#include <asm/rmap.h>
#include <asm/smplock.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
......