Commit c8c391f7 authored by Linus Torvalds
Parents: 75bfb496 2c1394f5

Merge http://jfs.bkbits.net/linux-2.5
into ppc970.osdl.org:/home/torvalds/v2.6/linux
/*
- * Copyright (c) International Business Machines Corp., 2000-2002
- * Portions Copyright (c) Christoph Hellwig, 2001-2002
+ * Copyright (C) International Business Machines Corp., 2000-2004
+ * Portions Copyright (C) Christoph Hellwig, 2001-2002
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -147,7 +147,7 @@ void jfs_proc_clean(void)
if (base) {
for (i = 0; i < NPROCENT; i++)
remove_proc_entry(Entries[i].name, base);
- remove_proc_entry("jfs", base);
+ remove_proc_entry("jfs", proc_root_fs);
}
}
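
A hedged illustration of the pairing the one-line fix above depends on (the demo_ names are hypothetical; proc_mkdir(), remove_proc_entry() and proc_root_fs are the real 2.6-era procfs interfaces): the "jfs" directory is created as a child of proc_root_fs, so teardown must remove it from proc_root_fs rather than from the directory itself.

#include <linux/proc_fs.h>

static struct proc_dir_entry *demo_base;

static void demo_proc_init(void)
{
        /* "jfs" is created as a child of proc_root_fs ... */
        demo_base = proc_mkdir("jfs", proc_root_fs);
}

static void demo_proc_clean(void)
{
        if (demo_base) {
                /* ... so it is removed from proc_root_fs, not from demo_base;
                 * per-entry children would be removed from demo_base first,
                 * as the loop in the hunk above does. */
                remove_proc_entry("jfs", proc_root_fs);
                demo_base = NULL;
        }
}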
@@ -177,7 +177,7 @@ static int ciCompare(struct component_name * key, dtpage_t * p, int si,
static void dtGetKey(dtpage_t * p, int i, struct component_name * key,
int flag);
- static void ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
+ static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
int ri, struct component_name * key, int flag);
static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
@@ -342,7 +342,6 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
struct metapage *mp;
s64 offset;
uint page_offset;
- int rc;
struct tlock *tlck;
s64 xaddr;
@@ -396,11 +395,11 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
* Allocate the first block & add it to the xtree
*/
xaddr = 0;
- if ((rc =
- xtInsert(tid, ip, 0, 0, sbi->nbperpage,
- &xaddr, 0))) {
+ if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) {
jfs_warn("add_index: xtInsert failed!");
- return -EPERM;
+ memcpy(&jfs_ip->i_dirtable, temp_table,
+ sizeof (temp_table));
+ goto clean_up;
}
ip->i_size = PSIZE;
ip->i_blocks += LBLK2PBLK(sb, sbi->nbperpage);
@@ -408,7 +407,9 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
if ((mp = get_index_page(ip, 0)) == 0) {
jfs_err("add_index: get_metapage failed!");
xtTruncate(tid, ip, 0, COMMIT_PWMAP);
- return -EPERM;
+ memcpy(&jfs_ip->i_dirtable, temp_table,
+ sizeof (temp_table));
+ goto clean_up;
}
tlck = txLock(tid, ip, mp, tlckDATA);
llck = (struct linelock *) & tlck->lock;
@@ -438,12 +439,9 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
* This will be the beginning of a new page
*/
xaddr = 0;
- if ((rc =
- xtInsert(tid, ip, 0, blkno, sbi->nbperpage,
- &xaddr, 0))) {
+ if (xtInsert(tid, ip, 0, blkno, sbi->nbperpage, &xaddr, 0)) {
jfs_warn("add_index: xtInsert failed!");
- jfs_ip->next_index--;
- return -EPERM;
+ goto clean_up;
}
ip->i_size += PSIZE;
ip->i_blocks += LBLK2PBLK(sb, sbi->nbperpage);
@@ -457,7 +455,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
if (mp == 0) {
jfs_err("add_index: get/read_metapage failed!");
- return -EPERM;
+ goto clean_up;
}
lock_index(tid, ip, mp, index);
@@ -472,6 +470,12 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
release_metapage(mp);
return index;
+ clean_up:
+ jfs_ip->next_index--;
+ return 0;
}
/*
@@ -1152,9 +1156,16 @@ static int dtSplitUp(tid_t tid,
if ((sp->header.flag & BT_ROOT && skip > 1) ||
sp->header.prev != 0 || skip > 1) {
/* compute uppercase router prefix key */
- ciGetLeafPrefixKey(lp,
- lp->header.nextindex - 1,
- rp, 0, &key, sbi->mntflag);
+ rc = ciGetLeafPrefixKey(lp,
+ lp->header.nextindex-1,
+ rp, 0, &key,
+ sbi->mntflag);
+ if (rc) {
+ DT_PUTPAGE(lmp);
+ DT_PUTPAGE(rmp);
+ DT_PUTPAGE(smp);
+ goto splitOut;
+ }
} else {
/* next to leftmost entry of
lowest internal level */
@@ -3713,18 +3724,28 @@ static int ciCompare(struct component_name * key, /* search key */
* from two adjacent leaf entries
* across page boundary
*
- * return:
- * Number of prefix bytes needed to distinguish b from a.
+ * return: non-zero on error
*
*/
- static void ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
+ static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
int ri, struct component_name * key, int flag)
{
int klen, namlen;
wchar_t *pl, *pr, *kname;
- wchar_t lname[JFS_NAME_MAX + 1];
- struct component_name lkey = { 0, lname };
- wchar_t rname[JFS_NAME_MAX + 1];
- struct component_name rkey = { 0, rname };
+ struct component_name lkey;
+ struct component_name rkey;
+ lkey.name = (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
+ GFP_KERNEL);
+ if (lkey.name == NULL)
+ return -ENOSPC;
+ rkey.name = (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
+ GFP_KERNEL);
+ if (rkey.name == NULL) {
+ kfree(lkey.name);
+ return -ENOSPC;
+ }
/* get left and right key */
dtGetKey(lp, li, &lkey, flag);
@@ -3749,7 +3770,7 @@ static void ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
*kname = *pr;
if (*pl != *pr) {
key->namlen = klen + 1;
- return;
+ goto free_names;
}
}
@@ -3760,7 +3781,10 @@ static void ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
} else /* l->namelen == r->namelen */
key->namlen = klen;
- return;
+ free_names:
+ kfree(lkey.name);
+ kfree(rkey.name);
+ return 0;
}
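
Taken together, the ciGetLeafPrefixKey() hunks above move the two JFS_NAME_MAX-sized name buffers off the stack, report failure through a non-zero return (-ENOSPC when an allocation fails), and free both buffers on every exit path. A minimal sketch of that allocate-pair-and-clean-up pattern, using illustrative demo_ names rather than the actual JFS routine:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/nls.h>          /* kernel wchar_t typedef used by the NLS code */

#define DEMO_NAME_MAX 255       /* stands in for JFS_NAME_MAX */

static int demo_prefix_key(void)
{
        wchar_t *lname, *rname;
        int rc = 0;

        lname = kmalloc((DEMO_NAME_MAX + 1) * sizeof(wchar_t), GFP_KERNEL);
        if (lname == NULL)
                return -ENOSPC;
        rname = kmalloc((DEMO_NAME_MAX + 1) * sizeof(wchar_t), GFP_KERNEL);
        if (rname == NULL) {
                kfree(lname);   /* the first allocation must not leak */
                return -ENOSPC;
        }

        /* ... build the prefix key from the two names here ... */

        kfree(lname);           /* every exit past this point frees both */
        kfree(rname);
        return rc;
}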
@@ -228,31 +228,36 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
else
mapping = inode->i_mapping;
- spin_lock(&meta_lock);
hash_ptr = meta_hash(mapping, lblock);
+ again:
+ spin_lock(&meta_lock);
mp = search_hash(hash_ptr, mapping, lblock);
if (mp) {
page_found:
+ mp->count++;
+ lock_metapage(mp);
+ spin_unlock(&meta_lock);
+ if (test_bit(META_stale, &mp->flag)) {
+ release_metapage(mp);
+ goto again;
+ }
if (test_bit(META_discard, &mp->flag)) {
if (!new) {
- spin_unlock(&meta_lock);
jfs_error(inode->i_sb,
"__get_metapage: using a "
"discarded metapage");
+ release_metapage(mp);
return NULL;
}
clear_bit(META_discard, &mp->flag);
}
- mp->count++;
jfs_info("__get_metapage: found 0x%p, in hash", mp);
if (mp->logical_size != size) {
- spin_unlock(&meta_lock);
jfs_error(inode->i_sb,
"__get_metapage: mp->logical_size != size");
+ release_metapage(mp);
return NULL;
}
- lock_metapage(mp);
- spin_unlock(&meta_lock);
} else {
l2bsize = inode->i_blkbits;
l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
@@ -451,11 +456,13 @@ void release_metapage(struct metapage * mp)
if (--mp->count || atomic_read(&mp->nohomeok)) {
unlock_metapage(mp);
spin_unlock(&meta_lock);
- } else {
- remove_from_hash(mp, meta_hash(mp->mapping, mp->index));
- spin_unlock(&meta_lock);
+ return;
+ }
if (mp->page) {
+ /* Releasing spinlock, we have to check mp->count later */
+ set_bit(META_stale, &mp->flag);
+ spin_unlock(&meta_lock);
kunmap(mp->page);
mp->data = 0;
if (test_bit(META_dirty, &mp->flag))
@@ -472,7 +479,9 @@ void release_metapage(struct metapage * mp)
}
page_cache_release(mp->page);
mp->page = NULL;
INCREMENT(mpStat.pagefree);
+ spin_lock(&meta_lock);
}
if (mp->lsn) {
@@ -488,9 +497,16 @@ void release_metapage(struct metapage * mp)
list_del(&mp->synclist);
LOGSYNC_UNLOCK(log);
}
+ if (mp->count) {
+ /* Someone else is trying to get this metapage */
+ unlock_metapage(mp);
+ spin_unlock(&meta_lock);
+ return;
+ }
+ remove_from_hash(mp, meta_hash(mp->mapping, mp->index));
+ spin_unlock(&meta_lock);
free_metapage(mp);
- }
}
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
@@ -510,9 +526,20 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
for (lblock = addr; lblock < addr + len;
lblock += 1 << l2BlocksPerPage) {
hash_ptr = meta_hash(mapping, lblock);
+ again:
spin_lock(&meta_lock);
mp = search_hash(hash_ptr, mapping, lblock);
if (mp) {
+ if (test_bit(META_stale, &mp->flag)) {
+ /* Racing with release_metapage */
+ mp->count++;
+ lock_metapage(mp);
+ spin_unlock(&meta_lock);
+ /* racing release_metapage should be done now */
+ release_metapage(mp);
+ goto again;
+ }
set_bit(META_discard, &mp->flag);
spin_unlock(&meta_lock);
} else {
@@ -64,6 +64,7 @@ struct metapage {
#define META_sync 4
#define META_discard 5
#define META_forced 6
+ #define META_stale 7
#define mark_metapage_dirty(mp) set_bit(META_dirty, &(mp)->flag)
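
META_stale, defined above, is the handshake used by the jfs_metapage.c hunks: release_metapage() sets the bit before dropping meta_lock for the slow teardown, and a lookup that races in pins the page, sees the flag, drops its reference and retries. A hedged sketch of the lookup side, with demo_ placeholder names standing in for the JFS symbols (only the spinlock and bitop calls are real kernel interfaces):

#include <linux/spinlock.h>
#include <asm/bitops.h>

#define DEMO_STALE 7                    /* plays the role of META_stale */

struct demo_page {
        unsigned long flag;             /* DEMO_STALE lives in here */
        int count;                      /* reference count, guarded by demo_lock */
};

static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;       /* stands in for meta_lock */

static struct demo_page *demo_find(struct demo_page *(*search)(void),
                                   void (*release)(struct demo_page *))
{
        struct demo_page *p;

again:
        spin_lock(&demo_lock);
        p = search();                   /* hash lookup under the lock */
        if (!p) {
                spin_unlock(&demo_lock);
                return NULL;
        }
        p->count++;                     /* pin the page before dropping the lock */
        spin_unlock(&demo_lock);
        if (test_bit(DEMO_STALE, &p->flag)) {
                release(p);             /* let the racing releaser finish */
                goto again;             /* then repeat the lookup */
        }
        return p;
}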