Commit 0edd73b3 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] shmem: restore superblock info

To improve shmem scalability, we allowed tmpfs instances which don't need
their blocks or inodes limited not to count them, and not to allocate any
sbinfo.  Which was okay when the only use for the sbinfo was accounting
blocks and inodes; but since then a couple of unrelated projects extending
tmpfs want to store other data in the sbinfo.  Whether either extension
reaches mainline is beside the point: I'm guilty of a bad design decision,
and should restore sbinfo to make any such future extensions easier.

So, once again allocate a shmem_sb_info for every shmem/tmpfs instance, and
now let max_blocks 0 indicate unlimited blocks, and max_inodes 0 unlimited
inodes.  Brent Casavant verified (many months ago) that this does not
perceptibly impact the scalability (since the unlimited sbinfo cacheline is
repeatedly accessed but only once dirtied).

And merge shmem_set_size into its sole caller shmem_remount_fs.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 65ed0b33
...@@ -71,8 +71,8 @@ can be changed on remount. The size parameter also accepts a suffix % ...@@ -71,8 +71,8 @@ can be changed on remount. The size parameter also accepts a suffix %
to limit this tmpfs instance to that percentage of your physical RAM: to limit this tmpfs instance to that percentage of your physical RAM:
the default, when neither size nor nr_blocks is specified, is size=50% the default, when neither size nor nr_blocks is specified, is size=50%
If both nr_blocks (or size) and nr_inodes are set to 0, neither blocks If nr_blocks=0 (or size=0), blocks will not be limited in that instance;
nor inodes will be limited in that instance. It is generally unwise to if nr_inodes=0, inodes will not be limited. It is generally unwise to
mount with such options, since it allows any user with write access to mount with such options, since it allows any user with write access to
use up all the memory on the machine; but enhances the scalability of use up all the memory on the machine; but enhances the scalability of
that instance in a system with many cpus making intensive use of it. that instance in a system with many cpus making intensive use of it.
...@@ -97,4 +97,4 @@ RAM/SWAP in 10240 inodes and it is only accessible by root. ...@@ -97,4 +97,4 @@ RAM/SWAP in 10240 inodes and it is only accessible by root.
Author: Author:
Christoph Rohland <cr@sap.com>, 1.12.01 Christoph Rohland <cr@sap.com>, 1.12.01
Updated: Updated:
Hugh Dickins <hugh@veritas.com>, 01 September 2004 Hugh Dickins <hugh@veritas.com>, 13 March 2005
...@@ -6,8 +6,8 @@ ...@@ -6,8 +6,8 @@
* 2000-2001 Christoph Rohland * 2000-2001 Christoph Rohland
* 2000-2001 SAP AG * 2000-2001 SAP AG
* 2002 Red Hat Inc. * 2002 Red Hat Inc.
* Copyright (C) 2002-2004 Hugh Dickins. * Copyright (C) 2002-2005 Hugh Dickins.
* Copyright (C) 2002-2004 VERITAS Software Corporation. * Copyright (C) 2002-2005 VERITAS Software Corporation.
* Copyright (C) 2004 Andi Kleen, SuSE Labs * Copyright (C) 2004 Andi Kleen, SuSE Labs
* *
* Extended attribute support for tmpfs: * Extended attribute support for tmpfs:
...@@ -194,7 +194,7 @@ static DEFINE_SPINLOCK(shmem_swaplist_lock); ...@@ -194,7 +194,7 @@ static DEFINE_SPINLOCK(shmem_swaplist_lock);
static void shmem_free_blocks(struct inode *inode, long pages) static void shmem_free_blocks(struct inode *inode, long pages)
{ {
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo) { if (sbinfo->max_blocks) {
spin_lock(&sbinfo->stat_lock); spin_lock(&sbinfo->stat_lock);
sbinfo->free_blocks += pages; sbinfo->free_blocks += pages;
inode->i_blocks -= pages*BLOCKS_PER_PAGE; inode->i_blocks -= pages*BLOCKS_PER_PAGE;
...@@ -357,7 +357,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long ...@@ -357,7 +357,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
* page (and perhaps indirect index pages) yet to allocate: * page (and perhaps indirect index pages) yet to allocate:
* a waste to allocate index if we cannot allocate data. * a waste to allocate index if we cannot allocate data.
*/ */
if (sbinfo) { if (sbinfo->max_blocks) {
spin_lock(&sbinfo->stat_lock); spin_lock(&sbinfo->stat_lock);
if (sbinfo->free_blocks <= 1) { if (sbinfo->free_blocks <= 1) {
spin_unlock(&sbinfo->stat_lock); spin_unlock(&sbinfo->stat_lock);
...@@ -677,8 +677,8 @@ static void shmem_delete_inode(struct inode *inode) ...@@ -677,8 +677,8 @@ static void shmem_delete_inode(struct inode *inode)
spin_unlock(&shmem_swaplist_lock); spin_unlock(&shmem_swaplist_lock);
} }
} }
if (sbinfo) {
BUG_ON(inode->i_blocks); BUG_ON(inode->i_blocks);
if (sbinfo->max_inodes) {
spin_lock(&sbinfo->stat_lock); spin_lock(&sbinfo->stat_lock);
sbinfo->free_inodes++; sbinfo->free_inodes++;
spin_unlock(&sbinfo->stat_lock); spin_unlock(&sbinfo->stat_lock);
...@@ -1080,7 +1080,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, ...@@ -1080,7 +1080,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
} else { } else {
shmem_swp_unmap(entry); shmem_swp_unmap(entry);
sbinfo = SHMEM_SB(inode->i_sb); sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo) { if (sbinfo->max_blocks) {
spin_lock(&sbinfo->stat_lock); spin_lock(&sbinfo->stat_lock);
if (sbinfo->free_blocks == 0 || if (sbinfo->free_blocks == 0 ||
shmem_acct_block(info->flags)) { shmem_acct_block(info->flags)) {
...@@ -1269,7 +1269,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) ...@@ -1269,7 +1269,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
struct shmem_inode_info *info; struct shmem_inode_info *info;
struct shmem_sb_info *sbinfo = SHMEM_SB(sb); struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
if (sbinfo) { if (sbinfo->max_inodes) {
spin_lock(&sbinfo->stat_lock); spin_lock(&sbinfo->stat_lock);
if (!sbinfo->free_inodes) { if (!sbinfo->free_inodes) {
spin_unlock(&sbinfo->stat_lock); spin_unlock(&sbinfo->stat_lock);
...@@ -1319,7 +1319,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) ...@@ -1319,7 +1319,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
mpol_shared_policy_init(&info->policy); mpol_shared_policy_init(&info->policy);
break; break;
} }
} else if (sbinfo) { } else if (sbinfo->max_inodes) {
spin_lock(&sbinfo->stat_lock); spin_lock(&sbinfo->stat_lock);
sbinfo->free_inodes++; sbinfo->free_inodes++;
spin_unlock(&sbinfo->stat_lock); spin_unlock(&sbinfo->stat_lock);
...@@ -1328,31 +1328,6 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) ...@@ -1328,31 +1328,6 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
} }
#ifdef CONFIG_TMPFS #ifdef CONFIG_TMPFS
static int shmem_set_size(struct shmem_sb_info *sbinfo,
unsigned long max_blocks, unsigned long max_inodes)
{
int error;
unsigned long blocks, inodes;
spin_lock(&sbinfo->stat_lock);
blocks = sbinfo->max_blocks - sbinfo->free_blocks;
inodes = sbinfo->max_inodes - sbinfo->free_inodes;
error = -EINVAL;
if (max_blocks < blocks)
goto out;
if (max_inodes < inodes)
goto out;
error = 0;
sbinfo->max_blocks = max_blocks;
sbinfo->free_blocks = max_blocks - blocks;
sbinfo->max_inodes = max_inodes;
sbinfo->free_inodes = max_inodes - inodes;
out:
spin_unlock(&sbinfo->stat_lock);
return error;
}
static struct inode_operations shmem_symlink_inode_operations; static struct inode_operations shmem_symlink_inode_operations;
static struct inode_operations shmem_symlink_inline_operations; static struct inode_operations shmem_symlink_inline_operations;
...@@ -1607,15 +1582,17 @@ static int shmem_statfs(struct super_block *sb, struct kstatfs *buf) ...@@ -1607,15 +1582,17 @@ static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
buf->f_type = TMPFS_MAGIC; buf->f_type = TMPFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE; buf->f_bsize = PAGE_CACHE_SIZE;
buf->f_namelen = NAME_MAX; buf->f_namelen = NAME_MAX;
if (sbinfo) {
spin_lock(&sbinfo->stat_lock); spin_lock(&sbinfo->stat_lock);
if (sbinfo->max_blocks) {
buf->f_blocks = sbinfo->max_blocks; buf->f_blocks = sbinfo->max_blocks;
buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
}
if (sbinfo->max_inodes) {
buf->f_files = sbinfo->max_inodes; buf->f_files = sbinfo->max_inodes;
buf->f_ffree = sbinfo->free_inodes; buf->f_ffree = sbinfo->free_inodes;
spin_unlock(&sbinfo->stat_lock);
} }
/* else leave those fields 0 like simple_statfs */ /* else leave those fields 0 like simple_statfs */
spin_unlock(&sbinfo->stat_lock);
return 0; return 0;
} }
...@@ -1672,7 +1649,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr ...@@ -1672,7 +1649,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
* but each new link needs a new dentry, pinning lowmem, and * but each new link needs a new dentry, pinning lowmem, and
* tmpfs dentries cannot be pruned until they are unlinked. * tmpfs dentries cannot be pruned until they are unlinked.
*/ */
if (sbinfo) { if (sbinfo->max_inodes) {
spin_lock(&sbinfo->stat_lock); spin_lock(&sbinfo->stat_lock);
if (!sbinfo->free_inodes) { if (!sbinfo->free_inodes) {
spin_unlock(&sbinfo->stat_lock); spin_unlock(&sbinfo->stat_lock);
...@@ -1697,7 +1674,7 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry) ...@@ -1697,7 +1674,7 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry)
if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) { if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo) { if (sbinfo->max_inodes) {
spin_lock(&sbinfo->stat_lock); spin_lock(&sbinfo->stat_lock);
sbinfo->free_inodes++; sbinfo->free_inodes++;
spin_unlock(&sbinfo->stat_lock); spin_unlock(&sbinfo->stat_lock);
...@@ -1921,22 +1898,42 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, ...@@ -1921,22 +1898,42 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid,
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{ {
struct shmem_sb_info *sbinfo = SHMEM_SB(sb); struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
unsigned long max_blocks = 0; unsigned long max_blocks = sbinfo->max_blocks;
unsigned long max_inodes = 0; unsigned long max_inodes = sbinfo->max_inodes;
unsigned long blocks;
unsigned long inodes;
int error = -EINVAL;
if (shmem_parse_options(data, NULL, NULL, NULL,
&max_blocks, &max_inodes))
return error;
if (sbinfo) { spin_lock(&sbinfo->stat_lock);
max_blocks = sbinfo->max_blocks; blocks = sbinfo->max_blocks - sbinfo->free_blocks;
max_inodes = sbinfo->max_inodes; inodes = sbinfo->max_inodes - sbinfo->free_inodes;
} if (max_blocks < blocks)
if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks, &max_inodes)) goto out;
return -EINVAL; if (max_inodes < inodes)
/* Keep it simple: disallow limited <-> unlimited remount */ goto out;
if ((max_blocks || max_inodes) == !sbinfo) /*
return -EINVAL; * Those tests also disallow limited->unlimited while any are in
/* But allow the pointless unlimited -> unlimited remount */ * use, so i_blocks will always be zero when max_blocks is zero;
if (!sbinfo) * but we must separately disallow unlimited->limited, because
return 0; * in that case we have no record of how much is already in use.
return shmem_set_size(sbinfo, max_blocks, max_inodes); */
if (max_blocks && !sbinfo->max_blocks)
goto out;
if (max_inodes && !sbinfo->max_inodes)
goto out;
error = 0;
sbinfo->max_blocks = max_blocks;
sbinfo->free_blocks = max_blocks - blocks;
sbinfo->max_inodes = max_inodes;
sbinfo->free_inodes = max_inodes - inodes;
out:
spin_unlock(&sbinfo->stat_lock);
return error;
} }
#endif #endif
...@@ -1961,11 +1958,11 @@ static int shmem_fill_super(struct super_block *sb, ...@@ -1961,11 +1958,11 @@ static int shmem_fill_super(struct super_block *sb,
uid_t uid = current->fsuid; uid_t uid = current->fsuid;
gid_t gid = current->fsgid; gid_t gid = current->fsgid;
int err = -ENOMEM; int err = -ENOMEM;
struct shmem_sb_info *sbinfo;
#ifdef CONFIG_TMPFS
unsigned long blocks = 0; unsigned long blocks = 0;
unsigned long inodes = 0; unsigned long inodes = 0;
#ifdef CONFIG_TMPFS
/* /*
* Per default we only allow half of the physical ram per * Per default we only allow half of the physical ram per
* tmpfs instance, limiting inodes to one per page of lowmem; * tmpfs instance, limiting inodes to one per page of lowmem;
...@@ -1976,34 +1973,34 @@ static int shmem_fill_super(struct super_block *sb, ...@@ -1976,34 +1973,34 @@ static int shmem_fill_super(struct super_block *sb,
inodes = totalram_pages - totalhigh_pages; inodes = totalram_pages - totalhigh_pages;
if (inodes > blocks) if (inodes > blocks)
inodes = blocks; inodes = blocks;
if (shmem_parse_options(data, &mode, &uid, &gid,
if (shmem_parse_options(data, &mode, &blocks, &inodes))
&uid, &gid, &blocks, &inodes))
return -EINVAL; return -EINVAL;
} }
#else
sb->s_flags |= MS_NOUSER;
#endif
if (blocks || inodes) { /* Round up to L1_CACHE_BYTES to resist false sharing */
struct shmem_sb_info *sbinfo; sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
sbinfo = kmalloc(sizeof(struct shmem_sb_info), GFP_KERNEL); L1_CACHE_BYTES), GFP_KERNEL);
if (!sbinfo) if (!sbinfo)
return -ENOMEM; return -ENOMEM;
sb->s_fs_info = sbinfo;
spin_lock_init(&sbinfo->stat_lock); spin_lock_init(&sbinfo->stat_lock);
sbinfo->max_blocks = blocks; sbinfo->max_blocks = blocks;
sbinfo->free_blocks = blocks; sbinfo->free_blocks = blocks;
sbinfo->max_inodes = inodes; sbinfo->max_inodes = inodes;
sbinfo->free_inodes = inodes; sbinfo->free_inodes = inodes;
}
sb->s_xattr = shmem_xattr_handlers;
#else
sb->s_flags |= MS_NOUSER;
#endif
sb->s_fs_info = sbinfo;
sb->s_maxbytes = SHMEM_MAX_BYTES; sb->s_maxbytes = SHMEM_MAX_BYTES;
sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = TMPFS_MAGIC; sb->s_magic = TMPFS_MAGIC;
sb->s_op = &shmem_ops; sb->s_op = &shmem_ops;
sb->s_xattr = shmem_xattr_handlers;
inode = shmem_get_inode(sb, S_IFDIR | mode, 0); inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
if (!inode) if (!inode)
goto failed; goto failed;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment