Commit 1ebb275a authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw: (31 commits)
  GFS2: Fix glock refcount issues
  writeback: remove unused nonblocking and congestion checks (gfs2)
  GFS2: drop rindex glock to refresh rindex list
  GFS2: Tag all metadata with jid
  GFS2: Locking order fix in gfs2_check_blk_state
  GFS2: Remove dirent_first() function
  GFS2: Display nobarrier option in /proc/mounts
  GFS2: add barrier/nobarrier mount options
  GFS2: remove division from new statfs code
  GFS2: Improve statfs and quota usability
  GFS2: Use dquot_send_warning()
  VFS: Export dquot_send_warning
  GFS2: Add set_xquota support
  GFS2: Add get_xquota support
  GFS2: Clean up gfs2_adjust_quota() and do_glock()
  GFS2: Remove constant argument from qd_get()
  GFS2: Remove constant argument from qdsb_get()
  GFS2: Add proper error reporting to quota sync via sysfs
  GFS2: Add get_xstate quota function
  GFS2: Remove obsolete code in quota.c
  ...
parents 83fdbfbf 26bb7505
...@@ -8,6 +8,8 @@ config GFS2_FS ...@@ -8,6 +8,8 @@ config GFS2_FS
select FS_POSIX_ACL select FS_POSIX_ACL
select CRC32 select CRC32
select SLOW_WORK select SLOW_WORK
select QUOTA
select QUOTACTL
help help
A cluster filesystem. A cluster filesystem.
......
This diff is collapsed.
...@@ -13,26 +13,12 @@ ...@@ -13,26 +13,12 @@
#include "incore.h" #include "incore.h"
#define GFS2_POSIX_ACL_ACCESS "posix_acl_access" #define GFS2_POSIX_ACL_ACCESS "posix_acl_access"
#define GFS2_POSIX_ACL_ACCESS_LEN 16
#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default" #define GFS2_POSIX_ACL_DEFAULT "posix_acl_default"
#define GFS2_POSIX_ACL_DEFAULT_LEN 17 #define GFS2_ACL_MAX_ENTRIES 25
#define GFS2_ACL_IS_ACCESS(name, len) \ extern int gfs2_check_acl(struct inode *inode, int mask);
((len) == GFS2_POSIX_ACL_ACCESS_LEN && \ extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode);
!memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len))) extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
extern struct xattr_handler gfs2_xattr_system_handler;
#define GFS2_ACL_IS_DEFAULT(name, len) \
((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
!memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))
struct gfs2_ea_request;
int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
struct gfs2_ea_request *er,
int *remove, mode_t *mode);
int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
int gfs2_check_acl(struct inode *inode, int mask);
int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
#endif /* __ACL_DOT_H__ */ #endif /* __ACL_DOT_H__ */
...@@ -269,7 +269,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping, ...@@ -269,7 +269,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned offset = i_size & (PAGE_CACHE_SIZE-1); unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
struct backing_dev_info *bdi = mapping->backing_dev_info;
int i; int i;
int ret; int ret;
...@@ -313,11 +312,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping, ...@@ -313,11 +312,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
if (ret || (--(wbc->nr_to_write) <= 0)) if (ret || (--(wbc->nr_to_write) <= 0))
ret = 1; ret = 1;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
ret = 1;
}
} }
gfs2_trans_end(sdp); gfs2_trans_end(sdp);
return ret; return ret;
...@@ -338,7 +332,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping, ...@@ -338,7 +332,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
static int gfs2_write_cache_jdata(struct address_space *mapping, static int gfs2_write_cache_jdata(struct address_space *mapping,
struct writeback_control *wbc) struct writeback_control *wbc)
{ {
struct backing_dev_info *bdi = mapping->backing_dev_info;
int ret = 0; int ret = 0;
int done = 0; int done = 0;
struct pagevec pvec; struct pagevec pvec;
...@@ -348,11 +341,6 @@ static int gfs2_write_cache_jdata(struct address_space *mapping, ...@@ -348,11 +341,6 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
int scanned = 0; int scanned = 0;
int range_whole = 0; int range_whole = 0;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
return 0;
}
pagevec_init(&pvec, 0); pagevec_init(&pvec, 0);
if (wbc->range_cyclic) { if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */ index = mapping->writeback_index; /* Start from prev offset */
...@@ -819,8 +807,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, ...@@ -819,8 +807,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
mark_inode_dirty(inode); mark_inode_dirty(inode);
} }
if (inode == sdp->sd_rindex) if (inode == sdp->sd_rindex) {
adjust_fs_space(inode); adjust_fs_space(inode);
ip->i_gh.gh_flags |= GL_NOCACHE;
}
brelse(dibh); brelse(dibh);
gfs2_trans_end(sdp); gfs2_trans_end(sdp);
...@@ -889,8 +879,10 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping, ...@@ -889,8 +879,10 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
mark_inode_dirty(inode); mark_inode_dirty(inode);
} }
if (inode == sdp->sd_rindex) if (inode == sdp->sd_rindex) {
adjust_fs_space(inode); adjust_fs_space(inode);
ip->i_gh.gh_flags |= GL_NOCACHE;
}
brelse(dibh); brelse(dibh);
gfs2_trans_end(sdp); gfs2_trans_end(sdp);
......
...@@ -525,38 +525,6 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf, ...@@ -525,38 +525,6 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
} }
/**
* dirent_first - Return the first dirent
* @dip: the directory
* @bh: The buffer
* @dent: Pointer to list of dirents
*
* return first dirent whether bh points to leaf or stuffed dinode
*
* Returns: IS_LEAF, IS_DINODE, or -errno
*/
static int dirent_first(struct gfs2_inode *dip, struct buffer_head *bh,
struct gfs2_dirent **dent)
{
struct gfs2_meta_header *h = (struct gfs2_meta_header *)bh->b_data;
if (be32_to_cpu(h->mh_type) == GFS2_METATYPE_LF) {
if (gfs2_meta_check(GFS2_SB(&dip->i_inode), bh))
return -EIO;
*dent = (struct gfs2_dirent *)(bh->b_data +
sizeof(struct gfs2_leaf));
return IS_LEAF;
} else {
if (gfs2_metatype_check(GFS2_SB(&dip->i_inode), bh, GFS2_METATYPE_DI))
return -EIO;
*dent = (struct gfs2_dirent *)(bh->b_data +
sizeof(struct gfs2_dinode));
return IS_DINODE;
}
}
static int dirent_check_reclen(struct gfs2_inode *dip, static int dirent_check_reclen(struct gfs2_inode *dip,
const struct gfs2_dirent *d, const void *end_p) const struct gfs2_dirent *d, const void *end_p)
{ {
...@@ -1006,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) ...@@ -1006,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
divider = (start + half_len) << (32 - dip->i_depth); divider = (start + half_len) << (32 - dip->i_depth);
/* Copy the entries */ /* Copy the entries */
dirent_first(dip, obh, &dent); dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));
do { do {
next = dent; next = dent;
......
...@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl) ...@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl)
int rv = 0; int rv = 0;
write_lock(gl_lock_addr(gl->gl_hash)); write_lock(gl_lock_addr(gl->gl_hash));
if (atomic_dec_and_test(&gl->gl_ref)) { if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
hlist_del(&gl->gl_list); hlist_del(&gl->gl_list);
write_unlock(gl_lock_addr(gl->gl_hash));
spin_lock(&lru_lock);
if (!list_empty(&gl->gl_lru)) { if (!list_empty(&gl->gl_lru)) {
list_del_init(&gl->gl_lru); list_del_init(&gl->gl_lru);
atomic_dec(&lru_count); atomic_dec(&lru_count);
} }
spin_unlock(&lru_lock); spin_unlock(&lru_lock);
write_unlock(gl_lock_addr(gl->gl_hash));
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
glock_free(gl); glock_free(gl);
rv = 1; rv = 1;
...@@ -513,7 +512,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) ...@@ -513,7 +512,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
GLOCK_BUG_ON(gl, 1); GLOCK_BUG_ON(gl, 1);
} }
spin_unlock(&gl->gl_spin); spin_unlock(&gl->gl_spin);
gfs2_glock_put(gl);
return; return;
} }
...@@ -524,8 +522,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) ...@@ -524,8 +522,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
if (glops->go_xmote_bh) { if (glops->go_xmote_bh) {
spin_unlock(&gl->gl_spin); spin_unlock(&gl->gl_spin);
rv = glops->go_xmote_bh(gl, gh); rv = glops->go_xmote_bh(gl, gh);
if (rv == -EAGAIN)
return;
spin_lock(&gl->gl_spin); spin_lock(&gl->gl_spin);
if (rv) { if (rv) {
do_error(gl, rv); do_error(gl, rv);
...@@ -540,7 +536,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) ...@@ -540,7 +536,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
clear_bit(GLF_LOCK, &gl->gl_flags); clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked: out_locked:
spin_unlock(&gl->gl_spin); spin_unlock(&gl->gl_spin);
gfs2_glock_put(gl);
} }
static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
...@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin) ...@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin)
if (!(ret & LM_OUT_ASYNC)) { if (!(ret & LM_OUT_ASYNC)) {
finish_xmote(gl, ret); finish_xmote(gl, ret);
gfs2_glock_hold(gl);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl); gfs2_glock_put(gl);
} else { } else {
...@@ -672,12 +666,17 @@ __acquires(&gl->gl_spin) ...@@ -672,12 +666,17 @@ __acquires(&gl->gl_spin)
return; return;
out_sched: out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_clear_bit();
gfs2_glock_hold(gl); gfs2_glock_hold(gl);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put_nolock(gl); gfs2_glock_put_nolock(gl);
return;
out_unlock: out_unlock:
clear_bit(GLF_LOCK, &gl->gl_flags); clear_bit(GLF_LOCK, &gl->gl_flags);
goto out; smp_mb__after_clear_bit();
return;
} }
static void delete_work_func(struct work_struct *work) static void delete_work_func(struct work_struct *work)
...@@ -707,9 +706,12 @@ static void glock_work_func(struct work_struct *work) ...@@ -707,9 +706,12 @@ static void glock_work_func(struct work_struct *work)
{ {
unsigned long delay = 0; unsigned long delay = 0;
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
int drop_ref = 0;
if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
finish_xmote(gl, gl->gl_reply); finish_xmote(gl, gl->gl_reply);
drop_ref = 1;
}
down_read(&gfs2_umount_flush_sem); down_read(&gfs2_umount_flush_sem);
spin_lock(&gl->gl_spin); spin_lock(&gl->gl_spin);
if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
...@@ -727,6 +729,8 @@ static void glock_work_func(struct work_struct *work) ...@@ -727,6 +729,8 @@ static void glock_work_func(struct work_struct *work)
if (!delay || if (!delay ||
queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl); gfs2_glock_put(gl);
if (drop_ref)
gfs2_glock_put(gl);
} }
/** /**
...@@ -1361,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) ...@@ -1361,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
list_del_init(&gl->gl_lru); list_del_init(&gl->gl_lru);
atomic_dec(&lru_count); atomic_dec(&lru_count);
/* Check if glock is about to be freed */
if (atomic_read(&gl->gl_ref) == 0)
continue;
/* Test for being demotable */ /* Test for being demotable */
if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
gfs2_glock_hold(gl); gfs2_glock_hold(gl);
...@@ -1375,10 +1375,11 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) ...@@ -1375,10 +1375,11 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
handle_callback(gl, LM_ST_UNLOCKED, 0); handle_callback(gl, LM_ST_UNLOCKED, 0);
nr--; nr--;
} }
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_clear_bit();
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put_nolock(gl); gfs2_glock_put_nolock(gl);
spin_unlock(&gl->gl_spin); spin_unlock(&gl->gl_spin);
clear_bit(GLF_LOCK, &gl->gl_flags);
spin_lock(&lru_lock); spin_lock(&lru_lock);
continue; continue;
} }
......
...@@ -180,15 +180,6 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl) ...@@ -180,15 +180,6 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
return gl->gl_state == LM_ST_SHARED; return gl->gl_state == LM_ST_SHARED;
} }
static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
{
int ret;
spin_lock(&gl->gl_spin);
ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
spin_unlock(&gl->gl_spin);
return ret;
}
int gfs2_glock_get(struct gfs2_sbd *sdp, int gfs2_glock_get(struct gfs2_sbd *sdp,
u64 number, const struct gfs2_glock_operations *glops, u64 number, const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp); int create, struct gfs2_glock **glp);
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/buffer_head.h> #include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h> #include <linux/gfs2_ondisk.h>
#include <linux/bio.h> #include <linux/bio.h>
#include <linux/posix_acl.h>
#include "gfs2.h" #include "gfs2.h"
#include "incore.h" #include "incore.h"
...@@ -184,8 +185,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) ...@@ -184,8 +185,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
if (flags & DIO_METADATA) { if (flags & DIO_METADATA) {
struct address_space *mapping = gl->gl_aspace->i_mapping; struct address_space *mapping = gl->gl_aspace->i_mapping;
truncate_inode_pages(mapping, 0); truncate_inode_pages(mapping, 0);
if (ip) if (ip) {
set_bit(GIF_INVALID, &ip->i_flags); set_bit(GIF_INVALID, &ip->i_flags);
forget_all_cached_acls(&ip->i_inode);
}
} }
if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
......
...@@ -429,7 +429,11 @@ struct gfs2_args { ...@@ -429,7 +429,11 @@ struct gfs2_args {
unsigned int ar_meta:1; /* mount metafs */ unsigned int ar_meta:1; /* mount metafs */
unsigned int ar_discard:1; /* discard requests */ unsigned int ar_discard:1; /* discard requests */
unsigned int ar_errors:2; /* errors=withdraw | panic */ unsigned int ar_errors:2; /* errors=withdraw | panic */
unsigned int ar_nobarrier:1; /* do not send barriers */
int ar_commit; /* Commit interval */ int ar_commit; /* Commit interval */
int ar_statfs_quantum; /* The fast statfs interval */
int ar_quota_quantum; /* The quota interval */
int ar_statfs_percent; /* The % change to force sync */
}; };
struct gfs2_tune { struct gfs2_tune {
...@@ -558,6 +562,7 @@ struct gfs2_sbd { ...@@ -558,6 +562,7 @@ struct gfs2_sbd {
spinlock_t sd_statfs_spin; spinlock_t sd_statfs_spin;
struct gfs2_statfs_change_host sd_statfs_master; struct gfs2_statfs_change_host sd_statfs_master;
struct gfs2_statfs_change_host sd_statfs_local; struct gfs2_statfs_change_host sd_statfs_local;
int sd_statfs_force_sync;
/* Resource group stuff */ /* Resource group stuff */
......
...@@ -871,7 +871,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name, ...@@ -871,7 +871,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
if (error) if (error)
goto fail_gunlock2; goto fail_gunlock2;
error = gfs2_acl_create(dip, GFS2_I(inode)); error = gfs2_acl_create(dip, inode);
if (error) if (error)
goto fail_gunlock2; goto fail_gunlock2;
...@@ -947,9 +947,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) ...@@ -947,9 +947,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC); str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI); str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
str->di_header.__pad0 = 0;
str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI); str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
str->di_header.__pad1 = 0;
str->di_num.no_addr = cpu_to_be64(ip->i_no_addr); str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino); str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
str->di_mode = cpu_to_be32(ip->i_inode.i_mode); str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
......
...@@ -596,7 +596,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) ...@@ -596,7 +596,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
memset(lh, 0, sizeof(struct gfs2_log_header)); memset(lh, 0, sizeof(struct gfs2_log_header));
lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
lh->lh_header.__pad0 = cpu_to_be64(0);
lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++); lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
lh->lh_flags = cpu_to_be32(flags); lh->lh_flags = cpu_to_be32(flags);
lh->lh_tail = cpu_to_be32(tail); lh->lh_tail = cpu_to_be32(tail);
......
...@@ -132,6 +132,7 @@ static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type) ...@@ -132,6 +132,7 @@ static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{ {
struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
struct gfs2_meta_header *mh;
struct gfs2_trans *tr; struct gfs2_trans *tr;
lock_buffer(bd->bd_bh); lock_buffer(bd->bd_bh);
...@@ -148,6 +149,9 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) ...@@ -148,6 +149,9 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
gfs2_meta_check(sdp, bd->bd_bh); gfs2_meta_check(sdp, bd->bd_bh);
gfs2_pin(sdp, bd->bd_bh); gfs2_pin(sdp, bd->bd_bh);
mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
mh->__pad0 = cpu_to_be64(0);
mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
sdp->sd_log_num_buf++; sdp->sd_log_num_buf++;
list_add(&le->le_list, &sdp->sd_log_le_buf); list_add(&le->le_list, &sdp->sd_log_le_buf);
tr->tr_num_buf_new++; tr->tr_num_buf_new++;
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/mount.h> #include <linux/mount.h>
#include <linux/gfs2_ondisk.h> #include <linux/gfs2_ondisk.h>
#include <linux/slow-work.h> #include <linux/slow-work.h>
#include <linux/quotaops.h>
#include "gfs2.h" #include "gfs2.h"
#include "incore.h" #include "incore.h"
...@@ -62,13 +63,10 @@ static void gfs2_tune_init(struct gfs2_tune *gt) ...@@ -62,13 +63,10 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_quota_warn_period = 10; gt->gt_quota_warn_period = 10;
gt->gt_quota_scale_num = 1; gt->gt_quota_scale_num = 1;
gt->gt_quota_scale_den = 1; gt->gt_quota_scale_den = 1;
gt->gt_quota_quantum = 60;
gt->gt_new_files_jdata = 0; gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = 1 << 18; gt->gt_max_readahead = 1 << 18;
gt->gt_stall_secs = 600; gt->gt_stall_secs = 600;
gt->gt_complain_secs = 10; gt->gt_complain_secs = 10;
gt->gt_statfs_quantum = 30;
gt->gt_statfs_slow = 0;
} }
static struct gfs2_sbd *init_sbd(struct super_block *sb) static struct gfs2_sbd *init_sbd(struct super_block *sb)
...@@ -1114,7 +1112,7 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp) ...@@ -1114,7 +1112,7 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
* Returns: errno * Returns: errno
*/ */
static int fill_super(struct super_block *sb, void *data, int silent) static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
{ {
struct gfs2_sbd *sdp; struct gfs2_sbd *sdp;
struct gfs2_holder mount_gh; struct gfs2_holder mount_gh;
...@@ -1125,17 +1123,7 @@ static int fill_super(struct super_block *sb, void *data, int silent) ...@@ -1125,17 +1123,7 @@ static int fill_super(struct super_block *sb, void *data, int silent)
printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n"); printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n");
return -ENOMEM; return -ENOMEM;
} }
sdp->sd_args = *args;
sdp->sd_args.ar_quota = GFS2_QUOTA_DEFAULT;
sdp->sd_args.ar_data = GFS2_DATA_DEFAULT;
sdp->sd_args.ar_commit = 60;
sdp->sd_args.ar_errors = GFS2_ERRORS_DEFAULT;
error = gfs2_mount_args(sdp, &sdp->sd_args, data);
if (error) {
printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
goto fail;
}
if (sdp->sd_args.ar_spectator) { if (sdp->sd_args.ar_spectator) {
sb->s_flags |= MS_RDONLY; sb->s_flags |= MS_RDONLY;
...@@ -1143,11 +1131,15 @@ static int fill_super(struct super_block *sb, void *data, int silent) ...@@ -1143,11 +1131,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
} }
if (sdp->sd_args.ar_posix_acl) if (sdp->sd_args.ar_posix_acl)
sb->s_flags |= MS_POSIXACL; sb->s_flags |= MS_POSIXACL;
if (sdp->sd_args.ar_nobarrier)
set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
sb->s_magic = GFS2_MAGIC; sb->s_magic = GFS2_MAGIC;
sb->s_op = &gfs2_super_ops; sb->s_op = &gfs2_super_ops;
sb->s_export_op = &gfs2_export_ops; sb->s_export_op = &gfs2_export_ops;
sb->s_xattr = gfs2_xattr_handlers; sb->s_xattr = gfs2_xattr_handlers;
sb->s_qcop = &gfs2_quotactl_ops;
sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
sb->s_time_gran = 1; sb->s_time_gran = 1;
sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_maxbytes = MAX_LFS_FILESIZE;
...@@ -1160,6 +1152,15 @@ static int fill_super(struct super_block *sb, void *data, int silent) ...@@ -1160,6 +1152,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit; sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit;
sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
if (sdp->sd_args.ar_statfs_quantum) {
sdp->sd_tune.gt_statfs_slow = 0;
sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
}
else {
sdp->sd_tune.gt_statfs_slow = 1;
sdp->sd_tune.gt_statfs_quantum = 30;
}
error = init_names(sdp, silent); error = init_names(sdp, silent);
if (error) if (error)
...@@ -1243,18 +1244,127 @@ static int fill_super(struct super_block *sb, void *data, int silent) ...@@ -1243,18 +1244,127 @@ static int fill_super(struct super_block *sb, void *data, int silent)
return error; return error;
} }
static int gfs2_get_sb(struct file_system_type *fs_type, int flags, static int set_gfs2_super(struct super_block *s, void *data)
const char *dev_name, void *data, struct vfsmount *mnt)
{ {
return get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt); s->s_bdev = data;
s->s_dev = s->s_bdev->bd_dev;
/*
* We set the bdi here to the queue backing, file systems can
* overwrite this in ->fill_super()
*/
s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
return 0;
} }
static int test_meta_super(struct super_block *s, void *ptr) static int test_gfs2_super(struct super_block *s, void *ptr)
{ {
struct block_device *bdev = ptr; struct block_device *bdev = ptr;
return (bdev == s->s_bdev); return (bdev == s->s_bdev);
} }
/**
* gfs2_get_sb - Get the GFS2 superblock
* @fs_type: The GFS2 filesystem type
* @flags: Mount flags
* @dev_name: The name of the device
* @data: The mount arguments
* @mnt: The vfsmnt for this mount
*
* Q. Why not use get_sb_bdev() ?
* A. We need to select one of two root directories to mount, independent
* of whether this is the initial, or subsequent, mount of this sb
*
* Returns: 0 or -ve on error
*/
static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data, struct vfsmount *mnt)
{
struct block_device *bdev;
struct super_block *s;
fmode_t mode = FMODE_READ;
int error;
struct gfs2_args args;
struct gfs2_sbd *sdp;
if (!(flags & MS_RDONLY))
mode |= FMODE_WRITE;
bdev = open_bdev_exclusive(dev_name, mode, fs_type);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
/*
* once the super is inserted into the list by sget, s_umount
* will protect the lockfs code from trying to start a snapshot
* while we are mounting
*/
mutex_lock(&bdev->bd_fsfreeze_mutex);
if (bdev->bd_fsfreeze_count > 0) {
mutex_unlock(&bdev->bd_fsfreeze_mutex);
error = -EBUSY;
goto error_bdev;
}
s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
error = PTR_ERR(s);
if (IS_ERR(s))
goto error_bdev;
memset(&args, 0, sizeof(args));
args.ar_quota = GFS2_QUOTA_DEFAULT;
args.ar_data = GFS2_DATA_DEFAULT;
args.ar_commit = 60;
args.ar_statfs_quantum = 30;
args.ar_quota_quantum = 60;
args.ar_errors = GFS2_ERRORS_DEFAULT;
error = gfs2_mount_args(&args, data);
if (error) {
printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
if (s->s_root)
goto error_super;
deactivate_locked_super(s);
return error;
}
if (s->s_root) {
error = -EBUSY;
if ((flags ^ s->s_flags) & MS_RDONLY)
goto error_super;
close_bdev_exclusive(bdev, mode);
} else {
char b[BDEVNAME_SIZE];
s->s_flags = flags;
s->s_mode = mode;
strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
sb_set_blocksize(s, block_size(bdev));
error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
if (error) {
deactivate_locked_super(s);
return error;
}
s->s_flags |= MS_ACTIVE;
bdev->bd_super = s;
}
sdp = s->s_fs_info;
mnt->mnt_sb = s;
if (args.ar_meta)
mnt->mnt_root = dget(sdp->sd_master_dir);
else
mnt->mnt_root = dget(sdp->sd_root_dir);
return 0;
error_super:
deactivate_locked_super(s);
error_bdev:
close_bdev_exclusive(bdev, mode);
return error;
}
static int set_meta_super(struct super_block *s, void *ptr) static int set_meta_super(struct super_block *s, void *ptr)
{ {
return -EINVAL; return -EINVAL;
...@@ -1274,13 +1384,17 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags, ...@@ -1274,13 +1384,17 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
dev_name, error); dev_name, error);
return error; return error;
} }
s = sget(&gfs2_fs_type, test_meta_super, set_meta_super, s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super,
path.dentry->d_inode->i_sb->s_bdev); path.dentry->d_inode->i_sb->s_bdev);
path_put(&path); path_put(&path);
if (IS_ERR(s)) { if (IS_ERR(s)) {
printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n"); printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");
return PTR_ERR(s); return PTR_ERR(s);
} }
if ((flags ^ s->s_flags) & MS_RDONLY) {
deactivate_locked_super(s);
return -EBUSY;
}
sdp = s->s_fs_info; sdp = s->s_fs_info;
mnt->mnt_sb = s; mnt->mnt_sb = s;
mnt->mnt_root = dget(sdp->sd_master_dir); mnt->mnt_root = dget(sdp->sd_master_dir);
......
This diff is collapsed.
...@@ -25,13 +25,15 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid); ...@@ -25,13 +25,15 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 uid, u32 gid); u32 uid, u32 gid);
extern int gfs2_quota_sync(struct gfs2_sbd *sdp); extern int gfs2_quota_sync(struct super_block *sb, int type);
extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
extern int gfs2_quota_init(struct gfs2_sbd *sdp); extern int gfs2_quota_init(struct gfs2_sbd *sdp);
extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp); extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
extern int gfs2_quotad(void *data); extern int gfs2_quotad(void *data);
extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
{ {
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
...@@ -50,5 +52,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) ...@@ -50,5 +52,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
} }
extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask); extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
extern const struct quotactl_ops gfs2_quotactl_ops;
#endif /* __QUOTA_DOT_H__ */ #endif /* __QUOTA_DOT_H__ */
...@@ -410,7 +410,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea ...@@ -410,7 +410,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
memset(lh, 0, sizeof(struct gfs2_log_header)); memset(lh, 0, sizeof(struct gfs2_log_header));
lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
lh->lh_header.__pad0 = cpu_to_be64(0);
lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1); lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1);
lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT); lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT);
lh->lh_blkno = cpu_to_be32(lblock); lh->lh_blkno = cpu_to_be32(lblock);
......
...@@ -1710,11 +1710,16 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) ...@@ -1710,11 +1710,16 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{ {
struct gfs2_rgrpd *rgd; struct gfs2_rgrpd *rgd;
struct gfs2_holder ri_gh, rgd_gh; struct gfs2_holder ri_gh, rgd_gh;
struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
int ri_locked = 0;
int error; int error;
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
error = gfs2_rindex_hold(sdp, &ri_gh); error = gfs2_rindex_hold(sdp, &ri_gh);
if (error) if (error)
goto fail; goto fail;
ri_locked = 1;
}
error = -EINVAL; error = -EINVAL;
rgd = gfs2_blk2rgrpd(sdp, no_addr); rgd = gfs2_blk2rgrpd(sdp, no_addr);
...@@ -1730,6 +1735,7 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) ...@@ -1730,6 +1735,7 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
gfs2_glock_dq_uninit(&rgd_gh); gfs2_glock_dq_uninit(&rgd_gh);
fail_rindex: fail_rindex:
if (ri_locked)
gfs2_glock_dq_uninit(&ri_gh); gfs2_glock_dq_uninit(&ri_gh);
fail: fail:
return error; return error;
......
...@@ -70,6 +70,11 @@ enum { ...@@ -70,6 +70,11 @@ enum {
Opt_commit, Opt_commit,
Opt_err_withdraw, Opt_err_withdraw,
Opt_err_panic, Opt_err_panic,
Opt_statfs_quantum,
Opt_statfs_percent,
Opt_quota_quantum,
Opt_barrier,
Opt_nobarrier,
Opt_error, Opt_error,
}; };
...@@ -101,18 +106,23 @@ static const match_table_t tokens = { ...@@ -101,18 +106,23 @@ static const match_table_t tokens = {
{Opt_commit, "commit=%d"}, {Opt_commit, "commit=%d"},
{Opt_err_withdraw, "errors=withdraw"}, {Opt_err_withdraw, "errors=withdraw"},
{Opt_err_panic, "errors=panic"}, {Opt_err_panic, "errors=panic"},
{Opt_statfs_quantum, "statfs_quantum=%d"},
{Opt_statfs_percent, "statfs_percent=%d"},
{Opt_quota_quantum, "quota_quantum=%d"},
{Opt_barrier, "barrier"},
{Opt_nobarrier, "nobarrier"},
{Opt_error, NULL} {Opt_error, NULL}
}; };
/** /**
* gfs2_mount_args - Parse mount options * gfs2_mount_args - Parse mount options
* @sdp: * @args: The structure into which the parsed options will be written
* @data: * @options: The options to parse
* *
* Return: errno * Return: errno
*/ */
int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) int gfs2_mount_args(struct gfs2_args *args, char *options)
{ {
char *o; char *o;
int token; int token;
...@@ -157,7 +167,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) ...@@ -157,7 +167,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
break; break;
case Opt_debug: case Opt_debug:
if (args->ar_errors == GFS2_ERRORS_PANIC) { if (args->ar_errors == GFS2_ERRORS_PANIC) {
fs_info(sdp, "-o debug and -o errors=panic " printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
"are mutually exclusive.\n"); "are mutually exclusive.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -210,7 +220,29 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) ...@@ -210,7 +220,29 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
case Opt_commit: case Opt_commit:
rv = match_int(&tmp[0], &args->ar_commit); rv = match_int(&tmp[0], &args->ar_commit);
if (rv || args->ar_commit <= 0) { if (rv || args->ar_commit <= 0) {
fs_info(sdp, "commit mount option requires a positive numeric argument\n"); printk(KERN_WARNING "GFS2: commit mount option requires a positive numeric argument\n");
return rv ? rv : -EINVAL;
}
break;
case Opt_statfs_quantum:
rv = match_int(&tmp[0], &args->ar_statfs_quantum);
if (rv || args->ar_statfs_quantum < 0) {
printk(KERN_WARNING "GFS2: statfs_quantum mount option requires a non-negative numeric argument\n");
return rv ? rv : -EINVAL;
}
break;
case Opt_quota_quantum:
rv = match_int(&tmp[0], &args->ar_quota_quantum);
if (rv || args->ar_quota_quantum <= 0) {
printk(KERN_WARNING "GFS2: quota_quantum mount option requires a positive numeric argument\n");
return rv ? rv : -EINVAL;
}
break;
case Opt_statfs_percent:
rv = match_int(&tmp[0], &args->ar_statfs_percent);
if (rv || args->ar_statfs_percent < 0 ||
args->ar_statfs_percent > 100) {
printk(KERN_WARNING "statfs_percent mount option requires a numeric argument between 0 and 100\n");
return rv ? rv : -EINVAL; return rv ? rv : -EINVAL;
} }
break; break;
...@@ -219,15 +251,21 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) ...@@ -219,15 +251,21 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
break; break;
case Opt_err_panic: case Opt_err_panic:
if (args->ar_debug) { if (args->ar_debug) {
fs_info(sdp, "-o debug and -o errors=panic " printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
"are mutually exclusive.\n"); "are mutually exclusive.\n");
return -EINVAL; return -EINVAL;
} }
args->ar_errors = GFS2_ERRORS_PANIC; args->ar_errors = GFS2_ERRORS_PANIC;
break; break;
case Opt_barrier:
args->ar_nobarrier = 0;
break;
case Opt_nobarrier:
args->ar_nobarrier = 1;
break;
case Opt_error: case Opt_error:
default: default:
fs_info(sdp, "invalid mount option: %s\n", o); printk(KERN_WARNING "GFS2: invalid mount option: %s\n", o);
return -EINVAL; return -EINVAL;
} }
} }
...@@ -442,7 +480,10 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, ...@@ -442,7 +480,10 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
{ {
struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
struct buffer_head *l_bh; struct buffer_head *l_bh;
s64 x, y;
int need_sync = 0;
int error; int error;
error = gfs2_meta_inode_buffer(l_ip, &l_bh); error = gfs2_meta_inode_buffer(l_ip, &l_bh);
...@@ -456,9 +497,17 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, ...@@ -456,9 +497,17 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
l_sc->sc_free += free; l_sc->sc_free += free;
l_sc->sc_dinodes += dinodes; l_sc->sc_dinodes += dinodes;
gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode)); gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
if (sdp->sd_args.ar_statfs_percent) {
x = 100 * l_sc->sc_free;
y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
if (x >= y || x <= -y)
need_sync = 1;
}
spin_unlock(&sdp->sd_statfs_spin); spin_unlock(&sdp->sd_statfs_spin);
brelse(l_bh); brelse(l_bh);
if (need_sync)
gfs2_wake_up_statfs(sdp);
} }
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
...@@ -484,8 +533,9 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, ...@@ -484,8 +533,9 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
} }
int gfs2_statfs_sync(struct gfs2_sbd *sdp) int gfs2_statfs_sync(struct super_block *sb, int type)
{ {
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
...@@ -521,6 +571,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp) ...@@ -521,6 +571,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
goto out_bh2; goto out_bh2;
update_statfs(sdp, m_bh, l_bh); update_statfs(sdp, m_bh, l_bh);
sdp->sd_statfs_force_sync = 0;
gfs2_trans_end(sdp); gfs2_trans_end(sdp);
...@@ -712,8 +763,8 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) ...@@ -712,8 +763,8 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
int error; int error;
flush_workqueue(gfs2_delete_workqueue); flush_workqueue(gfs2_delete_workqueue);
gfs2_quota_sync(sdp); gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_statfs_sync(sdp); gfs2_statfs_sync(sdp->sd_vfs, 0);
error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE, error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
&t_gh); &t_gh);
...@@ -1061,8 +1112,13 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data) ...@@ -1061,8 +1112,13 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
spin_lock(&gt->gt_spin); spin_lock(&gt->gt_spin);
args.ar_commit = gt->gt_log_flush_secs; args.ar_commit = gt->gt_log_flush_secs;
args.ar_quota_quantum = gt->gt_quota_quantum;
if (gt->gt_statfs_slow)
args.ar_statfs_quantum = 0;
else
args.ar_statfs_quantum = gt->gt_statfs_quantum;
spin_unlock(&gt->gt_spin); spin_unlock(&gt->gt_spin);
error = gfs2_mount_args(sdp, &args, data); error = gfs2_mount_args(&args, data);
if (error) if (error)
return error; return error;
...@@ -1097,8 +1153,21 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data) ...@@ -1097,8 +1153,21 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
sb->s_flags |= MS_POSIXACL; sb->s_flags |= MS_POSIXACL;
else else
sb->s_flags &= ~MS_POSIXACL; sb->s_flags &= ~MS_POSIXACL;
if (sdp->sd_args.ar_nobarrier)
set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
else
clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
spin_lock(&gt->gt_spin); spin_lock(&gt->gt_spin);
gt->gt_log_flush_secs = args.ar_commit; gt->gt_log_flush_secs = args.ar_commit;
gt->gt_quota_quantum = args.ar_quota_quantum;
if (args.ar_statfs_quantum) {
gt->gt_statfs_slow = 0;
gt->gt_statfs_quantum = args.ar_statfs_quantum;
}
else {
gt->gt_statfs_slow = 1;
gt->gt_statfs_quantum = 30;
}
spin_unlock(&gt->gt_spin); spin_unlock(&gt->gt_spin);
gfs2_online_uevent(sdp); gfs2_online_uevent(sdp);
...@@ -1179,7 +1248,7 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt) ...@@ -1179,7 +1248,7 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
{ {
struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info; struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
struct gfs2_args *args = &sdp->sd_args; struct gfs2_args *args = &sdp->sd_args;
int lfsecs; int val;
if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir)) if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
seq_printf(s, ",meta"); seq_printf(s, ",meta");
...@@ -1240,9 +1309,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt) ...@@ -1240,9 +1309,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
} }
if (args->ar_discard) if (args->ar_discard)
seq_printf(s, ",discard"); seq_printf(s, ",discard");
lfsecs = sdp->sd_tune.gt_log_flush_secs; val = sdp->sd_tune.gt_log_flush_secs;
if (lfsecs != 60) if (val != 60)
seq_printf(s, ",commit=%d", lfsecs); seq_printf(s, ",commit=%d", val);
val = sdp->sd_tune.gt_statfs_quantum;
if (val != 30)
seq_printf(s, ",statfs_quantum=%d", val);
val = sdp->sd_tune.gt_quota_quantum;
if (val != 60)
seq_printf(s, ",quota_quantum=%d", val);
if (args->ar_statfs_percent)
seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
if (args->ar_errors != GFS2_ERRORS_DEFAULT) { if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
const char *state; const char *state;
...@@ -1259,6 +1336,9 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt) ...@@ -1259,6 +1336,9 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
} }
seq_printf(s, ",errors=%s", state); seq_printf(s, ",errors=%s", state);
} }
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
seq_printf(s, ",nobarrier");
return 0; return 0;
} }
......
...@@ -27,7 +27,7 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp) ...@@ -27,7 +27,7 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
extern void gfs2_jindex_free(struct gfs2_sbd *sdp); extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
extern int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *data); extern int gfs2_mount_args(struct gfs2_args *args, char *data);
extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
extern int gfs2_jdesc_check(struct gfs2_jdesc *jd); extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
...@@ -44,7 +44,7 @@ extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, ...@@ -44,7 +44,7 @@ extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
const void *buf); const void *buf);
extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
struct buffer_head *l_bh); struct buffer_head *l_bh);
extern int gfs2_statfs_sync(struct gfs2_sbd *sdp); extern int gfs2_statfs_sync(struct super_block *sb, int type);
extern int gfs2_freeze_fs(struct gfs2_sbd *sdp); extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
......
...@@ -158,7 +158,7 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf, ...@@ -158,7 +158,7 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
if (simple_strtol(buf, NULL, 0) != 1) if (simple_strtol(buf, NULL, 0) != 1)
return -EINVAL; return -EINVAL;
gfs2_statfs_sync(sdp); gfs2_statfs_sync(sdp->sd_vfs, 0);
return len; return len;
} }
...@@ -171,13 +171,14 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf, ...@@ -171,13 +171,14 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
if (simple_strtol(buf, NULL, 0) != 1) if (simple_strtol(buf, NULL, 0) != 1)
return -EINVAL; return -EINVAL;
gfs2_quota_sync(sdp); gfs2_quota_sync(sdp->sd_vfs, 0);
return len; return len;
} }
static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf, static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
size_t len) size_t len)
{ {
int error;
u32 id; u32 id;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
...@@ -185,13 +186,14 @@ static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf, ...@@ -185,13 +186,14 @@ static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
id = simple_strtoul(buf, NULL, 0); id = simple_strtoul(buf, NULL, 0);
gfs2_quota_refresh(sdp, 1, id); error = gfs2_quota_refresh(sdp, 1, id);
return len; return error ? error : len;
} }
static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf, static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
size_t len) size_t len)
{ {
int error;
u32 id; u32 id;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
...@@ -199,8 +201,8 @@ static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf, ...@@ -199,8 +201,8 @@ static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
id = simple_strtoul(buf, NULL, 0); id = simple_strtoul(buf, NULL, 0);
gfs2_quota_refresh(sdp, 0, id); error = gfs2_quota_refresh(sdp, 0, id);
return len; return error ? error : len;
} }
static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len) static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
......
...@@ -186,7 +186,7 @@ static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh, ...@@ -186,7 +186,7 @@ static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
return 0; return 0;
} }
int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
struct gfs2_ea_location *el) struct gfs2_ea_location *el)
{ {
struct ea_find ef; struct ea_find ef;
...@@ -516,7 +516,7 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea, ...@@ -516,7 +516,7 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
return error; return error;
} }
int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
char *data, size_t size) char *data, size_t size)
{ {
int ret; int ret;
...@@ -534,6 +534,36 @@ int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, ...@@ -534,6 +534,36 @@ int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
return len; return len;
} }
int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
{
struct gfs2_ea_location el;
int error;
int len;
char *data;
error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
if (error)
return error;
if (!el.el_ea)
goto out;
if (!GFS2_EA_DATA_LEN(el.el_ea))
goto out;
len = GFS2_EA_DATA_LEN(el.el_ea);
data = kmalloc(len, GFP_NOFS);
error = -ENOMEM;
if (data == NULL)
goto out;
error = gfs2_ea_get_copy(ip, &el, data, len);
if (error == 0)
error = len;
*ppdata = data;
out:
brelse(el.el_bh);
return error;
}
/** /**
* gfs2_xattr_get - Get a GFS2 extended attribute * gfs2_xattr_get - Get a GFS2 extended attribute
* @inode: The inode * @inode: The inode
...@@ -1259,22 +1289,26 @@ static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip, ...@@ -1259,22 +1289,26 @@ static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
return error; return error;
} }
int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el, int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
struct iattr *attr, char *data)
{ {
struct gfs2_ea_location el;
struct buffer_head *dibh; struct buffer_head *dibh;
int error; int error;
if (GFS2_EA_IS_STUFFED(el->el_ea)) { error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el);
if (error)
return error;
if (GFS2_EA_IS_STUFFED(el.el_ea)) {
error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
if (error) if (error)
return error; return error;
gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1); gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
memcpy(GFS2_EA2DATA(el->el_ea), data, memcpy(GFS2_EA2DATA(el.el_ea), data,
GFS2_EA_DATA_LEN(el->el_ea)); GFS2_EA_DATA_LEN(el.el_ea));
} else } else
error = ea_acl_chmod_unstuffed(ip, el->el_ea, data); error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
if (error) if (error)
return error; return error;
...@@ -1507,18 +1541,6 @@ static int gfs2_xattr_user_set(struct inode *inode, const char *name, ...@@ -1507,18 +1541,6 @@ static int gfs2_xattr_user_set(struct inode *inode, const char *name,
return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags); return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags);
} }
static int gfs2_xattr_system_get(struct inode *inode, const char *name,
void *buffer, size_t size)
{
return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
}
static int gfs2_xattr_system_set(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
return gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, flags);
}
static int gfs2_xattr_security_get(struct inode *inode, const char *name, static int gfs2_xattr_security_get(struct inode *inode, const char *name,
void *buffer, size_t size) void *buffer, size_t size)
{ {
...@@ -1543,12 +1565,6 @@ static struct xattr_handler gfs2_xattr_security_handler = { ...@@ -1543,12 +1565,6 @@ static struct xattr_handler gfs2_xattr_security_handler = {
.set = gfs2_xattr_security_set, .set = gfs2_xattr_security_set,
}; };
static struct xattr_handler gfs2_xattr_system_handler = {
.prefix = XATTR_SYSTEM_PREFIX,
.get = gfs2_xattr_system_get,
.set = gfs2_xattr_system_set,
};
struct xattr_handler *gfs2_xattr_handlers[] = { struct xattr_handler *gfs2_xattr_handlers[] = {
&gfs2_xattr_user_handler, &gfs2_xattr_user_handler,
&gfs2_xattr_security_handler, &gfs2_xattr_security_handler,
......
...@@ -62,11 +62,7 @@ extern int gfs2_ea_dealloc(struct gfs2_inode *ip); ...@@ -62,11 +62,7 @@ extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
/* Exported to acl.c */ /* Exported to acl.c */
extern int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
struct gfs2_ea_location *el); extern int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data);
extern int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
char *data, size_t size);
extern int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
struct iattr *attr, char *data);
#endif /* __EATTR_DOT_H__ */ #endif /* __EATTR_DOT_H__ */
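The hunks above replace the exported gfs2_ea_find()/gfs2_ea_get_copy()/gfs2_ea_acl_chmod() interface with the narrower gfs2_xattr_acl_get() and gfs2_xattr_acl_chmod() helpers, but the acl.c rewrite that consumes them is in one of the collapsed diffs. As a hedged, hypothetical sketch only (example_get_acl and the include list are illustrative, not part of this merge), a caller might combine the new helper with posix_acl_from_xattr(), whose GFP_NOFS change also appears in this series:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
/* plus the GFS2-internal headers declaring struct gfs2_inode,
 * gfs2_xattr_acl_get() and GFS2_POSIX_ACL_ACCESS */

static struct posix_acl *example_get_acl(struct gfs2_inode *ip)
{
	struct posix_acl *acl;
	char *data;
	int len;

	/* Per the xattr.c hunk above: returns the xattr length on success,
	 * 0 if no ACL is set, or a negative errno; on success the buffer
	 * returned in @data must be kfree()d by the caller. */
	len = gfs2_xattr_acl_get(ip, GFS2_POSIX_ACL_ACCESS, &data);
	if (len < 0)
		return ERR_PTR(len);
	if (len == 0)
		return NULL;

	acl = posix_acl_from_xattr(data, len);
	kfree(data);
	return acl;
}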
...@@ -17,7 +17,7 @@ config QUOTA ...@@ -17,7 +17,7 @@ config QUOTA
config QUOTA_NETLINK_INTERFACE config QUOTA_NETLINK_INTERFACE
bool "Report quota messages through netlink interface" bool "Report quota messages through netlink interface"
depends on QUOTA && NET depends on QUOTACTL && NET
help help
If you say Y here, quota warnings (about exceeding softlimit, reaching If you say Y here, quota warnings (about exceeding softlimit, reaching
hardlimit, etc.) will be reported through netlink interface. If unsure, hardlimit, etc.) will be reported through netlink interface. If unsure,
......
...@@ -77,10 +77,6 @@ ...@@ -77,10 +77,6 @@
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/quotaops.h> #include <linux/quotaops.h>
#include <linux/writeback.h> /* for inode_lock, oddly enough.. */ #include <linux/writeback.h> /* for inode_lock, oddly enough.. */
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
#include <net/netlink.h>
#include <net/genetlink.h>
#endif
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -1071,73 +1067,6 @@ static void print_warning(struct dquot *dquot, const int warntype) ...@@ -1071,73 +1067,6 @@ static void print_warning(struct dquot *dquot, const int warntype)
} }
#endif #endif
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
/* Netlink family structure for quota */
static struct genl_family quota_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = "VFS_DQUOT",
.version = 1,
.maxattr = QUOTA_NL_A_MAX,
};
/* Send warning to userspace about user which exceeded quota */
static void send_warning(const struct dquot *dquot, const char warntype)
{
static atomic_t seq;
struct sk_buff *skb;
void *msg_head;
int ret;
int msg_size = 4 * nla_total_size(sizeof(u32)) +
2 * nla_total_size(sizeof(u64));
/* We have to allocate using GFP_NOFS as we are called from a
* filesystem performing write and thus further recursion into
* the fs to free some data could cause deadlocks. */
skb = genlmsg_new(msg_size, GFP_NOFS);
if (!skb) {
printk(KERN_ERR
"VFS: Not enough memory to send quota warning.\n");
return;
}
msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
&quota_genl_family, 0, QUOTA_NL_C_WARNING);
if (!msg_head) {
printk(KERN_ERR
"VFS: Cannot store netlink header in quota warning.\n");
goto err_out;
}
ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
if (ret)
goto attr_err_out;
ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
MAJOR(dquot->dq_sb->s_dev));
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
MINOR(dquot->dq_sb->s_dev));
if (ret)
goto attr_err_out;
ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
if (ret)
goto attr_err_out;
genlmsg_end(skb, msg_head);
genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
return;
attr_err_out:
printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
kfree_skb(skb);
}
#endif
/* /*
* Write warnings to the console and send warning messages over netlink. * Write warnings to the console and send warning messages over netlink.
* *
...@@ -1145,17 +1074,19 @@ static void send_warning(const struct dquot *dquot, const char warntype) ...@@ -1145,17 +1074,19 @@ static void send_warning(const struct dquot *dquot, const char warntype)
*/ */
static void flush_warnings(struct dquot *const *dquots, char *warntype) static void flush_warnings(struct dquot *const *dquots, char *warntype)
{ {
struct dquot *dq;
int i; int i;
for (i = 0; i < MAXQUOTAS; i++) for (i = 0; i < MAXQUOTAS; i++) {
if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN && dq = dquots[i];
!warning_issued(dquots[i], warntype[i])) { if (dq && warntype[i] != QUOTA_NL_NOWARN &&
!warning_issued(dq, warntype[i])) {
#ifdef CONFIG_PRINT_QUOTA_WARNING #ifdef CONFIG_PRINT_QUOTA_WARNING
print_warning(dquots[i], warntype[i]); print_warning(dq, warntype[i]);
#endif
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
send_warning(dquots[i], warntype[i]);
#endif #endif
quota_send_warning(dq->dq_type, dq->dq_id,
dq->dq_sb->s_dev, warntype[i]);
}
} }
} }
...@@ -2607,12 +2538,6 @@ static int __init dquot_init(void) ...@@ -2607,12 +2538,6 @@ static int __init dquot_init(void)
register_shrinker(&dqcache_shrinker); register_shrinker(&dqcache_shrinker);
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
if (genl_register_family(&quota_genl_family) != 0)
printk(KERN_ERR
"VFS: Failed to create quota netlink interface.\n");
#endif
return 0; return 0;
} }
module_init(dquot_init); module_init(dquot_init);
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/quotaops.h> #include <linux/quotaops.h>
#include <linux/types.h> #include <linux/types.h>
#include <net/netlink.h>
#include <net/genetlink.h>
/* Check validity of generic quotactl commands */ /* Check validity of generic quotactl commands */
static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
...@@ -525,3 +527,94 @@ asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, ...@@ -525,3 +527,94 @@ asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
return ret; return ret;
} }
#endif #endif
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
/* Netlink family structure for quota */
static struct genl_family quota_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = "VFS_DQUOT",
.version = 1,
.maxattr = QUOTA_NL_A_MAX,
};
/**
* quota_send_warning - Send warning to userspace about exceeded quota
* @type: The quota type: USRQUOTA, GRPQUOTA,...
* @id: The user or group id of the quota that was exceeded
* @dev: The device on which the fs is mounted (sb->s_dev)
* @warntype: The type of the warning: QUOTA_NL_...
*
* This can be used by filesystems (including those which don't use
* dquot) to send a message to userspace relating to quota limits.
*
*/
void quota_send_warning(short type, unsigned int id, dev_t dev,
const char warntype)
{
static atomic_t seq;
struct sk_buff *skb;
void *msg_head;
int ret;
int msg_size = 4 * nla_total_size(sizeof(u32)) +
2 * nla_total_size(sizeof(u64));
/* We have to allocate using GFP_NOFS as we are called from a
* filesystem performing write and thus further recursion into
* the fs to free some data could cause deadlocks. */
skb = genlmsg_new(msg_size, GFP_NOFS);
if (!skb) {
printk(KERN_ERR
"VFS: Not enough memory to send quota warning.\n");
return;
}
msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
&quota_genl_family, 0, QUOTA_NL_C_WARNING);
if (!msg_head) {
printk(KERN_ERR
"VFS: Cannot store netlink header in quota warning.\n");
goto err_out;
}
ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
if (ret)
goto attr_err_out;
ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
if (ret)
goto attr_err_out;
ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
if (ret)
goto attr_err_out;
genlmsg_end(skb, msg_head);
genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
return;
attr_err_out:
printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
kfree_skb(skb);
}
EXPORT_SYMBOL(quota_send_warning);
static int __init quota_init(void)
{
if (genl_register_family(&quota_genl_family) != 0)
printk(KERN_ERR
"VFS: Failed to create quota netlink interface.\n");
return 0;
};
module_init(quota_init);
#endif
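The kernel-doc above notes that quota_send_warning() can be called by any filesystem, including ones that do not use the dquot core, to push a quota warning to userspace over netlink; the GFS2 changes in this series are such a user (see the quota shortlog entries above). A minimal, hypothetical caller sketch follows; the function name is illustrative, while USRQUOTA and QUOTA_NL_BHARDWARN come from <linux/quota.h>:

#include <linux/quota.h>

/* Hypothetical helper, not part of this merge: report that @uid has hit
 * its block hard limit on the filesystem backing @sb. */
static void example_report_block_hardlimit(struct super_block *sb, unsigned int uid)
{
	/* sb->s_dev matches the @dev parameter documented above; per the
	 * quota.h hunk below, the call degrades to a no-op stub when
	 * CONFIG_QUOTA_NETLINK_INTERFACE is not set. */
	quota_send_warning(USRQUOTA, uid, sb->s_dev, QUOTA_NL_BHARDWARN);
}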
...@@ -36,7 +36,7 @@ posix_acl_from_xattr(const void *value, size_t size) ...@@ -36,7 +36,7 @@ posix_acl_from_xattr(const void *value, size_t size)
if (count == 0) if (count == 0)
return NULL; return NULL;
acl = posix_acl_alloc(count, GFP_KERNEL); acl = posix_acl_alloc(count, GFP_NOFS);
if (!acl) if (!acl)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
acl_e = acl->a_entries; acl_e = acl->a_entries;
......
...@@ -81,7 +81,11 @@ struct gfs2_meta_header { ...@@ -81,7 +81,11 @@ struct gfs2_meta_header {
__be32 mh_type; __be32 mh_type;
__be64 __pad0; /* Was generation number in gfs1 */ __be64 __pad0; /* Was generation number in gfs1 */
__be32 mh_format; __be32 mh_format;
__be32 __pad1; /* Was incarnation number in gfs1 */ /* This union is to keep userspace happy */
union {
__be32 mh_jid; /* Was incarnation number in gfs1 */
__be32 __pad1;
};
}; };
/* /*
......
...@@ -147,6 +147,20 @@ static inline void forget_cached_acl(struct inode *inode, int type) ...@@ -147,6 +147,20 @@ static inline void forget_cached_acl(struct inode *inode, int type)
if (old != ACL_NOT_CACHED) if (old != ACL_NOT_CACHED)
posix_acl_release(old); posix_acl_release(old);
} }
static inline void forget_all_cached_acls(struct inode *inode)
{
struct posix_acl *old_access, *old_default;
spin_lock(&inode->i_lock);
old_access = inode->i_acl;
old_default = inode->i_default_acl;
inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
spin_unlock(&inode->i_lock);
if (old_access != ACL_NOT_CACHED)
posix_acl_release(old_access);
if (old_default != ACL_NOT_CACHED)
posix_acl_release(old_default);
}
#endif #endif
static inline void cache_no_acl(struct inode *inode) static inline void cache_no_acl(struct inode *inode)
......
...@@ -376,6 +376,17 @@ static inline unsigned int dquot_generic_flag(unsigned int flags, int type) ...@@ -376,6 +376,17 @@ static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
return flags >> _DQUOT_STATE_FLAGS; return flags >> _DQUOT_STATE_FLAGS;
} }
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
extern void quota_send_warning(short type, unsigned int id, dev_t dev,
const char warntype);
#else
static inline void quota_send_warning(short type, unsigned int id, dev_t dev,
const char warntype)
{
return;
}
#endif /* CONFIG_QUOTA_NETLINK_INTERFACE */
struct quota_info { struct quota_info {
unsigned int flags; /* Flags for diskquotas on this device */ unsigned int flags; /* Flags for diskquotas on this device */
struct mutex dqio_mutex; /* lock device while I/O in progress */ struct mutex dqio_mutex; /* lock device while I/O in progress */
......