Commit 1e72c0f7 authored by Steven Whitehouse

GFS2: Clean up gfs2_adjust_quota() and do_glock()

Both of these functions contained confusing and in one case
duplicate code. This patch adds a new check in do_glock()
so that we report -ENOENT if we are asked to sync a quota
entry which doesn't exist. Due to the previous patch this is
now reported correctly to userspace.

Also there are a few new comments, and I hope that the code
is easier to understand now.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
parent 6a6ada81
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
* fuzziness in the current usage value of IDs that are being used on different * fuzziness in the current usage value of IDs that are being used on different
* nodes in the cluster simultaneously. So, it is possible for a user on * nodes in the cluster simultaneously. So, it is possible for a user on
* multiple nodes to overrun their quota, but that overrun is controllable. * multiple nodes to overrun their quota, but that overrun is controllable.
* Since quota tags are part of transactions, there is no need to a quota check * Since quota tags are part of transactions, there is no need for a quota check
* program to be run on node crashes or anything like that. * program to be run on node crashes or anything like that.
* *
* There are a couple of knobs that let the administrator manage the quota * There are a couple of knobs that let the administrator manage the quota
...@@ -66,13 +66,6 @@ ...@@ -66,13 +66,6 @@
#define QUOTA_USER 1 #define QUOTA_USER 1
#define QUOTA_GROUP 0 #define QUOTA_GROUP 0
/*
 * In-core (CPU-endian) copy of an on-disk struct gfs2_quota entry.
 * Filled in by gfs2_quota_in() and written back by gfs2_quota_out().
 */
struct gfs2_quota_host {
u64 qu_limit;	/* block limit — presumably the hard quota limit; confirm against gfs2_ondisk.h */
u64 qu_warn;	/* warning threshold — presumably the soft limit */
s64 qu_value;	/* current usage; signed, so it can be adjusted by negative changes */
u32 qu_ll_next;	/* on-disk linked-list next field — semantics per gfs2_ondisk.h */
};
struct gfs2_quota_change_host { struct gfs2_quota_change_host {
u64 qc_change; u64 qc_change;
u32 qc_flags; /* GFS2_QCF_... */ u32 qc_flags; /* GFS2_QCF_... */
...@@ -618,33 +611,19 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change) ...@@ -618,33 +611,19 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
mutex_unlock(&sdp->sd_quota_mutex); mutex_unlock(&sdp->sd_quota_mutex);
} }
/*
 * Decode one on-disk quota record (big-endian) into its CPU-endian
 * in-core representation.
 *
 * @qu: in-core destination
 * @buf: raw buffer holding an on-disk struct gfs2_quota
 */
static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
	const struct gfs2_quota *disk = buf;

	qu->qu_limit = be64_to_cpu(disk->qu_limit);
	qu->qu_warn = be64_to_cpu(disk->qu_warn);
	qu->qu_value = be64_to_cpu(disk->qu_value);
	qu->qu_ll_next = be32_to_cpu(disk->qu_ll_next);
}
/*
 * Encode an in-core quota record back to its on-disk (big-endian)
 * layout, clearing the reserved area so stale bytes never hit disk.
 *
 * @qu: in-core source
 * @buf: raw buffer to receive an on-disk struct gfs2_quota
 */
static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
	struct gfs2_quota *disk = buf;

	disk->qu_limit = cpu_to_be64(qu->qu_limit);
	disk->qu_warn = cpu_to_be64(qu->qu_warn);
	disk->qu_value = cpu_to_be64(qu->qu_value);
	disk->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
	memset(&disk->qu_reserved, 0, sizeof(disk->qu_reserved));
}
/** /**
* gfs2_adjust_quota * gfs2_adjust_quota - adjust record of current block usage
* @ip: The quota inode
* @loc: Offset of the entry in the quota file
* @change: The amount of change to record
* @qd: The quota data
* *
* This function was mostly borrowed from gfs2_block_truncate_page which was * This function was mostly borrowed from gfs2_block_truncate_page which was
* in turn mostly borrowed from ext3 * in turn mostly borrowed from ext3
*
* Returns: 0 or -ve on error
*/ */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
s64 change, struct gfs2_quota_data *qd) s64 change, struct gfs2_quota_data *qd)
{ {
...@@ -656,8 +635,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, ...@@ -656,8 +635,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
struct buffer_head *bh; struct buffer_head *bh;
struct page *page; struct page *page;
void *kaddr; void *kaddr;
char *ptr; struct gfs2_quota *qp;
struct gfs2_quota_host qp;
s64 value; s64 value;
int err = -EIO; int err = -EIO;
...@@ -701,18 +679,13 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, ...@@ -701,18 +679,13 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
gfs2_trans_add_bh(ip->i_gl, bh, 0); gfs2_trans_add_bh(ip->i_gl, bh, 0);
kaddr = kmap_atomic(page, KM_USER0); kaddr = kmap_atomic(page, KM_USER0);
ptr = kaddr + offset; qp = kaddr + offset;
gfs2_quota_in(&qp, ptr); value = (s64)be64_to_cpu(qp->qu_value) + change;
qp.qu_value += change; qp->qu_value = cpu_to_be64(value);
value = qp.qu_value; qd->qd_qb.qb_value = qp->qu_value;
gfs2_quota_out(&qp, ptr);
flush_dcache_page(page); flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
err = 0; err = 0;
qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
qd->qd_qb.qb_value = cpu_to_be64(value);
((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
unlock: unlock:
unlock_page(page); unlock_page(page);
page_cache_release(page); page_cache_release(page);
...@@ -741,8 +714,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) ...@@ -741,8 +714,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
for (qx = 0; qx < num_qd; qx++) { for (qx = 0; qx < num_qd; qx++) {
error = gfs2_glock_nq_init(qda[qx]->qd_gl, error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
LM_ST_EXCLUSIVE,
GL_NOCACHE, &ghs[qx]); GL_NOCACHE, &ghs[qx]);
if (error) if (error)
goto out; goto out;
...@@ -797,8 +769,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) ...@@ -797,8 +769,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
qd = qda[x]; qd = qda[x];
offset = qd2offset(qd); offset = qd2offset(qd);
error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
(struct gfs2_quota_data *) (struct gfs2_quota_data *)qd);
qd);
if (error) if (error)
goto out_end_trans; goto out_end_trans;
...@@ -829,8 +800,7 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh, ...@@ -829,8 +800,7 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct gfs2_holder i_gh; struct gfs2_holder i_gh;
struct gfs2_quota_host q; struct gfs2_quota q;
char buf[sizeof(struct gfs2_quota)];
int error; int error;
struct gfs2_quota_lvb *qlvb; struct gfs2_quota_lvb *qlvb;
...@@ -853,22 +823,23 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh, ...@@ -853,22 +823,23 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
if (error) if (error)
goto fail; goto fail;
memset(buf, 0, sizeof(struct gfs2_quota)); memset(&q, 0, sizeof(struct gfs2_quota));
pos = qd2offset(qd); pos = qd2offset(qd);
error = gfs2_internal_read(ip, NULL, buf, &pos, error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
sizeof(struct gfs2_quota));
if (error < 0) if (error < 0)
goto fail_gunlock; goto fail_gunlock;
if ((error < sizeof(q)) && force_refresh) {
error = -ENOENT;
goto fail_gunlock;
}
gfs2_glock_dq_uninit(&i_gh); gfs2_glock_dq_uninit(&i_gh);
gfs2_quota_in(&q, buf);
qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC); qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
qlvb->__pad = 0; qlvb->__pad = 0;
qlvb->qb_limit = cpu_to_be64(q.qu_limit); qlvb->qb_limit = q.qu_limit;
qlvb->qb_warn = cpu_to_be64(q.qu_warn); qlvb->qb_warn = q.qu_warn;
qlvb->qb_value = cpu_to_be64(q.qu_value); qlvb->qb_value = q.qu_value;
qd->qd_qb = *qlvb; qd->qd_qb = *qlvb;
gfs2_glock_dq_uninit(q_gh); gfs2_glock_dq_uninit(q_gh);
...@@ -1126,7 +1097,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id) ...@@ -1126,7 +1097,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
gfs2_glock_dq_uninit(&q_gh); gfs2_glock_dq_uninit(&q_gh);
qd_put(qd); qd_put(qd);
return error; return error;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment