Commit 0fc3bcd6 authored by Andreas Gruenbacher

gfs2: Clean up the error handling in gfs2_page_mkwrite

We're setting an error number so that block_page_mkwrite_return
translates it into the corresponding VM_FAULT_* code in several places,
but this is getting confusing, so set the VM_FAULT_* codes directly
instead.  (No change in functionality.)
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
parent 5d49d350
@@ -427,22 +427,25 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
struct gfs2_alloc_parms ap = { .aflags = 0, }; struct gfs2_alloc_parms ap = { .aflags = 0, };
u64 offset = page_offset(page); u64 offset = page_offset(page);
unsigned int data_blocks, ind_blocks, rblocks; unsigned int data_blocks, ind_blocks, rblocks;
vm_fault_t ret = VM_FAULT_LOCKED;
struct gfs2_holder gh; struct gfs2_holder gh;
unsigned int length; unsigned int length;
loff_t size; loff_t size;
int ret; int err;
sb_start_pagefault(inode->i_sb); sb_start_pagefault(inode->i_sb);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh); err = gfs2_glock_nq(&gh);
if (ret) if (err) {
ret = block_page_mkwrite_return(err);
goto out_uninit; goto out_uninit;
}
/* Check page index against inode size */ /* Check page index against inode size */
size = i_size_read(inode); size = i_size_read(inode);
if (offset >= size) { if (offset >= size) {
ret = -EINVAL; ret = VM_FAULT_SIGBUS;
goto out_unlock; goto out_unlock;
} }
@@ -469,24 +472,30 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
!gfs2_write_alloc_required(ip, offset, length)) { !gfs2_write_alloc_required(ip, offset, length)) {
lock_page(page); lock_page(page);
if (!PageUptodate(page) || page->mapping != inode->i_mapping) { if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
ret = -EAGAIN; ret = VM_FAULT_NOPAGE;
unlock_page(page); unlock_page(page);
} }
goto out_unlock; goto out_unlock;
} }
ret = gfs2_rindex_update(sdp); err = gfs2_rindex_update(sdp);
if (ret) if (err) {
ret = block_page_mkwrite_return(err);
goto out_unlock; goto out_unlock;
}
gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks); gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
ap.target = data_blocks + ind_blocks; ap.target = data_blocks + ind_blocks;
ret = gfs2_quota_lock_check(ip, &ap); err = gfs2_quota_lock_check(ip, &ap);
if (ret) if (err) {
ret = block_page_mkwrite_return(err);
goto out_unlock; goto out_unlock;
ret = gfs2_inplace_reserve(ip, &ap); }
if (ret) err = gfs2_inplace_reserve(ip, &ap);
if (err) {
ret = block_page_mkwrite_return(err);
goto out_quota_unlock; goto out_quota_unlock;
}
rblocks = RES_DINODE + ind_blocks; rblocks = RES_DINODE + ind_blocks;
if (gfs2_is_jdata(ip)) if (gfs2_is_jdata(ip))
@@ -495,27 +504,35 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
rblocks += RES_STATFS + RES_QUOTA; rblocks += RES_STATFS + RES_QUOTA;
rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks); rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
} }
ret = gfs2_trans_begin(sdp, rblocks, 0); err = gfs2_trans_begin(sdp, rblocks, 0);
if (ret) if (err) {
ret = block_page_mkwrite_return(err);
goto out_trans_fail; goto out_trans_fail;
}
lock_page(page); lock_page(page);
ret = -EAGAIN;
/* If truncated, we must retry the operation, we may have raced /* If truncated, we must retry the operation, we may have raced
* with the glock demotion code. * with the glock demotion code.
*/ */
if (!PageUptodate(page) || page->mapping != inode->i_mapping) if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
ret = VM_FAULT_NOPAGE;
goto out_trans_end; goto out_trans_end;
}
/* Unstuff, if required, and allocate backing blocks for page */ /* Unstuff, if required, and allocate backing blocks for page */
ret = 0; if (gfs2_is_stuffed(ip)) {
if (gfs2_is_stuffed(ip)) err = gfs2_unstuff_dinode(ip, page);
ret = gfs2_unstuff_dinode(ip, page); if (err) {
if (ret == 0) ret = block_page_mkwrite_return(err);
ret = gfs2_allocate_page_backing(page, length); goto out_trans_end;
}
}
err = gfs2_allocate_page_backing(page, length);
if (err)
ret = block_page_mkwrite_return(err);
out_trans_end: out_trans_end:
if (ret) if (ret != VM_FAULT_LOCKED)
unlock_page(page); unlock_page(page);
gfs2_trans_end(sdp); gfs2_trans_end(sdp);
out_trans_fail: out_trans_fail:
@@ -526,12 +543,12 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
gfs2_glock_dq(&gh); gfs2_glock_dq(&gh);
out_uninit: out_uninit:
gfs2_holder_uninit(&gh); gfs2_holder_uninit(&gh);
if (ret == 0) { if (ret == VM_FAULT_LOCKED) {
set_page_dirty(page); set_page_dirty(page);
wait_for_stable_page(page); wait_for_stable_page(page);
} }
sb_end_pagefault(inode->i_sb); sb_end_pagefault(inode->i_sb);
return block_page_mkwrite_return(ret); return ret;
} }
static vm_fault_t gfs2_fault(struct vm_fault *vmf) static vm_fault_t gfs2_fault(struct vm_fault *vmf)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment