Commit 42eb8fda authored by Linus Torvalds

Merge tag 'gfs2-v5.16-rc2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 fixes from Andreas Gruenbacher:

 - The current iomap_file_buffered_write behavior of failing the entire
   write when part of the user buffer cannot be faulted in leads to an
   endless loop in gfs2. Work around that in gfs2 for now.

 - Various other bugs all over the place.

* tag 'gfs2-v5.16-rc2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: Prevent endless loops in gfs2_file_buffered_write
  gfs2: Fix "Introduce flag for glock holder auto-demotion"
  gfs2: Fix length of holes reported at end-of-file
  gfs2: release iopen glock early in evict
  gfs2: Fix atomic bug in gfs2_instantiate
  gfs2: Only dereference i->iov when iter_is_iovec(i)
parents 3fa59548 554c577c
...@@ -940,7 +940,7 @@ static int __gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length, ...@@ -940,7 +940,7 @@ static int __gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
else if (height == ip->i_height) else if (height == ip->i_height)
ret = gfs2_hole_size(inode, lblock, len, mp, iomap); ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
else else
iomap->length = size - pos; iomap->length = size - iomap->offset;
} else if (flags & IOMAP_WRITE) { } else if (flags & IOMAP_WRITE) {
u64 alloc_size; u64 alloc_size;
......
...@@ -773,8 +773,8 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i, ...@@ -773,8 +773,8 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
size_t *prev_count, size_t *prev_count,
size_t *window_size) size_t *window_size)
{ {
char __user *p = i->iov[0].iov_base + i->iov_offset;
size_t count = iov_iter_count(i); size_t count = iov_iter_count(i);
char __user *p;
int pages = 1; int pages = 1;
if (likely(!count)) if (likely(!count))
...@@ -787,14 +787,14 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i, ...@@ -787,14 +787,14 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
if (*prev_count != count || !*window_size) { if (*prev_count != count || !*window_size) {
int pages, nr_dirtied; int pages, nr_dirtied;
pages = min_t(int, BIO_MAX_VECS, pages = min_t(int, BIO_MAX_VECS, DIV_ROUND_UP(count, PAGE_SIZE));
DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE));
nr_dirtied = max(current->nr_dirtied_pause - nr_dirtied = max(current->nr_dirtied_pause -
current->nr_dirtied, 1); current->nr_dirtied, 1);
pages = min(pages, nr_dirtied); pages = min(pages, nr_dirtied);
} }
*prev_count = count; *prev_count = count;
p = i->iov[0].iov_base + i->iov_offset;
*window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p); *window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p);
return true; return true;
} }
...@@ -1013,6 +1013,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb, ...@@ -1013,6 +1013,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_holder *statfs_gh = NULL; struct gfs2_holder *statfs_gh = NULL;
size_t prev_count = 0, window_size = 0; size_t prev_count = 0, window_size = 0;
size_t orig_count = iov_iter_count(from);
size_t read = 0; size_t read = 0;
ssize_t ret; ssize_t ret;
...@@ -1057,6 +1058,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb, ...@@ -1057,6 +1058,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
if (inode == sdp->sd_rindex) if (inode == sdp->sd_rindex)
gfs2_glock_dq_uninit(statfs_gh); gfs2_glock_dq_uninit(statfs_gh);
from->count = orig_count - read;
if (should_fault_in_pages(ret, from, &prev_count, &window_size)) { if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
size_t leftover; size_t leftover;
...@@ -1064,6 +1066,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb, ...@@ -1064,6 +1066,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
leftover = fault_in_iov_iter_readable(from, window_size); leftover = fault_in_iov_iter_readable(from, window_size);
gfs2_holder_disallow_demote(gh); gfs2_holder_disallow_demote(gh);
if (leftover != window_size) { if (leftover != window_size) {
from->count = min(from->count, window_size - leftover);
if (!gfs2_holder_queued(gh)) { if (!gfs2_holder_queued(gh)) {
if (read) if (read)
goto out_uninit; goto out_uninit;
......
...@@ -411,14 +411,14 @@ static void do_error(struct gfs2_glock *gl, const int ret) ...@@ -411,14 +411,14 @@ static void do_error(struct gfs2_glock *gl, const int ret)
static void demote_incompat_holders(struct gfs2_glock *gl, static void demote_incompat_holders(struct gfs2_glock *gl,
struct gfs2_holder *new_gh) struct gfs2_holder *new_gh)
{ {
struct gfs2_holder *gh; struct gfs2_holder *gh, *tmp;
/* /*
* Demote incompatible holders before we make ourselves eligible. * Demote incompatible holders before we make ourselves eligible.
* (This holder may or may not allow auto-demoting, but we don't want * (This holder may or may not allow auto-demoting, but we don't want
* to demote the new holder before it's even granted.) * to demote the new holder before it's even granted.)
*/ */
list_for_each_entry(gh, &gl->gl_holders, gh_list) { list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
/* /*
* Since holders are at the front of the list, we stop when we * Since holders are at the front of the list, we stop when we
* find the first non-holder. * find the first non-holder.
...@@ -496,7 +496,7 @@ int gfs2_instantiate(struct gfs2_holder *gh) ...@@ -496,7 +496,7 @@ int gfs2_instantiate(struct gfs2_holder *gh)
* Since we unlock the lockref lock, we set a flag to indicate * Since we unlock the lockref lock, we set a flag to indicate
* instantiate is in progress. * instantiate is in progress.
*/ */
if (test_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) { if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG, wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
TASK_UNINTERRUPTIBLE); TASK_UNINTERRUPTIBLE);
/* /*
...@@ -509,14 +509,10 @@ int gfs2_instantiate(struct gfs2_holder *gh) ...@@ -509,14 +509,10 @@ int gfs2_instantiate(struct gfs2_holder *gh)
goto again; goto again;
} }
set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
ret = glops->go_instantiate(gh); ret = glops->go_instantiate(gh);
if (!ret) if (!ret)
clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags); clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
clear_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags); clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
smp_mb__after_atomic();
wake_up_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG);
return ret; return ret;
} }
......
...@@ -1402,13 +1402,6 @@ static void gfs2_evict_inode(struct inode *inode) ...@@ -1402,13 +1402,6 @@ static void gfs2_evict_inode(struct inode *inode)
gfs2_ordered_del_inode(ip); gfs2_ordered_del_inode(ip);
clear_inode(inode); clear_inode(inode);
gfs2_dir_hash_inval(ip); gfs2_dir_hash_inval(ip);
if (ip->i_gl) {
glock_clear_object(ip->i_gl, ip);
wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
gfs2_glock_add_to_lru(ip->i_gl);
gfs2_glock_put_eventually(ip->i_gl);
ip->i_gl = NULL;
}
if (gfs2_holder_initialized(&ip->i_iopen_gh)) { if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
...@@ -1421,6 +1414,13 @@ static void gfs2_evict_inode(struct inode *inode) ...@@ -1421,6 +1414,13 @@ static void gfs2_evict_inode(struct inode *inode)
gfs2_holder_uninit(&ip->i_iopen_gh); gfs2_holder_uninit(&ip->i_iopen_gh);
gfs2_glock_put_eventually(gl); gfs2_glock_put_eventually(gl);
} }
if (ip->i_gl) {
glock_clear_object(ip->i_gl, ip);
wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
gfs2_glock_add_to_lru(ip->i_gl);
gfs2_glock_put_eventually(ip->i_gl);
ip->i_gl = NULL;
}
} }
static struct inode *gfs2_alloc_inode(struct super_block *sb) static struct inode *gfs2_alloc_inode(struct super_block *sb)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment