Commit 1fe4fd6f authored by Linus Torvalds

Merge tag 'xfs-6.2-fixes-2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Darrick Wong:

 - Remove some incorrect assertions

 - Fix compiler warnings about variables that could be static

 - Fix an off by one error when computing the maximum btree height that
   can cause repair failures

 - Fix the bulkstat-single ioctl not returning the root inode when asked
   to do that

 - Convey NOFS state to inodegc workers to avoid recursion in reclaim

 - Fix unnecessary variable initializations

 - Fix a bug that could result in corruption of the busy extent tree

* tag 'xfs-6.2-fixes-2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: fix extent busy updating
  xfs: xfs_qm: remove unnecessary ‘0’ values from error
  xfs: Fix deadlock on xfs_inodegc_worker
  xfs: get root inode correctly at bulkstat
  xfs: fix off-by-one error in xfs_btree_space_to_height
  xfs: make xfs_iomap_page_ops static
  xfs: don't assert if cmap covers imap after cycling lock
parents b7bfaa76 601a27ea
@@ -4666,7 +4666,12 @@ xfs_btree_space_to_height(
 	const unsigned int	*limits,
 	unsigned long long	leaf_blocks)
 {
-	unsigned long long	node_blocks = limits[1];
+	/*
+	 * The root btree block can have fewer than minrecs pointers in it
+	 * because the tree might not be big enough to require that amount of
+	 * fanout. Hence it has a minimum size of 2 pointers, not limits[1].
+	 */
+	unsigned long long	node_blocks = 2;
 	unsigned long long	blocks_left = leaf_blocks - 1;
 	unsigned int		height = 1;
...
@@ -236,6 +236,7 @@ xfs_extent_busy_update_extent(
 		 *
 		 */
 		busyp->bno = fend;
+		busyp->length = bend - fend;
 	} else if (bbno < fbno) {
 		/*
 		 * Case 8:
...
@@ -1853,12 +1853,20 @@ xfs_inodegc_worker(
 			struct xfs_inodegc, work);
 	struct llist_node	*node = llist_del_all(&gc->list);
 	struct xfs_inode	*ip, *n;
+	unsigned int		nofs_flag;

 	WRITE_ONCE(gc->items, 0);

 	if (!node)
 		return;

+	/*
+	 * We can allocate memory here while doing writeback on behalf of
+	 * memory reclaim. To avoid memory allocation deadlocks set the
+	 * task-wide nofs context for the following operations.
+	 */
+	nofs_flag = memalloc_nofs_save();
+
 	ip = llist_entry(node, struct xfs_inode, i_gclist);
 	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
@@ -1867,6 +1875,8 @@ xfs_inodegc_worker(
 		xfs_iflags_set(ip, XFS_INACTIVATING);
 		xfs_inodegc_inactivate(ip);
 	}
+
+	memalloc_nofs_restore(nofs_flag);
 }

 /*
...
@@ -754,7 +754,7 @@ xfs_bulkstat_fmt(
 static int
 xfs_bulk_ireq_setup(
 	struct xfs_mount	*mp,
-	struct xfs_bulk_ireq	*hdr,
+	const struct xfs_bulk_ireq	*hdr,
 	struct xfs_ibulk	*breq,
 	void __user		*ubuffer)
 {
@@ -780,7 +780,7 @@ xfs_bulk_ireq_setup(
 	switch (hdr->ino) {
 	case XFS_BULK_IREQ_SPECIAL_ROOT:
-		hdr->ino = mp->m_sb.sb_rootino;
+		breq->startino = mp->m_sb.sb_rootino;
 		break;
 	default:
 		return -EINVAL;
...
@@ -83,7 +83,7 @@ xfs_iomap_valid(
 	return true;
 }

-const struct iomap_page_ops xfs_iomap_page_ops = {
+static const struct iomap_page_ops xfs_iomap_page_ops = {
 	.iomap_valid		= xfs_iomap_valid,
 };
...
@@ -68,7 +68,7 @@ xfs_qm_dquot_walk(
 	while (1) {
 		struct xfs_dquot	*batch[XFS_DQ_LOOKUP_BATCH];
-		int			error = 0;
+		int			error;
 		int			i;

 		mutex_lock(&qi->qi_tree_lock);
...
@@ -416,8 +416,6 @@ xfs_reflink_fill_cow_hole(
 		goto convert;
 	}

-	ASSERT(cmap->br_startoff > imap->br_startoff);
-
 	/* Allocate the entire reservation as unwritten blocks. */
 	nimaps = 1;
 	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment