Commit 1499b8a3 authored by Dave Chinner's avatar Dave Chinner

Merge branch 'guilt/5.19-miscellaneous' into xfs-5.19-for-next

parents 9a5280b3 2d9ac431
...@@ -1008,6 +1008,7 @@ xfs_rtfree_extent( ...@@ -1008,6 +1008,7 @@ xfs_rtfree_extent(
/* Find all the free records within a given range. */ /* Find all the free records within a given range. */
int int
xfs_rtalloc_query_range( xfs_rtalloc_query_range(
struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_trans *tp,
const struct xfs_rtalloc_rec *low_rec, const struct xfs_rtalloc_rec *low_rec,
const struct xfs_rtalloc_rec *high_rec, const struct xfs_rtalloc_rec *high_rec,
...@@ -1015,7 +1016,6 @@ xfs_rtalloc_query_range( ...@@ -1015,7 +1016,6 @@ xfs_rtalloc_query_range(
void *priv) void *priv)
{ {
struct xfs_rtalloc_rec rec; struct xfs_rtalloc_rec rec;
struct xfs_mount *mp = tp->t_mountp;
xfs_rtblock_t rtstart; xfs_rtblock_t rtstart;
xfs_rtblock_t rtend; xfs_rtblock_t rtend;
xfs_rtblock_t high_key; xfs_rtblock_t high_key;
...@@ -1048,7 +1048,7 @@ xfs_rtalloc_query_range( ...@@ -1048,7 +1048,7 @@ xfs_rtalloc_query_range(
rec.ar_startext = rtstart; rec.ar_startext = rtstart;
rec.ar_extcount = rtend - rtstart + 1; rec.ar_extcount = rtend - rtstart + 1;
error = fn(tp, &rec, priv); error = fn(mp, tp, &rec, priv);
if (error) if (error)
break; break;
} }
...@@ -1062,6 +1062,7 @@ xfs_rtalloc_query_range( ...@@ -1062,6 +1062,7 @@ xfs_rtalloc_query_range(
/* Find all the free records. */ /* Find all the free records. */
int int
xfs_rtalloc_query_all( xfs_rtalloc_query_all(
struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_trans *tp,
xfs_rtalloc_query_range_fn fn, xfs_rtalloc_query_range_fn fn,
void *priv) void *priv)
...@@ -1069,10 +1070,10 @@ xfs_rtalloc_query_all( ...@@ -1069,10 +1070,10 @@ xfs_rtalloc_query_all(
struct xfs_rtalloc_rec keys[2]; struct xfs_rtalloc_rec keys[2];
keys[0].ar_startext = 0; keys[0].ar_startext = 0;
keys[1].ar_startext = tp->t_mountp->m_sb.sb_rextents - 1; keys[1].ar_startext = mp->m_sb.sb_rextents - 1;
keys[0].ar_extcount = keys[1].ar_extcount = 0; keys[0].ar_extcount = keys[1].ar_extcount = 0;
return xfs_rtalloc_query_range(tp, &keys[0], &keys[1], fn, priv); return xfs_rtalloc_query_range(mp, tp, &keys[0], &keys[1], fn, priv);
} }
/* Is the given extent all free? */ /* Is the given extent all free? */
......
...@@ -911,6 +911,11 @@ xfs_log_sb( ...@@ -911,6 +911,11 @@ xfs_log_sb(
* reservations that have been taken out percpu counters. If we have an * reservations that have been taken out percpu counters. If we have an
* unclean shutdown, this will be corrected by log recovery rebuilding * unclean shutdown, this will be corrected by log recovery rebuilding
* the counters from the AGF block counts. * the counters from the AGF block counts.
*
* Do not update sb_frextents here because it is not part of the lazy
* sb counters, despite having a percpu counter. It is always kept
* consistent with the ondisk rtbitmap by xfs_trans_apply_sb_deltas()
 * and hence we don't need to update it here.
*/ */
if (xfs_has_lazysbcount(mp)) { if (xfs_has_lazysbcount(mp)) {
mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount); mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
......
...@@ -40,6 +40,7 @@ xchk_setup_rt( ...@@ -40,6 +40,7 @@ xchk_setup_rt(
/* Scrub a free extent record from the realtime bitmap. */ /* Scrub a free extent record from the realtime bitmap. */
STATIC int STATIC int
xchk_rtbitmap_rec( xchk_rtbitmap_rec(
struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_trans *tp,
const struct xfs_rtalloc_rec *rec, const struct xfs_rtalloc_rec *rec,
void *priv) void *priv)
...@@ -48,10 +49,10 @@ xchk_rtbitmap_rec( ...@@ -48,10 +49,10 @@ xchk_rtbitmap_rec(
xfs_rtblock_t startblock; xfs_rtblock_t startblock;
xfs_rtblock_t blockcount; xfs_rtblock_t blockcount;
startblock = rec->ar_startext * tp->t_mountp->m_sb.sb_rextsize; startblock = rec->ar_startext * mp->m_sb.sb_rextsize;
blockcount = rec->ar_extcount * tp->t_mountp->m_sb.sb_rextsize; blockcount = rec->ar_extcount * mp->m_sb.sb_rextsize;
if (!xfs_verify_rtext(sc->mp, startblock, blockcount)) if (!xfs_verify_rtext(mp, startblock, blockcount))
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
return 0; return 0;
} }
...@@ -114,7 +115,7 @@ xchk_rtbitmap( ...@@ -114,7 +115,7 @@ xchk_rtbitmap(
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error; return error;
error = xfs_rtalloc_query_all(sc->tp, xchk_rtbitmap_rec, sc); error = xfs_rtalloc_query_all(sc->mp, sc->tp, xchk_rtbitmap_rec, sc);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out; goto out;
......
...@@ -694,9 +694,7 @@ xfs_file_buffered_write( ...@@ -694,9 +694,7 @@ xfs_file_buffered_write(
struct kiocb *iocb, struct kiocb *iocb,
struct iov_iter *from) struct iov_iter *from)
{ {
struct file *file = iocb->ki_filp; struct inode *inode = iocb->ki_filp->f_mapping->host;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
struct xfs_inode *ip = XFS_I(inode); struct xfs_inode *ip = XFS_I(inode);
ssize_t ret; ssize_t ret;
bool cleared_space = false; bool cleared_space = false;
...@@ -767,9 +765,7 @@ xfs_file_write_iter( ...@@ -767,9 +765,7 @@ xfs_file_write_iter(
struct kiocb *iocb, struct kiocb *iocb,
struct iov_iter *from) struct iov_iter *from)
{ {
struct file *file = iocb->ki_filp; struct inode *inode = iocb->ki_filp->f_mapping->host;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
struct xfs_inode *ip = XFS_I(inode); struct xfs_inode *ip = XFS_I(inode);
ssize_t ret; ssize_t ret;
size_t ocount = iov_iter_count(from); size_t ocount = iov_iter_count(from);
...@@ -1167,12 +1163,10 @@ xfs_file_open( ...@@ -1167,12 +1163,10 @@ xfs_file_open(
struct inode *inode, struct inode *inode,
struct file *file) struct file *file)
{ {
if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
return -EFBIG;
if (xfs_is_shutdown(XFS_M(inode->i_sb))) if (xfs_is_shutdown(XFS_M(inode->i_sb)))
return -EIO; return -EIO;
file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
return 0; return generic_file_open(inode, file);
} }
STATIC int STATIC int
......
...@@ -450,11 +450,11 @@ xfs_getfsmap_logdev( ...@@ -450,11 +450,11 @@ xfs_getfsmap_logdev(
/* Transform a rtbitmap "record" into a fsmap */ /* Transform a rtbitmap "record" into a fsmap */
STATIC int STATIC int
xfs_getfsmap_rtdev_rtbitmap_helper( xfs_getfsmap_rtdev_rtbitmap_helper(
struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_trans *tp,
const struct xfs_rtalloc_rec *rec, const struct xfs_rtalloc_rec *rec,
void *priv) void *priv)
{ {
struct xfs_mount *mp = tp->t_mountp;
struct xfs_getfsmap_info *info = priv; struct xfs_getfsmap_info *info = priv;
struct xfs_rmap_irec irec; struct xfs_rmap_irec irec;
xfs_daddr_t rec_daddr; xfs_daddr_t rec_daddr;
...@@ -535,7 +535,7 @@ xfs_getfsmap_rtdev_rtbitmap_query( ...@@ -535,7 +535,7 @@ xfs_getfsmap_rtdev_rtbitmap_query(
do_div(alow.ar_startext, mp->m_sb.sb_rextsize); do_div(alow.ar_startext, mp->m_sb.sb_rextsize);
if (do_div(ahigh.ar_startext, mp->m_sb.sb_rextsize)) if (do_div(ahigh.ar_startext, mp->m_sb.sb_rextsize))
ahigh.ar_startext++; ahigh.ar_startext++;
error = xfs_rtalloc_query_range(tp, &alow, &ahigh, error = xfs_rtalloc_query_range(mp, tp, &alow, &ahigh,
xfs_getfsmap_rtdev_rtbitmap_helper, info); xfs_getfsmap_rtdev_rtbitmap_helper, info);
if (error) if (error)
goto err; goto err;
...@@ -547,7 +547,7 @@ xfs_getfsmap_rtdev_rtbitmap_query( ...@@ -547,7 +547,7 @@ xfs_getfsmap_rtdev_rtbitmap_query(
info->last = true; info->last = true;
ahigh.ar_startext = min(mp->m_sb.sb_rextents, ahigh.ar_startext); ahigh.ar_startext = min(mp->m_sb.sb_rextents, ahigh.ar_startext);
error = xfs_getfsmap_rtdev_rtbitmap_helper(tp, &ahigh, info); error = xfs_getfsmap_rtdev_rtbitmap_helper(mp, tp, &ahigh, info);
if (error) if (error)
goto err; goto err;
err: err:
......
...@@ -349,10 +349,7 @@ xfs_fs_counts( ...@@ -349,10 +349,7 @@ xfs_fs_counts(
cnt->freeino = percpu_counter_read_positive(&mp->m_ifree); cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) - cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
xfs_fdblocks_unavailable(mp); xfs_fdblocks_unavailable(mp);
cnt->freertx = percpu_counter_read_positive(&mp->m_frextents);
spin_lock(&mp->m_sb_lock);
cnt->freertx = mp->m_sb.sb_frextents;
spin_unlock(&mp->m_sb_lock);
} }
/* /*
......
...@@ -1916,13 +1916,16 @@ xfs_inodegc_want_queue_rt_file( ...@@ -1916,13 +1916,16 @@ xfs_inodegc_want_queue_rt_file(
struct xfs_inode *ip) struct xfs_inode *ip)
{ {
struct xfs_mount *mp = ip->i_mount; struct xfs_mount *mp = ip->i_mount;
uint64_t freertx;
if (!XFS_IS_REALTIME_INODE(ip)) if (!XFS_IS_REALTIME_INODE(ip))
return false; return false;
freertx = READ_ONCE(mp->m_sb.sb_frextents); if (__percpu_counter_compare(&mp->m_frextents,
return freertx < mp->m_low_rtexts[XFS_LOWSP_5_PCNT]; mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
XFS_FDBLOCKS_BATCH) < 0)
return true;
return false;
} }
#else #else
# define xfs_inodegc_want_queue_rt_file(ip) (false) # define xfs_inodegc_want_queue_rt_file(ip) (false)
......
...@@ -27,40 +27,32 @@ __xfs_printk( ...@@ -27,40 +27,32 @@ __xfs_printk(
printk("%sXFS: %pV\n", level, vaf); printk("%sXFS: %pV\n", level, vaf);
} }
#define define_xfs_printk_level(func, kern_level) \ void
void func(const struct xfs_mount *mp, const char *fmt, ...) \ xfs_printk_level(
{ \ const char *kern_level,
struct va_format vaf; \ const struct xfs_mount *mp,
va_list args; \ const char *fmt, ...)
int level; \ {
\ struct va_format vaf;
va_start(args, fmt); \ va_list args;
\ int level;
vaf.fmt = fmt; \
vaf.va = &args; \ va_start(args, fmt);
\ vaf.fmt = fmt;
__xfs_printk(kern_level, mp, &vaf); \ vaf.va = &args;
va_end(args); \
\ __xfs_printk(kern_level, mp, &vaf);
if (!kstrtoint(kern_level, 0, &level) && \
level <= LOGLEVEL_ERR && \ va_end(args);
xfs_error_level >= XFS_ERRLEVEL_HIGH) \
xfs_stack_trace(); \ if (!kstrtoint(kern_level, 0, &level) &&
} \ level <= LOGLEVEL_ERR &&
xfs_error_level >= XFS_ERRLEVEL_HIGH)
define_xfs_printk_level(xfs_emerg, KERN_EMERG); xfs_stack_trace();
define_xfs_printk_level(xfs_alert, KERN_ALERT); }
define_xfs_printk_level(xfs_crit, KERN_CRIT);
define_xfs_printk_level(xfs_err, KERN_ERR);
define_xfs_printk_level(xfs_warn, KERN_WARNING);
define_xfs_printk_level(xfs_notice, KERN_NOTICE);
define_xfs_printk_level(xfs_info, KERN_INFO);
#ifdef DEBUG
define_xfs_printk_level(xfs_debug, KERN_DEBUG);
#endif
void void
xfs_alert_tag( _xfs_alert_tag(
const struct xfs_mount *mp, const struct xfs_mount *mp,
int panic_tag, int panic_tag,
const char *fmt, ...) const char *fmt, ...)
......
...@@ -6,33 +6,45 @@ ...@@ -6,33 +6,45 @@
struct xfs_mount; struct xfs_mount;
extern __printf(2, 3)
void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...);
extern __printf(2, 3)
void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...);
extern __printf(3, 4) extern __printf(3, 4)
void xfs_alert_tag(const struct xfs_mount *mp, int tag, const char *fmt, ...); void xfs_printk_level(const char *kern_level, const struct xfs_mount *mp,
extern __printf(2, 3) const char *fmt, ...);
void xfs_crit(const struct xfs_mount *mp, const char *fmt, ...);
extern __printf(2, 3)
void xfs_err(const struct xfs_mount *mp, const char *fmt, ...);
extern __printf(2, 3)
void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...);
extern __printf(2, 3)
void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...);
extern __printf(2, 3)
void xfs_info(const struct xfs_mount *mp, const char *fmt, ...);
#define xfs_printk_index_wrap(kern_level, mp, fmt, ...) \
({ \
printk_index_subsys_emit("%sXFS%s: ", kern_level, fmt); \
xfs_printk_level(kern_level, mp, fmt, ##__VA_ARGS__); \
})
#define xfs_emerg(mp, fmt, ...) \
xfs_printk_index_wrap(KERN_EMERG, mp, fmt, ##__VA_ARGS__)
#define xfs_alert(mp, fmt, ...) \
xfs_printk_index_wrap(KERN_ALERT, mp, fmt, ##__VA_ARGS__)
#define xfs_crit(mp, fmt, ...) \
xfs_printk_index_wrap(KERN_CRIT, mp, fmt, ##__VA_ARGS__)
#define xfs_err(mp, fmt, ...) \
xfs_printk_index_wrap(KERN_ERR, mp, fmt, ##__VA_ARGS__)
#define xfs_warn(mp, fmt, ...) \
xfs_printk_index_wrap(KERN_WARNING, mp, fmt, ##__VA_ARGS__)
#define xfs_notice(mp, fmt, ...) \
xfs_printk_index_wrap(KERN_NOTICE, mp, fmt, ##__VA_ARGS__)
#define xfs_info(mp, fmt, ...) \
xfs_printk_index_wrap(KERN_INFO, mp, fmt, ##__VA_ARGS__)
#ifdef DEBUG #ifdef DEBUG
extern __printf(2, 3) #define xfs_debug(mp, fmt, ...) \
void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...); xfs_printk_index_wrap(KERN_DEBUG, mp, fmt, ##__VA_ARGS__)
#else #else
static inline __printf(2, 3) #define xfs_debug(mp, fmt, ...) do {} while (0)
void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
{
}
#endif #endif
#define xfs_alert_tag(mp, tag, fmt, ...) \
({ \
printk_index_subsys_emit("%sXFS%s: ", KERN_ALERT, fmt); \
_xfs_alert_tag(mp, tag, fmt, ##__VA_ARGS__); \
})
extern __printf(3, 4)
void _xfs_alert_tag(const struct xfs_mount *mp, int tag, const char *fmt, ...);
#define xfs_printk_ratelimited(func, dev, fmt, ...) \ #define xfs_printk_ratelimited(func, dev, fmt, ...) \
do { \ do { \
static DEFINE_RATELIMIT_STATE(_rs, \ static DEFINE_RATELIMIT_STATE(_rs, \
......
...@@ -468,6 +468,8 @@ STATIC int ...@@ -468,6 +468,8 @@ STATIC int
xfs_check_summary_counts( xfs_check_summary_counts(
struct xfs_mount *mp) struct xfs_mount *mp)
{ {
int error = 0;
/* /*
* The AG0 superblock verifier rejects in-progress filesystems, * The AG0 superblock verifier rejects in-progress filesystems,
* so we should never see the flag set this far into mounting. * so we should never see the flag set this far into mounting.
...@@ -506,11 +508,32 @@ xfs_check_summary_counts( ...@@ -506,11 +508,32 @@ xfs_check_summary_counts(
* superblock to be correct and we don't need to do anything here. * superblock to be correct and we don't need to do anything here.
* Otherwise, recalculate the summary counters. * Otherwise, recalculate the summary counters.
*/ */
if ((!xfs_has_lazysbcount(mp) || xfs_is_clean(mp)) && if ((xfs_has_lazysbcount(mp) && !xfs_is_clean(mp)) ||
!xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS)) xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS)) {
return 0; error = xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
if (error)
return error;
}
return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount); /*
* Older kernels misused sb_frextents to reflect both incore
* reservations made by running transactions and the actual count of
* free rt extents in the ondisk metadata. Transactions committed
* during runtime can therefore contain a superblock update that
* undercounts the number of free rt extents tracked in the rt bitmap.
* A clean unmount record will have the correct frextents value since
* there can be no other transactions running at that point.
*
* If we're mounting the rt volume after recovering the log, recompute
* frextents from the rtbitmap file to fix the inconsistency.
*/
if (xfs_has_realtime(mp) && !xfs_is_clean(mp)) {
error = xfs_rtalloc_reinit_frextents(mp);
if (error)
return error;
}
return 0;
} }
/* /*
...@@ -784,11 +807,6 @@ xfs_mountfs( ...@@ -784,11 +807,6 @@ xfs_mountfs(
goto out_inodegc_shrinker; goto out_inodegc_shrinker;
} }
/* Make sure the summary counts are ok. */
error = xfs_check_summary_counts(mp);
if (error)
goto out_log_dealloc;
/* Enable background inode inactivation workers. */ /* Enable background inode inactivation workers. */
xfs_inodegc_start(mp); xfs_inodegc_start(mp);
xfs_blockgc_start(mp); xfs_blockgc_start(mp);
...@@ -844,6 +862,11 @@ xfs_mountfs( ...@@ -844,6 +862,11 @@ xfs_mountfs(
goto out_rele_rip; goto out_rele_rip;
} }
/* Make sure the summary counts are ok. */
error = xfs_check_summary_counts(mp);
if (error)
goto out_rtunmount;
/* /*
* If this is a read-only mount defer the superblock updates until * If this is a read-only mount defer the superblock updates until
* the next remount into writeable mode. Otherwise we would never * the next remount into writeable mode. Otherwise we would never
...@@ -1087,24 +1110,33 @@ xfs_fs_writable( ...@@ -1087,24 +1110,33 @@ xfs_fs_writable(
return true; return true;
} }
/* Adjust m_fdblocks or m_frextents. */
int int
xfs_mod_fdblocks( xfs_mod_freecounter(
struct xfs_mount *mp, struct xfs_mount *mp,
struct percpu_counter *counter,
int64_t delta, int64_t delta,
bool rsvd) bool rsvd)
{ {
int64_t lcounter; int64_t lcounter;
long long res_used; long long res_used;
uint64_t set_aside = 0;
s32 batch; s32 batch;
uint64_t set_aside; bool has_resv_pool;
ASSERT(counter == &mp->m_fdblocks || counter == &mp->m_frextents);
has_resv_pool = (counter == &mp->m_fdblocks);
if (rsvd)
ASSERT(has_resv_pool);
if (delta > 0) { if (delta > 0) {
/* /*
* If the reserve pool is depleted, put blocks back into it * If the reserve pool is depleted, put blocks back into it
* first. Most of the time the pool is full. * first. Most of the time the pool is full.
*/ */
if (likely(mp->m_resblks == mp->m_resblks_avail)) { if (likely(!has_resv_pool ||
percpu_counter_add(&mp->m_fdblocks, delta); mp->m_resblks == mp->m_resblks_avail)) {
percpu_counter_add(counter, delta);
return 0; return 0;
} }
...@@ -1116,7 +1148,7 @@ xfs_mod_fdblocks( ...@@ -1116,7 +1148,7 @@ xfs_mod_fdblocks(
} else { } else {
delta -= res_used; delta -= res_used;
mp->m_resblks_avail = mp->m_resblks; mp->m_resblks_avail = mp->m_resblks;
percpu_counter_add(&mp->m_fdblocks, delta); percpu_counter_add(counter, delta);
} }
spin_unlock(&mp->m_sb_lock); spin_unlock(&mp->m_sb_lock);
return 0; return 0;
...@@ -1130,7 +1162,7 @@ xfs_mod_fdblocks( ...@@ -1130,7 +1162,7 @@ xfs_mod_fdblocks(
* then make everything serialise as we are real close to * then make everything serialise as we are real close to
* ENOSPC. * ENOSPC.
*/ */
if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH, if (__percpu_counter_compare(counter, 2 * XFS_FDBLOCKS_BATCH,
XFS_FDBLOCKS_BATCH) < 0) XFS_FDBLOCKS_BATCH) < 0)
batch = 1; batch = 1;
else else
...@@ -1147,9 +1179,10 @@ xfs_mod_fdblocks( ...@@ -1147,9 +1179,10 @@ xfs_mod_fdblocks(
* problems (i.e. transaction abort, pagecache discards, etc.) than * problems (i.e. transaction abort, pagecache discards, etc.) than
* slightly premature -ENOSPC. * slightly premature -ENOSPC.
*/ */
set_aside = xfs_fdblocks_unavailable(mp); if (has_resv_pool)
percpu_counter_add_batch(&mp->m_fdblocks, delta, batch); set_aside = xfs_fdblocks_unavailable(mp);
if (__percpu_counter_compare(&mp->m_fdblocks, set_aside, percpu_counter_add_batch(counter, delta, batch);
if (__percpu_counter_compare(counter, set_aside,
XFS_FDBLOCKS_BATCH) >= 0) { XFS_FDBLOCKS_BATCH) >= 0) {
/* we had space! */ /* we had space! */
return 0; return 0;
...@@ -1160,8 +1193,8 @@ xfs_mod_fdblocks( ...@@ -1160,8 +1193,8 @@ xfs_mod_fdblocks(
* that took us to ENOSPC. * that took us to ENOSPC.
*/ */
spin_lock(&mp->m_sb_lock); spin_lock(&mp->m_sb_lock);
percpu_counter_add(&mp->m_fdblocks, -delta); percpu_counter_add(counter, -delta);
if (!rsvd) if (!has_resv_pool || !rsvd)
goto fdblocks_enospc; goto fdblocks_enospc;
lcounter = (long long)mp->m_resblks_avail + delta; lcounter = (long long)mp->m_resblks_avail + delta;
...@@ -1178,24 +1211,6 @@ xfs_mod_fdblocks( ...@@ -1178,24 +1211,6 @@ xfs_mod_fdblocks(
return -ENOSPC; return -ENOSPC;
} }
int
xfs_mod_frextents(
struct xfs_mount *mp,
int64_t delta)
{
int64_t lcounter;
int ret = 0;
spin_lock(&mp->m_sb_lock);
lcounter = mp->m_sb.sb_frextents + delta;
if (lcounter < 0)
ret = -ENOSPC;
else
mp->m_sb.sb_frextents = lcounter;
spin_unlock(&mp->m_sb_lock);
return ret;
}
/* /*
* Used to free the superblock along various error paths. * Used to free the superblock along various error paths.
*/ */
......
...@@ -183,6 +183,8 @@ typedef struct xfs_mount { ...@@ -183,6 +183,8 @@ typedef struct xfs_mount {
struct percpu_counter m_icount; /* allocated inodes counter */ struct percpu_counter m_icount; /* allocated inodes counter */
struct percpu_counter m_ifree; /* free inodes counter */ struct percpu_counter m_ifree; /* free inodes counter */
struct percpu_counter m_fdblocks; /* free block counter */ struct percpu_counter m_fdblocks; /* free block counter */
struct percpu_counter m_frextents; /* free rt extent counter */
/* /*
* Count of data device blocks reserved for delayed allocations, * Count of data device blocks reserved for delayed allocations,
* including indlen blocks. Does not include allocated CoW staging * including indlen blocks. Does not include allocated CoW staging
...@@ -494,9 +496,20 @@ xfs_fdblocks_unavailable( ...@@ -494,9 +496,20 @@ xfs_fdblocks_unavailable(
return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks); return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
} }
extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, int xfs_mod_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
bool reserved); int64_t delta, bool rsvd);
extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
static inline int
xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, bool reserved)
{
return xfs_mod_freecounter(mp, &mp->m_fdblocks, delta, reserved);
}
static inline int
xfs_mod_frextents(struct xfs_mount *mp, int64_t delta)
{
return xfs_mod_freecounter(mp, &mp->m_frextents, delta, false);
}
extern int xfs_readsb(xfs_mount_t *, int); extern int xfs_readsb(xfs_mount_t *, int);
extern void xfs_freesb(xfs_mount_t *); extern void xfs_freesb(xfs_mount_t *);
......
...@@ -1284,6 +1284,44 @@ xfs_rtmount_init( ...@@ -1284,6 +1284,44 @@ xfs_rtmount_init(
return 0; return 0;
} }
static int
xfs_rtalloc_count_frextent(
struct xfs_mount *mp,
struct xfs_trans *tp,
const struct xfs_rtalloc_rec *rec,
void *priv)
{
uint64_t *valp = priv;
*valp += rec->ar_extcount;
return 0;
}
/*
* Reinitialize the number of free realtime extents from the realtime bitmap.
* Callers must ensure that there is no other activity in the filesystem.
*/
int
xfs_rtalloc_reinit_frextents(
struct xfs_mount *mp)
{
uint64_t val = 0;
int error;
xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
error = xfs_rtalloc_query_all(mp, NULL, xfs_rtalloc_count_frextent,
&val);
xfs_iunlock(mp->m_rbmip, XFS_ILOCK_EXCL);
if (error)
return error;
spin_lock(&mp->m_sb_lock);
mp->m_sb.sb_frextents = val;
spin_unlock(&mp->m_sb_lock);
percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
return 0;
}
/* /*
* Get the bitmap and summary inodes and the summary cache into the mount * Get the bitmap and summary inodes and the summary cache into the mount
* structure at mount time. * structure at mount time.
......
...@@ -22,6 +22,7 @@ struct xfs_rtalloc_rec { ...@@ -22,6 +22,7 @@ struct xfs_rtalloc_rec {
}; };
typedef int (*xfs_rtalloc_query_range_fn)( typedef int (*xfs_rtalloc_query_range_fn)(
struct xfs_mount *mp,
struct xfs_trans *tp, struct xfs_trans *tp,
const struct xfs_rtalloc_rec *rec, const struct xfs_rtalloc_rec *rec,
void *priv); void *priv);
...@@ -123,27 +124,29 @@ int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log, ...@@ -123,27 +124,29 @@ int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp, int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_rtblock_t start, xfs_extlen_t len, xfs_rtblock_t start, xfs_extlen_t len,
struct xfs_buf **rbpp, xfs_fsblock_t *rsb); struct xfs_buf **rbpp, xfs_fsblock_t *rsb);
int xfs_rtalloc_query_range(struct xfs_trans *tp, int xfs_rtalloc_query_range(struct xfs_mount *mp, struct xfs_trans *tp,
const struct xfs_rtalloc_rec *low_rec, const struct xfs_rtalloc_rec *low_rec,
const struct xfs_rtalloc_rec *high_rec, const struct xfs_rtalloc_rec *high_rec,
xfs_rtalloc_query_range_fn fn, void *priv); xfs_rtalloc_query_range_fn fn, void *priv);
int xfs_rtalloc_query_all(struct xfs_trans *tp, int xfs_rtalloc_query_all(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_rtalloc_query_range_fn fn, xfs_rtalloc_query_range_fn fn,
void *priv); void *priv);
bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno); bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp, int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_rtblock_t start, xfs_extlen_t len, xfs_rtblock_t start, xfs_extlen_t len,
bool *is_free); bool *is_free);
int xfs_rtalloc_reinit_frextents(struct xfs_mount *mp);
#else #else
# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (ENOSYS) # define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (ENOSYS)
# define xfs_rtfree_extent(t,b,l) (ENOSYS) # define xfs_rtfree_extent(t,b,l) (ENOSYS)
# define xfs_rtpick_extent(m,t,l,rb) (ENOSYS) # define xfs_rtpick_extent(m,t,l,rb) (ENOSYS)
# define xfs_growfs_rt(mp,in) (ENOSYS) # define xfs_growfs_rt(mp,in) (ENOSYS)
# define xfs_rtalloc_query_range(t,l,h,f,p) (ENOSYS) # define xfs_rtalloc_query_range(t,l,h,f,p) (ENOSYS)
# define xfs_rtalloc_query_all(t,f,p) (ENOSYS) # define xfs_rtalloc_query_all(m,t,f,p) (ENOSYS)
# define xfs_rtbuf_get(m,t,b,i,p) (ENOSYS) # define xfs_rtbuf_get(m,t,b,i,p) (ENOSYS)
# define xfs_verify_rtbno(m, r) (false) # define xfs_verify_rtbno(m, r) (false)
# define xfs_rtalloc_extent_is_free(m,t,s,l,i) (ENOSYS) # define xfs_rtalloc_extent_is_free(m,t,s,l,i) (ENOSYS)
# define xfs_rtalloc_reinit_frextents(m) (0)
static inline int /* error */ static inline int /* error */
xfs_rtmount_init( xfs_rtmount_init(
xfs_mount_t *mp) /* file system mount structure */ xfs_mount_t *mp) /* file system mount structure */
......
...@@ -843,9 +843,11 @@ xfs_fs_statfs( ...@@ -843,9 +843,11 @@ xfs_fs_statfs(
if (XFS_IS_REALTIME_MOUNT(mp) && if (XFS_IS_REALTIME_MOUNT(mp) &&
(ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) { (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
s64 freertx;
statp->f_blocks = sbp->sb_rblocks; statp->f_blocks = sbp->sb_rblocks;
statp->f_bavail = statp->f_bfree = freertx = percpu_counter_sum_positive(&mp->m_frextents);
sbp->sb_frextents * sbp->sb_rextsize; statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize;
} }
return 0; return 0;
...@@ -1015,8 +1017,14 @@ xfs_init_percpu_counters( ...@@ -1015,8 +1017,14 @@ xfs_init_percpu_counters(
if (error) if (error)
goto free_fdblocks; goto free_fdblocks;
error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
if (error)
goto free_delalloc;
return 0; return 0;
free_delalloc:
percpu_counter_destroy(&mp->m_delalloc_blks);
free_fdblocks: free_fdblocks:
percpu_counter_destroy(&mp->m_fdblocks); percpu_counter_destroy(&mp->m_fdblocks);
free_ifree: free_ifree:
...@@ -1033,6 +1041,7 @@ xfs_reinit_percpu_counters( ...@@ -1033,6 +1041,7 @@ xfs_reinit_percpu_counters(
percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount); percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree); percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks); percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
} }
static void static void
...@@ -1045,6 +1054,7 @@ xfs_destroy_percpu_counters( ...@@ -1045,6 +1054,7 @@ xfs_destroy_percpu_counters(
ASSERT(xfs_is_shutdown(mp) || ASSERT(xfs_is_shutdown(mp) ||
percpu_counter_sum(&mp->m_delalloc_blks) == 0); percpu_counter_sum(&mp->m_delalloc_blks) == 0);
percpu_counter_destroy(&mp->m_delalloc_blks); percpu_counter_destroy(&mp->m_delalloc_blks);
percpu_counter_destroy(&mp->m_frextents);
} }
static int static int
......
...@@ -498,10 +498,31 @@ xfs_trans_apply_sb_deltas( ...@@ -498,10 +498,31 @@ xfs_trans_apply_sb_deltas(
be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta); be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
} }
if (tp->t_frextents_delta) /*
be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta); * Updating frextents requires careful handling because it does not
if (tp->t_res_frextents_delta) * behave like the lazysb counters because we cannot rely on log
be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta); * recovery in older kernels to recompute the value from the rtbitmap.
* This means that the ondisk frextents must be consistent with the
* rtbitmap.
*
* Therefore, log the frextents change to the ondisk superblock and
* update the incore superblock so that future calls to xfs_log_sb
* write the correct value ondisk.
*
* Don't touch m_frextents because it includes incore reservations,
* and those are handled by the unreserve function.
*/
if (tp->t_frextents_delta || tp->t_res_frextents_delta) {
struct xfs_mount *mp = tp->t_mountp;
int64_t rtxdelta;
rtxdelta = tp->t_frextents_delta + tp->t_res_frextents_delta;
spin_lock(&mp->m_sb_lock);
be64_add_cpu(&sbp->sb_frextents, rtxdelta);
mp->m_sb.sb_frextents += rtxdelta;
spin_unlock(&mp->m_sb_lock);
}
if (tp->t_dblocks_delta) { if (tp->t_dblocks_delta) {
be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta); be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
...@@ -614,7 +635,12 @@ xfs_trans_unreserve_and_mod_sb( ...@@ -614,7 +635,12 @@ xfs_trans_unreserve_and_mod_sb(
if (ifreedelta) if (ifreedelta)
percpu_counter_add(&mp->m_ifree, ifreedelta); percpu_counter_add(&mp->m_ifree, ifreedelta);
if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY)) if (rtxdelta) {
error = xfs_mod_frextents(mp, rtxdelta);
ASSERT(!error);
}
if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
return; return;
/* apply remaining deltas */ /* apply remaining deltas */
...@@ -622,7 +648,12 @@ xfs_trans_unreserve_and_mod_sb( ...@@ -622,7 +648,12 @@ xfs_trans_unreserve_and_mod_sb(
mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta; mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta;
mp->m_sb.sb_icount += idelta; mp->m_sb.sb_icount += idelta;
mp->m_sb.sb_ifree += ifreedelta; mp->m_sb.sb_ifree += ifreedelta;
mp->m_sb.sb_frextents += rtxdelta; /*
* Do not touch sb_frextents here because we are dealing with incore
* reservation. sb_frextents is not part of the lazy sb counters so it
* must be consistent with the ondisk rtbitmap and must never include
* incore reservations.
*/
mp->m_sb.sb_dblocks += tp->t_dblocks_delta; mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
mp->m_sb.sb_agcount += tp->t_agcount_delta; mp->m_sb.sb_agcount += tp->t_agcount_delta;
mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta; mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment