Commit df5660cf authored by Darrick J. Wong, committed by Dave Chinner

xfs: implement per-mount warnings for scrub and shrink usage

Currently, we don't have a consistent story around logging when an
EXPERIMENTAL feature gets turned on at runtime -- online fsck and shrink
log a message once per day across all mounts, and the recently merged
LARP mode only ever does it once per insmod cycle or reboot.

Because EXPERIMENTAL tags are supposed to go away eventually, convert
the existing daily warnings into state flags that travel with the mount,
and warn once per mount.  Making this an opstate flag means that we'll
be able to capture the experimental usage in the ftrace output too.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent 37403796
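
The patch below implements the warn-once-per-mount behaviour with a per-mount state word and an atomic test-and-set of a bit. To illustrate just that pattern in isolation, here is a minimal userspace sketch: the first caller to trip a given warning bit prints the message, every later caller on the same "mount" stays silent. All names here (demo_mount, DEMO_WARNED_SCRUB, demo_should_warn, demo_warn_mount) are hypothetical stand-ins for illustration, not the kernel code in the diff.

/* Userspace sketch of the warn-once-per-mount pattern; names are illustrative. */
#include <stdatomic.h>
#include <stdio.h>

enum { DEMO_WARNED_SCRUB = 7, DEMO_WARNED_SHRINK = 8 };

struct demo_mount {
	atomic_ulong	opstate;	/* stands in for mp->m_opstate */
};

/*
 * Returns true only for the first caller to set this bit, mirroring
 * !test_and_set_bit(nr, &mp->m_opstate) in the patch below.
 */
static int demo_should_warn(struct demo_mount *mp, int nr)
{
	unsigned long bit = 1UL << nr;

	return !(atomic_fetch_or(&mp->opstate, bit) & bit);
}

static void demo_warn_mount(struct demo_mount *mp, int nr, const char *msg)
{
	if (demo_should_warn(mp, nr))
		printf("warning: %s\n", msg);
}

int main(void)
{
	struct demo_mount m = { .opstate = 0 };

	/* Only the first call prints; the bit stays set for the mount's lifetime. */
	demo_warn_mount(&m, DEMO_WARNED_SCRUB,
		"EXPERIMENTAL online scrub feature in use. Use at your own risk!");
	demo_warn_mount(&m, DEMO_WARNED_SCRUB,
		"EXPERIMENTAL online scrub feature in use. Use at your own risk!");
	return 0;
}

In the kernel patch itself the state word is mp->m_opstate, the test-and-set is the new xfs_should_warn() helper, and the new opstate bits are also added to XFS_OPSTATE_STRINGS so the warning state shows up in trace output.
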
@@ -340,20 +340,6 @@ static const struct xchk_meta_ops meta_scrub_ops[] = {
 	},
 };
 
-/* This isn't a stable feature, warn once per day. */
-static inline void
-xchk_experimental_warning(
-	struct xfs_mount	*mp)
-{
-	static struct ratelimit_state scrub_warning = RATELIMIT_STATE_INIT(
-			"xchk_warning", 86400 * HZ, 1);
-	ratelimit_set_flags(&scrub_warning, RATELIMIT_MSG_ON_RELEASE);
-
-	if (__ratelimit(&scrub_warning))
-		xfs_alert(mp,
-"EXPERIMENTAL online scrub feature in use. Use at your own risk!");
-}
-
 static int
 xchk_validate_inputs(
 	struct xfs_mount	*mp,
@@ -478,7 +464,8 @@ xfs_scrub_metadata(
 	if (error)
 		goto out;
 
-	xchk_experimental_warning(mp);
+	xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SCRUB,
+ "EXPERIMENTAL online scrub feature in use. Use at your own risk!");
 
 	sc = kmem_zalloc(sizeof(struct xfs_scrub), KM_NOFS | KM_MAYFAIL);
 	if (!sc) {
......
@@ -149,12 +149,7 @@ xfs_growfs_data_private(
 		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
 				delta, &lastag_extended);
 	} else {
-		static struct ratelimit_state shrink_warning = \
-			RATELIMIT_STATE_INIT("shrink_warning", 86400 * HZ, 1);
-
-		ratelimit_set_flags(&shrink_warning, RATELIMIT_MSG_ON_RELEASE);
-		if (__ratelimit(&shrink_warning))
-			xfs_alert(mp,
+		xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
 	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");
 
 		error = xfs_ag_shrink_space(mp, &tp, nagcount - 1, -delta);
......
@@ -75,6 +75,12 @@ do { \
 #define xfs_debug_ratelimited(dev, fmt, ...) \
 	xfs_printk_ratelimited(xfs_debug, dev, fmt, ##__VA_ARGS__)
 
+#define xfs_warn_mount(mp, warntag, fmt, ...) \
+do { \
+	if (xfs_should_warn((mp), (warntag))) \
+		xfs_warn((mp), (fmt), ##__VA_ARGS__); \
+} while (0)
+
 #define xfs_warn_once(dev, fmt, ...) \
 	xfs_printk_once(xfs_warn, dev, fmt, ##__VA_ARGS__)
 #define xfs_notice_once(dev, fmt, ...) \
......
@@ -391,6 +391,11 @@ __XFS_HAS_FEAT(nouuid, NOUUID)
  */
 #define XFS_OPSTATE_BLOCKGC_ENABLED	6
 
+/* Kernel has logged a warning about online fsck being used on this fs. */
+#define XFS_OPSTATE_WARNED_SCRUB	7
+/* Kernel has logged a warning about shrink being used on this fs. */
+#define XFS_OPSTATE_WARNED_SHRINK	8
+
 #define __XFS_IS_OPSTATE(name, NAME) \
 static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
 { \
@@ -413,6 +418,12 @@ __XFS_IS_OPSTATE(readonly, READONLY)
 __XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
 __XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
 
+static inline bool
+xfs_should_warn(struct xfs_mount *mp, long nr)
+{
+	return !test_and_set_bit(nr, &mp->m_opstate);
+}
+
 #define XFS_OPSTATE_STRINGS \
 	{ (1UL << XFS_OPSTATE_UNMOUNTING),		"unmounting" }, \
 	{ (1UL << XFS_OPSTATE_CLEAN),			"clean" }, \
@@ -420,7 +431,9 @@ __XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
 	{ (1UL << XFS_OPSTATE_INODE32),			"inode32" }, \
 	{ (1UL << XFS_OPSTATE_READONLY),		"read_only" }, \
 	{ (1UL << XFS_OPSTATE_INODEGC_ENABLED),		"inodegc" }, \
-	{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),		"blockgc" }
+	{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),		"blockgc" }, \
+	{ (1UL << XFS_OPSTATE_WARNED_SCRUB),		"wscrub" }, \
+	{ (1UL << XFS_OPSTATE_WARNED_SHRINK),		"wshrink" }
 
 /*
  * Max and min values for mount-option defined I/O
......