Commit 4e0e6040 authored by Dave Chinner, committed by Ben Myers

xfs: add CRC checks to the AGF

The AGF already has some self identifying fields (e.g. the sequence
number) so we only need to add the uuid to it to identify the
filesystem it belongs to. The location is fixed based on the
sequence number, so there's no need to add a block number, either.

Hence the only additional fields are the CRC and LSN fields. These
are unlogged, so place some space between the end of the logged
fields and them so that future expansion of the AGF for logged
fields can be placed adjacent to the existing logged fields and
hence not complicate the field-derived range based logging we
currently have.

Based originally on a patch from myself, modified further by
Christoph Hellwig and then modified again to fit into the
verifier structure with additional fields by myself. The multiple
signed-off-by tags indicate the age and history of this patch.
Signed-off-by: Dave Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Ben Myers <bpm@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
parent ee1a47ab
...@@ -63,12 +63,29 @@ typedef struct xfs_agf { ...@@ -63,12 +63,29 @@ typedef struct xfs_agf {
__be32 agf_spare0; /* spare field */ __be32 agf_spare0; /* spare field */
__be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */ __be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */
__be32 agf_spare1; /* spare field */ __be32 agf_spare1; /* spare field */
__be32 agf_flfirst; /* first freelist block's index */ __be32 agf_flfirst; /* first freelist block's index */
__be32 agf_fllast; /* last freelist block's index */ __be32 agf_fllast; /* last freelist block's index */
__be32 agf_flcount; /* count of blocks in freelist */ __be32 agf_flcount; /* count of blocks in freelist */
__be32 agf_freeblks; /* total free blocks */ __be32 agf_freeblks; /* total free blocks */
__be32 agf_longest; /* longest free space */ __be32 agf_longest; /* longest free space */
__be32 agf_btreeblks; /* # of blocks held in AGF btrees */ __be32 agf_btreeblks; /* # of blocks held in AGF btrees */
uuid_t agf_uuid; /* uuid of filesystem */
/*
* reserve some contiguous space for future logged fields before we add
* the unlogged fields. This makes the range logging via flags and
* structure offsets much simpler.
*/
__be64 agf_spare64[16];
/* unlogged fields, written during buffer writeback. */
__be64 agf_lsn; /* last write sequence */
__be32 agf_crc; /* crc of agf sector */
__be32 agf_spare2;
/* structure must be padded to 64 bit alignment */
} xfs_agf_t; } xfs_agf_t;
#define XFS_AGF_MAGICNUM 0x00000001 #define XFS_AGF_MAGICNUM 0x00000001
...@@ -83,7 +100,8 @@ typedef struct xfs_agf { ...@@ -83,7 +100,8 @@ typedef struct xfs_agf {
#define XFS_AGF_FREEBLKS 0x00000200 #define XFS_AGF_FREEBLKS 0x00000200
#define XFS_AGF_LONGEST 0x00000400 #define XFS_AGF_LONGEST 0x00000400
#define XFS_AGF_BTREEBLKS 0x00000800 #define XFS_AGF_BTREEBLKS 0x00000800
#define XFS_AGF_NUM_BITS 12 #define XFS_AGF_UUID 0x00001000
#define XFS_AGF_NUM_BITS 13
#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1) #define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
#define XFS_AGF_FLAGS \ #define XFS_AGF_FLAGS \
...@@ -98,7 +116,8 @@ typedef struct xfs_agf { ...@@ -98,7 +116,8 @@ typedef struct xfs_agf {
{ XFS_AGF_FLCOUNT, "FLCOUNT" }, \ { XFS_AGF_FLCOUNT, "FLCOUNT" }, \
{ XFS_AGF_FREEBLKS, "FREEBLKS" }, \ { XFS_AGF_FREEBLKS, "FREEBLKS" }, \
{ XFS_AGF_LONGEST, "LONGEST" }, \ { XFS_AGF_LONGEST, "LONGEST" }, \
{ XFS_AGF_BTREEBLKS, "BTREEBLKS" } { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \
{ XFS_AGF_UUID, "UUID" }
/* disk block (xfs_daddr_t) in the AG */ /* disk block (xfs_daddr_t) in the AG */
#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) #define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
......
...@@ -33,7 +33,9 @@ ...@@ -33,7 +33,9 @@
#include "xfs_alloc.h" #include "xfs_alloc.h"
#include "xfs_extent_busy.h" #include "xfs_extent_busy.h"
#include "xfs_error.h" #include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h" #include "xfs_trace.h"
#include "xfs_buf_item.h"
struct workqueue_struct *xfs_alloc_wq; struct workqueue_struct *xfs_alloc_wq;
...@@ -2058,11 +2060,14 @@ xfs_alloc_log_agf( ...@@ -2058,11 +2060,14 @@ xfs_alloc_log_agf(
offsetof(xfs_agf_t, agf_freeblks), offsetof(xfs_agf_t, agf_freeblks),
offsetof(xfs_agf_t, agf_longest), offsetof(xfs_agf_t, agf_longest),
offsetof(xfs_agf_t, agf_btreeblks), offsetof(xfs_agf_t, agf_btreeblks),
offsetof(xfs_agf_t, agf_uuid),
sizeof(xfs_agf_t) sizeof(xfs_agf_t)
}; };
trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_); trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
xfs_trans_buf_set_type(tp, bp, XFS_BLF_AGF_BUF);
xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last); xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
} }
...@@ -2143,22 +2148,24 @@ xfs_alloc_put_freelist( ...@@ -2143,22 +2148,24 @@ xfs_alloc_put_freelist(
return 0; return 0;
} }
static void static bool
xfs_agf_verify( xfs_agf_verify(
struct xfs_mount *mp,
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
struct xfs_mount *mp = bp->b_target->bt_mount; struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
struct xfs_agf *agf;
int agf_ok;
agf = XFS_BUF_TO_AGF(bp); if (xfs_sb_version_hascrc(&mp->m_sb) &&
!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_uuid))
return false;
agf_ok = agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) && if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) && XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) && be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) && be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) && be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp); be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
return false;
/* /*
* during growfs operations, the perag is not fully initialised, * during growfs operations, the perag is not fully initialised,
...@@ -2166,33 +2173,58 @@ xfs_agf_verify( ...@@ -2166,33 +2173,58 @@ xfs_agf_verify(
* use it by using uncached buffers that don't have the perag attached * use it by using uncached buffers that don't have the perag attached
* so we can detect and avoid this problem. * so we can detect and avoid this problem.
*/ */
if (bp->b_pag) if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
agf_ok = agf_ok && be32_to_cpu(agf->agf_seqno) == return false;
bp->b_pag->pag_agno;
if (xfs_sb_version_haslazysbcount(&mp->m_sb)) if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <= be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
be32_to_cpu(agf->agf_length); return false;
	return true;
if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
XFS_RANDOM_ALLOC_READ_AGF))) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, agf);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
} }
static void static void
xfs_agf_read_verify( xfs_agf_read_verify(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
xfs_agf_verify(bp); struct xfs_mount *mp = bp->b_target->bt_mount;
int agf_ok = 1;
if (xfs_sb_version_hascrc(&mp->m_sb))
agf_ok = xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
offsetof(struct xfs_agf, agf_crc));
agf_ok = agf_ok && xfs_agf_verify(mp, bp);
if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
XFS_RANDOM_ALLOC_READ_AGF))) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
} }
static void static void
xfs_agf_write_verify( xfs_agf_write_verify(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
xfs_agf_verify(bp); struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_buf_log_item *bip = bp->b_fspriv;
if (!xfs_agf_verify(mp, bp)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
return;
}
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
if (bip)
XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
offsetof(struct xfs_agf, agf_crc));
} }
const struct xfs_buf_ops xfs_agf_buf_ops = { const struct xfs_buf_ops xfs_agf_buf_ops = {
......
...@@ -45,12 +45,14 @@ extern kmem_zone_t *xfs_buf_item_zone; ...@@ -45,12 +45,14 @@ extern kmem_zone_t *xfs_buf_item_zone;
* once the changes have been replayed into the buffer. * once the changes have been replayed into the buffer.
*/ */
#define XFS_BLF_BTREE_BUF (1<<5) #define XFS_BLF_BTREE_BUF (1<<5)
#define XFS_BLF_AGF_BUF (1<<6)
#define XFS_BLF_TYPE_MASK \ #define XFS_BLF_TYPE_MASK \
(XFS_BLF_UDQUOT_BUF | \ (XFS_BLF_UDQUOT_BUF | \
XFS_BLF_PDQUOT_BUF | \ XFS_BLF_PDQUOT_BUF | \
XFS_BLF_GDQUOT_BUF | \ XFS_BLF_GDQUOT_BUF | \
XFS_BLF_BTREE_BUF) XFS_BLF_BTREE_BUF | \
XFS_BLF_AGF_BUF)
#define XFS_BLF_CHUNK 128 #define XFS_BLF_CHUNK 128
#define XFS_BLF_SHIFT 7 #define XFS_BLF_SHIFT 7
......
...@@ -247,6 +247,9 @@ xfs_growfs_data_private( ...@@ -247,6 +247,9 @@ xfs_growfs_data_private(
tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp); tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
agf->agf_freeblks = cpu_to_be32(tmpsize); agf->agf_freeblks = cpu_to_be32(tmpsize);
agf->agf_longest = cpu_to_be32(tmpsize); agf->agf_longest = cpu_to_be32(tmpsize);
if (xfs_sb_version_hascrc(&mp->m_sb))
uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_uuid);
error = xfs_bwrite(bp); error = xfs_bwrite(bp);
xfs_buf_relse(bp); xfs_buf_relse(bp);
if (error) if (error)
......
...@@ -1953,6 +1953,14 @@ xlog_recover_do_reg_buffer( ...@@ -1953,6 +1953,14 @@ xlog_recover_do_reg_buffer(
break; break;
} }
break; break;
case XFS_BLF_AGF_BUF:
if (*(__be32 *)bp->b_addr != cpu_to_be32(XFS_AGF_MAGIC)) {
xfs_warn(mp, "Bad AGF block magic!");
ASSERT(0);
break;
}
bp->b_ops = &xfs_agf_buf_ops;
break;
default: default:
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment