Commit 98dc08ba authored by Eric Biggers

fsverity: stop using PG_error to track error status

As a step towards freeing the PG_error flag for other uses, change ext4
and f2fs to stop using PG_error to track verity errors.  Instead, if a
verity error occurs, just mark the whole bio as failed.  The coarser
granularity isn't really a problem since it isn't any worse than what
the block layer provides, and errors from a multi-page readahead aren't
reported to applications unless a single-page read fails too.

f2fs supports compression, which makes the f2fs changes a bit more
complicated than desired, but the basic premise still works.

Note: there are still a few uses of PageError in f2fs, but they are on
the write path, so they are unrelated and this patch doesn't touch them.

Reviewed-by: Chao Yu <chao@kernel.org>
Acked-by: Jaegeuk Kim <jaegeuk@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20221129070401.156114-1-ebiggers@kernel.org
parent f0c4d9fc
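
The diff below is easiest to read with the core pattern in mind: instead of flagging each failing page with SetPageError() and checking PageError() at I/O completion, a verity failure now fails the whole bio through bio->bi_status. A minimal sketch of that pattern follows (illustrative only; example_verify_bio() is a made-up name, not a function from this patch):

static void example_verify_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		if (!fsverity_verify_page(bv->bv_page)) {
			/* Old approach: SetPageError(bv->bv_page) on just this page. */
			/* New approach: fail the whole bio and stop verifying. */
			bio->bi_status = BLK_STS_IOERR;
			break;
		}
	}
	/* The read completion path then tests only bio->bi_status. */
}

The f2fs_verify_bio() and fsverity_verify_bio() hunks below follow exactly this shape, and the completion paths (ext4's __read_end_io() and f2fs's f2fs_finish_read_bio() and f2fs_decompress_end_io()) now test only bio->bi_status rather than per-page PG_error.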
@@ -75,14 +75,10 @@ static void __read_end_io(struct bio *bio)
 	bio_for_each_segment_all(bv, bio, iter_all) {
 		page = bv->bv_page;
 
-		/* PG_error was set if verity failed. */
-		if (bio->bi_status || PageError(page)) {
+		if (bio->bi_status)
 			ClearPageUptodate(page);
-			/* will re-read again later */
-			ClearPageError(page);
-		} else {
+		else
 			SetPageUptodate(page);
-		}
 		unlock_page(page);
 	}
 	if (bio->bi_private)
@@ -1711,50 +1711,27 @@ static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
 	}
 }
 
-/*
- * Update and unlock the cluster's pagecache pages, and release the reference to
- * the decompress_io_ctx that was being held for I/O completion.
- */
-static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
-				bool in_task)
+static void f2fs_verify_cluster(struct work_struct *work)
 {
+	struct decompress_io_ctx *dic =
+		container_of(work, struct decompress_io_ctx, verity_work);
 	int i;
 
+	/* Verify, update, and unlock the decompressed pages. */
 	for (i = 0; i < dic->cluster_size; i++) {
 		struct page *rpage = dic->rpages[i];
 
 		if (!rpage)
 			continue;
 
-		/* PG_error was set if verity failed. */
-		if (failed || PageError(rpage)) {
-			ClearPageUptodate(rpage);
-			/* will re-read again later */
-			ClearPageError(rpage);
-		} else {
+		if (fsverity_verify_page(rpage))
 			SetPageUptodate(rpage);
-		}
+		else
+			ClearPageUptodate(rpage);
 		unlock_page(rpage);
 	}
 
-	f2fs_put_dic(dic, in_task);
-}
-
-static void f2fs_verify_cluster(struct work_struct *work)
-{
-	struct decompress_io_ctx *dic =
-		container_of(work, struct decompress_io_ctx, verity_work);
-	int i;
-
-	/* Verify the cluster's decompressed pages with fs-verity. */
-	for (i = 0; i < dic->cluster_size; i++) {
-		struct page *rpage = dic->rpages[i];
-
-		if (rpage && !fsverity_verify_page(rpage))
-			SetPageError(rpage);
-	}
-
-	__f2fs_decompress_end_io(dic, false, true);
+	f2fs_put_dic(dic, true);
 }
 
 /*
@@ -1764,6 +1741,8 @@ static void f2fs_verify_cluster(struct work_struct *work)
 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
 				bool in_task)
 {
+	int i;
+
 	if (!failed && dic->need_verity) {
 		/*
 		 * Note that to avoid deadlocks, the verity work can't be done
@@ -1773,9 +1752,28 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
 		 */
 		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
 		fsverity_enqueue_verify_work(&dic->verity_work);
-	} else {
-		__f2fs_decompress_end_io(dic, failed, in_task);
+		return;
 	}
+
+	/* Update and unlock the cluster's pagecache pages. */
+	for (i = 0; i < dic->cluster_size; i++) {
+		struct page *rpage = dic->rpages[i];
+
+		if (!rpage)
+			continue;
+
+		if (failed)
+			ClearPageUptodate(rpage);
+		else
+			SetPageUptodate(rpage);
+		unlock_page(rpage);
+	}
+
+	/*
+	 * Release the reference to the decompress_io_ctx that was being held
+	 * for I/O completion.
+	 */
+	f2fs_put_dic(dic, in_task);
 }
 
 /*
@@ -116,43 +116,56 @@ struct bio_post_read_ctx {
 	struct f2fs_sb_info *sbi;
 	struct work_struct work;
 	unsigned int enabled_steps;
+	/*
+	 * decompression_attempted keeps track of whether
+	 * f2fs_end_read_compressed_page() has been called on the pages in the
+	 * bio that belong to a compressed cluster yet.
+	 */
+	bool decompression_attempted;
 	block_t fs_blkaddr;
 };
 
+/*
+ * Update and unlock a bio's pages, and free the bio.
+ *
+ * This marks pages up-to-date only if there was no error in the bio (I/O error,
+ * decryption error, or verity error), as indicated by bio->bi_status.
+ *
+ * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk)
+ * aren't marked up-to-date here, as decompression is done on a per-compression-
+ * cluster basis rather than a per-bio basis.  Instead, we only must do two
+ * things for each compressed page here: call f2fs_end_read_compressed_page()
+ * with failed=true if an error occurred before it would have normally gotten
+ * called (i.e., I/O error or decryption error, but *not* verity error), and
+ * release the bio's reference to the decompress_io_ctx of the page's cluster.
+ */
 static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
 {
 	struct bio_vec *bv;
 	struct bvec_iter_all iter_all;
+	struct bio_post_read_ctx *ctx = bio->bi_private;
 
-	/*
-	 * Update and unlock the bio's pagecache pages, and put the
-	 * decompression context for any compressed pages.
-	 */
 	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
 
 		if (f2fs_is_compressed_page(page)) {
-			if (bio->bi_status)
+			if (ctx && !ctx->decompression_attempted)
 				f2fs_end_read_compressed_page(page, true, 0,
 							in_task);
 			f2fs_put_page_dic(page, in_task);
 			continue;
 		}
 
-		/* PG_error was set if verity failed. */
-		if (bio->bi_status || PageError(page)) {
+		if (bio->bi_status)
 			ClearPageUptodate(page);
-			/* will re-read again later */
-			ClearPageError(page);
-		} else {
+		else
 			SetPageUptodate(page);
-		}
 		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
 		unlock_page(page);
 	}
 
-	if (bio->bi_private)
-		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
+	if (ctx)
+		mempool_free(ctx, bio_post_read_ctx_pool);
 	bio_put(bio);
 }
 
@@ -185,8 +198,10 @@ static void f2fs_verify_bio(struct work_struct *work)
 			struct page *page = bv->bv_page;
 
 			if (!f2fs_is_compressed_page(page) &&
-			    !fsverity_verify_page(page))
-				SetPageError(page);
+			    !fsverity_verify_page(page)) {
+				bio->bi_status = BLK_STS_IOERR;
+				break;
+			}
 		}
 	} else {
 		fsverity_verify_bio(bio);
@@ -245,6 +260,8 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
 		blkaddr++;
 	}
 
+	ctx->decompression_attempted = true;
+
 	/*
 	 * Optimization: if all the bio's pages are compressed, then scheduling
 	 * the per-bio verity work is unnecessary, as verity will be fully
@@ -1062,6 +1079,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 		ctx->sbi = sbi;
 		ctx->enabled_steps = post_read_steps;
 		ctx->fs_blkaddr = blkaddr;
+		ctx->decompression_attempted = false;
 		bio->bi_private = ctx;
 	}
 	iostat_alloc_and_bind_ctx(sbi, bio, ctx);
@@ -1089,7 +1107,6 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
 		bio_put(bio);
 		return -EFAULT;
 	}
-	ClearPageError(page);
 	inc_page_count(sbi, F2FS_RD_DATA);
 	f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
 	__submit_bio(sbi, bio, DATA);
@@ -2141,7 +2158,6 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
 	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
 	f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
 							F2FS_BLKSIZE);
-	ClearPageError(page);
 	*last_block_in_bio = block_nr;
 	goto out;
 out:
@@ -2289,7 +2305,6 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 
 		inc_page_count(sbi, F2FS_RD_DATA);
 		f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
-		ClearPageError(page);
 		*last_block_in_bio = blkaddr;
 	}
 
@@ -2306,7 +2321,6 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 	for (i = 0; i < cc->cluster_size; i++) {
 		if (cc->rpages[i]) {
 			ClearPageUptodate(cc->rpages[i]);
-			ClearPageError(cc->rpages[i]);
 			unlock_page(cc->rpages[i]);
 		}
 	}
@@ -2403,7 +2417,6 @@ static int f2fs_mpage_readpages(struct inode *inode,
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 set_error_page:
 #endif
-			SetPageError(page);
 			zero_user_segment(page, 0, PAGE_SIZE);
 			unlock_page(page);
 		}
@@ -200,9 +200,8 @@ EXPORT_SYMBOL_GPL(fsverity_verify_page);
  * @bio: the bio to verify
  *
  * Verify a set of pages that have just been read from a verity file.  The pages
- * must be pagecache pages that are still locked and not yet uptodate.  Pages
- * that fail verification are set to the Error state.  Verification is skipped
- * for pages already in the Error state, e.g. due to fscrypt decryption failure.
+ * must be pagecache pages that are still locked and not yet uptodate.  If a
+ * page fails verification, then bio->bi_status is set to an error status.
  *
  * This is a helper function for use by the ->readahead() method of filesystems
  * that issue bios to read data directly into the page cache.  Filesystems that
@@ -244,9 +243,10 @@ void fsverity_verify_bio(struct bio *bio)
 		unsigned long level0_ra_pages =
 			min(max_ra_pages, params->level0_blocks - level0_index);
 
-		if (!PageError(page) &&
-		    !verify_page(inode, vi, req, page, level0_ra_pages))
-			SetPageError(page);
+		if (!verify_page(inode, vi, req, page, level0_ra_pages)) {
+			bio->bi_status = BLK_STS_IOERR;
+			break;
+		}
 	}
 
 	fsverity_free_hash_request(params->hash_alg, req);