Commit b0bc0cb8 authored by Linus Torvalds

Merge tag 'erofs-for-5.17-rc3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs fixes from Gao Xiang:
 "Two fixes related to fsdax cleanup in this cycle and ztailpacking to
  fix small compressed data inlining. There is also a trivial cleanup to
  rearrange code for better reading.

  Summary:

   - fix fsdax partition offset misbehavior

   - clean up z_erofs_decompressqueue_work() declaration

   - fix up EOF lcluster inlining, especially for small compressed data"

* tag 'erofs-for-5.17-rc3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: fix small compressed files inlining
  erofs: avoid unnecessary z_erofs_decompressqueue_work() declaration
  erofs: fix fsdax partition offset handling
parents 7c4a9459 24331050
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -252,12 +252,10 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 		return ret;
 
 	iomap->offset = map.m_la;
-	if (flags & IOMAP_DAX) {
+	if (flags & IOMAP_DAX)
 		iomap->dax_dev = mdev.m_daxdev;
-		iomap->offset += mdev.m_dax_part_off;
-	} else {
+	else
 		iomap->bdev = mdev.m_bdev;
-	}
 	iomap->length = map.m_llen;
 	iomap->flags = 0;
 	iomap->private = NULL;
@@ -284,6 +282,8 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 	} else {
 		iomap->type = IOMAP_MAPPED;
 		iomap->addr = mdev.m_pa;
+		if (flags & IOMAP_DAX)
+			iomap->addr += mdev.m_dax_part_off;
 	}
 	return 0;
 }
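The fsdax fix above hinges on where the partition offset is applied: mdev.m_dax_part_off is the partition's byte offset inside the whole DAX device, so it belongs on iomap->addr (the on-media position) rather than iomap->offset (the file-logical position); biasing the latter shifts the data the mapping exposes. A minimal user-space sketch of the distinction, with hypothetical names and values (struct extent, PART_START), not the kernel code itself:

#include <stdio.h>

/* Hypothetical layout: the partition starts 1 MiB into the DAX device. */
#define PART_START (1UL << 20)

struct extent {
	unsigned long m_la;	/* logical (file) offset of the extent */
	unsigned long m_pa;	/* physical offset, partition-relative */
};

int main(void)
{
	struct extent ext = { .m_la = 0, .m_pa = 4096 };
	unsigned long offset, addr;

	/* Buggy variant: biasing the logical offset shifts the file view,
	 * so byte 0 of the file no longer matches this extent. */
	offset = ext.m_la + PART_START;
	addr = ext.m_pa;
	printf("buggy: offset=%#lx addr=%#lx\n", offset, addr);

	/* Fixed variant: the file offset stays file-relative and the
	 * partition bias lands on the media address instead. */
	offset = ext.m_la;
	addr = ext.m_pa + PART_START;
	printf("fixed: offset=%#lx addr=%#lx\n", offset, addr);
	return 0;
}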
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -810,68 +810,11 @@ static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
 	return false;
 }
 
-static void z_erofs_decompressqueue_work(struct work_struct *work);
-static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
-				       bool sync, int bios)
-{
-	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
-
-	/* wake up the caller thread for sync decompression */
-	if (sync) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&io->u.wait.lock, flags);
-		if (!atomic_add_return(bios, &io->pending_bios))
-			wake_up_locked(&io->u.wait);
-		spin_unlock_irqrestore(&io->u.wait.lock, flags);
-		return;
-	}
-
-	if (atomic_add_return(bios, &io->pending_bios))
-		return;
-	/* Use workqueue and sync decompression for atomic contexts only */
-	if (in_atomic() || irqs_disabled()) {
-		queue_work(z_erofs_workqueue, &io->u.work);
-		/* enable sync decompression for readahead */
-		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
-			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
-		return;
-	}
-	z_erofs_decompressqueue_work(&io->u.work);
-}
-
 static bool z_erofs_page_is_invalidated(struct page *page)
 {
 	return !page->mapping && !z_erofs_is_shortlived_page(page);
 }
 
-static void z_erofs_decompressqueue_endio(struct bio *bio)
-{
-	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
-	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
-	blk_status_t err = bio->bi_status;
-	struct bio_vec *bvec;
-	struct bvec_iter_all iter_all;
-
-	bio_for_each_segment_all(bvec, bio, iter_all) {
-		struct page *page = bvec->bv_page;
-
-		DBG_BUGON(PageUptodate(page));
-		DBG_BUGON(z_erofs_page_is_invalidated(page));
-
-		if (err)
-			SetPageError(page);
-
-		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
-			if (!err)
-				SetPageUptodate(page);
-			unlock_page(page);
-		}
-	}
-	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
-	bio_put(bio);
-}
-
 static int z_erofs_decompress_pcluster(struct super_block *sb,
 				       struct z_erofs_pcluster *pcl,
 				       struct page **pagepool)
@@ -1123,6 +1066,35 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
 	kvfree(bgq);
 }
 
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+				       bool sync, int bios)
+{
+	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
+
+	/* wake up the caller thread for sync decompression */
+	if (sync) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&io->u.wait.lock, flags);
+		if (!atomic_add_return(bios, &io->pending_bios))
+			wake_up_locked(&io->u.wait);
+		spin_unlock_irqrestore(&io->u.wait.lock, flags);
+		return;
+	}
+
+	if (atomic_add_return(bios, &io->pending_bios))
+		return;
+	/* Use workqueue and sync decompression for atomic contexts only */
+	if (in_atomic() || irqs_disabled()) {
+		queue_work(z_erofs_workqueue, &io->u.work);
+		/* enable sync decompression for readahead */
+		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
+			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
+		return;
+	}
+	z_erofs_decompressqueue_work(&io->u.work);
+}
+
 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 					       unsigned int nr,
 					       struct page **pagepool,
@@ -1300,6 +1272,33 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
+static void z_erofs_decompressqueue_endio(struct bio *bio)
+{
+	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
+	blk_status_t err = bio->bi_status;
+	struct bio_vec *bvec;
+	struct bvec_iter_all iter_all;
+
+	bio_for_each_segment_all(bvec, bio, iter_all) {
+		struct page *page = bvec->bv_page;
+
+		DBG_BUGON(PageUptodate(page));
+		DBG_BUGON(z_erofs_page_is_invalidated(page));
+
+		if (err)
+			SetPageError(page);
+
+		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
+			if (!err)
+				SetPageUptodate(page);
+			unlock_page(page);
+		}
+	}
+	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
+	bio_put(bio);
+}
+
 static void z_erofs_submit_queue(struct super_block *sb,
 				 struct z_erofs_decompress_frontend *f,
 				 struct page **pagepool,
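The zdata.c hunks are pure code movement: defining z_erofs_decompress_kickoff() and z_erofs_decompressqueue_endio() after z_erofs_decompressqueue_work() lets the forward declaration go away. The kickoff logic itself is a last-reference pattern: every in-flight bio is counted in pending_bios, and only the completion that drops the count to zero schedules decompression. A rough user-space sketch of that counting scheme using C11 atomics (the type and function names here are stand-ins, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

struct decompress_queue {
	atomic_int pending_bios;	/* in-flight I/O count */
};

/* Stand-in for kicking off the real decompression work. */
static void run_decompression(struct decompress_queue *q)
{
	(void)q;
	printf("all bios finished, decompressing\n");
}

/*
 * Called with bios = +1 when an I/O is issued and -1 when one completes;
 * only the call that brings the count to zero proceeds, mirroring the
 * atomic_add_return() test in the kernel code above.
 */
static void kickoff(struct decompress_queue *q, int bios)
{
	if (atomic_fetch_add(&q->pending_bios, bios) + bios == 0)
		run_decompression(q);
}

int main(void)
{
	struct decompress_queue q = { .pending_bios = 0 };

	kickoff(&q, 1);		/* submit first bio */
	kickoff(&q, 1);		/* submit second bio */
	kickoff(&q, -1);	/* first completion: count still 1 */
	kickoff(&q, -1);	/* last completion: count hits 0, run */
	return 0;
}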
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -630,6 +630,13 @@ static int z_erofs_do_map_blocks(struct inode *inode,
 		if (endoff >= m.clusterofs) {
 			m.headtype = m.type;
 			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
+			/*
+			 * For ztailpacking files, in order to inline data more
+			 * effectively, special EOF lclusters are now supported
+			 * which can have three parts at most.
+			 */
+			if (ztailpacking && end > inode->i_size)
+				end = inode->i_size;
 			break;
 		}
 		/* m.lcn should be >= 1 if endoff < m.clusterofs */
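The zmap.c fix clamps the extent end for ztailpacking (inline-tail) files: as the added comment notes, special EOF lclusters can have up to three parts, and without the clamp the reported extent could run past i_size. A toy illustration of the same clamp with made-up values (the variable names mirror the hunk, the numbers and the end computation are invented):

#include <stdio.h>

int main(void)
{
	unsigned int lclusterbits = 12;		/* 4 KiB logical clusters */
	unsigned long long i_size = 10000;	/* file is 10000 bytes long */
	unsigned long long lcn = 2;		/* EOF lcluster number */
	int ztailpacking = 1;			/* tail data is inlined */

	/* Without the clamp, the EOF lcluster maps the whole block
	 * [8192, 12288), overshooting the 10000-byte file. */
	unsigned long long end = (lcn + 1) << lclusterbits;

	if (ztailpacking && end > i_size)
		end = i_size;			/* extent becomes [8192, 10000) */

	printf("extent end = %llu\n", end);
	return 0;
}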