erofs: avoid unnecessary z_erofs_decompressqueue_work() declaration
Just code rearrange. No logic changes.

Link: https://lore.kernel.org/r/20220121091412.86086-1-hsiangkao@linux.alibaba.com
Reviewed-by: Yue Hu <huyue2@yulong.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
commit 7865827c43
parent e33f42b20b
fs/erofs/zdata.c | 113 lines changed (56 additions, 57 deletions)
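The rearrangement below relies on a basic C rule: a static function needs a forward declaration only if it is called before its definition appears in the file. Moving z_erofs_decompress_kickoff() and z_erofs_decompressqueue_endio() below z_erofs_decompressqueue_work() therefore lets the declaration on the first removed line go away; the two + hunks re-add the function bodies verbatim at their new positions, with no behavioral change, as the commit message states. A minimal standalone sketch of the same pattern (hypothetical names, not taken from zdata.c):

/* sketch.c: illustrative only; function names are hypothetical, not from fs/erofs */
#include <stdio.h>

/* Defining the worker first means callers below it need no forward
 * declaration such as "static void worker(int);". */
static void worker(int value)
{
	printf("processing %d\n", value);
}

/* The caller is defined after the callee, mirroring how
 * z_erofs_decompress_kickoff() was moved below
 * z_erofs_decompressqueue_work() in this commit. */
static void kickoff(int value)
{
	worker(value);
}

int main(void)
{
	kickoff(42);
	return 0;
}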
@@ -810,68 +810,11 @@ static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
 	return false;
 }
 
-static void z_erofs_decompressqueue_work(struct work_struct *work);
-static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
-				       bool sync, int bios)
-{
-	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
-
-	/* wake up the caller thread for sync decompression */
-	if (sync) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&io->u.wait.lock, flags);
-		if (!atomic_add_return(bios, &io->pending_bios))
-			wake_up_locked(&io->u.wait);
-		spin_unlock_irqrestore(&io->u.wait.lock, flags);
-		return;
-	}
-
-	if (atomic_add_return(bios, &io->pending_bios))
-		return;
-	/* Use workqueue and sync decompression for atomic contexts only */
-	if (in_atomic() || irqs_disabled()) {
-		queue_work(z_erofs_workqueue, &io->u.work);
-		/* enable sync decompression for readahead */
-		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
-			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
-		return;
-	}
-	z_erofs_decompressqueue_work(&io->u.work);
-}
-
 static bool z_erofs_page_is_invalidated(struct page *page)
 {
 	return !page->mapping && !z_erofs_is_shortlived_page(page);
 }
 
-static void z_erofs_decompressqueue_endio(struct bio *bio)
-{
-	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
-	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
-	blk_status_t err = bio->bi_status;
-	struct bio_vec *bvec;
-	struct bvec_iter_all iter_all;
-
-	bio_for_each_segment_all(bvec, bio, iter_all) {
-		struct page *page = bvec->bv_page;
-
-		DBG_BUGON(PageUptodate(page));
-		DBG_BUGON(z_erofs_page_is_invalidated(page));
-
-		if (err)
-			SetPageError(page);
-
-		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
-			if (!err)
-				SetPageUptodate(page);
-			unlock_page(page);
-		}
-	}
-	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
-	bio_put(bio);
-}
-
 static int z_erofs_decompress_pcluster(struct super_block *sb,
 				       struct z_erofs_pcluster *pcl,
 				       struct page **pagepool)
@@ -1123,6 +1066,35 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
 	kvfree(bgq);
 }
 
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+				       bool sync, int bios)
+{
+	struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
+
+	/* wake up the caller thread for sync decompression */
+	if (sync) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&io->u.wait.lock, flags);
+		if (!atomic_add_return(bios, &io->pending_bios))
+			wake_up_locked(&io->u.wait);
+		spin_unlock_irqrestore(&io->u.wait.lock, flags);
+		return;
+	}
+
+	if (atomic_add_return(bios, &io->pending_bios))
+		return;
+	/* Use workqueue and sync decompression for atomic contexts only */
+	if (in_atomic() || irqs_disabled()) {
+		queue_work(z_erofs_workqueue, &io->u.work);
+		/* enable sync decompression for readahead */
+		if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
+			sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
+		return;
+	}
+	z_erofs_decompressqueue_work(&io->u.work);
+}
+
 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 					       unsigned int nr,
 					       struct page **pagepool,
@@ -1300,6 +1272,33 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
+static void z_erofs_decompressqueue_endio(struct bio *bio)
+{
+	tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+	struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
+	blk_status_t err = bio->bi_status;
+	struct bio_vec *bvec;
+	struct bvec_iter_all iter_all;
+
+	bio_for_each_segment_all(bvec, bio, iter_all) {
+		struct page *page = bvec->bv_page;
+
+		DBG_BUGON(PageUptodate(page));
+		DBG_BUGON(z_erofs_page_is_invalidated(page));
+
+		if (err)
+			SetPageError(page);
+
+		if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
+			if (!err)
+				SetPageUptodate(page);
+			unlock_page(page);
+		}
+	}
+	z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
+	bio_put(bio);
+}
+
 static void z_erofs_submit_queue(struct super_block *sb,
 				 struct z_erofs_decompress_frontend *f,
 				 struct page **pagepool,