erofs: clean up z_erofs_submit_queue()
A label and several extra variables can be eliminated, which makes the code cleaner.

Link: https://lore.kernel.org/r/20200121064819.139469-1-gaoxiang25@huawei.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
commit 1e4a295567
parent 587a67b777
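The cleanup replaces a goto-driven page walk (the repeat/skippage labels plus force_submit and a bypass counter) with an inner do/while loop over a [cur, end) range and a single bool. Below is a minimal, standalone sketch of that control-flow pattern only; the names (walk_old, walk_new, fetch_page) are hypothetical illustrations, not the erofs code itself.

/*
 * Hypothetical sketch of the control-flow cleanup: a label-based walk with
 * extra bookkeeping variables rewritten as an inner do/while loop.
 * Not erofs code; fetch_page() just pretends odd indices are missing.
 */
#include <stdbool.h>
#include <stdio.h>

static int fetch_page(unsigned int idx)
{
	return (idx % 2 == 0) ? (int)idx : -1;	/* odd indices "absent" */
}

/* before: labels plus a bypass counter, loosely mirroring the removed shape */
static void walk_old(unsigned int first, unsigned int nr)
{
	unsigned int i = 0, bypass = 0;

repeat:
	if (fetch_page(first + i) < 0) {
		++bypass;
		goto skippage;
	}
	printf("old: submit %u\n", first + i);
skippage:
	if (++i < nr)
		goto repeat;

	printf("old: %s\n", bypass < nr ? "submit queue" : "bypass queue");
}

/* after: one cursor range and a bool, no labels or counters needed */
static void walk_new(unsigned int first, unsigned int nr)
{
	unsigned int cur = first, end = first + nr;
	bool bypass = true;

	do {
		if (fetch_page(cur) < 0)
			continue;	/* still advances cur via the loop condition */
		printf("new: submit %u\n", cur);
		bypass = false;
	} while (++cur < end);

	printf("new: %s\n", !bypass ? "submit queue" : "bypass queue");
}

int main(void)
{
	walk_old(8, 4);
	walk_new(8, 4);
	return 0;
}

Note that continue inside a do/while still evaluates the controlling expression, so ++cur runs on the skip path as well, which is what lets the skippage label and the explicit counter disappear.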
@@ -1148,7 +1148,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
-static bool z_erofs_submit_queue(struct super_block *sb,
+static void z_erofs_submit_queue(struct super_block *sb,
 				 z_erofs_next_pcluster_t owned_head,
 				 struct list_head *pagepool,
 				 struct z_erofs_decompressqueue *fgq,
@@ -1157,19 +1157,12 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
-	struct bio *bio;
 	void *bi_private;
 	/* since bio will be NULL, no need to initialize last_index */
 	pgoff_t uninitialized_var(last_index);
-	bool force_submit = false;
-	unsigned int nr_bios;
+	unsigned int nr_bios = 0;
+	struct bio *bio = NULL;
 
-	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
-		return false;
-
-	force_submit = false;
-	bio = NULL;
-	nr_bios = 0;
 	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
 	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
 	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
@@ -1179,11 +1172,9 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 
 	do {
 		struct z_erofs_pcluster *pcl;
-		unsigned int clusterpages;
-		pgoff_t first_index;
-		struct page *page;
-		unsigned int i = 0, bypass = 0;
-		int err;
+		pgoff_t cur, end;
+		unsigned int i = 0;
+		bool bypass = true;
 
 		/* no possible 'owned_head' equals the following */
 		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
@@ -1191,55 +1182,50 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 
 		pcl = container_of(owned_head, struct z_erofs_pcluster, next);
 
-		clusterpages = BIT(pcl->clusterbits);
+		cur = pcl->obj.index;
+		end = cur + BIT(pcl->clusterbits);
 
 		/* close the main owned chain at first */
 		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
 				     Z_EROFS_PCLUSTER_TAIL_CLOSED);
 
-		first_index = pcl->obj.index;
-		force_submit |= (first_index != last_index + 1);
+		do {
+			struct page *page;
+			int err;
 
-repeat:
-		page = pickup_page_for_submission(pcl, i, pagepool,
-						  MNGD_MAPPING(sbi),
-						  GFP_NOFS);
-		if (!page) {
-			force_submit = true;
-			++bypass;
-			goto skippage;
-		}
+			page = pickup_page_for_submission(pcl, i++, pagepool,
+							  MNGD_MAPPING(sbi),
+							  GFP_NOFS);
+			if (!page)
+				continue;
 
-		if (bio && force_submit) {
-submit_bio_retry:
-			submit_bio(bio);
-			bio = NULL;
-		}
+			if (bio && cur != last_index + 1) {
+submit_bio_retry:
+				submit_bio(bio);
+				bio = NULL;
+			}
 
-		if (!bio) {
-			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+			if (!bio) {
+				bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 
-			bio->bi_end_io = z_erofs_decompressqueue_endio;
-			bio_set_dev(bio, sb->s_bdev);
-			bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
-				LOG_SECTORS_PER_BLOCK;
-			bio->bi_private = bi_private;
-			bio->bi_opf = REQ_OP_READ;
-
-			++nr_bios;
-		}
+				bio->bi_end_io = z_erofs_decompressqueue_endio;
+				bio_set_dev(bio, sb->s_bdev);
+				bio->bi_iter.bi_sector = (sector_t)cur <<
+					LOG_SECTORS_PER_BLOCK;
+				bio->bi_private = bi_private;
+				bio->bi_opf = REQ_OP_READ;
+				++nr_bios;
+			}
 
-		err = bio_add_page(bio, page, PAGE_SIZE, 0);
-		if (err < PAGE_SIZE)
-			goto submit_bio_retry;
+			err = bio_add_page(bio, page, PAGE_SIZE, 0);
+			if (err < PAGE_SIZE)
+				goto submit_bio_retry;
 
-		force_submit = false;
-		last_index = first_index + i;
-skippage:
-		if (++i < clusterpages)
-			goto repeat;
+			last_index = cur;
+			bypass = false;
+		} while (++cur < end);
 
-		if (bypass < clusterpages)
+		if (!bypass)
 			qtail[JQ_SUBMIT] = &pcl->next;
 		else
 			move_to_bypass_jobqueue(pcl, qtail, owned_head);
@@ -1254,10 +1240,9 @@ skippage:
 	 */
 	if (!*force_fg && !nr_bios) {
 		kvfree(q[JQ_SUBMIT]);
-		return true;
+		return;
 	}
 	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
-	return true;
 }
 
 static void z_erofs_runqueue(struct super_block *sb,
@@ -1266,9 +1251,9 @@ static void z_erofs_runqueue(struct super_block *sb,
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
-	if (!z_erofs_submit_queue(sb, clt->owned_head,
-				  pagepool, io, &force_fg))
+	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
+	z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
 
 	/* handle bypass queue (no i/o pclusters) immediately */
 	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);