Mirror of https://mirrors.bfsu.edu.cn/git/linux.git
zram: refactor highlevel read and write handling
Instead of having an outer loop in __zram_make_request and then branching out for reads vs. writes for each loop iteration in zram_bvec_rw, split the main handler into separate zram_bio_read and zram_bio_write handlers that also include the functionality formerly in zram_bvec_rw.

Link: https://lkml.kernel.org/r/20230411171459.567614-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 57de7bd830
commit 82ca875d25
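For orientation, here is a condensed sketch of the read side that results from this refactor, pieced together from the diff below; I/O accounting and the failed_reads statistics bump are trimmed, so this is not the verbatim kernel code. zram_bio_write() mirrors it, calling zram_bvec_write() and counting failed_writes instead:

/* Condensed sketch only, simplified from the diff below; not the verbatim kernel code. */
static void zram_bio_read(struct zram *zram, struct bio *bio)
{
        struct bvec_iter iter;
        struct bio_vec bv;

        /* Walk the bio one segment at a time, handling each page slot in turn. */
        bio_for_each_segment(bv, bio, iter) {
                u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
                u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

                if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
                        bio->bi_status = BLK_STS_IOERR;
                        break;
                }
                flush_dcache_page(bv.bv_page);

                zram_slot_lock(zram, index);
                zram_accessed(zram, index);
                zram_slot_unlock(zram, index);
        }
        bio_endio(bio);
}

The final hunk below shows zram_submit_bio() dispatching REQ_OP_READ and REQ_OP_WRITE straight to these two handlers.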
@@ -1921,38 +1921,7 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
         bio_endio(bio);
 }
 
-/*
- * Returns errno if it has some problem. Otherwise return 0 or 1.
- * Returns 0 if IO request was done synchronously
- * Returns 1 if IO request was successfully submitted.
- */
-static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-                        int offset, enum req_op op, struct bio *bio)
-{
-        int ret;
-
-        if (!op_is_write(op)) {
-                ret = zram_bvec_read(zram, bvec, index, offset, bio);
-                if (unlikely(ret < 0)) {
-                        atomic64_inc(&zram->stats.failed_reads);
-                        return ret;
-                }
-                flush_dcache_page(bvec->bv_page);
-        } else {
-                ret = zram_bvec_write(zram, bvec, index, offset, bio);
-                if (unlikely(ret < 0)) {
-                        atomic64_inc(&zram->stats.failed_writes);
-                        return ret;
-                }
-        }
-
-        zram_slot_lock(zram, index);
-        zram_accessed(zram, index);
-        zram_slot_unlock(zram, index);
-        return 0;
-}
-
-static void __zram_make_request(struct zram *zram, struct bio *bio)
+static void zram_bio_read(struct zram *zram, struct bio *bio)
 {
         struct bvec_iter iter;
         struct bio_vec bv;
@@ -1964,11 +1933,42 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
                 u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
                                 SECTOR_SHIFT;
 
-                if (zram_bvec_rw(zram, &bv, index, offset, bio_op(bio),
-                                 bio) < 0) {
+                if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
+                        atomic64_inc(&zram->stats.failed_reads);
                         bio->bi_status = BLK_STS_IOERR;
                         break;
                 }
+                flush_dcache_page(bv.bv_page);
+
+                zram_slot_lock(zram, index);
+                zram_accessed(zram, index);
+                zram_slot_unlock(zram, index);
+        }
+        bio_end_io_acct(bio, start_time);
+        bio_endio(bio);
+}
+
+static void zram_bio_write(struct zram *zram, struct bio *bio)
+{
+        struct bvec_iter iter;
+        struct bio_vec bv;
+        unsigned long start_time;
+
+        start_time = bio_start_io_acct(bio);
+        bio_for_each_segment(bv, bio, iter) {
+                u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+                u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
+                                SECTOR_SHIFT;
+
+                if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
+                        atomic64_inc(&zram->stats.failed_writes);
+                        bio->bi_status = BLK_STS_IOERR;
+                        break;
+                }
+
+                zram_slot_lock(zram, index);
+                zram_accessed(zram, index);
+                zram_slot_unlock(zram, index);
         }
         bio_end_io_acct(bio, start_time);
         bio_endio(bio);
@@ -1983,8 +1983,10 @@ static void zram_submit_bio(struct bio *bio)
 
         switch (bio_op(bio)) {
         case REQ_OP_READ:
+                zram_bio_read(zram, bio);
+                break;
         case REQ_OP_WRITE:
-                __zram_make_request(zram, bio);
+                zram_bio_write(zram, bio);
                 break;
         case REQ_OP_DISCARD:
         case REQ_OP_WRITE_ZEROES:
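As a standalone aside (not part of the patch): both new handlers repeat the same arithmetic to map a bio sector to a zram page index and intra-page byte offset. The user-space sketch below illustrates that calculation, assuming 512-byte sectors (SECTOR_SHIFT = 9) and 4 KiB pages, which gives SECTORS_PER_PAGE_SHIFT = 3:

#include <stdio.h>

/* Assumed constants for illustration only: 512-byte sectors and 4 KiB pages. */
#define SECTOR_SHIFT            9
#define PAGE_SHIFT              12
#define SECTORS_PER_PAGE_SHIFT  (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE        (1 << SECTORS_PER_PAGE_SHIFT)

int main(void)
{
        unsigned long long sector = 9;  /* example starting sector of a segment */

        /* Same calculation as the bio_for_each_segment() loops above. */
        unsigned int index  = sector >> SECTORS_PER_PAGE_SHIFT;
        unsigned int offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        printf("sector %llu -> page index %u, byte offset %u\n", sector, index, offset);
        return 0;
}

With these assumptions, sector 9 maps to page index 1 at byte offset 512.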