block: Generalize chunk_sectors support as boundary support

The purpose of the chunk_sectors limit is to ensure that a mergeable request
fits within the boundary of the chunk_sectors value.

Such a feature will be useful for other request_queue boundary limits, so
generalize the chunk_sectors merge code.

This idea was proposed by Hannes Reinecke.

Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Acked-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20240620125359.2684798-3-john.g.garry@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
John Garry 2024-06-20 12:53:51 +00:00 committed by Jens Axboe
parent 8d1dfd51c8
commit f70167a7a6
3 changed files with 22 additions and 13 deletions

View File

@ -154,6 +154,11 @@ static struct bio *bio_split_write_zeroes(struct bio *bio,
return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
}
/*
 * Return the request_queue boundary limit in sectors, i.e. the granule that a
 * single request must not straddle. Currently chunk_sectors is the only such
 * boundary limit, so this simply forwards it; callers use the generic name so
 * additional boundary limits can be folded in here later.
 */
static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim)
{
return lim->chunk_sectors;
}
/*
* Return the maximum number of sectors from the start of a bio that may be
* submitted as a single request to a block device. If enough sectors remain,
@ -167,12 +172,13 @@ static inline unsigned get_max_io_size(struct bio *bio,
{
unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
unsigned boundary_sectors = blk_boundary_sectors(lim);
unsigned max_sectors = lim->max_sectors, start, end;
if (lim->chunk_sectors) {
if (boundary_sectors) {
max_sectors = min(max_sectors,
blk_chunk_sectors_left(bio->bi_iter.bi_sector,
lim->chunk_sectors));
blk_boundary_sectors_left(bio->bi_iter.bi_sector,
boundary_sectors));
}
start = bio->bi_iter.bi_sector & (pbs - 1);
@ -588,19 +594,21 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
sector_t offset)
{
struct request_queue *q = rq->q;
unsigned int max_sectors;
struct queue_limits *lim = &q->limits;
unsigned int max_sectors, boundary_sectors;
if (blk_rq_is_passthrough(rq))
return q->limits.max_hw_sectors;
boundary_sectors = blk_boundary_sectors(lim);
max_sectors = blk_queue_get_max_sectors(rq);
if (!q->limits.chunk_sectors ||
if (!boundary_sectors ||
req_op(rq) == REQ_OP_DISCARD ||
req_op(rq) == REQ_OP_SECURE_ERASE)
return max_sectors;
return min(max_sectors,
blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
blk_boundary_sectors_left(offset, boundary_sectors));
}
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,

View File

@ -1188,7 +1188,7 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
return len;
return min_t(sector_t, len,
min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
blk_chunk_sectors_left(target_offset, max_granularity)));
blk_boundary_sectors_left(target_offset, max_granularity)));
}
static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)

View File

@ -907,14 +907,15 @@ static inline bool bio_straddles_zones(struct bio *bio)
}
/*
* Return how much of the chunk is left to be used for I/O at a given offset.
* Return how much within the boundary is left to be used for I/O at a given
* offset.
*/
static inline unsigned int blk_chunk_sectors_left(sector_t offset,
unsigned int chunk_sectors)
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
unsigned int boundary_sectors)
{
if (unlikely(!is_power_of_2(chunk_sectors)))
return chunk_sectors - sector_div(offset, chunk_sectors);
return chunk_sectors - (offset & (chunk_sectors - 1));
if (unlikely(!is_power_of_2(boundary_sectors)))
return boundary_sectors - sector_div(offset, boundary_sectors);
return boundary_sectors - (offset & (boundary_sectors - 1));
}
/**