
block: cache inode size in bdev

Reading the inode size pulls in an extra cacheline during IO submit, and
it is checked in the hot path for every single IO. When doing millions
of IOs per core per second, this is noticeable overhead.

Cache the nr_sectors in the bdev itself.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Jens Axboe <axboe@kernel.dk>
Date:   2021-10-18 11:39:45 -06:00
Commit: f09313c57a
Parent: e4ae4735f7
4 changed files with 9 additions and 6 deletions
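
As context for the diffs below, here is a minimal, self-contained userspace sketch (not kernel code) of the pattern the patch applies: the resize path updates the byte size and the cached sector count together under a lock, while the hot submission path does a plain read of the cached field. All names here (fake_bdev, fake_set_capacity, fake_bdev_nr_sectors) are invented for illustration and are not kernel APIs.

/*
 * Userspace model of the pattern in this commit, for illustration only:
 * the resize path keeps a byte-sized "inode size" and a cached sector
 * count in sync under a lock, while the hot path reads only the cache.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

struct fake_bdev {
	pthread_mutex_t size_lock;   /* stands in for bd_size_lock */
	long long inode_bytes;       /* stands in for the inode i_size */
	uint64_t nr_sectors;         /* stands in for bd_nr_sectors */
};

/* Resize path: update both representations under the lock. */
static void fake_set_capacity(struct fake_bdev *bdev, uint64_t sectors)
{
	pthread_mutex_lock(&bdev->size_lock);
	bdev->inode_bytes = (long long)sectors << SECTOR_SHIFT;
	bdev->nr_sectors = sectors;
	pthread_mutex_unlock(&bdev->size_lock);
}

/* Hot path: a plain load of the cached field, no inode access. */
static uint64_t fake_bdev_nr_sectors(const struct fake_bdev *bdev)
{
	return bdev->nr_sectors;
}

int main(void)
{
	struct fake_bdev bdev = { .size_lock = PTHREAD_MUTEX_INITIALIZER };

	fake_set_capacity(&bdev, 2097152);  /* 1 GiB worth of 512-byte sectors */
	printf("sectors=%llu bytes=%lld\n",
	       (unsigned long long)fake_bdev_nr_sectors(&bdev),
	       bdev.inode_bytes);
	return 0;
}

Build with something like cc -std=c11 sketch.c -lpthread; the point is only that the cheap read path never touches the lock or the byte-sized copy.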


@@ -58,6 +58,7 @@ void set_capacity(struct gendisk *disk, sector_t sectors)
 	spin_lock(&bdev->bd_size_lock);
 	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
+	bdev->bd_nr_sectors = sectors;
 	spin_unlock(&bdev->bd_size_lock);
 }
 EXPORT_SYMBOL(set_capacity);


@@ -91,6 +91,7 @@ static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
 {
 	spin_lock(&bdev->bd_size_lock);
 	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
+	bdev->bd_nr_sectors = sectors;
 	spin_unlock(&bdev->bd_size_lock);
 }


@@ -39,6 +39,7 @@ struct bio_crypt_ctx;
 struct block_device {
 	sector_t		bd_start_sect;
+	sector_t		bd_nr_sectors;
 	struct disk_stats __percpu *bd_stats;
 	unsigned long		bd_stamp;
 	bool			bd_read_only;	/* read-only policy */
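
The new field sits directly after bd_start_sect, so the cached size is likely to share a cache line with data already touched per IO. A hedged, userspace-only sketch of how such an intent could be documented follows; the struct layout and the 64-byte line size are assumptions for this sketch, not taken from the kernel headers.

/*
 * Illustration only, not from the patch: a static assertion is one way
 * to document that per-IO hot fields should stay on one cache line.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct bdev_hot_fields {
	uint64_t bd_start_sect;
	uint64_t bd_nr_sectors;	/* cached copy of the size, read per IO */
};

static_assert(offsetof(struct bdev_hot_fields, bd_nr_sectors) +
	      sizeof(uint64_t) <= 64,
	      "per-IO fields should fit in one cache line");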


@@ -236,14 +236,14 @@ static inline sector_t get_start_sect(struct block_device *bdev)
 	return bdev->bd_start_sect;
 }
 
-static inline loff_t bdev_nr_bytes(struct block_device *bdev)
-{
-	return i_size_read(bdev->bd_inode);
-}
-
 static inline sector_t bdev_nr_sectors(struct block_device *bdev)
 {
-	return bdev_nr_bytes(bdev) >> SECTOR_SHIFT;
+	return bdev->bd_nr_sectors;
+}
+
+static inline loff_t bdev_nr_bytes(struct block_device *bdev)
+{
+	return bdev_nr_sectors(bdev) << SECTOR_SHIFT;
 }
 
 static inline sector_t get_capacity(struct gendisk *disk)
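
With the helpers above, reading the capacity in the submit path becomes a single field load (plus a shift for bytes) rather than an i_size_read() on the backing inode. A small, self-contained sketch of a per-IO bounds check written against such a cached sector count; io_in_bounds is a hypothetical helper for illustration, not part of the patch.

/*
 * Illustration only: a cheap bounds check against a cached capacity.
 * Written to avoid overflow in start_sector + nr_sectors.
 */
#include <stdbool.h>
#include <stdint.h>

static bool io_in_bounds(uint64_t capacity_sectors, uint64_t start_sector,
			 uint64_t nr_sectors)
{
	return start_sector < capacity_sectors &&
	       nr_sectors <= capacity_sectors - start_sector;
}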