Merge branch 'for-6.11/block-limits' into for-6.11/block

Pull in the block limits branch, which exists as a shared branch for both
the block and SCSI trees.

* for-6.11/block-limits: (26 commits)
  block: move integrity information into queue_limits
  block: invert the BLK_INTEGRITY_{GENERATE,VERIFY} flags
  block: bypass the STABLE_WRITES flag for protection information
  block: don't require stable pages for non-PI metadata
  block: use kstrtoul in flag_store
  block: factor out flag_{store,show} helper for integrity
  block: remove the blk_flush_integrity call in blk_integrity_unregister
  block: remove the blk_integrity_profile structure
  dm-integrity: use the nop integrity profile
  md/raid1: don't free conf on raid1_run failure
  md/raid0: don't free conf on raid0_run failure
  block: initialize integrity buffer to zero before writing it to media
  block: add special APIs for run-time disabling of discard and friends
  block: remove unused queue limits API
  sr: convert to the atomic queue limits API
  sd: convert to the atomic queue limits API
  sd: cleanup zoned queue limits initialization
  sd: factor out a sd_discard_mode helper
  sd: simplify the disable case in sd_config_discard
  sd: add a sd_disable_write_same helper
  ...
Jens Axboe 2024-06-14 10:22:08 -06:00
commit e3e72fe4cb
39 changed files with 736 additions and 1278 deletions


@ -153,18 +153,11 @@ bio_free() will automatically free the bip.
4.2 Block Device
----------------
Because the format of the protection data is tied to the physical
disk, each block device has been extended with a block integrity
profile (struct blk_integrity). This optional profile is registered
with the block layer using blk_integrity_register().
The profile contains callback functions for generating and verifying
the protection data, as well as getting and setting application tags.
The profile also contains a few constants to aid in completing,
merging and splitting the integrity metadata.
Block devices can set up the integrity information in the integrity
sub-structure of the queue_limits structure.
Layered block devices will need to pick a profile that's appropriate
for all subdevices. blk_integrity_compare() can help with that. DM
for all subdevices. queue_limits_stack_integrity() can help with that. DM
and MD linear, RAID0 and RAID1 are currently supported. RAID4/5/6
will require extra work due to the application tag.
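For example, a driver advertising T10 Type 1 protection fills in the
sub-structure before allocating its disk. A minimal sketch (the
surrounding driver names are hypothetical)::

    struct queue_limits lim = {
        .logical_block_size = 4096,
    };

    lim.integrity.csum_type  = BLK_INTEGRITY_CSUM_CRC;
    lim.integrity.tuple_size = sizeof(struct t10_pi_tuple); /* 8 bytes */
    lim.integrity.flags      = BLK_INTEGRITY_REF_TAG;

    disk = blk_mq_alloc_disk(&my_tag_set, &lim, my_driver_data);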
@ -250,42 +243,6 @@ will require extra work due to the application tag.
integrity upon completion.
5.4 Registering A Block Device As Capable Of Exchanging Integrity Metadata
--------------------------------------------------------------------------
To enable integrity exchange on a block device the gendisk must be
registered as capable:
`int blk_integrity_register(gendisk, blk_integrity);`
The blk_integrity struct is a template and should contain the
following::
static struct blk_integrity my_profile = {
.name = "STANDARDSBODY-TYPE-VARIANT-CSUM",
.generate_fn = my_generate_fn,
.verify_fn = my_verify_fn,
.tuple_size = sizeof(struct my_tuple_size),
.tag_size = <tag bytes per hw sector>,
};
'name' is a text string which will be visible in sysfs. This is
part of the userland API so choose it carefully and never change
it. The format is standards body-type-variant.
E.g. T10-DIF-TYPE1-IP or T13-EPP-0-CRC.
'generate_fn' generates appropriate integrity metadata (for WRITE).
'verify_fn' verifies that the data buffer matches the integrity
metadata.
'tuple_size' must be set to match the size of the integrity
metadata per sector. I.e. 8 for DIF and EPP.
'tag_size' must be set to identify how many bytes of tag space
are available per hardware sector. For DIF this is either 2 or
0 depending on the value of the Control Mode Page ATO bit.
----------------------------------------------------------------------
2007-12-24 Martin K. Petersen <martin.petersen@oracle.com>


@ -447,43 +447,31 @@ static int bulk_req_safe_read(
return n;
}
/* Called without dev->lock held, and only in interrupt context. */
static void ubd_handler(void)
static void ubd_end_request(struct io_thread_req *io_req)
{
int n;
int count;
while(1){
n = bulk_req_safe_read(
thread_fd,
irq_req_buffer,
&irq_remainder,
&irq_remainder_size,
UBD_REQ_BUFFER_SIZE
);
if (n < 0) {
if(n == -EAGAIN)
break;
printk(KERN_ERR "spurious interrupt in ubd_handler, "
"err = %d\n", -n);
return;
}
for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
struct io_thread_req *io_req = (*irq_req_buffer)[count];
if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
blk_queue_max_discard_sectors(io_req->req->q, 0);
blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
}
blk_mq_end_request(io_req->req, io_req->error);
kfree(io_req);
}
if (io_req->error == BLK_STS_NOTSUPP) {
if (req_op(io_req->req) == REQ_OP_DISCARD)
blk_queue_disable_discard(io_req->req->q);
else if (req_op(io_req->req) == REQ_OP_WRITE_ZEROES)
blk_queue_disable_write_zeroes(io_req->req->q);
}
blk_mq_end_request(io_req->req, io_req->error);
kfree(io_req);
}
static irqreturn_t ubd_intr(int irq, void *dev)
{
ubd_handler();
int len, i;
while ((len = bulk_req_safe_read(thread_fd, irq_req_buffer,
&irq_remainder, &irq_remainder_size,
UBD_REQ_BUFFER_SIZE)) >= 0) {
for (i = 0; i < len / sizeof(struct io_thread_req *); i++)
ubd_end_request((*irq_req_buffer)[i]);
}
if (len < 0 && len != -EAGAIN)
pr_err("spurious interrupt in %s, err = %d\n", __func__, len);
return IRQ_HANDLED;
}
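The blk_queue_disable_{discard,write_zeroes} helpers used above come from
the "run-time disabling" commit in this branch. A plausible minimal shape,
as a sketch rather than the exact upstream code, is to clear the
corresponding limits in place:

static inline void blk_queue_disable_discard(struct request_queue *q)
{
	q->limits.max_discard_sectors = 0;
}

static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
	q->limits.max_write_zeroes_sectors = 0;
}

Unlike the removed blk_queue_max_*_sectors() setters these are one-way:
they exist so completion handlers, which cannot freeze the queue or take
sleeping locks, can turn a feature off after the hardware rejects it.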


@ -62,6 +62,8 @@ config BLK_DEV_BSGLIB
config BLK_DEV_INTEGRITY
bool "Block layer data integrity support"
select CRC_T10DIF
select CRC64_ROCKSOFT
help
Some storage devices allow extra information to be
stored/retrieved to help protect the data. The block layer
@ -72,12 +74,6 @@ config BLK_DEV_INTEGRITY
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
config BLK_DEV_INTEGRITY_T10
tristate
depends on BLK_DEV_INTEGRITY
select CRC_T10DIF
select CRC64_ROCKSOFT
config BLK_DEV_WRITE_MOUNTED
bool "Allow writing to mounted block devices"
default y
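With BLK_DEV_INTEGRITY_T10 folded into the main option, the surviving
entry (help text unchanged) reduces to:

config BLK_DEV_INTEGRITY
	bool "Block layer data integrity support"
	select CRC_T10DIF
	select CRC64_ROCKSOFT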


@ -26,8 +26,7 @@ obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
obj-$(CONFIG_BLK_DEV_INTEGRITY_T10) += t10-pi.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o
obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o


@ -378,10 +378,9 @@ EXPORT_SYMBOL_GPL(bio_integrity_map_user);
* bio_integrity_process - Process integrity metadata for a bio
* @bio: bio to generate/verify integrity metadata for
* @proc_iter: iterator to process
* @proc_fn: Pointer to the relevant processing function
*/
static blk_status_t bio_integrity_process(struct bio *bio,
struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
struct bvec_iter *proc_iter)
{
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct blk_integrity_iter iter;
@ -392,17 +391,18 @@ static blk_status_t bio_integrity_process(struct bio *bio,
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
iter.interval = 1 << bi->interval_exp;
iter.tuple_size = bi->tuple_size;
iter.seed = proc_iter->bi_sector;
iter.prot_buf = bvec_virt(bip->bip_vec);
iter.pi_offset = bi->pi_offset;
__bio_for_each_segment(bv, bio, bviter, *proc_iter) {
void *kaddr = bvec_kmap_local(&bv);
iter.data_buf = kaddr;
iter.data_size = bv.bv_len;
ret = proc_fn(&iter);
if (bio_data_dir(bio) == WRITE)
blk_integrity_generate(&iter, bi);
else
ret = blk_integrity_verify(&iter, bi);
kunmap_local(kaddr);
if (ret)
@ -432,6 +432,7 @@ bool bio_integrity_prep(struct bio *bio)
unsigned long start, end;
unsigned int len, nr_pages;
unsigned int bytes, offset, i;
gfp_t gfp = GFP_NOIO;
if (!bi)
return true;
@ -447,18 +448,24 @@ bool bio_integrity_prep(struct bio *bio)
return true;
if (bio_data_dir(bio) == READ) {
if (!bi->profile->verify_fn ||
!(bi->flags & BLK_INTEGRITY_VERIFY))
if (bi->flags & BLK_INTEGRITY_NOVERIFY)
return true;
} else {
if (!bi->profile->generate_fn ||
!(bi->flags & BLK_INTEGRITY_GENERATE))
if (bi->flags & BLK_INTEGRITY_NOGENERATE)
return true;
/*
* Zero the memory allocated to not leak uninitialized kernel
* memory to disk. For PI this only affects the app tag, but
* for non-integrity metadata it affects the entire metadata
* buffer.
*/
gfp |= __GFP_ZERO;
}
/* Allocate kernel buffer for protection data */
len = bio_integrity_bytes(bi, bio_sectors(bio));
buf = kmalloc(len, GFP_NOIO);
buf = kmalloc(len, gfp);
if (unlikely(buf == NULL)) {
printk(KERN_ERR "could not allocate integrity buffer\n");
goto err_end_io;
@ -479,7 +486,7 @@ bool bio_integrity_prep(struct bio *bio)
bip->bip_flags |= BIP_BLOCK_INTEGRITY;
bip_set_seed(bip, bio->bi_iter.bi_sector);
if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM)
if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
bip->bip_flags |= BIP_IP_CHECKSUM;
/* Map it */
@ -502,12 +509,10 @@ bool bio_integrity_prep(struct bio *bio)
}
/* Auto-generate integrity metadata if this is a write */
if (bio_data_dir(bio) == WRITE) {
bio_integrity_process(bio, &bio->bi_iter,
bi->profile->generate_fn);
} else {
if (bio_data_dir(bio) == WRITE)
bio_integrity_process(bio, &bio->bi_iter);
else
bip->bio_iter = bio->bi_iter;
}
return true;
err_end_io:
@ -530,15 +535,13 @@ static void bio_integrity_verify_fn(struct work_struct *work)
struct bio_integrity_payload *bip =
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
/*
* At the moment verify is called, the bio's iterator was advanced
* during split and completion, so we need to rewind the iterator to
* its original position.
*/
bio->bi_status = bio_integrity_process(bio, &bip->bio_iter,
bi->profile->verify_fn);
bio->bi_status = bio_integrity_process(bio, &bip->bio_iter);
bio_integrity_free(bio);
bio_endio(bio);
}
@ -560,7 +563,7 @@ bool __bio_integrity_endio(struct bio *bio)
struct bio_integrity_payload *bip = bio_integrity(bio);
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
(bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
(bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->csum_type) {
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
queue_work(kintegrityd_wq, &bip->bip_work);
return false;


@ -107,60 +107,6 @@ new_segment:
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
/**
* blk_integrity_compare - Compare integrity profile of two disks
* @gd1: Disk to compare
* @gd2: Disk to compare
*
* Description: Meta-devices like DM and MD need to verify that all
* sub-devices use the same integrity format before advertising to
* upper layers that they can send/receive integrity metadata. This
* function can be used to check whether two gendisk devices have
* compatible integrity formats.
*/
int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
{
struct blk_integrity *b1 = &gd1->queue->integrity;
struct blk_integrity *b2 = &gd2->queue->integrity;
if (!b1->profile && !b2->profile)
return 0;
if (!b1->profile || !b2->profile)
return -1;
if (b1->interval_exp != b2->interval_exp) {
pr_err("%s: %s/%s protection interval %u != %u\n",
__func__, gd1->disk_name, gd2->disk_name,
1 << b1->interval_exp, 1 << b2->interval_exp);
return -1;
}
if (b1->tuple_size != b2->tuple_size) {
pr_err("%s: %s/%s tuple sz %u != %u\n", __func__,
gd1->disk_name, gd2->disk_name,
b1->tuple_size, b2->tuple_size);
return -1;
}
if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
pr_err("%s: %s/%s tag sz %u != %u\n", __func__,
gd1->disk_name, gd2->disk_name,
b1->tag_size, b2->tag_size);
return -1;
}
if (b1->profile != b2->profile) {
pr_err("%s: %s/%s type %s != %s\n", __func__,
gd1->disk_name, gd2->disk_name,
b1->profile->name, b2->profile->name);
return -1;
}
return 0;
}
EXPORT_SYMBOL(blk_integrity_compare);
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
struct request *next)
{
@ -214,7 +160,65 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
return &dev_to_disk(dev)->queue->integrity;
return &dev_to_disk(dev)->queue->limits.integrity;
}
const char *blk_integrity_profile_name(struct blk_integrity *bi)
{
switch (bi->csum_type) {
case BLK_INTEGRITY_CSUM_IP:
if (bi->flags & BLK_INTEGRITY_REF_TAG)
return "T10-DIF-TYPE1-IP";
return "T10-DIF-TYPE3-IP";
case BLK_INTEGRITY_CSUM_CRC:
if (bi->flags & BLK_INTEGRITY_REF_TAG)
return "T10-DIF-TYPE1-CRC";
return "T10-DIF-TYPE3-CRC";
case BLK_INTEGRITY_CSUM_CRC64:
if (bi->flags & BLK_INTEGRITY_REF_TAG)
return "EXT-DIF-TYPE1-CRC64";
return "EXT-DIF-TYPE3-CRC64";
case BLK_INTEGRITY_CSUM_NONE:
break;
}
return "nop";
}
EXPORT_SYMBOL_GPL(blk_integrity_profile_name);
static ssize_t flag_store(struct device *dev, struct device_attribute *attr,
const char *page, size_t count, unsigned char flag)
{
struct request_queue *q = dev_to_disk(dev)->queue;
struct queue_limits lim;
unsigned long val;
int err;
err = kstrtoul(page, 10, &val);
if (err)
return err;
/* note that the flags are inverted vs the values in the sysfs files */
lim = queue_limits_start_update(q);
if (val)
lim.integrity.flags &= ~flag;
else
lim.integrity.flags |= flag;
blk_mq_freeze_queue(q);
err = queue_limits_commit_update(q, &lim);
blk_mq_unfreeze_queue(q);
if (err)
return err;
return count;
}
static ssize_t flag_show(struct device *dev, struct device_attribute *attr,
char *page, unsigned char flag)
{
struct blk_integrity *bi = dev_to_bi(dev);
return sysfs_emit(page, "%d\n", !(bi->flags & flag));
}
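The sysfs files keep their historical polarity (1 means verification or
generation is enabled) even though the flags are now stored negated:

	/*
	 * read_verify    == 1  <=>  BLK_INTEGRITY_NOVERIFY   clear
	 * read_verify    == 0  <=>  BLK_INTEGRITY_NOVERIFY   set
	 * write_generate == 1  <=>  BLK_INTEGRITY_NOGENERATE clear
	 * write_generate == 0  <=>  BLK_INTEGRITY_NOGENERATE set
	 */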
static ssize_t format_show(struct device *dev, struct device_attribute *attr,
@ -222,9 +226,9 @@ static ssize_t format_show(struct device *dev, struct device_attribute *attr,
{
struct blk_integrity *bi = dev_to_bi(dev);
if (bi->profile && bi->profile->name)
return sysfs_emit(page, "%s\n", bi->profile->name);
return sysfs_emit(page, "none\n");
if (!bi->tuple_size)
return sysfs_emit(page, "none\n");
return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
}
static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
@ -249,49 +253,26 @@ static ssize_t read_verify_store(struct device *dev,
struct device_attribute *attr,
const char *page, size_t count)
{
struct blk_integrity *bi = dev_to_bi(dev);
char *p = (char *) page;
unsigned long val = simple_strtoul(p, &p, 10);
if (val)
bi->flags |= BLK_INTEGRITY_VERIFY;
else
bi->flags &= ~BLK_INTEGRITY_VERIFY;
return count;
return flag_store(dev, attr, page, count, BLK_INTEGRITY_NOVERIFY);
}
static ssize_t read_verify_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct blk_integrity *bi = dev_to_bi(dev);
return sysfs_emit(page, "%d\n", !!(bi->flags & BLK_INTEGRITY_VERIFY));
return flag_show(dev, attr, page, BLK_INTEGRITY_NOVERIFY);
}
static ssize_t write_generate_store(struct device *dev,
struct device_attribute *attr,
const char *page, size_t count)
{
struct blk_integrity *bi = dev_to_bi(dev);
char *p = (char *) page;
unsigned long val = simple_strtoul(p, &p, 10);
if (val)
bi->flags |= BLK_INTEGRITY_GENERATE;
else
bi->flags &= ~BLK_INTEGRITY_GENERATE;
return count;
return flag_store(dev, attr, page, count, BLK_INTEGRITY_NOGENERATE);
}
static ssize_t write_generate_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct blk_integrity *bi = dev_to_bi(dev);
return sysfs_emit(page, "%d\n", !!(bi->flags & BLK_INTEGRITY_GENERATE));
return flag_show(dev, attr, page, BLK_INTEGRITY_NOGENERATE);
}
static ssize_t device_is_integrity_capable_show(struct device *dev,
@ -325,81 +306,3 @@ const struct attribute_group blk_integrity_attr_group = {
.name = "integrity",
.attrs = integrity_attrs,
};
static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
{
return BLK_STS_OK;
}
static void blk_integrity_nop_prepare(struct request *rq)
{
}
static void blk_integrity_nop_complete(struct request *rq,
unsigned int nr_bytes)
{
}
static const struct blk_integrity_profile nop_profile = {
.name = "nop",
.generate_fn = blk_integrity_nop_fn,
.verify_fn = blk_integrity_nop_fn,
.prepare_fn = blk_integrity_nop_prepare,
.complete_fn = blk_integrity_nop_complete,
};
/**
* blk_integrity_register - Register a gendisk as being integrity-capable
* @disk: struct gendisk pointer to make integrity-aware
* @template: block integrity profile to register
*
* Description: When a device needs to advertise itself as being able to
* send/receive integrity metadata it must use this function to register
* the capability with the block layer. The template is a blk_integrity
* struct with values appropriate for the underlying hardware. See
* Documentation/block/data-integrity.rst.
*/
void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
{
struct blk_integrity *bi = &disk->queue->integrity;
bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
template->flags;
bi->interval_exp = template->interval_exp ? :
ilog2(queue_logical_block_size(disk->queue));
bi->profile = template->profile ? template->profile : &nop_profile;
bi->tuple_size = template->tuple_size;
bi->tag_size = template->tag_size;
bi->pi_offset = template->pi_offset;
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
if (disk->queue->crypto_profile) {
pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n");
disk->queue->crypto_profile = NULL;
}
#endif
}
EXPORT_SYMBOL(blk_integrity_register);
/**
* blk_integrity_unregister - Unregister block integrity profile
* @disk: disk whose integrity profile to unregister
*
* Description: This function unregisters the integrity capability from
* a block device.
*/
void blk_integrity_unregister(struct gendisk *disk)
{
struct blk_integrity *bi = &disk->queue->integrity;
if (!bi->profile)
return;
/* ensure all bios are off the integrity workqueue */
blk_flush_integrity();
blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
memset(bi, 0, sizeof(*bi));
}
EXPORT_SYMBOL(blk_integrity_unregister);


@ -804,10 +804,8 @@ static void blk_complete_request(struct request *req)
if (!bio)
return;
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
req->q->integrity.profile->complete_fn(req, total_bytes);
#endif
blk_integrity_complete(req, total_bytes);
/*
* Upper layers may call blk_crypto_evict_key() anytime after the last
@ -875,11 +873,9 @@ bool blk_update_request(struct request *req, blk_status_t error,
if (!req->bio)
return false;
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
error == BLK_STS_OK)
req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif
blk_integrity_complete(req, nr_bytes);
/*
* Upper layers may call blk_crypto_evict_key() anytime after the last
@ -1264,10 +1260,9 @@ void blk_mq_start_request(struct request *rq)
WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
rq->mq_hctx->tags->rqs[rq->tag] = rq;
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
q->integrity.profile->prepare_fn(rq);
#endif
blk_integrity_prepare(rq);
if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
}


@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
@ -97,6 +97,36 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
return 0;
}
static int blk_validate_integrity_limits(struct queue_limits *lim)
{
struct blk_integrity *bi = &lim->integrity;
if (!bi->tuple_size) {
if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
bi->tag_size || (bi->flags & BLK_INTEGRITY_REF_TAG)) {
pr_warn("invalid PI settings.\n");
return -EINVAL;
}
return 0;
}
if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
pr_warn("integrity support disabled.\n");
return -EINVAL;
}
if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
(bi->flags & BLK_INTEGRITY_REF_TAG)) {
pr_warn("ref tag not supported without checksum.\n");
return -EINVAL;
}
if (!bi->interval_exp)
bi->interval_exp = ilog2(lim->logical_block_size);
return 0;
}
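As a worked example of what this validation accepts and rejects (values
hypothetical):

	/* accepted: Type 1 PI, CRC guard plus ref tag, 8-byte tuple */
	lim.integrity.csum_type  = BLK_INTEGRITY_CSUM_CRC;
	lim.integrity.tuple_size = sizeof(struct t10_pi_tuple);
	lim.integrity.flags      = BLK_INTEGRITY_REF_TAG;

	/* rejected with -EINVAL: a ref tag makes no sense without a checksum */
	lim.integrity.csum_type  = BLK_INTEGRITY_CSUM_NONE;
	lim.integrity.tuple_size = sizeof(struct t10_pi_tuple);
	lim.integrity.flags      = BLK_INTEGRITY_REF_TAG;

interval_exp may be left zero, in which case it defaults to ilog2() of the
logical block size as shown above.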
/*
* Check that the limits in lim are valid, initialize defaults for unset
* values, and cap values based on others where needed.
@ -105,6 +135,7 @@ static int blk_validate_limits(struct queue_limits *lim)
{
unsigned int max_hw_sectors;
unsigned int logical_block_sectors;
int err;
/*
* Unless otherwise specified, default to 512 byte logical blocks and a
@ -153,6 +184,13 @@ static int blk_validate_limits(struct queue_limits *lim)
if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
return -EINVAL;
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
} else if (lim->io_opt) {
lim->max_sectors =
min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
} else if (lim->io_min &&
lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
lim->max_sectors =
min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
} else {
lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
}
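As a concrete example of the new fallbacks: nbd (below) now sets
.io_opt = 256 << SECTOR_SHIFT instead of .max_user_sectors = 256, so the
io_opt branch computes max_sectors = min(max_hw_sectors,
(256 << SECTOR_SHIFT) >> SECTOR_SHIFT) = 256 sectors, preserving the old
cap without abusing the user-overridable limit.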
@ -223,6 +261,9 @@ static int blk_validate_limits(struct queue_limits *lim)
lim->misaligned = 0;
}
err = blk_validate_integrity_limits(lim);
if (err)
return err;
return blk_validate_zoned_limits(lim);
}
@ -256,13 +297,24 @@ int queue_limits_commit_update(struct request_queue *q,
struct queue_limits *lim)
__releases(q->limits_lock)
{
int error = blk_validate_limits(lim);
int error;
if (!error) {
q->limits = *lim;
if (q->disk)
blk_apply_bdi_limits(q->disk->bdi, lim);
error = blk_validate_limits(lim);
if (error)
goto out_unlock;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
if (q->crypto_profile && lim->integrity.tag_size) {
pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
error = -EINVAL;
goto out_unlock;
}
#endif
q->limits = *lim;
if (q->disk)
blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
mutex_unlock(&q->limits_lock);
return error;
}
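For reference, the expected driver-side calling pattern for the atomic
limits API is (a sketch; the field updated is arbitrary):

	struct queue_limits lim;
	int err;

	lim = queue_limits_start_update(q);
	lim.max_hw_discard_sectors = new_value;
	err = queue_limits_commit_update(q, &lim);
	if (err)
		return err;

queue_limits_start_update() takes q->limits_lock and the commit (or
queue_limits_cancel_update()) releases it, so the read-modify-write of
the limits is atomic with respect to concurrent updaters.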
@ -286,198 +338,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
}
EXPORT_SYMBOL_GPL(queue_limits_set);
/**
* blk_queue_chunk_sectors - set size of the chunk for this queue
* @q: the request queue for the device
* @chunk_sectors: chunk sectors in the usual 512b unit
*
* Description:
* If a driver doesn't want IOs to cross a given chunk size, it can set
* this limit and prevent merging across chunks. Note that the block layer
* must accept a page worth of data at any offset. So if the crossing of
* chunks is a hard limitation in the driver, it must still be prepared
* to split single page bios.
**/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
/**
* blk_queue_max_discard_sectors - set max sectors for a single discard
* @q: the request queue for the device
* @max_discard_sectors: maximum number of sectors to discard
**/
void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors)
{
struct queue_limits *lim = &q->limits;
lim->max_hw_discard_sectors = max_discard_sectors;
lim->max_discard_sectors =
min(max_discard_sectors, lim->max_user_discard_sectors);
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);
/**
* blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
* @q: the request queue for the device
* @max_sectors: maximum number of sectors to secure_erase
**/
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
unsigned int max_sectors)
{
q->limits.max_secure_erase_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);
/**
* blk_queue_max_write_zeroes_sectors - set max sectors for a single
* write zeroes
* @q: the request queue for the device
* @max_write_zeroes_sectors: maximum number of sectors to write per command
**/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
unsigned int max_write_zeroes_sectors)
{
q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
/**
* blk_queue_max_zone_append_sectors - set max sectors for a single zone append
* @q: the request queue for the device
* @max_zone_append_sectors: maximum number of sectors to write per command
*
* Sets the maximum number of sectors allowed for zone append commands.
* Specifying 0 for @max_zone_append_sectors indicates that the queue does
* not natively support zone append operations and that the block layer must
* emulate these operations using regular writes.
**/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
unsigned int max_zone_append_sectors)
{
unsigned int max_sectors = 0;
if (WARN_ON(!blk_queue_is_zoned(q)))
return;
if (max_zone_append_sectors) {
max_sectors = min(q->limits.max_hw_sectors,
max_zone_append_sectors);
max_sectors = min(q->limits.chunk_sectors, max_sectors);
/*
* Signal eventual driver bugs resulting in the max_zone_append
* sectors limit being 0 due to the chunk_sectors limit (zone
* size) not set or the max_hw_sectors limit not set.
*/
WARN_ON_ONCE(!max_sectors);
}
q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
/**
* blk_queue_logical_block_size - set logical block size for the queue
* @q: the request queue for the device
* @size: the logical block size, in bytes
*
* Description:
* This should be set to the lowest possible block size that the
* storage device can address. The default of 512 covers most
* hardware.
**/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
struct queue_limits *limits = &q->limits;
limits->logical_block_size = size;
if (limits->discard_granularity < limits->logical_block_size)
limits->discard_granularity = limits->logical_block_size;
if (limits->physical_block_size < size)
limits->physical_block_size = size;
if (limits->io_min < limits->physical_block_size)
limits->io_min = limits->physical_block_size;
limits->max_hw_sectors =
round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
limits->max_sectors =
round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
/**
* blk_queue_physical_block_size - set physical block size for the queue
* @q: the request queue for the device
* @size: the physical block size, in bytes
*
* Description:
* This should be set to the lowest possible sector size that the
* hardware can operate on without reverting to read-modify-write
* operations.
*/
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
q->limits.physical_block_size = size;
if (q->limits.physical_block_size < q->limits.logical_block_size)
q->limits.physical_block_size = q->limits.logical_block_size;
if (q->limits.discard_granularity < q->limits.physical_block_size)
q->limits.discard_granularity = q->limits.physical_block_size;
if (q->limits.io_min < q->limits.physical_block_size)
q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
/**
* blk_queue_zone_write_granularity - set zone write granularity for the queue
* @q: the request queue for the zoned device
* @size: the zone write granularity size, in bytes
*
* Description:
* This should be set to the lowest possible size that allows writing in
* sequential zones of a zoned block device.
*/
void blk_queue_zone_write_granularity(struct request_queue *q,
unsigned int size)
{
if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
return;
q->limits.zone_write_granularity = size;
if (q->limits.zone_write_granularity < q->limits.logical_block_size)
q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
/**
* blk_queue_alignment_offset - set physical block alignment offset
* @q: the request queue for the device
* @offset: alignment offset in bytes
*
* Description:
* Some devices are naturally misaligned to compensate for things like
* the legacy DOS partition table 63-sector offset. Low-level drivers
* should call this function for devices whose first sector is not
* naturally aligned.
*/
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
q->limits.alignment_offset =
offset & (q->limits.physical_block_size - 1);
q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
void disk_update_readahead(struct gendisk *disk)
{
blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
@ -507,26 +367,6 @@ void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
}
EXPORT_SYMBOL(blk_limits_io_min);
/**
* blk_queue_io_min - set minimum request size for the queue
* @q: the request queue for the device
* @min: smallest I/O size in bytes
*
* Description:
* Storage devices may report a granularity or preferred minimum I/O
* size which is the smallest request the device can perform without
* incurring a performance penalty. For disk drives this is often the
* physical block size. For RAID arrays it is often the stripe chunk
* size. A properly aligned multiple of minimum_io_size is the
* preferred request size for workloads where a high number of I/O
* operations is desired.
*/
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
/**
* blk_limits_io_opt - set optimal request size for a device
* @limits: the queue limits
@ -780,6 +620,67 @@ void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
/**
* queue_limits_stack_integrity - stack integrity profile
* @t: target queue limits
* @b: base queue limits
*
* Check if the integrity profile in @b can be stacked into the
* target @t. Stacking is possible if either:
*
* a) @t does not have any integrity information stacked into it yet, or
* b) the integrity profile in @b is identical to the one in @t
*
* If @b can be stacked into @t, return %true. Else return %false and clear the
* integrity information in @t.
*/
bool queue_limits_stack_integrity(struct queue_limits *t,
struct queue_limits *b)
{
struct blk_integrity *ti = &t->integrity;
struct blk_integrity *bi = &b->integrity;
if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
return true;
if (!ti->tuple_size) {
/* inherit the settings from the first underlying device */
if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
(bi->flags & BLK_INTEGRITY_REF_TAG);
ti->csum_type = bi->csum_type;
ti->tuple_size = bi->tuple_size;
ti->pi_offset = bi->pi_offset;
ti->interval_exp = bi->interval_exp;
ti->tag_size = bi->tag_size;
goto done;
}
if (!bi->tuple_size)
goto done;
}
if (ti->tuple_size != bi->tuple_size)
goto incompatible;
if (ti->interval_exp != bi->interval_exp)
goto incompatible;
if (ti->tag_size != bi->tag_size)
goto incompatible;
if (ti->csum_type != bi->csum_type)
goto incompatible;
if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
(bi->flags & BLK_INTEGRITY_REF_TAG))
goto incompatible;
done:
ti->flags |= BLK_INTEGRITY_STACKED;
return true;
incompatible:
memset(ti, 0, sizeof(*ti));
return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
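A stacking driver feeds each member device through this helper and drops
integrity support on the first mismatch. In sketch form, mirroring how MD
uses it later in this series:

	blk_set_stacking_limits(&lim);
	rdev_for_each(rdev, mddev) {
		if (!queue_limits_stack_integrity_bdev(&lim, rdev->bdev))
			return -EINVAL;	/* lim.integrity has been cleared */
	}

where queue_limits_stack_integrity_bdev() is the convenience wrapper that
passes the member's queue limits as @b.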
/**
* blk_queue_update_dma_pad - update pad mask
* @q: the request queue for the device
@ -834,25 +735,6 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
/**
* disk_set_zoned - indicate a zoned device
* @disk: gendisk to configure
*/
void disk_set_zoned(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
/*
* Set the zone write granularity to the device logical block
* size by default. The driver can change this value if needed.
*/
q->limits.zoned = true;
blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
}
EXPORT_SYMBOL_GPL(disk_set_zoned);
int bdev_alignment_offset(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);


@ -9,6 +9,7 @@
#include <xen/xen.h>
#include "blk-crypto-internal.h"
struct blk_integrity_iter;
struct elevator_type;
/* Max future timer expiry for timeouts */
@ -673,4 +674,11 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);
void blk_integrity_generate(struct blk_integrity_iter *iter,
struct blk_integrity *bi);
blk_status_t blk_integrity_verify(struct blk_integrity_iter *iter,
struct blk_integrity *bi);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);
#endif /* BLK_INTERNAL_H */


@ -11,17 +11,14 @@
#include <linux/module.h>
#include <net/checksum.h>
#include <asm/unaligned.h>
#include "blk.h"
typedef __be16 (csum_fn) (__be16, void *, unsigned int);
static __be16 t10_pi_crc_fn(__be16 crc, void *data, unsigned int len)
static __be16 t10_pi_csum(__be16 csum, void *data, unsigned int len,
unsigned char csum_type)
{
return cpu_to_be16(crc_t10dif_update(be16_to_cpu(crc), data, len));
}
static __be16 t10_pi_ip_fn(__be16 csum, void *data, unsigned int len)
{
return (__force __be16)ip_compute_csum(data, len);
if (csum_type == BLK_INTEGRITY_CSUM_IP)
return (__force __be16)ip_compute_csum(data, len);
return cpu_to_be16(crc_t10dif_update(be16_to_cpu(csum), data, len));
}
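Both checksum flavours still return a 16-bit big-endian guard, so callers
select the algorithm purely through csum_type; for instance the IP-checksum
variant used by T10-DIF-TYPE1-IP amounts to:

	__be16 guard = t10_pi_csum(0, data, len, BLK_INTEGRITY_CSUM_IP);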
/*
@ -29,48 +26,44 @@ static __be16 t10_pi_ip_fn(__be16 csum, void *data, unsigned int len)
* 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
* tag.
*/
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
csum_fn *fn, enum t10_dif_type type)
static void t10_pi_generate(struct blk_integrity_iter *iter,
struct blk_integrity *bi)
{
u8 offset = iter->pi_offset;
u8 offset = bi->pi_offset;
unsigned int i;
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf + offset;
pi->guard_tag = fn(0, iter->data_buf, iter->interval);
pi->guard_tag = t10_pi_csum(0, iter->data_buf, iter->interval,
bi->csum_type);
if (offset)
pi->guard_tag = fn(pi->guard_tag, iter->prot_buf,
offset);
pi->guard_tag = t10_pi_csum(pi->guard_tag,
iter->prot_buf, offset, bi->csum_type);
pi->app_tag = 0;
if (type == T10_PI_TYPE1_PROTECTION)
if (bi->flags & BLK_INTEGRITY_REF_TAG)
pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
else
pi->ref_tag = 0;
iter->data_buf += iter->interval;
iter->prot_buf += iter->tuple_size;
iter->prot_buf += bi->tuple_size;
iter->seed++;
}
return BLK_STS_OK;
}
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
csum_fn *fn, enum t10_dif_type type)
struct blk_integrity *bi)
{
u8 offset = iter->pi_offset;
u8 offset = bi->pi_offset;
unsigned int i;
BUG_ON(type == T10_PI_TYPE0_PROTECTION);
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf + offset;
__be16 csum;
if (type == T10_PI_TYPE1_PROTECTION ||
type == T10_PI_TYPE2_PROTECTION) {
if (bi->flags & BLK_INTEGRITY_REF_TAG) {
if (pi->app_tag == T10_PI_APP_ESCAPE)
goto next;
@ -82,15 +75,17 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
iter->seed, be32_to_cpu(pi->ref_tag));
return BLK_STS_PROTECTION;
}
} else if (type == T10_PI_TYPE3_PROTECTION) {
} else {
if (pi->app_tag == T10_PI_APP_ESCAPE &&
pi->ref_tag == T10_PI_REF_ESCAPE)
goto next;
}
csum = fn(0, iter->data_buf, iter->interval);
csum = t10_pi_csum(0, iter->data_buf, iter->interval,
bi->csum_type);
if (offset)
csum = fn(csum, iter->prot_buf, offset);
csum = t10_pi_csum(csum, iter->prot_buf, offset,
bi->csum_type);
if (pi->guard_tag != csum) {
pr_err("%s: guard tag error at sector %llu " \
@ -102,33 +97,13 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
next:
iter->data_buf += iter->interval;
iter->prot_buf += iter->tuple_size;
iter->prot_buf += bi->tuple_size;
iter->seed++;
}
return BLK_STS_OK;
}
static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
/**
* t10_pi_type1_prepare - prepare PI prior submitting request to device
* @rq: request with PI that should be prepared
@ -141,7 +116,7 @@ static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
*/
static void t10_pi_type1_prepare(struct request *rq)
{
struct blk_integrity *bi = &rq->q->integrity;
struct blk_integrity *bi = &rq->q->limits.integrity;
const int tuple_sz = bi->tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
u8 offset = bi->pi_offset;
@ -192,7 +167,7 @@ static void t10_pi_type1_prepare(struct request *rq)
*/
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
struct blk_integrity *bi = &rq->q->integrity;
struct blk_integrity *bi = &rq->q->limits.integrity;
unsigned intervals = nr_bytes >> bi->interval_exp;
const int tuple_sz = bi->tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
@ -225,81 +200,15 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
}
}
static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}
static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}
static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}
static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}
/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}
/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}
const struct blk_integrity_profile t10_pi_type1_crc = {
.name = "T10-DIF-TYPE1-CRC",
.generate_fn = t10_pi_type1_generate_crc,
.verify_fn = t10_pi_type1_verify_crc,
.prepare_fn = t10_pi_type1_prepare,
.complete_fn = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);
const struct blk_integrity_profile t10_pi_type1_ip = {
.name = "T10-DIF-TYPE1-IP",
.generate_fn = t10_pi_type1_generate_ip,
.verify_fn = t10_pi_type1_verify_ip,
.prepare_fn = t10_pi_type1_prepare,
.complete_fn = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);
const struct blk_integrity_profile t10_pi_type3_crc = {
.name = "T10-DIF-TYPE3-CRC",
.generate_fn = t10_pi_type3_generate_crc,
.verify_fn = t10_pi_type3_verify_crc,
.prepare_fn = t10_pi_type3_prepare,
.complete_fn = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);
const struct blk_integrity_profile t10_pi_type3_ip = {
.name = "T10-DIF-TYPE3-IP",
.generate_fn = t10_pi_type3_generate_ip,
.verify_fn = t10_pi_type3_verify_ip,
.prepare_fn = t10_pi_type3_prepare,
.complete_fn = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len)
{
return cpu_to_be64(crc64_rocksoft_update(crc, data, len));
}
static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter,
enum t10_dif_type type)
static void ext_pi_crc64_generate(struct blk_integrity_iter *iter,
struct blk_integrity *bi)
{
u8 offset = iter->pi_offset;
u8 offset = bi->pi_offset;
unsigned int i;
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
@ -311,17 +220,15 @@ static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter,
iter->prot_buf, offset);
pi->app_tag = 0;
if (type == T10_PI_TYPE1_PROTECTION)
if (bi->flags & BLK_INTEGRITY_REF_TAG)
put_unaligned_be48(iter->seed, pi->ref_tag);
else
put_unaligned_be48(0ULL, pi->ref_tag);
iter->data_buf += iter->interval;
iter->prot_buf += iter->tuple_size;
iter->prot_buf += bi->tuple_size;
iter->seed++;
}
return BLK_STS_OK;
}
static bool ext_pi_ref_escape(u8 *ref_tag)
@ -332,9 +239,9 @@ static bool ext_pi_ref_escape(u8 *ref_tag)
}
static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
enum t10_dif_type type)
struct blk_integrity *bi)
{
u8 offset = iter->pi_offset;
u8 offset = bi->pi_offset;
unsigned int i;
for (i = 0; i < iter->data_size; i += iter->interval) {
@ -342,7 +249,7 @@ static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
u64 ref, seed;
__be64 csum;
if (type == T10_PI_TYPE1_PROTECTION) {
if (bi->flags & BLK_INTEGRITY_REF_TAG) {
if (pi->app_tag == T10_PI_APP_ESCAPE)
goto next;
@ -353,7 +260,7 @@ static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
iter->disk_name, seed, ref);
return BLK_STS_PROTECTION;
}
} else if (type == T10_PI_TYPE3_PROTECTION) {
} else {
if (pi->app_tag == T10_PI_APP_ESCAPE &&
ext_pi_ref_escape(pi->ref_tag))
goto next;
@ -374,26 +281,16 @@ static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
next:
iter->data_buf += iter->interval;
iter->prot_buf += iter->tuple_size;
iter->prot_buf += bi->tuple_size;
iter->seed++;
}
return BLK_STS_OK;
}
static blk_status_t ext_pi_type1_verify_crc64(struct blk_integrity_iter *iter)
{
return ext_pi_crc64_verify(iter, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t ext_pi_type1_generate_crc64(struct blk_integrity_iter *iter)
{
return ext_pi_crc64_generate(iter, T10_PI_TYPE1_PROTECTION);
}
static void ext_pi_type1_prepare(struct request *rq)
{
struct blk_integrity *bi = &rq->q->integrity;
struct blk_integrity *bi = &rq->q->limits.integrity;
const int tuple_sz = bi->tuple_size;
u64 ref_tag = ext_pi_ref_tag(rq);
u8 offset = bi->pi_offset;
@ -433,7 +330,7 @@ static void ext_pi_type1_prepare(struct request *rq)
static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
struct blk_integrity *bi = &rq->q->integrity;
struct blk_integrity *bi = &rq->q->limits.integrity;
unsigned intervals = nr_bytes >> bi->interval_exp;
const int tuple_sz = bi->tuple_size;
u64 ref_tag = ext_pi_ref_tag(rq);
@ -467,33 +364,61 @@ static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
}
}
static blk_status_t ext_pi_type3_verify_crc64(struct blk_integrity_iter *iter)
void blk_integrity_generate(struct blk_integrity_iter *iter,
struct blk_integrity *bi)
{
return ext_pi_crc64_verify(iter, T10_PI_TYPE3_PROTECTION);
switch (bi->csum_type) {
case BLK_INTEGRITY_CSUM_CRC64:
ext_pi_crc64_generate(iter, bi);
break;
case BLK_INTEGRITY_CSUM_CRC:
case BLK_INTEGRITY_CSUM_IP:
t10_pi_generate(iter, bi);
break;
default:
break;
}
}
static blk_status_t ext_pi_type3_generate_crc64(struct blk_integrity_iter *iter)
blk_status_t blk_integrity_verify(struct blk_integrity_iter *iter,
struct blk_integrity *bi)
{
return ext_pi_crc64_generate(iter, T10_PI_TYPE3_PROTECTION);
switch (bi->csum_type) {
case BLK_INTEGRITY_CSUM_CRC64:
return ext_pi_crc64_verify(iter, bi);
case BLK_INTEGRITY_CSUM_CRC:
case BLK_INTEGRITY_CSUM_IP:
return t10_pi_verify(iter, bi);
default:
return BLK_STS_OK;
}
}
const struct blk_integrity_profile ext_pi_type1_crc64 = {
.name = "EXT-DIF-TYPE1-CRC64",
.generate_fn = ext_pi_type1_generate_crc64,
.verify_fn = ext_pi_type1_verify_crc64,
.prepare_fn = ext_pi_type1_prepare,
.complete_fn = ext_pi_type1_complete,
};
EXPORT_SYMBOL_GPL(ext_pi_type1_crc64);
void blk_integrity_prepare(struct request *rq)
{
struct blk_integrity *bi = &rq->q->limits.integrity;
const struct blk_integrity_profile ext_pi_type3_crc64 = {
.name = "EXT-DIF-TYPE3-CRC64",
.generate_fn = ext_pi_type3_generate_crc64,
.verify_fn = ext_pi_type3_verify_crc64,
.prepare_fn = t10_pi_type3_prepare,
.complete_fn = t10_pi_type3_complete,
};
EXPORT_SYMBOL_GPL(ext_pi_type3_crc64);
if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
return;
if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
ext_pi_type1_prepare(rq);
else
t10_pi_type1_prepare(rq);
}
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
struct blk_integrity *bi = &rq->q->limits.integrity;
if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
return;
if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
ext_pi_type1_complete(rq, nr_bytes);
else
t10_pi_type1_complete(rq, nr_bytes);
}
MODULE_DESCRIPTION("T10 Protection Information module");
MODULE_LICENSE("GPL");


@ -1808,7 +1808,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
struct queue_limits lim = {
.max_hw_sectors = 65536,
.max_user_sectors = 256,
.io_opt = 256 << SECTOR_SHIFT,
.max_segments = USHRT_MAX,
.max_segment_size = UINT_MAX,
};


@ -4954,9 +4954,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
struct queue_limits lim = {
.max_hw_sectors = objset_bytes >> SECTOR_SHIFT,
.max_user_sectors = objset_bytes >> SECTOR_SHIFT,
.io_opt = objset_bytes,
.io_min = rbd_dev->opts->alloc_size,
.io_opt = rbd_dev->opts->alloc_size,
.max_segments = USHRT_MAX,
.max_segment_size = UINT_MAX,
};


@ -1605,8 +1605,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
blkif_req(req)->error = BLK_STS_NOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
blk_queue_max_discard_sectors(rq, 0);
blk_queue_max_secure_erase_sectors(rq, 0);
blk_queue_disable_discard(rq);
blk_queue_disable_secure_erase(rq);
}
break;
case BLKIF_OP_FLUSH_DISKCACHE:


@ -206,7 +206,6 @@ struct dm_table {
bool integrity_supported:1;
bool singleton:1;
unsigned integrity_added:1;
/*
* Indicates the rw permissions for the new logical device. This


@ -1176,8 +1176,8 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
struct mapped_device *md = dm_table_get_md(ti->table);
/* From now we require underlying device with our integrity profile */
if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
/* We require an underlying device with non-PI metadata */
if (!bi || bi->csum_type != BLK_INTEGRITY_CSUM_NONE) {
ti->error = "Integrity profile not supported.";
return -EINVAL;
}


@ -350,25 +350,6 @@ static struct kmem_cache *journal_io_cache;
#define DEBUG_bytes(bytes, len, msg, ...) do { } while (0)
#endif
static void dm_integrity_prepare(struct request *rq)
{
}
static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}
/*
* DM Integrity profile, protection is performed a layer above (dm-crypt)
*/
static const struct blk_integrity_profile dm_integrity_profile = {
.name = "DM-DIF-EXT-TAG",
.generate_fn = NULL,
.verify_fn = NULL,
.prepare_fn = dm_integrity_prepare,
.complete_fn = dm_integrity_complete,
};
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);
@ -3494,6 +3475,17 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
limits->dma_alignment = limits->logical_block_size - 1;
limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT;
}
if (!ic->internal_hash) {
struct blk_integrity *bi = &limits->integrity;
memset(bi, 0, sizeof(*bi));
bi->tuple_size = ic->tag_size;
bi->tag_size = bi->tuple_size;
bi->interval_exp =
ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
}
limits->max_integrity_segments = USHRT_MAX;
}
@ -3650,20 +3642,6 @@ try_smaller_buffer:
return 0;
}
static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
{
struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
struct blk_integrity bi;
memset(&bi, 0, sizeof(bi));
bi.profile = &dm_integrity_profile;
bi.tuple_size = ic->tag_size;
bi.tag_size = bi.tuple_size;
bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
blk_integrity_register(disk, &bi);
}
static void dm_integrity_free_page_list(struct page_list *pl)
{
unsigned int i;
@ -4649,9 +4627,6 @@ try_smaller_buffer:
}
}
if (!ic->internal_hash)
dm_integrity_set(ti, ic);
ti->num_flush_bios = 1;
ti->flush_supported = true;
if (ic->discard)


@ -425,6 +425,13 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
q->limits.logical_block_size,
q->limits.alignment_offset,
(unsigned long long) start << SECTOR_SHIFT);
/*
* Only stack the integrity profile if the target doesn't have native
* integrity support.
*/
if (!dm_target_has_integrity(ti->type))
queue_limits_stack_integrity_bdev(limits, bdev);
return 0;
}
@ -702,9 +709,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->immutable_target_type = ti->type;
}
if (dm_target_has_integrity(ti->type))
t->integrity_added = 1;
ti->table = t;
ti->begin = start;
ti->len = len;
@ -1119,99 +1123,6 @@ static int dm_table_build_index(struct dm_table *t)
return r;
}
static bool integrity_profile_exists(struct gendisk *disk)
{
return !!blk_get_integrity(disk);
}
/*
* Get a disk whose integrity profile reflects the table's profile.
* Returns NULL if integrity support was inconsistent or unavailable.
*/
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
struct list_head *devices = dm_table_get_devices(t);
struct dm_dev_internal *dd = NULL;
struct gendisk *prev_disk = NULL, *template_disk = NULL;
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
if (!dm_target_passes_integrity(ti->type))
goto no_integrity;
}
list_for_each_entry(dd, devices, list) {
template_disk = dd->dm_dev->bdev->bd_disk;
if (!integrity_profile_exists(template_disk))
goto no_integrity;
else if (prev_disk &&
blk_integrity_compare(prev_disk, template_disk) < 0)
goto no_integrity;
prev_disk = template_disk;
}
return template_disk;
no_integrity:
if (prev_disk)
DMWARN("%s: integrity not set: %s and %s profile mismatch",
dm_device_name(t->md),
prev_disk->disk_name,
template_disk->disk_name);
return NULL;
}
/*
* Register the mapped device for blk_integrity support if the
* underlying devices have an integrity profile. But all devices may
* not have matching profiles (checking all devices isn't reliable
* during table load because this table may use other DM device(s) which
* must be resumed before they will have an initialized integrity
* profile). Consequently, stacked DM devices force a two-stage integrity
* profile validation: First pass during table load, final pass during
* resume.
*/
static int dm_table_register_integrity(struct dm_table *t)
{
struct mapped_device *md = t->md;
struct gendisk *template_disk = NULL;
/* If target handles integrity itself do not register it here. */
if (t->integrity_added)
return 0;
template_disk = dm_table_get_integrity_disk(t);
if (!template_disk)
return 0;
if (!integrity_profile_exists(dm_disk(md))) {
t->integrity_supported = true;
/*
* Register integrity profile during table load; we can do
* this because the final profile must match during resume.
*/
blk_integrity_register(dm_disk(md),
blk_get_integrity(template_disk));
return 0;
}
/*
* If the DM device already has an initialized integrity
* profile, the new profile should not conflict.
*/
if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
DMERR("%s: conflict with existing integrity profile: %s profile mismatch",
dm_device_name(t->md),
template_disk->disk_name);
return 1;
}
/* Preserve existing integrity profile */
t->integrity_supported = true;
return 0;
}
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct dm_crypto_profile {
@ -1423,12 +1334,6 @@ int dm_table_complete(struct dm_table *t)
return r;
}
r = dm_table_register_integrity(t);
if (r) {
DMERR("could not register integrity profile.");
return r;
}
r = dm_table_construct_crypto_profile(t);
if (r) {
DMERR("could not construct crypto profile.");
@ -1688,6 +1593,14 @@ int dm_calculate_queue_limits(struct dm_table *t,
blk_set_stacking_limits(limits);
t->integrity_supported = true;
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
if (!dm_target_passes_integrity(ti->type))
t->integrity_supported = false;
}
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
@ -1738,6 +1651,18 @@ combine_limits:
dm_device_name(t->md),
(unsigned long long) ti->begin,
(unsigned long long) ti->len);
if (t->integrity_supported ||
dm_target_has_integrity(ti->type)) {
if (!queue_limits_stack_integrity(limits, &ti_limits)) {
DMWARN("%s: adding target device (start sect %llu len %llu) "
"disabled integrity support due to incompatibility",
dm_device_name(t->md),
(unsigned long long) ti->begin,
(unsigned long long) ti->len);
t->integrity_supported = false;
}
}
}
/*
@ -1761,36 +1686,6 @@ combine_limits:
return validate_hardware_logical_block_alignment(t, limits);
}
/*
* Verify that all devices have an integrity profile that matches the
* DM device's registered integrity profile. If the profiles don't
* match then unregister the DM device's integrity profile.
*/
static void dm_table_verify_integrity(struct dm_table *t)
{
struct gendisk *template_disk = NULL;
if (t->integrity_added)
return;
if (t->integrity_supported) {
/*
* Verify that the original integrity profile
* matches all the devices in this table.
*/
template_disk = dm_table_get_integrity_disk(t);
if (template_disk &&
blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
return;
}
if (integrity_profile_exists(dm_disk(t->md))) {
DMWARN("%s: unable to establish an integrity profile",
dm_device_name(t->md));
blk_integrity_unregister(dm_disk(t->md));
}
}
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@ -2004,8 +1899,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
dm_table_verify_integrity(t);
/*
* Some devices don't use blk_integrity but still want stable pages
* because they do their own checksumming.


@ -2420,36 +2420,10 @@ static LIST_HEAD(pending_raid_disks);
*/
int md_integrity_register(struct mddev *mddev)
{
struct md_rdev *rdev, *reference = NULL;
if (list_empty(&mddev->disks))
return 0; /* nothing to do */
if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk))
return 0; /* shouldn't register, or already is */
rdev_for_each(rdev, mddev) {
/* skip spares and non-functional disks */
if (test_bit(Faulty, &rdev->flags))
continue;
if (rdev->raid_disk < 0)
continue;
if (!reference) {
/* Use the first rdev as the reference */
reference = rdev;
continue;
}
/* does this rdev's profile match the reference profile? */
if (blk_integrity_compare(reference->bdev->bd_disk,
rdev->bdev->bd_disk) < 0)
return -EINVAL;
}
if (!reference || !bdev_get_integrity(reference->bdev))
return 0;
/*
* All component devices are integrity capable and have matching
* profiles, register the common profile for the md device.
*/
blk_integrity_register(mddev->gendisk,
bdev_get_integrity(reference->bdev));
if (mddev_is_dm(mddev) || !blk_get_integrity(mddev->gendisk))
return 0; /* shouldn't register */
pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
@ -2469,32 +2443,6 @@ int md_integrity_register(struct mddev *mddev)
}
EXPORT_SYMBOL(md_integrity_register);
/*
* Attempt to add an rdev, but only if it is consistent with the current
* integrity profile
*/
int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_mddev;
if (mddev_is_dm(mddev))
return 0;
bi_mddev = blk_get_integrity(mddev->gendisk);
if (!bi_mddev) /* nothing to do */
return 0;
if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
pr_err("%s: incompatible integrity profile for %pg\n",
mdname(mddev), rdev->bdev);
return -ENXIO;
}
return 0;
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static bool rdev_read_only(struct md_rdev *rdev)
{
return bdev_read_only(rdev->bdev) ||
@ -5840,14 +5788,20 @@ static const struct kobj_type md_ktype = {
int mdp_major = 0;
/* stack the limit for all rdevs into lim */
void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim)
int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
unsigned int flags)
{
struct md_rdev *rdev;
rdev_for_each(rdev, mddev) {
queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset,
mddev->gendisk->disk_name);
if ((flags & MDDEV_STACK_INTEGRITY) &&
!queue_limits_stack_integrity_bdev(lim, rdev->bdev))
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits);
@ -5862,6 +5816,14 @@ int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev)
lim = queue_limits_start_update(mddev->gendisk->queue);
queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset,
mddev->gendisk->disk_name);
if (!queue_limits_stack_integrity_bdev(&lim, rdev->bdev)) {
pr_err("%s: incompatible integrity profile for %pg\n",
mdname(mddev), rdev->bdev);
queue_limits_cancel_update(mddev->gendisk->queue);
return -ENXIO;
}
return queue_limits_commit_update(mddev->gendisk->queue, &lim);
}
EXPORT_SYMBOL_GPL(mddev_stack_new_rdev);
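The hot-add path above illustrates the contract of the atomic limits API: queue_limits_start_update() takes the queue's limits lock, and every call must be paired with exactly one commit or cancel. A schematic sketch of that contract (example_update and the 'failed' condition are illustrative, not code from this series):

	static int example_update(struct request_queue *q, bool failed)
	{
		struct queue_limits lim;

		lim = queue_limits_start_update(q);	/* locks q->limits_lock */
		/* ...modify only the local copy... */
		if (failed) {
			queue_limits_cancel_update(q);	/* unlock, discard changes */
			return -ENXIO;
		}
		/* validate, publish, unlock */
		return queue_limits_commit_update(q, &lim);
	}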

View File

@ -889,7 +889,6 @@ extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern int mddev_init(struct mddev *mddev);
@ -988,7 +987,9 @@ void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
int do_md_run(struct mddev *mddev);
void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim);
#define MDDEV_STACK_INTEGRITY (1u << 0)
int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
unsigned int flags);
int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev);
void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes);

View File

@ -377,13 +377,18 @@ static void raid0_free(struct mddev *mddev, void *priv)
static int raid0_set_limits(struct mddev *mddev)
{
struct queue_limits lim;
int err;
blk_set_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
mddev_stack_rdev_limits(mddev, &lim);
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
return err;
}
return queue_limits_set(mddev->gendisk->queue, &lim);
}

View File

@ -1906,9 +1906,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
if (md_integrity_add_rdev(rdev, mddev))
return -ENXIO;
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
@ -3195,10 +3192,15 @@ static struct r1conf *setup_conf(struct mddev *mddev)
static int raid1_set_limits(struct mddev *mddev)
{
struct queue_limits lim;
int err;
blk_set_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
mddev_stack_rdev_limits(mddev, &lim);
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
return err;
}
return queue_limits_set(mddev->gendisk->queue, &lim);
}

View File

@ -2082,9 +2082,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
return -EINVAL;
if (md_integrity_add_rdev(rdev, mddev))
return -ENXIO;
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
@ -3975,12 +3972,17 @@ static int raid10_set_queue_limits(struct mddev *mddev)
{
struct r10conf *conf = mddev->private;
struct queue_limits lim;
int err;
blk_set_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
mddev_stack_rdev_limits(mddev, &lim);
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err) {
queue_limits_cancel_update(mddev->gendisk->queue);
return err;
}
return queue_limits_set(mddev->gendisk->queue, &lim);
}

View File

@ -7712,7 +7712,7 @@ static int raid5_set_limits(struct mddev *mddev)
lim.raid_partial_stripes_expensive = 1;
lim.discard_granularity = stripe;
lim.max_write_zeroes_sectors = 0;
mddev_stack_rdev_limits(mddev, &lim);
mddev_stack_rdev_limits(mddev, &lim, 0);
rdev_for_each(rdev, mddev)
queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset,
mddev->gendisk->disk_name);

View File

@ -1504,6 +1504,11 @@ static int btt_blk_init(struct btt *btt)
};
int rc;
if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
lim.integrity.tuple_size = btt_meta_size(btt);
lim.integrity.tag_size = btt_meta_size(btt);
}
btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
if (IS_ERR(btt->btt_disk))
return PTR_ERR(btt->btt_disk);
@ -1516,14 +1521,6 @@ static int btt_blk_init(struct btt *btt)
blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
struct blk_integrity bi = {
.tuple_size = btt_meta_size(btt),
.tag_size = btt_meta_size(btt),
};
blk_integrity_register(btt->btt_disk, &bi);
}
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
if (rc)

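The btt conversion is the template for simple drivers: describe the metadata in the limits handed to blk_alloc_disk() instead of registering a profile after allocation. A condensed sketch (the function name, meta_size, and the block size are illustrative):

	static struct gendisk *example_alloc(unsigned int meta_size)
	{
		struct queue_limits lim = {
			.logical_block_size	= 512,
			/* metadata without a checksum type: the "nop" case */
			.integrity.tuple_size	= meta_size,
			.integrity.tag_size	= meta_size,
		};

		return blk_alloc_disk(&lim, NUMA_NO_NODE);
	}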
View File

@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config NVME_CORE
tristate
select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
config BLK_DEV_NVME
tristate "NVM Express block device"

View File

@ -1723,11 +1723,12 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head,
struct queue_limits *lim)
{
struct blk_integrity integrity = { };
struct blk_integrity *bi = &lim->integrity;
blk_integrity_unregister(disk);
memset(bi, 0, sizeof(*bi));
if (!head->ms)
return true;
@ -1744,17 +1745,16 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
case NVME_NS_DPS_PI_TYPE3:
switch (head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
integrity.profile = &t10_pi_type3_crc;
integrity.tag_size = sizeof(u16) + sizeof(u32);
integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
bi->tag_size = sizeof(u16) + sizeof(u32);
bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
case NVME_NVM_NS_64B_GUARD:
integrity.profile = &ext_pi_type3_crc64;
integrity.tag_size = sizeof(u16) + 6;
integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
bi->tag_size = sizeof(u16) + 6;
bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
default:
integrity.profile = NULL;
break;
}
break;
@ -1762,28 +1762,27 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
case NVME_NS_DPS_PI_TYPE2:
switch (head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
integrity.profile = &t10_pi_type1_crc;
integrity.tag_size = sizeof(u16);
integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
bi->tag_size = sizeof(u16);
bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
BLK_INTEGRITY_REF_TAG;
break;
case NVME_NVM_NS_64B_GUARD:
integrity.profile = &ext_pi_type1_crc64;
integrity.tag_size = sizeof(u16);
integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
bi->tag_size = sizeof(u16);
bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
BLK_INTEGRITY_REF_TAG;
break;
default:
integrity.profile = NULL;
break;
}
break;
default:
integrity.profile = NULL;
break;
}
integrity.tuple_size = head->ms;
integrity.pi_offset = head->pi_offset;
blk_integrity_register(disk, &integrity);
bi->tuple_size = head->ms;
bi->pi_offset = head->pi_offset;
return true;
}
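The profile pointers map onto the new csum_type/flags encoding roughly as follows (my reading of the conversion above):

	/*
	 *   t10_pi_type1_crc    ->  BLK_INTEGRITY_CSUM_CRC   | BLK_INTEGRITY_REF_TAG
	 *   t10_pi_type3_crc    ->  BLK_INTEGRITY_CSUM_CRC
	 *   ext_pi_type1_crc64  ->  BLK_INTEGRITY_CSUM_CRC64 | BLK_INTEGRITY_REF_TAG
	 *   ext_pi_type3_crc64  ->  BLK_INTEGRITY_CSUM_CRC64
	 *   no profile          ->  BLK_INTEGRITY_CSUM_NONE (integrity stays zeroed)
	 */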
@ -2106,11 +2105,6 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
ns->head->ids.csi == NVME_CSI_ZNS)
nvme_update_zone_info(ns, &lim, &zi);
ret = queue_limits_commit_update(ns->disk->queue, &lim);
if (ret) {
blk_mq_unfreeze_queue(ns->disk->queue);
goto out;
}
/*
* Register a metadata profile for PI, or the plain non-integrity NVMe
@ -2118,9 +2112,15 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
* I/O to namespaces with metadata except when the namespace supports
* PI, as it can strip/insert in that case.
*/
if (!nvme_init_integrity(ns->disk, ns->head))
if (!nvme_init_integrity(ns->disk, ns->head, &lim))
capacity = 0;
ret = queue_limits_commit_update(ns->disk->queue, &lim);
if (ret) {
blk_mq_unfreeze_queue(ns->disk->queue);
goto out;
}
set_capacity_and_notify(ns->disk, capacity);
/*
@ -2192,14 +2192,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
struct queue_limits lim;
blk_mq_freeze_queue(ns->head->disk->queue);
if (unsupported)
ns->head->disk->flags |= GENHD_FL_HIDDEN;
else
nvme_init_integrity(ns->head->disk, ns->head);
set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
/*
* queue_limits mixes values that are the hardware limitations
* for bio splitting with what is the device configuration.
@ -2222,7 +2214,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
lim.io_opt = ns_lim->io_opt;
queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
ns->head->disk->disk_name);
if (unsupported)
ns->head->disk->flags |= GENHD_FL_HIDDEN;
else
nvme_init_integrity(ns->head->disk, ns->head, &lim);
ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
blk_mq_unfreeze_queue(ns->head->disk->queue);
}

View File

@ -875,7 +875,8 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
nvme_mpath_set_live(ns);
}
if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
if (test_bit(QUEUE_FLAG_STABLE_WRITES, &ns->queue->queue_flags) &&
ns->head->disk)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED

View File

@ -6,7 +6,6 @@ config NVME_TARGET
depends on CONFIGFS_FS
select NVME_KEYRING if NVME_TARGET_TCP_TLS
select KEYS if NVME_TARGET_TCP_TLS
select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
select SGL_ALLOC
help
This enables target side support for the NVMe protocol, that is

View File

@ -61,15 +61,17 @@ static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
if (bi) {
if (!bi)
return;
if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) {
ns->metadata_size = bi->tuple_size;
if (bi->profile == &t10_pi_type1_crc)
if (bi->flags & BLK_INTEGRITY_REF_TAG)
ns->pi_type = NVME_NS_DPS_PI_TYPE1;
else if (bi->profile == &t10_pi_type3_crc)
ns->pi_type = NVME_NS_DPS_PI_TYPE3;
else
/* Unsupported metadata type */
ns->metadata_size = 0;
ns->pi_type = NVME_NS_DPS_PI_TYPE3;
} else {
ns->metadata_size = 0;
}
}
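The pointer comparisons collapse into two bits of state; the resulting mapping, as the new code reads:

	/*
	 *   BLK_INTEGRITY_CSUM_CRC + REF_TAG  ->  NVME_NS_DPS_PI_TYPE1
	 *   BLK_INTEGRITY_CSUM_CRC            ->  NVME_NS_DPS_PI_TYPE3
	 *   any other csum_type               ->  no PI, metadata_size stays 0
	 */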
@ -102,7 +104,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
ns->pi_type = 0;
ns->metadata_size = 0;
if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
nvmet_bdev_ns_enable_integrity(ns);
if (bdev_is_zoned(ns->bdev)) {

View File

@ -82,7 +82,6 @@ comment "SCSI support type (disk, tape, CD-ROM)"
config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
help
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,

View File

@ -101,12 +101,13 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
#define SD_MINORS 16
static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
unsigned int mode);
static void sd_config_write_same(struct scsi_disk *sdkp,
struct queue_limits *lim);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static void sd_shutdown(struct device *);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);
static DEFINE_IDA(sd_index_ida);
@ -456,25 +457,30 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
int mode;
struct queue_limits lim;
int mode, err;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (sd_is_zoned(sdkp)) {
sd_config_discard(sdkp, SD_LBP_DISABLE);
return count;
}
if (sdp->type != TYPE_DISK)
return -EINVAL;
/* ignore the provisioning mode for ZBC devices */
if (sd_is_zoned(sdkp))
return count;
mode = sysfs_match_string(lbp_mode, buf);
if (mode < 0)
return -EINVAL;
sd_config_discard(sdkp, mode);
lim = queue_limits_start_update(sdkp->disk->queue);
sd_config_discard(sdkp, &lim, mode);
blk_mq_freeze_queue(sdkp->disk->queue);
err = queue_limits_commit_update(sdkp->disk->queue, &lim);
blk_mq_unfreeze_queue(sdkp->disk->queue);
if (err)
return err;
return count;
}
static DEVICE_ATTR_RW(provisioning_mode);
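The converted sysfs store methods all follow one shape: compute the new limits on a private copy, then freeze the queue only around the publish step so in-flight requests never observe a half-applied change. An annotated sketch of the pattern (example_store_update is a hypothetical name):

	static int example_store_update(struct request_queue *q)
	{
		struct queue_limits lim;
		int err;

		lim = queue_limits_start_update(q);
		/* recompute settings into the local copy only */
		blk_mq_freeze_queue(q);		/* drain in-flight requests */
		err = queue_limits_commit_update(q, &lim);
		blk_mq_unfreeze_queue(q);
		return err;
	}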
@ -557,6 +563,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
struct queue_limits lim;
unsigned long max;
int err;
@ -578,8 +585,13 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
sdkp->max_ws_blocks = max;
}
sd_config_write_same(sdkp);
lim = queue_limits_start_update(sdkp->disk->queue);
sd_config_write_same(sdkp, &lim);
blk_mq_freeze_queue(sdkp->disk->queue);
err = queue_limits_commit_update(sdkp->disk->queue, &lim);
blk_mq_unfreeze_queue(sdkp->disk->queue);
if (err)
return err;
return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);
@ -822,25 +834,28 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
return protect;
}
static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
static void sd_disable_discard(struct scsi_disk *sdkp)
{
sdkp->provisioning_mode = SD_LBP_DISABLE;
blk_queue_disable_discard(sdkp->disk->queue);
}
static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
unsigned int mode)
{
struct request_queue *q = sdkp->disk->queue;
unsigned int logical_block_size = sdkp->device->sector_size;
unsigned int max_blocks = 0;
q->limits.discard_alignment =
sdkp->unmap_alignment * logical_block_size;
q->limits.discard_granularity =
max(sdkp->physical_block_size,
sdkp->unmap_granularity * logical_block_size);
lim->discard_alignment = sdkp->unmap_alignment * logical_block_size;
lim->discard_granularity = max(sdkp->physical_block_size,
sdkp->unmap_granularity * logical_block_size);
sdkp->provisioning_mode = mode;
switch (mode) {
case SD_LBP_FULL:
case SD_LBP_DISABLE:
blk_queue_max_discard_sectors(q, 0);
return;
break;
case SD_LBP_UNMAP:
max_blocks = min_not_zero(sdkp->max_unmap_blocks,
@ -871,7 +886,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
break;
}
blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
lim->max_hw_discard_sectors = max_blocks *
(logical_block_size >> SECTOR_SHIFT);
}
static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
@ -999,9 +1015,16 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
return sd_setup_write_same10_cmnd(cmd, false);
}
static void sd_config_write_same(struct scsi_disk *sdkp)
static void sd_disable_write_same(struct scsi_disk *sdkp)
{
sdkp->device->no_write_same = 1;
sdkp->max_ws_blocks = 0;
blk_queue_disable_write_zeroes(sdkp->disk->queue);
}
static void sd_config_write_same(struct scsi_disk *sdkp,
struct queue_limits *lim)
{
struct request_queue *q = sdkp->disk->queue;
unsigned int logical_block_size = sdkp->device->sector_size;
if (sdkp->device->no_write_same) {
@ -1055,8 +1078,8 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
}
out:
blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
(logical_block_size >> 9));
lim->max_write_zeroes_sectors =
sdkp->max_ws_blocks * (logical_block_size >> SECTOR_SHIFT);
}
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
@ -2246,15 +2269,14 @@ static int sd_done(struct scsi_cmnd *SCpnt)
case 0x24: /* INVALID FIELD IN CDB */
switch (SCpnt->cmnd[0]) {
case UNMAP:
sd_config_discard(sdkp, SD_LBP_DISABLE);
sd_disable_discard(sdkp);
break;
case WRITE_SAME_16:
case WRITE_SAME:
if (SCpnt->cmnd[1] & 8) { /* UNMAP */
sd_config_discard(sdkp, SD_LBP_DISABLE);
sd_disable_discard(sdkp);
} else {
sdkp->device->no_write_same = 1;
sd_config_write_same(sdkp);
sd_disable_write_same(sdkp);
req->rq_flags |= RQF_QUIET;
}
break;
@ -2460,11 +2482,13 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
return 0;
}
static void sd_config_protection(struct scsi_disk *sdkp)
static void sd_config_protection(struct scsi_disk *sdkp,
struct queue_limits *lim)
{
struct scsi_device *sdp = sdkp->device;
sd_dif_config_host(sdkp);
if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
sd_dif_config_host(sdkp, lim);
if (!sdkp->protection_type)
return;
@ -2513,7 +2537,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#define READ_CAPACITY_RETRIES_ON_RESET 10
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
struct queue_limits *lim, unsigned char *buffer)
{
unsigned char cmd[16];
struct scsi_sense_hdr sshdr;
@ -2587,7 +2611,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
/* Lowest aligned logical block */
alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
blk_queue_alignment_offset(sdp->request_queue, alignment);
lim->alignment_offset = alignment;
if (alignment && sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"physical block alignment offset: %u\n", alignment);
@ -2598,7 +2622,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
if (buffer[14] & 0x40) /* LBPRZ */
sdkp->lbprz = 1;
sd_config_discard(sdkp, SD_LBP_WS16);
sd_config_discard(sdkp, lim, SD_LBP_WS16);
}
sdkp->capacity = lba + 1;
@ -2701,13 +2725,14 @@ static int sd_try_rc16_first(struct scsi_device *sdp)
* read disk capacity
*/
static void
sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
sd_read_capacity(struct scsi_disk *sdkp, struct queue_limits *lim,
unsigned char *buffer)
{
int sector_size;
struct scsi_device *sdp = sdkp->device;
if (sd_try_rc16_first(sdp)) {
sector_size = read_capacity_16(sdkp, sdp, buffer);
sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
if (sector_size == -EOVERFLOW)
goto got_data;
if (sector_size == -ENODEV)
@ -2727,7 +2752,7 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
int old_sector_size = sector_size;
sd_printk(KERN_NOTICE, sdkp, "Very big device. "
"Trying to use READ CAPACITY(16).\n");
sector_size = read_capacity_16(sdkp, sdp, buffer);
sector_size = read_capacity_16(sdkp, sdp, lim, buffer);
if (sector_size < 0) {
sd_printk(KERN_NOTICE, sdkp,
"Using 0xffffffff as device size\n");
@ -2786,9 +2811,8 @@ got_data:
*/
sector_size = 512;
}
blk_queue_logical_block_size(sdp->request_queue, sector_size);
blk_queue_physical_block_size(sdp->request_queue,
sdkp->physical_block_size);
lim->logical_block_size = sector_size;
lim->physical_block_size = sdkp->physical_block_size;
sdkp->device->sector_size = sector_size;
if (sdkp->capacity > 0xffffffff)
@ -3191,11 +3215,30 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
return;
}
/**
* sd_read_block_limits - Query disk device for preferred I/O sizes.
* @sdkp: disk to query
static unsigned int sd_discard_mode(struct scsi_disk *sdkp)
{
if (!sdkp->lbpvpd) {
/* LBP VPD page not provided */
if (sdkp->max_unmap_blocks)
return SD_LBP_UNMAP;
return SD_LBP_WS16;
}
/* LBP VPD page tells us what to use */
if (sdkp->lbpu && sdkp->max_unmap_blocks)
return SD_LBP_UNMAP;
if (sdkp->lbpws)
return SD_LBP_WS16;
if (sdkp->lbpws10)
return SD_LBP_WS10;
return SD_LBP_DISABLE;
}
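As a decision table, the new helper encodes:

	/*
	 *   LBP VPD absent:   max_unmap_blocks         -> SD_LBP_UNMAP
	 *                     otherwise                -> SD_LBP_WS16
	 *   LBP VPD present:  LBPU && max_unmap_blocks -> SD_LBP_UNMAP
	 *                     else LBPWS               -> SD_LBP_WS16
	 *                     else LBPWS10             -> SD_LBP_WS10
	 *                     else                     -> SD_LBP_DISABLE
	 */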
/*
* Query disk device for preferred I/O sizes.
*/
static void sd_read_block_limits(struct scsi_disk *sdkp)
static void sd_read_block_limits(struct scsi_disk *sdkp,
struct queue_limits *lim)
{
struct scsi_vpd *vpd;
@ -3229,23 +3272,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
sdkp->unmap_alignment =
get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
if (sdkp->max_unmap_blocks)
sd_config_discard(sdkp, SD_LBP_UNMAP);
else
sd_config_discard(sdkp, SD_LBP_WS16);
} else { /* LBP VPD page tells us what to use */
if (sdkp->lbpu && sdkp->max_unmap_blocks)
sd_config_discard(sdkp, SD_LBP_UNMAP);
else if (sdkp->lbpws)
sd_config_discard(sdkp, SD_LBP_WS16);
else if (sdkp->lbpws10)
sd_config_discard(sdkp, SD_LBP_WS10);
else
sd_config_discard(sdkp, SD_LBP_DISABLE);
}
sd_config_discard(sdkp, lim, sd_discard_mode(sdkp));
}
out:
@ -3264,11 +3291,9 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
rcu_read_unlock();
}
/**
* sd_read_block_characteristics - Query block dev. characteristics
* @sdkp: disk to query
*/
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
/* Query block device characteristics */
static void sd_read_block_characteristics(struct scsi_disk *sdkp,
struct queue_limits *lim)
{
struct request_queue *q = sdkp->disk->queue;
struct scsi_vpd *vpd;
@ -3294,29 +3319,26 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED /* sd_probe rejects ZBD devices early otherwise */
if (sdkp->device->type == TYPE_ZBC) {
/*
* Host-managed.
*/
disk_set_zoned(sdkp->disk);
lim->zoned = true;
/*
* Per ZBC and ZAC specifications, writes in sequential write
* required zones of host-managed devices must be aligned to
* the device physical block size.
*/
blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
lim->zone_write_granularity = sdkp->physical_block_size;
} else {
/*
* Host-aware devices are treated as conventional.
*/
WARN_ON_ONCE(blk_queue_is_zoned(q));
lim->zoned = false;
}
#endif /* CONFIG_BLK_DEV_ZONED */
if (!sdkp->first_scan)
return;
if (blk_queue_is_zoned(q))
if (lim->zoned)
sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n");
else if (sdkp->zoned == 1)
sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n");
@ -3592,8 +3614,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
struct scsi_device *sdp = sdkp->device;
struct request_queue *q = sdkp->disk->queue;
sector_t old_capacity = sdkp->capacity;
struct queue_limits lim;
unsigned char *buffer;
unsigned int dev_max, rw_max;
unsigned int dev_max;
int err;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
"sd_revalidate_disk\n"));
@ -3614,12 +3638,14 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_spinup_disk(sdkp);
lim = queue_limits_start_update(sdkp->disk->queue);
/*
* Without media there is no reason to ask; moreover, some devices
* react badly if we do.
*/
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
sd_read_capacity(sdkp, &lim, buffer);
/*
* Some USB/UAS devices return generic values for mode pages
* until the media has been accessed. Trigger a READ operation
@ -3638,10 +3664,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
sd_read_block_limits(sdkp, &lim);
sd_read_block_limits_ext(sdkp);
sd_read_block_characteristics(sdkp);
sd_zbc_read_zones(sdkp, buffer);
sd_read_block_characteristics(sdkp, &lim);
sd_zbc_read_zones(sdkp, &lim, buffer);
sd_read_cpr(sdkp);
}
@ -3653,7 +3679,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
sd_config_protection(sdkp);
sd_config_protection(sdkp, &lim);
}
/*
@ -3667,50 +3693,36 @@ static int sd_revalidate_disk(struct gendisk *disk)
/* Some devices report a maximum block count for READ/WRITE requests. */
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
lim.max_dev_sectors = logical_to_sectors(sdp, dev_max);
if (sd_validate_min_xfer_size(sdkp))
blk_queue_io_min(sdkp->disk->queue,
logical_to_bytes(sdp, sdkp->min_xfer_blocks));
lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
else
blk_queue_io_min(sdkp->disk->queue, 0);
if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
} else {
q->limits.io_opt = 0;
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS_CAP);
}
lim.io_min = 0;
/*
* Limit default to SCSI host optimal sector limit if set. There may be
* an impact on performance for when the size of a request exceeds this
* host limit.
*/
rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);
/* Do not exceed controller limit */
rw_max = min(rw_max, queue_max_hw_sectors(q));
/*
* Only update max_sectors if previously unset or if the current value
* exceeds the capabilities of the hardware.
*/
if (sdkp->first_scan ||
q->limits.max_sectors > q->limits.max_dev_sectors ||
q->limits.max_sectors > q->limits.max_hw_sectors) {
q->limits.max_sectors = rw_max;
q->limits.max_user_sectors = rw_max;
lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
lim.io_opt = min_not_zero(lim.io_opt,
logical_to_bytes(sdp, sdkp->opt_xfer_blocks));
}
sdkp->first_scan = 0;
set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
sd_config_write_same(sdkp, &lim);
kfree(buffer);
blk_mq_freeze_queue(sdkp->disk->queue);
err = queue_limits_commit_update(sdkp->disk->queue, &lim);
blk_mq_unfreeze_queue(sdkp->disk->queue);
if (err)
return err;
/*
* For a zoned drive, revalidating the zones can be done only once
* the gendisk capacity is set. So if this fails, set back the gendisk

View File

@ -220,17 +220,7 @@ static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sec
return sector >> (ilog2(sdev->sector_size) - 9);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
extern void sd_dif_config_host(struct scsi_disk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline void sd_dif_config_host(struct scsi_disk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim);
static inline int sd_is_zoned(struct scsi_disk *sdkp)
{
@ -239,7 +229,8 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED
int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]);
int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
u8 buf[SD_BUF_SIZE]);
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op, bool all);
@ -250,7 +241,8 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
#else /* CONFIG_BLK_DEV_ZONED */
static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
struct queue_limits *lim, u8 buf[SD_BUF_SIZE])
{
return 0;
}

View File

@ -24,14 +24,15 @@
/*
* Configure exchange of protection information between OS and HBA.
*/
void sd_dif_config_host(struct scsi_disk *sdkp)
void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim)
{
struct scsi_device *sdp = sdkp->device;
struct gendisk *disk = sdkp->disk;
u8 type = sdkp->protection_type;
struct blk_integrity bi;
struct blk_integrity *bi = &lim->integrity;
int dif, dix;
memset(bi, 0, sizeof(*bi));
dif = scsi_host_dif_capable(sdp->host, type);
dix = scsi_host_dix_capable(sdp->host, type);
@ -39,45 +40,33 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
dif = 0; dix = 1;
}
if (!dix) {
blk_integrity_unregister(disk);
if (!dix)
return;
}
memset(&bi, 0, sizeof(bi));
/* Enable DMA of protection information */
if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
if (type == T10_PI_TYPE3_PROTECTION)
bi.profile = &t10_pi_type3_ip;
else
bi.profile = &t10_pi_type1_ip;
if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
bi->csum_type = BLK_INTEGRITY_CSUM_IP;
else
bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
bi.flags |= BLK_INTEGRITY_IP_CHECKSUM;
} else
if (type == T10_PI_TYPE3_PROTECTION)
bi.profile = &t10_pi_type3_crc;
else
bi.profile = &t10_pi_type1_crc;
if (type != T10_PI_TYPE3_PROTECTION)
bi->flags |= BLK_INTEGRITY_REF_TAG;
bi.tuple_size = sizeof(struct t10_pi_tuple);
bi->tuple_size = sizeof(struct t10_pi_tuple);
if (dif && type) {
bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
if (!sdkp->ATO)
goto out;
return;
if (type == T10_PI_TYPE3_PROTECTION)
bi.tag_size = sizeof(u16) + sizeof(u32);
bi->tag_size = sizeof(u16) + sizeof(u32);
else
bi.tag_size = sizeof(u16);
bi->tag_size = sizeof(u16);
}
sd_first_printk(KERN_NOTICE, sdkp,
"Enabling DIX %s, application tag size %u bytes\n",
bi.profile->name, bi.tag_size);
out:
blk_integrity_register(disk, &bi);
blk_integrity_profile_name(bi), bi->tag_size);
}
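Net effect of the rewritten DIX setup, as the code above reads:

	/*
	 *   IP-guard capable host         ->  csum_type = BLK_INTEGRITY_CSUM_IP
	 *   otherwise                     ->  csum_type = BLK_INTEGRITY_CSUM_CRC
	 *   protection type other than 3  ->  flags |= BLK_INTEGRITY_REF_TAG
	 *   dif && type                   ->  flags |= BLK_INTEGRITY_DEVICE_CAPABLE,
	 *                                     tag_size set only when sdkp->ATO
	 */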

View File

@ -565,12 +565,6 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
sdkp->zone_info.zone_blocks = zone_blocks;
sdkp->zone_info.nr_zones = nr_zones;
blk_queue_chunk_sectors(q,
logical_to_sectors(sdkp->device, zone_blocks));
/* Enable block layer zone append emulation */
blk_queue_max_zone_append_sectors(q, 0);
flags = memalloc_noio_save();
ret = blk_revalidate_disk_zones(disk);
memalloc_noio_restore(flags);
@ -588,13 +582,15 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
/**
* sd_zbc_read_zones - Read zone information and update the request queue
* @sdkp: SCSI disk pointer.
* @lim: queue limits to read into
* @buf: 512 byte buffer used for storing SCSI command output.
*
* Read zone information and update the request queue zone characteristics and
* also the zoned device information in *sdkp. Called by sd_revalidate_disk()
* before the gendisk capacity has been set.
*/
int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim,
u8 buf[SD_BUF_SIZE])
{
struct gendisk *disk = sdkp->disk;
struct request_queue *q = disk->queue;
@ -625,17 +621,20 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
if (ret != 0)
goto err;
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
sdkp->early_zone_info.nr_zones = nr_zones;
sdkp->early_zone_info.zone_blocks = zone_blocks;
/* The drive satisfies the kernel restrictions: set it up */
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
if (sdkp->zones_max_open == U32_MAX)
disk_set_max_open_zones(disk, 0);
lim->max_open_zones = 0;
else
disk_set_max_open_zones(disk, sdkp->zones_max_open);
disk_set_max_active_zones(disk, 0);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
sdkp->early_zone_info.nr_zones = nr_zones;
sdkp->early_zone_info.zone_blocks = zone_blocks;
lim->max_open_zones = sdkp->zones_max_open;
lim->max_active_zones = 0;
lim->chunk_sectors = logical_to_sectors(sdkp->device, zone_blocks);
/* Enable block layer zone append emulation */
lim->max_zone_append_sectors = 0;
return 0;

View File

@ -111,7 +111,7 @@ static struct lock_class_key sr_bio_compl_lkclass;
static int sr_open(struct cdrom_device_info *, int);
static void sr_release(struct cdrom_device_info *);
static void get_sectorsize(struct scsi_cd *);
static int get_sectorsize(struct scsi_cd *);
static int get_capabilities(struct scsi_cd *);
static unsigned int sr_check_events(struct cdrom_device_info *cdi,
@ -473,15 +473,15 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
return BLK_STS_IOERR;
}
static void sr_revalidate_disk(struct scsi_cd *cd)
static int sr_revalidate_disk(struct scsi_cd *cd)
{
struct scsi_sense_hdr sshdr;
/* if the unit is not ready, nothing more to do */
if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
return;
return 0;
sr_cd_check(&cd->cdi);
get_sectorsize(cd);
return get_sectorsize(cd);
}
static int sr_block_open(struct gendisk *disk, blk_mode_t mode)
@ -494,13 +494,16 @@ static int sr_block_open(struct gendisk *disk, blk_mode_t mode)
return -ENXIO;
scsi_autopm_get_device(sdev);
if (disk_check_media_change(disk))
sr_revalidate_disk(cd);
if (disk_check_media_change(disk)) {
ret = sr_revalidate_disk(cd);
if (ret)
goto out;
}
mutex_lock(&cd->lock);
ret = cdrom_open(&cd->cdi, mode);
mutex_unlock(&cd->lock);
out:
scsi_autopm_put_device(sdev);
if (ret)
scsi_device_put(cd->device);
@ -685,7 +688,9 @@ static int sr_probe(struct device *dev)
blk_pm_runtime_init(sdev->request_queue, dev);
dev_set_drvdata(dev, cd);
sr_revalidate_disk(cd);
error = sr_revalidate_disk(cd);
if (error)
goto unregister_cdrom;
error = device_add_disk(&sdev->sdev_gendev, disk, NULL);
if (error)
@ -714,13 +719,14 @@ fail:
}
static void get_sectorsize(struct scsi_cd *cd)
static int get_sectorsize(struct scsi_cd *cd)
{
struct request_queue *q = cd->device->request_queue;
static const u8 cmd[10] = { READ_CAPACITY };
unsigned char buffer[8] = { };
int the_result;
struct queue_limits lim;
int err;
int sector_size;
struct request_queue *queue;
struct scsi_failure failure_defs[] = {
{
.result = SCMD_FAILURE_RESULT_ANY,
@ -736,10 +742,10 @@ static void get_sectorsize(struct scsi_cd *cd)
};
/* Do the command and wait.. */
the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer,
err = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN, buffer,
sizeof(buffer), SR_TIMEOUT, MAX_RETRIES,
&exec_args);
if (the_result) {
if (err) {
cd->capacity = 0x1fffff;
sector_size = 2048; /* A guess, just in case */
} else {
@ -789,10 +795,12 @@ static void get_sectorsize(struct scsi_cd *cd)
set_capacity(cd->disk, cd->capacity);
}
queue = cd->device->request_queue;
blk_queue_logical_block_size(queue, sector_size);
return;
lim = queue_limits_start_update(q);
lim.logical_block_size = sector_size;
blk_mq_freeze_queue(q);
err = queue_limits_commit_update(q, &lim);
blk_mq_unfreeze_queue(q);
return err;
}
static int get_capabilities(struct scsi_cd *cd)

View File

@ -148,35 +148,38 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.is_nonrot = 1;
bi = bdev_get_integrity(bd);
if (bi) {
struct bio_set *bs = &ib_dev->ibd_bio_set;
if (!bi)
return 0;
if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
!strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
pr_err("IBLOCK export of blk_integrity: %s not"
" supported\n", bi->profile->name);
ret = -ENOSYS;
goto out_blkdev_put;
}
if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
switch (bi->csum_type) {
case BLK_INTEGRITY_CSUM_IP:
pr_err("IBLOCK export of blk_integrity: %s not supported\n",
blk_integrity_profile_name(bi));
ret = -ENOSYS;
goto out_blkdev_put;
case BLK_INTEGRITY_CSUM_CRC:
if (bi->flags & BLK_INTEGRITY_REF_TAG)
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
}
if (dev->dev_attrib.pi_prot_type) {
if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
pr_err("Unable to allocate bioset for PI\n");
ret = -ENOMEM;
goto out_blkdev_put;
}
pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
&bs->bio_integrity_pool);
}
dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
else
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
break;
default:
break;
}
if (dev->dev_attrib.pi_prot_type) {
struct bio_set *bs = &ib_dev->ibd_bio_set;
if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
pr_err("Unable to allocate bioset for PI\n");
ret = -ENOMEM;
goto out_blkdev_put;
}
pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
&bs->bio_integrity_pool);
}
dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
return 0;
out_blkdev_put:

View File

@ -7,10 +7,11 @@
struct request;
enum blk_integrity_flags {
BLK_INTEGRITY_VERIFY = 1 << 0,
BLK_INTEGRITY_GENERATE = 1 << 1,
BLK_INTEGRITY_NOVERIFY = 1 << 0,
BLK_INTEGRITY_NOGENERATE = 1 << 1,
BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
BLK_INTEGRITY_REF_TAG = 1 << 3,
BLK_INTEGRITY_STACKED = 1 << 4,
};
struct blk_integrity_iter {
@ -19,39 +20,34 @@ struct blk_integrity_iter {
sector_t seed;
unsigned int data_size;
unsigned short interval;
unsigned char tuple_size;
unsigned char pi_offset;
const char *disk_name;
};
typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
typedef void (integrity_prepare_fn) (struct request *);
typedef void (integrity_complete_fn) (struct request *, unsigned int);
struct blk_integrity_profile {
integrity_processing_fn *generate_fn;
integrity_processing_fn *verify_fn;
integrity_prepare_fn *prepare_fn;
integrity_complete_fn *complete_fn;
const char *name;
};
const char *blk_integrity_profile_name(struct blk_integrity *bi);
bool queue_limits_stack_integrity(struct queue_limits *t,
struct queue_limits *b);
static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
struct block_device *bdev)
{
return queue_limits_stack_integrity(t, &bdev->bd_disk->queue->limits);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_integrity_register(struct gendisk *, struct blk_integrity *);
void blk_integrity_unregister(struct gendisk *);
int blk_integrity_compare(struct gendisk *, struct gendisk *);
int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
struct scatterlist *);
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
static inline bool
blk_integrity_queue_supports_integrity(struct request_queue *q)
{
return q->limits.integrity.tuple_size;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
struct blk_integrity *bi = &disk->queue->integrity;
if (!bi->profile)
if (!blk_integrity_queue_supports_integrity(disk->queue))
return NULL;
return bi;
return &disk->queue->limits.integrity;
}
static inline struct blk_integrity *
@ -60,12 +56,6 @@ bdev_get_integrity(struct block_device *bdev)
return blk_get_integrity(bdev->bd_disk);
}
static inline bool
blk_integrity_queue_supports_integrity(struct request_queue *q)
{
return q->integrity.profile;
}
static inline unsigned short
queue_max_integrity_segments(const struct request_queue *q)
{
@ -134,17 +124,6 @@ blk_integrity_queue_supports_integrity(struct request_queue *q)
{
return false;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline unsigned short
queue_max_integrity_segments(const struct request_queue *q)
{
@ -172,4 +151,5 @@ static inline struct bio_vec *rq_integrity_vec(struct request *rq)
return NULL;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#endif /* _LINUX_BLK_INTEGRITY_H */

View File

@ -105,9 +105,16 @@ enum {
struct disk_events;
struct badblocks;
enum blk_integrity_checksum {
BLK_INTEGRITY_CSUM_NONE = 0,
BLK_INTEGRITY_CSUM_IP = 1,
BLK_INTEGRITY_CSUM_CRC = 2,
BLK_INTEGRITY_CSUM_CRC64 = 3,
} __packed ;
struct blk_integrity {
const struct blk_integrity_profile *profile;
unsigned char flags;
enum blk_integrity_checksum csum_type;
unsigned char tuple_size;
unsigned char pi_offset;
unsigned char interval_exp;
@ -327,13 +334,13 @@ struct queue_limits {
* due to possible offsets.
*/
unsigned int dma_alignment;
struct blk_integrity integrity;
};
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
void *data);
void disk_set_zoned(struct gendisk *disk);
#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
@ -414,10 +421,6 @@ struct request_queue {
struct queue_limits limits;
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity integrity;
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#ifdef CONFIG_PM
struct device *dev;
enum rpm_status rpm_status;
@ -566,8 +569,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_zone_resetall(q) \
@ -638,18 +639,6 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
static inline void disk_set_max_open_zones(struct gendisk *disk,
unsigned int max_open_zones)
{
disk->queue->limits.max_open_zones = max_open_zones;
}
static inline void disk_set_max_active_zones(struct gendisk *disk,
unsigned int max_active_zones)
{
disk->queue->limits.max_active_zones = max_active_zones;
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
return bdev->bd_disk->queue->limits.max_open_zones;
@ -926,27 +915,33 @@ static inline void queue_limits_cancel_update(struct request_queue *q)
mutex_unlock(&q->limits_lock);
}
/*
* These helpers are for drivers that have sloppy feature negotiation and might
* have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
* completion handler when the device returned an indicator that the respective
* feature is not actually supported. They are racy and the driver needs to
* cope with that. Try to avoid this scheme if you can.
*/
static inline void blk_queue_disable_discard(struct request_queue *q)
{
q->limits.max_discard_sectors = 0;
}
static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
q->limits.max_secure_erase_sectors = 0;
}
static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
q->limits.max_write_zeroes_sectors = 0;
}
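Typical use is from an I/O completion path, as in the sd_done() hunk earlier; a sketch under that assumption (the predicate is illustrative, not a real helper):

	/* the device just indicated it does not actually support UNMAP */
	if (device_rejected_unmap(cmd))
		blk_queue_disable_discard(cmd->device->request_queue);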
/*
* Access functions for manipulating queue properties
*/
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
unsigned int max_sectors);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
@ -1301,8 +1296,12 @@ static inline bool bdev_synchronous(struct block_device *bdev)
static inline bool bdev_stable_writes(struct block_device *bdev)
{
return test_bit(QUEUE_FLAG_STABLE_WRITES,
&bdev_get_queue(bdev)->queue_flags);
struct request_queue *q = bdev_get_queue(bdev);
if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
return true;
return test_bit(QUEUE_FLAG_STABLE_WRITES, &q->queue_flags);
}
static inline bool bdev_write_cache(struct block_device *bdev)

View File

@ -39,20 +39,11 @@ struct t10_pi_tuple {
static inline u32 t10_pi_ref_tag(struct request *rq)
{
unsigned int shift = ilog2(queue_logical_block_size(rq->q));
unsigned int shift = rq->q->limits.integrity.interval_exp;
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (rq->q->integrity.interval_exp)
shift = rq->q->integrity.interval_exp;
#endif
return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
}
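A worked example of the calculation, assuming a 4096-byte protection interval (interval_exp = 12):

	/*
	 * shift - SECTOR_SHIFT = 12 - 9 = 3, so the ref tag advances once
	 * per 4096/512 = 8 sectors:
	 *
	 *   blk_rq_pos(rq) =  0.. 7  ->  ref tag 0
	 *   blk_rq_pos(rq) =  8..15  ->  ref tag 1
	 *   blk_rq_pos(rq) = 16..23  ->  ref tag 2
	 *
	 * ext_pi_ref_tag() below does the same arithmetic with a 48-bit result.
	 */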
extern const struct blk_integrity_profile t10_pi_type1_crc;
extern const struct blk_integrity_profile t10_pi_type1_ip;
extern const struct blk_integrity_profile t10_pi_type3_crc;
extern const struct blk_integrity_profile t10_pi_type3_ip;
struct crc64_pi_tuple {
__be64 guard_tag;
__be16 app_tag;
@ -70,16 +61,9 @@ static inline u64 lower_48_bits(u64 n)
static inline u64 ext_pi_ref_tag(struct request *rq)
{
unsigned int shift = ilog2(queue_logical_block_size(rq->q));
unsigned int shift = rq->q->limits.integrity.interval_exp;
#ifdef CONFIG_BLK_DEV_INTEGRITY
if (rq->q->integrity.interval_exp)
shift = rq->q->integrity.interval_exp;
#endif
return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT));
}
extern const struct blk_integrity_profile ext_pi_type1_crc64;
extern const struct blk_integrity_profile ext_pi_type3_crc64;
#endif