block: Use blk_queue_flag_*() in drivers instead of queue_flag_*()

This patch has been generated as follows:

for verb in set_unlocked clear_unlocked set clear; do
  replace-in-files queue_flag_${verb} blk_queue_flag_${verb%_unlocked} \
    $(git grep -lw queue_flag_${verb} drivers block/bsg*)
done
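
Expanded, the loop performs these four substitutions; note that the
${verb%_unlocked} suffix strip maps both queue_flag_set_unlocked() and
queue_flag_set() onto the same new helper (and likewise for clear):

  queue_flag_set_unlocked()   -> blk_queue_flag_set()
  queue_flag_clear_unlocked() -> blk_queue_flag_clear()
  queue_flag_set()            -> blk_queue_flag_set()
  queue_flag_clear()          -> blk_queue_flag_clear()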

Except for protecting all queue flag changes with the queue lock,
this patch does not change any functionality.
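
For context, a minimal sketch of the shape of the helpers introduced by
the parent commit (the actual implementations live in block/blk-core.c):

  /*
   * Sketch only: the helper takes the queue lock around the flag
   * update itself, so callers no longer have to pick between the
   * locked and unlocked variants of queue_flag_set()/clear().
   */
  void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
  {
          unsigned long flags;

          spin_lock_irqsave(q->queue_lock, flags);
          queue_flag_set(flag, q);
          spin_unlock_irqrestore(q->queue_lock, flags);
  }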

Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Shaohua Li <shli@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 8b904b5b6b (parent bf3a2b310e)
Author: Bart Van Assche, 2018-03-07 17:10:10 -08:00 (committed by Jens Axboe)
38 changed files with 89 additions and 89 deletions


@@ -275,8 +275,8 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	q->queuedata = dev;
 	q->bsg_job_fn = job_fn;
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	blk_queue_softirq_done(q, bsg_softirq_done);
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);


@@ -1212,10 +1212,10 @@ static void decide_on_discard_support(struct drbd_device *device,
 		 * topology on all peers. */
 		blk_queue_discard_granularity(q, 512);
 		q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 		q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
 	} else {
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		blk_queue_discard_granularity(q, 0);
 		q->limits.max_discard_sectors = 0;
 		q->limits.max_write_zeroes_sectors = 0;


@@ -214,10 +214,10 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 	blk_mq_freeze_queue(lo->lo_queue);
 	lo->use_dio = use_dio;
 	if (use_dio) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
 	} else {
-		queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
 	}
 	blk_mq_unfreeze_queue(lo->lo_queue);
@@ -817,7 +817,7 @@ static void loop_config_discard(struct loop_device *lo)
 		q->limits.discard_alignment = 0;
 		blk_queue_max_discard_sectors(q, 0);
 		blk_queue_max_write_zeroes_sectors(q, 0);
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		return;
 	}
@@ -826,7 +826,7 @@ static void loop_config_discard(struct loop_device *lo)
 	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
 	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }

 static void loop_unprepare_queue(struct loop_device *lo)
@@ -1808,7 +1808,7 @@ static int loop_add(struct loop_device **l, int i)
 	 * page. For directio mode, merge does help to dispatch bigger request
 	 * to underlayer disk. We will enable merge once directio is enabled.
 	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 	err = -ENOMEM;
 	disk = lo->lo_disk = alloc_disk(1 << part_shift);


@@ -964,7 +964,7 @@ static void nbd_parse_flags(struct nbd_device *nbd)
 	else
 		set_disk_ro(nbd->disk, false);
 	if (config->flags & NBD_FLAG_SEND_TRIM)
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 	if (config->flags & NBD_FLAG_SEND_FLUSH) {
 		if (config->flags & NBD_FLAG_SEND_FUA)
 			blk_queue_write_cache(nbd->disk->queue, true, true);
@@ -1040,7 +1040,7 @@ static void nbd_config_put(struct nbd_device *nbd)
 		nbd->config = NULL;
 		nbd->tag_set.timeout = 0;
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 		mutex_unlock(&nbd->config_lock);
 		nbd_put(nbd);
@@ -1488,8 +1488,8 @@ static int nbd_dev_add(int index)
 	/*
 	 * Tell the block layer that we are not a rotational device
 	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 	disk->queue->limits.discard_granularity = 512;
 	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
 	blk_queue_max_segment_size(disk->queue, UINT_MAX);


@@ -1525,7 +1525,7 @@ static void null_config_discard(struct nullb *nullb)
 	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
 	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
 	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
 }

 static int null_open(struct block_device *bdev, fmode_t mode)
@@ -1810,8 +1810,8 @@ static int null_add_dev(struct nullb_device *dev)
 	}
 	nullb->q->queuedata = nullb;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 	mutex_lock(&lock);
 	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);


@@ -4370,7 +4370,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 		goto out_tag_set;
 	}
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
 	/* set io sizes to object size */
@@ -4383,7 +4383,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	blk_queue_io_opt(q, segment_size);
 	/* enable the discard support */
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	q->limits.discard_granularity = segment_size;
 	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
 	blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);


@@ -287,10 +287,10 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
 	if (rsxx_discard_supported(card)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
 		blk_queue_max_discard_sectors(card->queue,
 						RSXX_HW_BLK_SIZE >> 9);
 		card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;


@@ -2858,8 +2858,8 @@ static int skd_cons_disk(struct skd_device *skdev)
 	/* set optimal I/O size to 8KB */
 	blk_queue_io_opt(q, 8192);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 	blk_queue_rq_timeout(q, 8 * HZ);


@@ -931,15 +931,15 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
 	unsigned int segments = info->max_indirect_segments ? :
 				BLKIF_MAX_SEGMENTS_PER_REQUEST;
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
 	if (info->feature_discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
 		blk_queue_max_discard_sectors(rq, get_capacity(gd));
 		rq->limits.discard_granularity = info->discard_granularity;
 		rq->limits.discard_alignment = info->discard_alignment;
 		if (info->feature_secdiscard)
-			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+			blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
 	}
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -1610,8 +1610,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			blkif_req(req)->error = BLK_STS_NOTSUPP;
 			info->feature_discard = 0;
 			info->feature_secdiscard = 0;
-			queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
-			queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+			blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
 		}
 		break;
 	case BLKIF_OP_FLUSH_DISKCACHE:


@@ -1530,8 +1530,8 @@ static int zram_add(void)
 	/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
 	set_capacity(zram->disk, 0);
 	/* zram devices sort of resembles non-rotational disks */
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
 	/*
 	 * To ensure that we always get PAGE_SIZE aligned
@@ -1544,7 +1544,7 @@ static int zram_add(void)
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);
 	/*
 	 * zram_bio_discard() will clear all logical blocks if logical block


@@ -687,8 +687,8 @@ static void ide_disk_setup(ide_drive_t *drive)
 			queue_max_sectors(q) / 2);
 	if (ata_id_is_ssd(id)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 	}
 	/* calculate drive capacity, and select LBA if possible */


@@ -773,7 +773,7 @@ static int ide_init_queue(ide_drive_t *drive)
 	q->request_fn = do_ide_request;
 	q->initialize_rq_fn = ide_initialize_rq;
 	q->cmd_size = sizeof(struct ide_request);
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	if (blk_init_allocated_queue(q) < 0) {
 		blk_cleanup_queue(q);
 		return 1;


@@ -1067,7 +1067,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
 	tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
 	tqueue->limits.discard_alignment = 0;
 	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
 	pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
 			tdisk->disk_name,


@@ -1861,7 +1861,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	q->limits = *limits;
 	if (!dm_table_supports_discards(t)) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		/* Must also clear discard limits... */
 		q->limits.max_discard_sectors = 0;
 		q->limits.max_hw_discard_sectors = 0;
@@ -1869,7 +1869,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		q->limits.discard_alignment = 0;
 		q->limits.discard_misaligned = 0;
 	} else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
 		wc = true;
@@ -1879,15 +1879,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	blk_queue_write_cache(q, wc, fua);
 	if (dm_table_supports_dax(t))
-		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	if (dm_table_supports_dax_write_cache(t))
 		dax_write_cache(t->md->dax_dev, true);
 	/* Ensure that all underlying devices are non-rotational. */
 	if (dm_table_all_devices_attribute(t, device_is_nonrot))
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	else
-		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
 	if (!dm_table_supports_write_same(t))
 		q->limits.max_write_same_sectors = 0;
@@ -1895,9 +1895,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		q->limits.max_write_zeroes_sectors = 0;
 	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
-		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
 	dm_table_verify_integrity(t);
@@ -1908,7 +1908,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * have it set.
 	 */
 	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 }

 unsigned int dm_table_get_num_targets(struct dm_table *t)


@@ -138,9 +138,9 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 	}
 	if (!discard_supported)
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	/*
 	 * Here we calculate the device offsets.


@@ -5608,9 +5608,9 @@ int md_run(struct mddev *mddev)
 		if (mddev->degraded)
 			nonrot = false;
 		if (nonrot)
-			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
 		mddev->queue->backing_dev_info->congested_data = mddev;
 		mddev->queue->backing_dev_info->congested_fn = md_congested;
 	}


@@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev)
 				discard_supported = true;
 		}
 		if (!discard_supported)
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
 		else
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	}
 	/* calculate array device size */


@@ -1760,7 +1760,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		}
 	}
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	print_conf(conf);
 	return err;
 }
@@ -3099,10 +3099,10 @@ static int raid1_run(struct mddev *mddev)
 	if (mddev->queue) {
 		if (discard_supported)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
-						mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
+					   mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
-						  mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
+					     mddev->queue);
 	}


@@ -1845,7 +1845,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		break;
 	}
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	print_conf(conf);
 	return err;
@@ -3844,10 +3844,10 @@ static int raid10_run(struct mddev *mddev)
 	if (mddev->queue) {
 		if (discard_supported)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
-						mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
+					   mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
-						  mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
+					     mddev->queue);
 	}
 	/* need to check that every block has at least one working mirror */


@@ -7444,10 +7444,10 @@ static int raid5_run(struct mddev *mddev)
 		if (devices_handle_discard_safely &&
 		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
 		    mddev->queue->limits.discard_granularity >= stripe)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
-						mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
+					   mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
-						  mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
+					     mddev->queue);
 		blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);


@@ -185,14 +185,14 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 	if (!max_discard)
 		return;
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	blk_queue_max_discard_sectors(q, max_discard);
 	q->limits.discard_granularity = card->pref_erase << 9;
 	/* granularity must not be greater than max. discard */
 	if (card->pref_erase > max_discard)
 		q->limits.discard_granularity = 0;
 	if (mmc_can_secure_erase_trim(card))
-		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
+		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
 }

 /**
@@ -356,8 +356,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);


@@ -419,11 +419,11 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 	blk_queue_logical_block_size(new->rq, tr->blksize);
 	blk_queue_bounce_limit(new->rq, BLK_BOUNCE_HIGH);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
 	if (tr->discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
 		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
 	}


@@ -266,7 +266,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
 	blk_queue_make_request(q, nd_blk_make_request);
 	blk_queue_max_hw_sectors(q, UINT_MAX);
 	blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	q->queuedata = nsblk;
 	disk = alloc_disk(0);


@@ -1542,7 +1542,7 @@ static int btt_blk_init(struct btt *btt)
 	blk_queue_make_request(btt->btt_queue, btt_make_request);
 	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
 	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
 	btt->btt_queue->queuedata = btt;
 	set_capacity(btt->btt_disk, 0);


@@ -388,8 +388,8 @@ static int pmem_attach_disk(struct device *dev,
 	blk_queue_physical_block_size(q, PAGE_SIZE);
 	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
 	blk_queue_max_hw_sectors(q, UINT_MAX);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	q->queuedata = pmem;
 	disk = alloc_disk_node(0, nid);


@@ -1358,7 +1358,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
 	blk_queue_max_discard_sectors(queue, UINT_MAX);
 	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, queue);
 	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
@@ -2949,7 +2949,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	ns->queue = blk_mq_init_queue(ctrl->tagset);
 	if (IS_ERR(ns->queue))
 		goto out_free_ns;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
 	ns->queue->queuedata = ns;
 	ns->ctrl = ctrl;


@@ -168,7 +168,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	q->queuedata = head;
 	blk_queue_make_request(q, nvme_ns_head_make_request);
 	q->poll_fn = nvme_ns_head_poll;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* set to a default value for 512 until disk is validated */
 	blk_queue_logical_block_size(q, 512);


@@ -3210,7 +3210,7 @@ static void dasd_setup_queue(struct dasd_block *block)
 	} else {
 		max = block->base->discipline->max_blocks << block->s2b_shift;
 	}
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	q->limits.max_dev_sectors = max;
 	blk_queue_logical_block_size(q, logical_block_size);
 	blk_queue_max_hw_sectors(q, max);
@@ -3233,7 +3233,7 @@ static void dasd_setup_queue(struct dasd_block *block)
 		blk_queue_max_discard_sectors(q, max_discard_sectors);
 		blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	}
 }


@@ -633,7 +633,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 	dev_info->gd->private_data = dev_info;
 	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
 	blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
-	queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
+	blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
 	seg_byte_size = (dev_info->end - dev_info->start + 1);
 	set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors


@@ -472,8 +472,8 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 	blk_queue_logical_block_size(rq, 1 << 12);
 	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
 	blk_queue_max_segments(rq, nr_max_blk);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
 	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
 	if (!bdev->gendisk) {


@@ -348,8 +348,8 @@ static int __init xpram_setup_blkdev(void)
 			put_disk(xpram_disks[i]);
 			goto out;
 		}
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
 		blk_queue_make_request(xpram_queues[i], xpram_make_request);
 		blk_queue_logical_block_size(xpram_queues[i], 4096);
 	}


@@ -1864,7 +1864,7 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
-	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
+	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
 }


@@ -1908,7 +1908,7 @@ megasas_is_prp_possible(struct megasas_instance *instance,
 	 * then sending IOs with holes.
 	 *
 	 * Though driver can request block layer to disable IO merging by calling-
-	 * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
+	 * blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
 	 * user may tune sysfs parameter- nomerges again to 0 or 1.
 	 *
 	 * If in future IO scheduling is enabled with SCSI BLK MQ,


@@ -2352,7 +2352,7 @@ scsih_slave_configure(struct scsi_device *sdev)
 		** merged and can eliminate holes created during merging
 		** operation.
 		**/
-		queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES,
-		    sdev->request_queue);
+		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
+		    sdev->request_queue);
 		blk_queue_virt_boundary(sdev->request_queue,
 		    ioc->page_size - 1);


@@ -3897,7 +3897,7 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp)
 	if (sdebug_verbose)
 		pr_info("slave_alloc <%u %u %u %llu>\n",
 			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
+	blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
 	return 0;
 }


@@ -2140,7 +2140,7 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 {
 	struct device *dev = shost->dma_dev;
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	/*
 	 * this limit is imposed by hardware restrictions


@@ -227,8 +227,8 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	return 0;
 }


@@ -714,7 +714,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	case SD_LBP_FULL:
 	case SD_LBP_DISABLE:
 		blk_queue_max_discard_sectors(q, 0);
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		return;
 	case SD_LBP_UNMAP:
@@ -747,7 +747,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	}
 	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }

 static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
@@ -2952,8 +2952,8 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 	rot = get_unaligned_be16(&buffer[4]);
 	if (rot == 1) {
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 	}
 	if (sdkp->device->type == TYPE_ZBC) {