block: Use blk_queue_flag_*() in drivers instead of queue_flag_*()

This patch has been generated as follows:

for verb in set_unlocked clear_unlocked set clear; do
  replace-in-files queue_flag_${verb} blk_queue_flag_${verb%_unlocked} \
    $(git grep -lw queue_flag_${verb} drivers block/bsg*)
done
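The effect is a purely mechanical rename of each call site (replace-in-files appears to be a local search-and-replace helper rather than a standard utility), e.g.:

-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);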

Except for protecting all queue flag changes with the queue lock, this
patch does not change any functionality.
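
For reference, the helpers this patch switches to were introduced earlier in
the same series; a minimal sketch of blk_queue_flag_set()/blk_queue_flag_clear()
as added to block/blk-core.c around this time (shown for context only, not part
of this diff):

void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;

	/* take the queue lock, unlike the old *_unlocked variants */
	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_clear(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}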

Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Shaohua Li <shli@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: azrim <mirzaspc@gmail.com>
Bart Van Assche 2018-03-07 17:10:10 -08:00, committed by azrim
parent fe5f1e0f23
commit 288663d8ad
36 changed files with 90 additions and 90 deletions

@@ -274,8 +274,8 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	q->queuedata = dev;
 	q->bsg_job_fn = job_fn;
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	blk_queue_softirq_done(q, bsg_softirq_done);
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

@@ -1215,10 +1215,10 @@ static void decide_on_discard_support(struct drbd_device *device,
 		 * topology on all peers. */
 		blk_queue_discard_granularity(q, 512);
 		q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 		q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
 	} else {
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		blk_queue_discard_granularity(q, 0);
 		q->limits.max_discard_sectors = 0;
 		q->limits.max_write_zeroes_sectors = 0;

@@ -217,10 +217,10 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 	blk_mq_freeze_queue(lo->lo_queue);
 	lo->use_dio = use_dio;
 	if (use_dio) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
 	} else {
-		queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
 	}
 	if (lo->lo_state == Lo_bound)
@@ -852,7 +852,7 @@ static void loop_config_discard(struct loop_device *lo)
 		q->limits.discard_alignment = 0;
 		blk_queue_max_discard_sectors(q, 0);
 		blk_queue_max_write_zeroes_sectors(q, 0);
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		return;
 	}
@@ -861,7 +861,7 @@ static void loop_config_discard(struct loop_device *lo)
 	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
 	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 static void loop_unprepare_queue(struct loop_device *lo)
@@ -1870,7 +1870,7 @@ static int loop_add(struct loop_device **l, int i)
 	 * page. For directio mode, merge does help to dispatch bigger request
 	 * to underlayer disk. We will enable merge once directio is enabled.
 	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 	err = -ENOMEM;
 	disk = lo->lo_disk = alloc_disk(1 << part_shift);

@@ -1086,7 +1086,7 @@ static void nbd_parse_flags(struct nbd_device *nbd)
 	else
 		set_disk_ro(nbd->disk, false);
 	if (config->flags & NBD_FLAG_SEND_TRIM)
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 	if (config->flags & NBD_FLAG_SEND_FLUSH) {
 		if (config->flags & NBD_FLAG_SEND_FUA)
 			blk_queue_write_cache(nbd->disk->queue, true, true);
@@ -1166,7 +1166,7 @@ static void nbd_config_put(struct nbd_device *nbd)
 		nbd->recv_workq = NULL;
 		nbd->tag_set.timeout = 0;
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 		mutex_unlock(&nbd->config_lock);
 		nbd_put(nbd);
@@ -1660,8 +1660,8 @@ static int nbd_dev_add(int index)
 	/*
	 * Tell the block layer that we are not a rotational device
	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 	disk->queue->limits.discard_granularity = 512;
 	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
 	blk_queue_max_segment_size(disk->queue, UINT_MAX);

@@ -1635,7 +1635,7 @@ static void null_config_discard(struct nullb *nullb)
 	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
 	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
 	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
 }
 static int null_open(struct block_device *bdev, fmode_t mode)
@@ -1888,8 +1888,8 @@ static int null_add_dev(struct nullb_device *dev)
 	}
 	nullb->q->queuedata = nullb;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 	mutex_lock(&lock);
 	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);

@@ -4411,7 +4411,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 		goto out_tag_set;
 	}
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
 	/* set io sizes to object size */
@@ -4424,7 +4424,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	blk_queue_io_opt(q, segment_size);
 	/* enable the discard support */
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	q->limits.discard_granularity = segment_size;
 	q->limits.discard_alignment = segment_size;
 	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);

@@ -287,10 +287,10 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
 	if (rsxx_discard_supported(card)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
 		blk_queue_max_discard_sectors(card->queue,
 					      RSXX_HW_BLK_SIZE >> 9);
 		card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;

@@ -2856,8 +2856,8 @@ static int skd_cons_disk(struct skd_device *skdev)
 	/* set optimal I/O size to 8KB */
 	blk_queue_io_opt(q, 8192);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 	blk_queue_rq_timeout(q, 8 * HZ);

@@ -943,16 +943,16 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
 	unsigned int segments = info->max_indirect_segments ? :
 				BLKIF_MAX_SEGMENTS_PER_REQUEST;
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
 	if (info->feature_discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
 		blk_queue_max_discard_sectors(rq, get_capacity(gd));
 		rq->limits.discard_granularity = info->discard_granularity ?:
 						 info->physical_sector_size;
 		rq->limits.discard_alignment = info->discard_alignment;
 		if (info->feature_secdiscard)
-			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+			blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
 	}
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -1662,8 +1662,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				blkif_req(req)->error = BLK_STS_NOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
-				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
-				queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
+				blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+				blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
 			}
 			break;
 		case BLKIF_OP_FLUSH_DISKCACHE:

@@ -2063,8 +2063,8 @@ static int zram_add(void)
 	/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
 	set_capacity(zram->disk, 0);
 	/* zram devices sort of resembles non-rotational disks */
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
 	/*
	 * To ensure that we always get PAGE_SIZE aligned
@@ -2077,7 +2077,7 @@ static int zram_add(void)
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);
 	/*
	 * zram_bio_discard() will clear all logical blocks if logical block

@@ -687,8 +687,8 @@ static void ide_disk_setup(ide_drive_t *drive)
 		       queue_max_sectors(q) / 2);
 	if (ata_id_is_ssd(id)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 	}
 	/* calculate drive capacity, and select LBA if possible */

@@ -773,7 +773,7 @@ static int ide_init_queue(ide_drive_t *drive)
 	q->request_fn = do_ide_request;
 	q->initialize_rq_fn = ide_initialize_rq;
 	q->cmd_size = sizeof(struct ide_request);
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	if (blk_init_allocated_queue(q) < 0) {
 		blk_cleanup_queue(q);
 		return 1;

@@ -1007,7 +1007,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
 	tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
 	tqueue->limits.discard_alignment = 0;
 	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
 	pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
 			geo->nr_luns, pblk->l_mg.nr_lines,

@@ -1855,9 +1855,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	q->limits = *limits;
 	if (!dm_table_supports_discards(t))
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
 		wc = true;
@@ -1867,18 +1867,18 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	blk_queue_write_cache(q, wc, fua);
 	if (dm_table_supports_dax(t))
-		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	else
-		queue_flag_clear_unlocked(QUEUE_FLAG_DAX, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
 	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
 		dax_write_cache(t->md->dax_dev, true);
 	/* Ensure that all underlying devices are non-rotational. */
 	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
-		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	if (!dm_table_supports_write_same(t))
 		q->limits.max_write_same_sectors = 0;
@@ -1886,9 +1886,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		q->limits.max_write_zeroes_sectors = 0;
 	if (dm_table_any_dev_attr(t, queue_no_sg_merge, NULL))
-		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
 	else
-		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
 	dm_table_verify_integrity(t);
@@ -1914,7 +1914,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
	 */
 	if (blk_queue_add_random(q) &&
 	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 	/*
	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
@@ -1927,7 +1927,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
	 */
 	smp_mb();
 	if (dm_table_request_based(t))
-		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
+		blk_queue_flag_set(QUEUE_FLAG_STACKABLE, q);
 	/* io_pages is used for readahead */
 	q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);

@@ -138,9 +138,9 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 	}
 	if (!discard_supported)
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	/*
	 * Here we calculate the device offsets.

@@ -5655,9 +5655,9 @@ int md_run(struct mddev *mddev)
 		if (mddev->degraded)
 			nonrot = false;
 		if (nonrot)
-			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
 		mddev->queue->backing_dev_info->congested_data = mddev;
 		mddev->queue->backing_dev_info->congested_fn = md_congested;
 	}

@@ -419,9 +419,9 @@ static int raid0_run(struct mddev *mddev)
 			discard_supported = true;
 		}
 		if (!discard_supported)
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
 		else
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	}
 	/* calculate array device size */
/* calculate array device size */

@@ -1765,7 +1765,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		}
 	}
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	print_conf(conf);
 	return err;
 }
@@ -3131,10 +3131,10 @@ static int raid1_run(struct mddev *mddev)
 	if (mddev->queue) {
 		if (discard_supported)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						  mddev->queue);
 	}

@@ -1819,7 +1819,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		break;
 	}
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	print_conf(conf);
 	return err;
@@ -3714,10 +3714,10 @@ static int raid10_run(struct mddev *mddev)
 	if (mddev->queue) {
 		if (discard_supported)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						  mddev->queue);
 	}
 	/* need to check that every block has at least one working mirror */

@@ -7453,10 +7453,10 @@ static int raid5_run(struct mddev *mddev)
 		if (devices_handle_discard_safely &&
 		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
 		    mddev->queue->limits.discard_granularity >= stripe)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);

@@ -183,14 +183,14 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 	if (!max_discard)
 		return;
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	blk_queue_max_discard_sectors(q, max_discard);
 	q->limits.discard_granularity = card->pref_erase << 9;
 	/* granularity must not be greater than max. discard */
 	if (card->pref_erase > max_discard)
 		q->limits.discard_granularity = SECTOR_SIZE;
 	if (mmc_can_secure_erase_trim(card))
-		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
+		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
 }
 /**
@@ -208,7 +208,7 @@ void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = *mmc_dev(host)->dma_mask;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
@@ -473,8 +473,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	}
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);

@@ -419,11 +419,11 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 	blk_queue_logical_block_size(new->rq, tr->blksize);
 	blk_queue_bounce_limit(new->rq, BLK_BOUNCE_HIGH);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
 	if (tr->discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
 		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
 	}

@@ -266,7 +266,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
 	blk_queue_make_request(q, nd_blk_make_request);
 	blk_queue_max_hw_sectors(q, UINT_MAX);
 	blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	q->queuedata = nsblk;
 	disk = alloc_disk(0);

@@ -1551,7 +1551,7 @@ static int btt_blk_init(struct btt *btt)
 	blk_queue_make_request(btt->btt_queue, btt_make_request);
 	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
 	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
 	btt->btt_queue->queuedata = btt;
 	if (btt_meta_size(btt)) {

@@ -393,8 +393,8 @@ static int pmem_attach_disk(struct device *dev,
 	blk_queue_physical_block_size(q, PAGE_SIZE);
 	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
 	blk_queue_max_hw_sectors(q, UINT_MAX);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	q->queuedata = pmem;
 	disk = alloc_disk_node(0, nid);

@@ -1180,7 +1180,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
 	}
 	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
 	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, ns->queue);
 	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
 		blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX);
@@ -2369,7 +2369,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	ns->queue = blk_mq_init_queue(ctrl->tagset);
 	if (IS_ERR(ns->queue))
 		goto out_release_instance;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
 	ns->queue->queuedata = ns;
 	ns->ctrl = ctrl;

@@ -3234,7 +3234,7 @@ static void dasd_setup_queue(struct dasd_block *block)
 	} else {
 		max = block->base->discipline->max_blocks << block->s2b_shift;
 	}
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	q->limits.max_dev_sectors = max;
 	blk_queue_logical_block_size(q, logical_block_size);
 	blk_queue_max_hw_sectors(q, max);
@@ -3257,7 +3257,7 @@ static void dasd_setup_queue(struct dasd_block *block)
 		blk_queue_max_discard_sectors(q, max_discard_sectors);
 		blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	}
 }

@@ -632,7 +632,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 	dev_info->gd->private_data = dev_info;
 	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
 	blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
-	queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
+	blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
 	seg_byte_size = (dev_info->end - dev_info->start + 1);
 	set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors

@@ -472,8 +472,8 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 	blk_queue_logical_block_size(rq, 1 << 12);
 	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
 	blk_queue_max_segments(rq, nr_max_blk);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
 	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
 	if (!bdev->gendisk) {

@@ -347,8 +347,8 @@ static int __init xpram_setup_blkdev(void)
 			put_disk(xpram_disks[i]);
 			goto out;
 		}
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
 		blk_queue_make_request(xpram_queues[i], xpram_make_request);
 		blk_queue_logical_block_size(xpram_queues[i], 4096);
 	}

@@ -1825,7 +1825,7 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
-	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
+	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
 }

@@ -1594,7 +1594,7 @@ megasas_is_prp_possible(struct megasas_instance *instance,
	 * then sending IOs with holes.
	 *
	 * Though driver can request block layer to disable IO merging by calling-
-	 * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
+	 * blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
	 * user may tune sysfs parameter- nomerges again to 0 or 1.
	 *
	 * If in future IO scheduling is enabled with SCSI BLK MQ,

@@ -3637,7 +3637,7 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp)
 	if (sdebug_verbose)
 		pr_info("slave_alloc <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
+	blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
 	return 0;
 }

@@ -2148,7 +2148,7 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 {
 	struct device *dev = shost->dma_dev;
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	/*
	 * this limit is imposed by hardware restrictions

@@ -227,8 +227,8 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
	 * by default assume old behaviour and bounce for any highmem page
	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	return 0;
 }

@@ -721,7 +721,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	case SD_LBP_FULL:
 	case SD_LBP_DISABLE:
 		blk_queue_max_discard_sectors(q, 0);
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		return;
 	case SD_LBP_UNMAP:
@@ -754,7 +754,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	}
 	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
@@ -2901,8 +2901,8 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 	rot = get_unaligned_be16(&buffer[4]);
 	if (rot == 1) {
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 	}
 	if (sdkp->device->type == TYPE_ZBC) {