block: support PREEMPT_ONLY

When a queue is in PREEMPT_ONLY mode, only REQ_PREEMPT requests can be
allocated and dispatched; other requests are not allowed to enter the
I/O path.

This is useful for supporting safe SCSI quiesce.
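
As an illustration, a hypothetical quiesce implementation could drive
the new interface like this (a sketch only; the example_* names are not
part of this patch):

	/* Gate the queue: from here on, only REQ_PREEMPT requests enter. */
	static void example_quiesce(struct request_queue *q)
	{
		blk_set_preempt_only(q, true);
		/*
		 * Issue whatever REQ_PREEMPT commands the quiesce needs;
		 * every other submitter blocks in blk_queue_enter().
		 */
	}

	/* Lift the gate; the unfreeze inside wakes blocked submitters. */
	static void example_resume(struct request_queue *q)
	{
		blk_set_preempt_only(q, false);
	}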

Part of this patch is from Bart's '[PATCH v4 4/7] block: Add the
QUEUE_FLAG_PREEMPT_ONLY request queue flag'.

Change-Id: Iffe29f0d6385e56bd6352c3f5c09a11346c12142
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Tested-by: Martin Steigerwald <martin@lichtvoll.de>
Cc: Bart Van Assche <Bart.VanAssche@wdc.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Patch-mainline: linux-block@vger.kernel.org @ 03/10/2017, 22:04
Signed-off-by: Pradeep P V K <ppvk@codeaurora.org>
---
 2 files changed, 47 insertions(+), 4 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -351,6 +351,34 @@ void blk_sync_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
+void blk_set_preempt_only(struct request_queue *q, bool preempt_only)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	if (preempt_only)
+		queue_flag_set(QUEUE_FLAG_PREEMPT_ONLY, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/*
+	 * The synchronize_rcu() implied in blk_mq_freeze_queue()
+	 * or the explicit one will make sure the above write on
+	 * PREEMPT_ONLY is observed in blk_queue_enter() before
+	 * running blk_mq_unfreeze_queue().
+	 *
+	 * blk_mq_freeze_queue() also drains any requests in the queue,
+	 * so blk_queue_enter() will see the above updated value of
+	 * the PREEMPT_ONLY flag before any new allocation.
+	 */
+	if (!blk_mq_freeze_queue(q))
+		synchronize_rcu();
+	blk_mq_unfreeze_queue(q);
+}
+EXPORT_SYMBOL(blk_set_preempt_only);
+
 /**
  * __blk_run_queue_uncond - run a queue whether or not it has been stopped
  * @q: The queue to run
@@ -820,8 +848,16 @@ int blk_queue_enter(struct request_queue *q, unsigned int op)
 {
 	while (true) {
-		if (percpu_ref_tryget_live(&q->q_usage_counter))
-			return 0;
+		rcu_read_lock_sched();
+		if (__percpu_ref_tryget_live(&q->q_usage_counter)) {
+			if (likely((op & REQ_PREEMPT) ||
+				   !blk_queue_preempt_only(q))) {
+				rcu_read_unlock_sched();
+				return 0;
+			} else
+				percpu_ref_put(&q->q_usage_counter);
+		}
+		rcu_read_unlock_sched();
 
 		if (op & REQ_NOWAIT)
 			return -EBUSY;
@@ -836,8 +872,10 @@ int blk_queue_enter(struct request_queue *q, unsigned int op)
 		smp_rmb();
 
 		wait_event(q->mq_freeze_wq,
-			   !atomic_read(&q->mq_freeze_depth) ||
-			   blk_queue_dying(q));
+			   (!atomic_read(&q->mq_freeze_depth) &&
+			    ((op & REQ_PREEMPT) ||
+			     !blk_queue_preempt_only(q))) ||
+			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
 	}
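
Concretely, while the queue is gated a preempt request can still be
allocated and issued; a hedged sketch, assuming (as elsewhere in this
series) that blk_get_request() forwards its op flags to blk_queue_enter():

	/* Sketch: issue a passthrough command on a PREEMPT_ONLY queue. */
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_SCSI_IN | REQ_PREEMPT, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);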

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h

@@ -655,6 +655,7 @@ struct request_queue {
 #define QUEUE_FLAG_REGISTERED	26	/* queue has been registered to a disk */
 #define QUEUE_FLAG_SCSI_PASSTHROUGH 27	/* queue supports SCSI commands */
 #define QUEUE_FLAG_QUIESCED	28	/* queue has been quiesced */
+#define QUEUE_FLAG_PREEMPT_ONLY	29	/* only process REQ_PREEMPT requests */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE) |		\
@@ -759,6 +760,10 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
 			     REQ_FAILFAST_DRIVER))
 #define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
+#define blk_queue_preempt_only(q)				\
+	test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+
+extern void blk_set_preempt_only(struct request_queue *q, bool preempt_only);
 
 static inline bool blk_account_rq(struct request *rq)
 {
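
Since blk_queue_preempt_only() is just a test_bit() wrapper, it is cheap
enough for driver-side sanity checks as well; a hypothetical example, not
part of this patch:

	static inline void example_check_rq(struct request_queue *q,
					    struct request *rq)
	{
		/* While gated, only preempt requests should reach dispatch. */
		WARN_ON_ONCE(blk_queue_preempt_only(q) &&
			     !(rq->cmd_flags & REQ_PREEMPT));
	}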