blk-mq: remove REQ_ATOM_STARTED

After the recent updates to use generation number and state based
synchronization, we can easily replace REQ_ATOM_STARTED usages by
adding an extra state to distinguish completed but not yet freed
state.

Add MQ_RQ_COMPLETE and replace REQ_ATOM_STARTED usages with
blk_mq_rq_state() tests. REQ_ATOM_STARTED no longer has any users
left and is removed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: azrim <mirzaspc@gmail.com>
This commit is contained in:
parent 96448f9930
commit 7115d2889b
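With REQ_ATOM_STARTED gone, a request's lifecycle is tracked entirely by the two-bit state packed into rq->gstate: MQ_RQ_IDLE on allocation and free, MQ_RQ_IN_FLIGHT after blk_mq_start_request(), and the new MQ_RQ_COMPLETE once __blk_mq_complete_request() runs. For reference, here is a minimal sketch of the blk_mq_rq_state()/blk_mq_rq_update_state() helpers this patch builds on, approximately as the earlier generation-number patches added them to block/blk-mq.h (a sketch for context, not part of this diff):

/*
 * Sketch of the pre-existing state helpers (approximate, for reference).
 * The low MQ_RQ_STATE_BITS of rq->gstate hold the state; the remaining
 * bits hold a generation number bumped on each IDLE -> IN_FLIGHT
 * transition so the timeout path can detect recycled requests.
 */
static inline int blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
}

static inline void blk_mq_rq_update_state(struct request *rq,
					  enum mq_rq_state state)
{
	u64 old_val = READ_ONCE(rq->gstate);
	u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;

	if (state == MQ_RQ_IN_FLIGHT) {
		WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
		new_val += MQ_RQ_GEN_INC;
	}

	/* avoid exposing interim values */
	WRITE_ONCE(rq->gstate, new_val);
}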
block/blk-mq-debugfs.c

@@ -273,7 +273,6 @@ static const char *const cmd_flag_name[] = {
 #define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
 static const char *const rqf_name[] = {
 	RQF_NAME(SORTED),
-	RQF_NAME(STARTED),
 	RQF_NAME(QUEUED),
 	RQF_NAME(SOFTBARRIER),
 	RQF_NAME(FLUSH_SEQ),
@@ -296,7 +295,6 @@ static const char *const rqf_name[] = {
 #define RQAF_NAME(name) [REQ_ATOM_##name] = #name
 static const char *const rqaf_name[] = {
 	RQAF_NAME(COMPLETE),
-	RQAF_NAME(STARTED),
 	RQAF_NAME(POLL_SLEPT),
 };
 #undef RQAF_NAME
@@ -410,7 +408,7 @@ static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
 	const struct show_busy_params *params = data;

 	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
-	    test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+	    blk_mq_rq_state(rq) != MQ_RQ_IDLE)
 		__blk_mq_debugfs_rq_show(params->m,
 					 list_entry_rq(&rq->queuelist));
 }
block/blk-mq.c

@@ -503,7 +503,6 @@ void blk_mq_free_request(struct request *rq)
 	blk_put_rl(blk_rq_rl(rq));

 	blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
-	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
 	if (rq->tag != -1)
 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
@@ -551,6 +550,7 @@ static void __blk_mq_complete_request(struct request *rq)
 	int cpu;

 	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT);
+	blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);

 	if (rq->internal_tag != -1)
 		blk_mq_sched_completed_request(rq);
@@ -662,7 +662,7 @@ EXPORT_SYMBOL(blk_mq_complete_request);

 int blk_mq_request_started(struct request *rq)
 {
-	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
 }
 EXPORT_SYMBOL_GPL(blk_mq_request_started);

@@ -681,7 +681,6 @@ void blk_mq_start_request(struct request *rq)
 	}

 	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
-	WARN_ON_ONCE(test_bit(REQ_ATOM_STARTED, &rq->atomic_flags));

 	/*
 	 * Mark @rq in-flight which also advances the generation number,
@@ -703,8 +702,6 @@ void blk_mq_start_request(struct request *rq)
 	write_seqcount_end(&rq->gstate_seq);
 	preempt_enable();

-	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
-
 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
 		/*
 		 * Make sure space for the drain appears. We know we can do
@@ -717,13 +714,9 @@ void blk_mq_start_request(struct request *rq)
 EXPORT_SYMBOL(blk_mq_start_request);

 /*
- * When we reach here because queue is busy, REQ_ATOM_COMPLETE
- * flag isn't set yet, so there may be race with timeout handler,
- * but given rq->deadline is just set in .queue_rq() under
- * this situation, the race won't be possible in reality because
- * rq->timeout should be set as big enough to cover the window
- * between blk_mq_start_request() called from .queue_rq() and
- * clearing REQ_ATOM_STARTED here.
+ * When we reach here because queue is busy, it's safe to change the state
+ * to IDLE without checking @rq->aborted_gstate because we should still be
+ * holding the RCU read lock and thus protected against timeout.
  */
 static void __blk_mq_requeue_request(struct request *rq)
 {
@@ -732,7 +725,7 @@ static void __blk_mq_requeue_request(struct request *rq)
 	trace_block_rq_requeue(q, rq);
 	wbt_requeue(q->rq_wb, &rq->issue_stat);

-	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+	if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
 		blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
 		if (q->dma_drain_size && blk_rq_bytes(rq))
 			rq->nr_phys_segments--;
@@ -842,18 +835,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 	const struct blk_mq_ops *ops = req->q->mq_ops;
 	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

-	/*
-	 * We know that complete is set at this point. If STARTED isn't set
-	 * anymore, then the request isn't active and the "timeout" should
-	 * just be ignored. This can happen due to the bitflag ordering.
-	 * Timeout first checks if STARTED is set, and if it is, assumes
-	 * the request is active. But if we race with completion, then
-	 * both flags will get cleared. So check here again, and ignore
-	 * a timeout event with a request that isn't active.
-	 */
-	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
-		return;
-
 	req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;

 	if (ops->timeout)
@@ -889,8 +870,7 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,

 	might_sleep();

-	if ((rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) ||
-	    !test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+	if (rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED)
 		return;

 	/* read coherent snapshots of @rq->state_gen and @rq->deadline */
@@ -3106,8 +3086,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,

 	hrtimer_init_sleeper(&hs, current);
 	do {
-		if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
-		    blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
+		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
 			break;
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		hrtimer_start_expires(&hs.timer, mode);
block/blk-mq.h

@@ -33,6 +33,7 @@ struct blk_mq_ctx {
 enum mq_rq_state {
 	MQ_RQ_IDLE		= 0,
 	MQ_RQ_IN_FLIGHT		= 1,
+	MQ_RQ_COMPLETE		= 2,

 	MQ_RQ_STATE_BITS	= 2,
 	MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
block/blk.h

@@ -144,7 +144,6 @@ void blk_account_io_done(struct request *req);
  */
 enum rq_atomic_flags {
 	REQ_ATOM_COMPLETE = 0,
-	REQ_ATOM_STARTED,

 	REQ_ATOM_POLL_SLEPT,
 };
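The net effect on the request state machine, summarized from the hunks above:

/*
 * Request state transitions after this patch (derived from the diff):
 *
 *	MQ_RQ_IDLE      -- blk_mq_start_request() --------> MQ_RQ_IN_FLIGHT
 *	MQ_RQ_IN_FLIGHT -- __blk_mq_complete_request() ---> MQ_RQ_COMPLETE
 *	!MQ_RQ_IDLE     -- __blk_mq_requeue_request() ----> MQ_RQ_IDLE
 *	any state       -- blk_mq_free_request() ---------> MQ_RQ_IDLE
 *
 * "started" now simply means blk_mq_rq_state(rq) != MQ_RQ_IDLE, and the
 * hybrid-poll loop waits for MQ_RQ_COMPLETE instead of testing the old
 * STARTED bit alongside the state.
 */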