mirror of
https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00
Revert "block: support PREEMPT_ONLY" [2/7]
This reverts commit 8e66a7a2e1dd42007e00c67b530b108720b7fef4. Reason for revert: this patchset is either mismerged or incomplete. UFS may fail to resume from suspend with the log containing these lines: [23703.703882] scsi host0: runtime PM trying to activate child device host0 but parent (1d84000.ufshc) is not active [23703.703977] scsi target0:0:0: runtime PM trying to activate child device target0:0:0 but parent (host0) is not active [23703.704369] sd 0:0:0:0: runtime PM trying to activate child device 0:0:0:0 but parent (target0:0:0) is not active [23703.704380] scsi 0:0:0:49488: runtime PM trying to activate child device 0:0:0:49488 but parent (target0:0:0) is not active [23703.704470] sd 0:0:0:3: runtime PM trying to activate child device 0:0:0:3 but parent (target0:0:0) is not active [23703.704478] sd 0:0:0:5: runtime PM trying to activate child device 0:0:0:5 but parent (target0:0:0) is not active [23703.704557] sd 0:0:0:1: runtime PM trying to activate child device 0:0:0:1 but parent (target0:0:0) is not active [23703.704565] scsi 0:0:0:49476: runtime PM trying to activate child device 0:0:0:49476 but parent (target0:0:0) is not active [23703.704643] sd 0:0:0:4: runtime PM trying to activate child device 0:0:0:4 but parent (target0:0:0) is not active [23703.704651] scsi 0:0:0:49456: runtime PM trying to activate child device 0:0:0:49456 but parent (target0:0:0) is not active [23703.704722] sd 0:0:0:2: runtime PM trying to activate child device 0:0:0:2 but parent (target0:0:0) is not active This shouldn't be a problem [1], but the device becomes unresponsive until a forced reboot is done by holding the power button. [1] https://lkml.org/lkml/2020/11/13/2 Test: the mentioned kernel messages do not cause any issue Change-Id: I4fd8a095c89fb9c979cd3c4757119f8e09392780 Signed-off-by: Alexander Winkowski <dereference23@outlook.com>
This commit is contained in:
parent
21630a7142
commit
82a0bf27e1
@ -428,34 +428,6 @@ void blk_sync_queue(struct request_queue *q)
|
||||
}
|
||||
EXPORT_SYMBOL(blk_sync_queue);
|
||||
|
||||
void blk_set_preempt_only(struct request_queue *q, bool preempt_only)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
if (preempt_only)
|
||||
queue_flag_set(QUEUE_FLAG_PREEMPT_ONLY, q);
|
||||
else
|
||||
queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
|
||||
/*
|
||||
* The synchronize_rcu() implicied in blk_mq_freeze_queue()
|
||||
* or the explicit one will make sure the above write on
|
||||
* PREEMPT_ONLY is observed in blk_queue_enter() before
|
||||
* running blk_mq_unfreeze_queue().
|
||||
*
|
||||
* blk_mq_freeze_queue() also drains up any request in queue,
|
||||
* so blk_queue_enter() will see the above updated value of
|
||||
* PREEMPT flag before any new allocation.
|
||||
*/
|
||||
if (!blk_mq_freeze_queue(q))
|
||||
synchronize_rcu();
|
||||
|
||||
blk_mq_unfreeze_queue(q);
|
||||
}
|
||||
EXPORT_SYMBOL(blk_set_preempt_only);
|
||||
|
||||
/**
|
||||
* __blk_run_queue_uncond - run a queue whether or not it has been stopped
|
||||
* @q: The queue to run
|
||||
@ -923,16 +895,8 @@ int blk_queue_enter(struct request_queue *q, unsigned int op)
|
||||
{
|
||||
while (true) {
|
||||
|
||||
rcu_read_lock_sched();
|
||||
if (__percpu_ref_tryget_live(&q->q_usage_counter)) {
|
||||
if (likely((op & REQ_PREEMPT) ||
|
||||
!blk_queue_preempt_only(q))) {
|
||||
rcu_read_unlock_sched();
|
||||
return 0;
|
||||
} else
|
||||
percpu_ref_put(&q->q_usage_counter);
|
||||
}
|
||||
rcu_read_unlock_sched();
|
||||
if (percpu_ref_tryget_live(&q->q_usage_counter))
|
||||
return 0;
|
||||
|
||||
if (op & REQ_NOWAIT)
|
||||
return -EBUSY;
|
||||
@ -947,10 +911,8 @@ int blk_queue_enter(struct request_queue *q, unsigned int op)
|
||||
smp_rmb();
|
||||
|
||||
wait_event(q->mq_freeze_wq,
|
||||
(!atomic_read(&q->mq_freeze_depth) &&
|
||||
((op & REQ_PREEMPT) ||
|
||||
!blk_queue_preempt_only(q))) ||
|
||||
blk_queue_dying(q));
|
||||
!atomic_read(&q->mq_freeze_depth) ||
|
||||
blk_queue_dying(q));
|
||||
if (blk_queue_dying(q))
|
||||
return -ENODEV;
|
||||
}
|
||||
|
@ -695,7 +695,6 @@ struct request_queue {
|
||||
#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
|
||||
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
|
||||
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
|
||||
#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
|
||||
|
||||
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_STACKABLE) | \
|
||||
(1 << QUEUE_FLAG_SAME_COMP) | \
|
||||
@ -804,10 +803,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
|
||||
REQ_FAILFAST_DRIVER))
|
||||
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
|
||||
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
|
||||
#define blk_queue_preempt_only(q) \
|
||||
test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
|
||||
|
||||
extern void blk_set_preempt_only(struct request_queue *q, bool preempt_only);
|
||||
|
||||
static inline bool blk_account_rq(struct request *rq)
|
||||
{
|
||||
|
Loading…
x
Reference in New Issue
Block a user