From 76816685f876ca304982d7a859f659d92059f076 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Wed, 10 May 2023 09:32:41 -0700
Subject: [PATCH] block: Improve shared tag set performance

Remove the code for fair tag sharing because it significantly hurts
performance for UFS devices. Removing this code is safe because the
legacy block layer worked fine without any equivalent fairness
algorithm.

This algorithm hurts performance for UFS devices because UFS devices
have multiple logical units. One of these logical units (WLUN) is used
to submit control commands, e.g. START STOP UNIT. If any request is
submitted to the WLUN, the queue depth is reduced from 31 to 15 or
lower for data LUNs.

See also https://lore.kernel.org/linux-scsi/20221229030645.11558-1-ed.tsai@mediatek.com/

Bug: 281845090
Change-Id: Ia6d75917d533f32fffc68348b52fd3d972c9074c
Link: https://lore.kernel.org/linux-block/20230103195337.158625-1-bvanassche@acm.org/
Cc: Christoph Hellwig
Cc: Martin K. Petersen
Cc: Ming Lei
Cc: Keith Busch
Cc: Damien Le Moal
Cc: Ed Tsai
Signed-off-by: Bart Van Assche
Signed-off-by: Richard Raya
---
 block/blk-mq-tag.c | 34 ----------------------------------
 1 file changed, 34 deletions(-)

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index e4b3eeaffc82..4e24ecb08cf1 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -59,43 +59,9 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 	blk_mq_tag_wakeup_all(tags, false);
 }
 
-/*
- * For shared tag users, we track the number of currently active users
- * and attempt to provide a fair share of the tag depth for each of them.
- */
-static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
-				  struct sbitmap_queue *bt)
-{
-	unsigned int depth, users;
-
-	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
-		return true;
-	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-		return true;
-
-	/*
-	 * Don't try dividing an ant
-	 */
-	if (bt->sb.depth == 1)
-		return true;
-
-	users = atomic_read(&hctx->tags->active_queues);
-	if (!users)
-		return true;
-
-	/*
-	 * Allow at least some tags
-	 */
-	depth = max((bt->sb.depth + users - 1) / users, 4U);
-	return atomic_read(&hctx->nr_active) < depth;
-}
-
 static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
 			    struct sbitmap_queue *bt)
 {
-	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
-	    !hctx_may_queue(data->hctx, bt))
-		return -1;
 	if (data->shallow_depth)
 		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
 	else
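
For illustration, a minimal user-space sketch (not kernel code, not part
of the patch) of the fair-share formula deleted above, showing why a
single WLUN request shrinks the per-queue tag budget for data LUNs. The
helper name fair_share_depth() is hypothetical; the 31-tag depth comes
from the commit message, and the exact in-flight limit in the kernel can
differ slightly depending on when hctx->nr_active is sampled.

#include <stdio.h>

/* Mirrors depth = max((bt->sb.depth + users - 1) / users, 4U) from the
 * removed hctx_may_queue(): round-up division of the shared tag depth
 * among active queues, with a floor of four tags. */
static unsigned int fair_share_depth(unsigned int total_depth,
				     unsigned int active_users)
{
	unsigned int depth;

	if (active_users == 0 || total_depth == 1)
		return total_depth;
	depth = (total_depth + active_users - 1) / active_users;
	return depth > 4 ? depth : 4;
}

int main(void)
{
	unsigned int total = 31;	/* UFS-like shared tag set */
	unsigned int users;

	/* One data LUN alone sees the full depth; as soon as a WLUN
	 * control command marks a second queue active, each queue is
	 * capped at roughly total/users tags. */
	for (users = 1; users <= 3; users++)
		printf("%u active queue(s): per-queue depth = %u\n",
		       users, fair_share_depth(total, users));

	return 0;
}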