mirror of https://github.com/rd-stuffs/msm-4.14.git
Add UNPLUG traces to all appropriate places
Added blk_unplug interface, allowing all invocations of unplugs to result in a generated blktrace UNPLUG.

Signed-off-by: Alan D. Brunelle <Alan.Brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent d85532ed28
commit 2ad8b1ef11
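At each call site the change is mechanical; as a minimal before/after sketch (using the 2.6.24-era request_queue fields shown in the hunks below):

	/* before: callers open-coded the NULL check, and no trace was emitted */
	if (q->unplug_fn)
		q->unplug_fn(q);

	/* after: one exported helper does the check and logs BLK_TA_UNPLUG_IO */
	blk_unplug(q);

The helper itself is added in the second hunk of block/ll_rw_blk.c below.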
block/ll_rw_blk.c
@@ -1621,15 +1621,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 {
 	struct request_queue *q = bdi->unplug_io_data;
 
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-					q->rq.count[READ] + q->rq.count[WRITE]);
-
-		q->unplug_fn(q);
-	}
+	blk_unplug(q);
 }
 
 static void blk_unplug_work(struct work_struct *work)
@@ -1653,6 +1645,20 @@ static void blk_unplug_timeout(unsigned long data)
 	kblockd_schedule_work(&q->unplug_work);
 }
 
+void blk_unplug(struct request_queue *q)
+{
+	/*
+	 * devices don't necessarily have an ->unplug_fn defined
+	 */
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+					q->rq.count[READ] + q->rq.count[WRITE]);
+
+		q->unplug_fn(q);
+	}
+}
+EXPORT_SYMBOL(blk_unplug);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
drivers/md/bitmap.c
@@ -1207,8 +1207,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
 			spin_unlock_irq(&bitmap->lock);
-			bitmap->mddev->queue
-				->unplug_fn(bitmap->mddev->queue);
+			blk_unplug(bitmap->mddev->queue);
 			schedule();
 			finish_wait(&bitmap->overflow_wait, &__wait);
 			continue;
drivers/md/dm-table.c
@@ -1000,8 +1000,7 @@ void dm_table_unplug_all(struct dm_table *t)
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
 		struct request_queue *q = bdev_get_queue(dd->bdev);
 
-		if (q->unplug_fn)
-			q->unplug_fn(q);
+		blk_unplug(q);
 	}
 }
 
drivers/md/linear.c
@@ -87,8 +87,7 @@ static void linear_unplug(struct request_queue *q)
 
 	for (i=0; i < mddev->raid_disks; i++) {
 		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
-		if (r_queue->unplug_fn)
-			r_queue->unplug_fn(r_queue);
+		blk_unplug(r_queue);
 	}
 }
 
drivers/md/md.c
@@ -5445,7 +5445,7 @@ void md_do_sync(mddev_t *mddev)
 		 * about not overloading the IO subsystem. (things like an
 		 * e2fsck being done on the RAID array should execute fast)
 		 */
-		mddev->queue->unplug_fn(mddev->queue);
+		blk_unplug(mddev->queue);
 		cond_resched();
 
 		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -5464,7 +5464,7 @@ void md_do_sync(mddev_t *mddev)
 	 * this also signals 'finished resyncing' to md_stop
 	 */
  out:
-	mddev->queue->unplug_fn(mddev->queue);
+	blk_unplug(mddev->queue);
 
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
drivers/md/multipath.c
@@ -125,8 +125,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
drivers/md/raid0.c
@@ -35,8 +35,7 @@ static void raid0_unplug(struct request_queue *q)
 	for (i=0; i<mddev->raid_disks; i++) {
 		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
 
-		if (r_queue->unplug_fn)
-			r_queue->unplug_fn(r_queue);
+		blk_unplug(r_queue);
 	}
 }
 
drivers/md/raid1.c
@@ -549,8 +549,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
drivers/md/raid10.c
@@ -593,8 +593,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
drivers/md/raid5.c
@@ -3186,8 +3186,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
include/linux/blkdev.h
@@ -697,6 +697,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
 extern int blk_verify_command(unsigned char *, int);
+extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
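Since dm and the md raid personalities can be built as modules, the helper is declared in <linux/blkdev.h> and exported with EXPORT_SYMBOL, as the last two hunks show. A hypothetical modular caller (kick_queue is an illustrative name, not part of this patch) needs nothing more than:

	#include <linux/blkdev.h>

	/* dispatch any plugged I/O on q; emits a BLK_TA_UNPLUG_IO trace
	 * event if the queue defines ->unplug_fn */
	static void kick_queue(struct request_queue *q)
	{
		blk_unplug(q);
	}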