block: use ktime_get_ns() instead of sched_clock() for cfq and bfq

cfq and bfq have some internal fields that use sched_clock() which can
trivially use ktime_get_ns() instead. Their timestamp fields in struct
request can also use ktime_get_ns(), which resolves the 8-year-old
comment added by commit 28f4197e5d47 ("block: disable preemption before
using sched_clock()").

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: azrim <mirzaspc@gmail.com>
This commit is contained in:
Omar Sandoval 2018-05-09 02:08:51 -07:00 committed by azrim
parent 43de7fae25
commit 0beacf4e16
No known key found for this signature in database
GPG Key ID: 497F8FB059B45D1C
2 changed files with 16 additions and 16 deletions

View File

@@ -55,13 +55,13 @@ BFQG_FLAG_FNS(empty)
/* This should be called with the scheduler lock held. */ /* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{ {
unsigned long long now; u64 now;
if (!bfqg_stats_waiting(stats)) if (!bfqg_stats_waiting(stats))
return; return;
now = sched_clock(); now = ktime_get_ns();
if (time_after64(now, stats->start_group_wait_time)) if (now > stats->start_group_wait_time)
blkg_stat_add(&stats->group_wait_time, blkg_stat_add(&stats->group_wait_time,
now - stats->start_group_wait_time); now - stats->start_group_wait_time);
bfqg_stats_clear_waiting(stats); bfqg_stats_clear_waiting(stats);
@@ -77,20 +77,20 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
return; return;
if (bfqg == curr_bfqg) if (bfqg == curr_bfqg)
return; return;
stats->start_group_wait_time = sched_clock(); stats->start_group_wait_time = ktime_get_ns();
bfqg_stats_mark_waiting(stats); bfqg_stats_mark_waiting(stats);
} }
/* This should be called with the scheduler lock held. */ /* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{ {
unsigned long long now; u64 now;
if (!bfqg_stats_empty(stats)) if (!bfqg_stats_empty(stats))
return; return;
now = sched_clock(); now = ktime_get_ns();
if (time_after64(now, stats->start_empty_time)) if (now > stats->start_empty_time)
blkg_stat_add(&stats->empty_time, blkg_stat_add(&stats->empty_time,
now - stats->start_empty_time); now - stats->start_empty_time);
bfqg_stats_clear_empty(stats); bfqg_stats_clear_empty(stats);
@@ -116,7 +116,7 @@ void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
if (bfqg_stats_empty(stats)) if (bfqg_stats_empty(stats))
return; return;
stats->start_empty_time = sched_clock(); stats->start_empty_time = ktime_get_ns();
bfqg_stats_mark_empty(stats); bfqg_stats_mark_empty(stats);
} }
@@ -125,9 +125,9 @@ void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
struct bfqg_stats *stats = &bfqg->stats; struct bfqg_stats *stats = &bfqg->stats;
if (bfqg_stats_idling(stats)) { if (bfqg_stats_idling(stats)) {
unsigned long long now = sched_clock(); u64 now = ktime_get_ns();
if (time_after64(now, stats->start_idle_time)) if (now > stats->start_idle_time)
blkg_stat_add(&stats->idle_time, blkg_stat_add(&stats->idle_time,
now - stats->start_idle_time); now - stats->start_idle_time);
bfqg_stats_clear_idling(stats); bfqg_stats_clear_idling(stats);
@@ -138,7 +138,7 @@ void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{ {
struct bfqg_stats *stats = &bfqg->stats; struct bfqg_stats *stats = &bfqg->stats;
stats->start_idle_time = sched_clock(); stats->start_idle_time = ktime_get_ns();
bfqg_stats_mark_idling(stats); bfqg_stats_mark_idling(stats);
} }

View File

@@ -730,9 +730,9 @@ struct bfqg_stats {
/* total time with empty current active q with other requests queued */ /* total time with empty current active q with other requests queued */
struct blkg_stat empty_time; struct blkg_stat empty_time;
/* fields after this shouldn't be cleared on stat reset */ /* fields after this shouldn't be cleared on stat reset */
uint64_t start_group_wait_time; u64 start_group_wait_time;
uint64_t start_idle_time; u64 start_idle_time;
uint64_t start_empty_time; u64 start_empty_time;
uint16_t flags; uint16_t flags;
#endif /* CONFIG_BFQ_GROUP_IOSCHED */ #endif /* CONFIG_BFQ_GROUP_IOSCHED */
}; };
@@ -854,8 +854,8 @@ void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
unsigned int op); unsigned int op);
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op); void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op); void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time, void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
uint64_t io_start_time, unsigned int op); u64 io_start_time_ns, unsigned int op);
void bfqg_stats_update_dequeue(struct bfq_group *bfqg); void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg); void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
void bfqg_stats_update_idle_time(struct bfq_group *bfqg); void bfqg_stats_update_idle_time(struct bfq_group *bfqg);