mirror of
https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00
cpufreq: schedutil: Don't skip freq update when limits change
commit 600f5badb78c316146d062cfd7af4a2cfb655baa upstream.

To avoid reducing the frequency of a CPU prematurely, we skip reducing the frequency if the CPU had been busy recently.

This should not be done when the limits of the policy are changed, for example due to thermal throttling. We should always get the frequency within the new limits as soon as possible.

Trying to fix this by using only one flag, i.e. need_freq_update, can lead to a race condition where the flag gets cleared without forcing us to change the frequency at least once. And so this patch introduces another flag to avoid that race condition.

Fixes: ecd288429126 ("cpufreq: schedutil: Don't set next_freq to UINT_MAX")
Change-Id: Ifd3e127d5e7b33842a2f94a90e25541484df37fc
Cc: v4.18+ <stable@vger.kernel.org> # v4.18+
Reported-by: Doug Smythies <dsmythies@telus.net>
Tested-by: Doug Smythies <dsmythies@telus.net>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
This commit is contained in:
parent
897d9e3a9e
commit
16232faa0f
@ -50,6 +50,7 @@ struct sugov_policy {
|
||||
struct task_struct *thread;
|
||||
bool work_in_progress;
|
||||
|
||||
bool limits_changed;
|
||||
bool need_freq_update;
|
||||
};
|
||||
|
||||
@ -106,7 +107,9 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
|
||||
!cpufreq_can_do_remote_dvfs(sg_policy->policy))
|
||||
return false;
|
||||
|
||||
if (unlikely(sg_policy->need_freq_update)) {
|
||||
if (unlikely(sg_policy->limits_changed)) {
|
||||
sg_policy->limits_changed = false;
|
||||
sg_policy->need_freq_update = true;
|
||||
return true;
|
||||
}
|
||||
/* No need to recalculate next freq for min_rate_limit_us
|
||||
@ -354,7 +357,9 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
|
||||
if (!sugov_should_update_freq(sg_policy, time))
|
||||
return;
|
||||
|
||||
busy = use_pelt() && sugov_cpu_is_busy(sg_cpu);
|
||||
/* Limits may have changed, don't skip frequency update */
|
||||
busy = use_pelt() && !sg_policy->need_freq_update &&
|
||||
sugov_cpu_is_busy(sg_cpu);
|
||||
|
||||
if (flags & SCHED_CPUFREQ_DL) {
|
||||
/* clear cache when it's bypassed */
|
||||
@ -833,6 +838,7 @@ static int sugov_start(struct cpufreq_policy *policy)
|
||||
sg_policy->last_freq_update_time = 0;
|
||||
sg_policy->next_freq = 0;
|
||||
sg_policy->work_in_progress = false;
|
||||
sg_policy->limits_changed = false;
|
||||
sg_policy->need_freq_update = false;
|
||||
sg_policy->cached_raw_freq = 0;
|
||||
|
||||
@ -888,7 +894,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
|
||||
raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
|
||||
}
|
||||
|
||||
sg_policy->need_freq_update = true;
|
||||
sg_policy->limits_changed = true;
|
||||
}
|
||||
|
||||
static struct cpufreq_governor schedutil_gov = {
|
||||
|
Loading…
x
Reference in New Issue
Block a user