schedutil: Restore CAF's hispeed boost and predicted load

This reverts commit f0504b6779eed2da109c403b2f986be2fb80ebec.

Change-Id: I93328b5782682f1a3545c763dc585f73e7a43ec7
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
Author: Richard Raya <rdxzv.dev@gmail.com>
Date:   2024-05-17 18:26:12 -03:00
Parent: 4198920708
Commit: 4d42f71668

@@ -25,6 +25,9 @@ struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int up_rate_limit_us;
unsigned int down_rate_limit_us;
unsigned int hispeed_load;
unsigned int hispeed_freq;
bool pl;
};
struct sugov_policy {
@@ -39,8 +42,14 @@ struct sugov_policy {
s64 up_rate_delay_ns;
s64 down_rate_delay_ns;
s64 freq_update_delay_ns;
u64 last_ws;
u64 curr_cycles;
u64 last_cyc_update_time;
unsigned long avg_cap;
unsigned int next_freq;
unsigned int cached_raw_freq;
unsigned long hispeed_util;
unsigned long max;
/* The next fields are only needed if fast switch cannot be used. */
struct irq_work irq_work;
@@ -152,6 +161,71 @@ static inline bool use_pelt(void)
#endif
}
static inline bool conservative_pl(void)
{
#ifdef CONFIG_SCHED_WALT
return sysctl_sched_conservative_pl;
#else
return false;
#endif
}
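/*
 * Convert a frequency in kHz into a capacity value, scaled against the
 * capacity available at the policy's cpuinfo.max_freq.
 */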
static unsigned long freq_to_util(struct sugov_policy *sg_policy,
unsigned int freq)
{
return mult_frac(sg_policy->max, freq,
sg_policy->policy->cpuinfo.max_freq);
}
#define KHZ 1000
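/*
 * Accumulate the cycles run in the current WALT window since the last
 * update: cycles = prev_freq (kHz) * delta_ns / (NSEC_PER_SEC / KHZ).
 */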
static void sugov_track_cycles(struct sugov_policy *sg_policy,
unsigned int prev_freq,
u64 upto)
{
u64 delta_ns, cycles;
u64 next_ws = sg_policy->last_ws + sched_ravg_window;
if (unlikely(!sysctl_sched_use_walt_cpu_util))
return;
upto = min(upto, next_ws);
/* Track cycles in current window */
delta_ns = upto - sg_policy->last_cyc_update_time;
delta_ns *= prev_freq;
do_div(delta_ns, (NSEC_PER_SEC / KHZ));
cycles = delta_ns;
sg_policy->curr_cycles += cycles;
sg_policy->last_cyc_update_time = upto;
}
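/*
 * At a window rollover, turn the accumulated cycle count into the average
 * frequency actually run over the previous window and cache it as a
 * capacity (avg_cap) for the hispeed_load comparison.
 */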
static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
unsigned int prev_freq)
{
u64 last_ws = sg_policy->last_ws;
unsigned int avg_freq;
if (unlikely(!sysctl_sched_use_walt_cpu_util))
return;
BUG_ON(curr_ws < last_ws);
if (curr_ws <= last_ws)
return;
/* If we skipped some windows */
if (curr_ws > (last_ws + sched_ravg_window)) {
avg_freq = prev_freq;
/* Reset tracking history */
sg_policy->last_cyc_update_time = curr_ws;
} else {
sugov_track_cycles(sg_policy, prev_freq, curr_ws);
avg_freq = sg_policy->curr_cycles;
avg_freq /= sched_ravg_window / (NSEC_PER_SEC / KHZ);
}
sg_policy->avg_cap = freq_to_util(sg_policy, avg_freq);
sg_policy->curr_cycles = 0;
sg_policy->last_ws = curr_ws;
}
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
unsigned int next_freq)
{
@@ -171,6 +245,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
sg_policy->last_freq_update_time = time;
if (policy->fast_switch_enabled) {
sugov_track_cycles(sg_policy, sg_policy->policy->cur, time);
next_freq = cpufreq_driver_fast_switch(policy, next_freq);
if (!next_freq)
return;
@@ -186,6 +261,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
}
}
#define TARGET_LOAD 80
/**
* get_next_freq - Compute a new frequency for a given cpufreq policy.
* @sg_policy: schedutil policy object to compute the new frequency for.
@@ -332,17 +408,49 @@ static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
#define NL_RATIO 75
#define DEFAULT_HISPEED_LOAD 90
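/*
 * WALT-specific adjustments: lift util to hispeed_util once the CPU runs
 * above hispeed_load percent of avg_cap, go straight to max when new-task
 * load dominates, and factor in predicted load when the pl tunable is set.
 */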
static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
unsigned long *max)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
unsigned long nl = sg_cpu->walt_load.nl;
unsigned long cpu_util = sg_cpu->util;
bool is_hiload;
unsigned long pl = sg_cpu->walt_load.pl;
if (unlikely(!sysctl_sched_use_walt_cpu_util))
return;
is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
sg_policy->tunables->hispeed_load,
100));
if (is_hiload && !is_migration)
*util = max(*util, sg_policy->hispeed_util);
if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
*util = *max;
if (sg_policy->tunables->pl) {
if (conservative_pl())
pl = mult_frac(pl, TARGET_LOAD, 100);
*util = max(*util, pl);
}
}
static void sugov_update_single(struct update_util_data *hook, u64 time,
unsigned int flags)
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
unsigned long util, max;
unsigned long util, max, hs_util;
unsigned int next_f;
bool busy;
if (flags & SCHED_CPUFREQ_PL)
if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
return;
flags &= ~SCHED_CPUFREQ_RT_DL;
@@ -363,13 +471,34 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
busy = use_pelt() && !sg_policy->need_freq_update &&
sugov_cpu_is_busy(sg_cpu);
raw_spin_lock(&sg_policy->update_lock);
if (flags & SCHED_CPUFREQ_DL) {
/* clear cache when it's bypassed */
sg_policy->cached_raw_freq = 0;
next_f = policy->cpuinfo.max_freq;
} else {
sugov_get_util(&util, &max, sg_cpu->cpu, time);
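/* Refresh the cached hispeed_util whenever the CPU's max capacity changes. */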
if (sg_policy->max != max) {
sg_policy->max = max;
hs_util = freq_to_util(sg_policy,
sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
}
sg_cpu->util = util;
sg_cpu->max = max;
sg_cpu->flags = flags;
sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
sg_policy->policy->cur);
trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util,
sg_policy->avg_cap, max, sg_cpu->walt_load.nl,
sg_cpu->walt_load.pl, flags);
sugov_iowait_boost(sg_cpu, &util, &max);
sugov_walt_adjust(sg_cpu, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
/*
* Do not reduce the frequency if the CPU has not been idle
@@ -383,6 +512,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
}
}
sugov_update_commit(sg_policy, time, next_f);
raw_spin_unlock(&sg_policy->update_lock);
}
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
@@ -416,14 +546,22 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
return policy->cpuinfo.max_freq;
}
/*
* If the util value for all CPUs in a policy is 0, just using >
* will result in a max value of 1. WALT stats can later update
* the aggregated util value, causing get_next_freq() to compute
* freq = max_freq * 1.25 * (util / max) for nonzero util,
* leading to spurious jumps to fmax.
*/
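/*
 * Example: util/max start out as 0/1. With '>', a policy whose CPUs all
 * report util == 0 never copies j_max, so a later pl/nl adjustment that
 * raises util gets divided by max == 1 in get_next_freq() and the request
 * lands on fmax. With '>=', max is taken from the CPUs even when util is 0.
 */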
j_util = j_sg_cpu->util;
j_max = j_sg_cpu->max;
if (j_util * max > j_max * util) {
if (j_util * max >= j_max * util) {
util = j_util;
max = j_max;
}
sugov_iowait_boost(j_sg_cpu, &util, &max);
sugov_walt_adjust(j_sg_cpu, &util, &max);
}
return get_next_freq(sg_policy, util, max);
@@ -434,10 +572,10 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned long util, max;
unsigned long util, max, hs_util;
unsigned int next_f;
if (flags & SCHED_CPUFREQ_PL)
if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
return;
sugov_get_util(&util, &max, sg_cpu->cpu, time);
@@ -446,6 +584,14 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
raw_spin_lock(&sg_policy->update_lock);
if (sg_policy->max != max) {
sg_policy->max = max;
hs_util = freq_to_util(sg_policy,
sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
}
sg_cpu->util = util;
sg_cpu->max = max;
sg_cpu->flags = flags;
@@ -453,6 +599,13 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
sugov_set_iowait_boost(sg_cpu, time);
sg_cpu->last_update = time;
sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
sg_policy->policy->cur);
trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, sg_policy->avg_cap,
max, sg_cpu->walt_load.nl,
sg_cpu->walt_load.pl, flags);
if (sugov_should_update_freq(sg_policy, time) &&
!(flags & SCHED_CPUFREQ_CONTINUE)) {
if (flags & SCHED_CPUFREQ_DL) {
@@ -472,8 +625,13 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
static void sugov_work(struct kthread_work *work)
{
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
unsigned long flags;
mutex_lock(&sg_policy->work_lock);
raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
sugov_track_cycles(sg_policy, sg_policy->policy->cur,
sched_ktime_clock());
raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
CPUFREQ_RELATION_L);
mutex_unlock(&sg_policy->work_lock);
@@ -578,12 +736,88 @@ static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
return count;
}
static ssize_t hispeed_load_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_load);
}
static ssize_t hispeed_load_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
if (kstrtouint(buf, 10, &tunables->hispeed_load))
return -EINVAL;
tunables->hispeed_load = min(100U, tunables->hispeed_load);
return count;
}
static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_freq);
}
static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
unsigned int val;
struct sugov_policy *sg_policy;
unsigned long hs_util;
unsigned long flags;
if (kstrtouint(buf, 10, &val))
return -EINVAL;
tunables->hispeed_freq = val;
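/*
 * Recompute the cached hispeed_util of every policy attached to these
 * tunables so the new hispeed_freq takes effect immediately.
 */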
list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
hs_util = freq_to_util(sg_policy,
sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
}
return count;
}
static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->pl);
}
static ssize_t pl_store(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
if (kstrtobool(buf, &tunables->pl))
return -EINVAL;
return count;
}
static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);
static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
static struct governor_attr pl = __ATTR_RW(pl);
static struct attribute *sugov_attributes[] = {
&up_rate_limit_us.attr,
&down_rate_limit_us.attr,
&hispeed_load.attr,
&hispeed_freq.attr,
&pl.attr,
NULL
};
@@ -702,6 +936,9 @@ static void sugov_tunables_save(struct cpufreq_policy *policy,
per_cpu(cached_tunables, cpu) = cached;
}
cached->pl = tunables->pl;
cached->hispeed_load = tunables->hispeed_load;
cached->hispeed_freq = tunables->hispeed_freq;
cached->up_rate_limit_us = tunables->up_rate_limit_us;
cached->down_rate_limit_us = tunables->down_rate_limit_us;
}
@@ -721,6 +958,9 @@ static void sugov_tunables_restore(struct cpufreq_policy *policy)
if (!cached)
return;
tunables->pl = cached->pl;
tunables->hispeed_load = cached->hispeed_load;
tunables->hispeed_freq = cached->hispeed_freq;
tunables->up_rate_limit_us = cached->up_rate_limit_us;
tunables->down_rate_limit_us = cached->down_rate_limit_us;
update_min_rate_limit_ns(sg_policy);
@@ -770,6 +1010,8 @@ static int sugov_init(struct cpufreq_policy *policy)
tunables->up_rate_limit_us = 500;
tunables->down_rate_limit_us = 1000;
tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
tunables->hispeed_freq = 0;
policy->governor_data = sg_policy;
sg_policy->tunables = tunables;
@@ -885,14 +1127,27 @@ static void sugov_limits(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy = policy->governor_data;
unsigned long flags;
unsigned int ret;
int cpu;
if (!policy->fast_switch_enabled) {
mutex_lock(&sg_policy->work_lock);
raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
sugov_track_cycles(sg_policy, sg_policy->policy->cur,
sched_ktime_clock());
raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
cpufreq_policy_apply_limits(policy);
mutex_unlock(&sg_policy->work_lock);
} else {
raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
cpufreq_policy_apply_limits_fast(policy);
sugov_track_cycles(sg_policy, sg_policy->policy->cur,
ktime_get_ns());
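/*
 * Apply the updated limits via fast switch; if the frequency actually
 * moved, record it in policy->cur and emit the cpu_frequency tracepoint
 * for each CPU in the policy.
 */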
ret = cpufreq_policy_apply_limits_fast(policy);
if (ret && policy->cur != ret) {
policy->cur = ret;
for_each_cpu(cpu, policy->cpus)
trace_cpu_frequency(ret, cpu);
}
raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
}
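
For readers who do not have the CAF tree at hand, the standalone sketch below is illustrative only and is not part of the commit; the struct and function names are invented for the example. It shows how the restored pieces fit together: hispeed_freq is turned into a utilization floor (hispeed_util), a CPU running above hispeed_load percent of the capacity it averaged over the last WALT window (avg_cap) is lifted to that floor or to max when new-task load dominates, and WALT's predicted load is honoured when the pl tunable is set. It simplifies sugov_walt_adjust() above by omitting the inter-cluster migration check and the conservative-pl scaling.

/*
 * Illustrative sketch only -- simplified from the patch above; plain
 * integers so it stands alone as a userspace program.
 */
#include <stdbool.h>
#include <stdio.h>

#define TARGET_LOAD	80	/* percent, same default as the patch */
#define NL_RATIO	75	/* percent, same default as the patch */

struct example_policy {
	unsigned long max;		/* capacity at max_freq */
	unsigned int max_freq;		/* kHz */
	unsigned int hispeed_load;	/* percent of avg_cap */
	unsigned long hispeed_util;	/* capacity floor once hi-loaded */
	unsigned long avg_cap;		/* capacity actually run last window */
};

/* kHz -> capacity, same scaling as freq_to_util() in the patch */
static unsigned long example_freq_to_util(const struct example_policy *p,
					  unsigned int freq)
{
	return p->max * freq / p->max_freq;
}

/* Mirrors the decisions made by sugov_walt_adjust() */
static unsigned long example_boosted_util(const struct example_policy *p,
					  unsigned long cpu_util,
					  unsigned long nl,  /* new-task load */
					  unsigned long pl,  /* predicted load */
					  bool pl_enabled)
{
	bool is_hiload = cpu_util >= p->avg_cap * p->hispeed_load / 100;
	unsigned long util = cpu_util;

	if (is_hiload && util < p->hispeed_util)
		util = p->hispeed_util;		/* jump to hispeed_freq */
	if (is_hiload && nl >= cpu_util * NL_RATIO / 100)
		util = p->max;			/* new tasks dominate: fmax */
	if (pl_enabled && pl > util)
		util = pl;			/* honour predicted load */
	return util;
}

int main(void)
{
	struct example_policy p = {
		.max = 1024, .max_freq = 2400000,
		.hispeed_load = 90, .avg_cap = 600,
	};

	p.hispeed_util = example_freq_to_util(&p, 1800000) * TARGET_LOAD / 100;

	/* 560 >= 90% of avg_cap 600, so util is lifted to hispeed_util */
	printf("%lu\n", example_boosted_util(&p, 560, 0, 0, false));
	return 0;
}

Built as a normal userspace program, this prints 614: 560 exceeds 90 percent of avg_cap 600, so utilization is lifted to hispeed_util (768 * 80 / 100).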