Revert "soc: qcom: watchdog_v2: Optimize IPI pings to reduce system jitter"

This reverts commit f1786dc2072e9343706e84c5935d156766114c23.

Change-Id: I2c90d025a74f260848e0edc6d31035a282e146e2
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
This commit is contained in:
Richard Raya 2024-09-08 20:45:30 -03:00
parent d414205a86
commit 8f25c15578
3 changed files with 37 additions and 62 deletions

View File

@@ -82,8 +82,7 @@ struct msm_watchdog_data {
unsigned int min_slack_ticks;
unsigned long long min_slack_ns;
void *scm_regsave;
atomic_t alive_mask;
atomic_t pinged_mask;
cpumask_t alive_mask;
struct mutex disable_lock;
bool irq_ppi;
struct msm_watchdog_data __percpu **wdog_cpu_dd;
@@ -101,6 +100,8 @@ struct msm_watchdog_data {
bool user_pet_complete;
unsigned long long timer_fired;
unsigned long long thread_start;
unsigned long long ping_start[NR_CPUS];
unsigned long long ping_end[NR_CPUS];
unsigned int cpu_scandump_sizes[NR_CPUS];
/* When single buffer is used to collect Scandump */
@@ -145,8 +146,8 @@ static void dump_cpu_alive_mask(struct msm_watchdog_data *wdog_dd)
{
static char alive_mask_buf[MASK_SIZE];
scnprintf(alive_mask_buf, MASK_SIZE, "%x",
atomic_read(&wdog_dd->alive_mask));
scnprintf(alive_mask_buf, MASK_SIZE, "%*pb1", cpumask_pr_args(
&wdog_dd->alive_mask));
dev_info(wdog_dd->dev, "cpu alive mask from last pet %s\n",
alive_mask_buf);
}
@@ -394,59 +395,33 @@ static void pet_watchdog(struct msm_watchdog_data *wdog_dd)
static void keep_alive_response(void *info)
{
struct msm_watchdog_data *wdog_dd = wdog_data;
unsigned int this_cpu_bit = (unsigned long)info >> 32;
unsigned int final_alive_mask = (unsigned int)(long)info;
unsigned int old;
int cpu = smp_processor_id();
struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)info;
/* Wake up the watchdog task if we're the final pinged CPU */
old = atomic_fetch_or_relaxed(this_cpu_bit, &wdog_data->alive_mask);
if (old == (final_alive_mask & ~this_cpu_bit))
wake_up_process(wdog_dd->watchdog_task);
cpumask_set_cpu(cpu, &wdog_dd->alive_mask);
wdog_dd->ping_end[cpu] = sched_clock();
/* Make sure alive mask is cleared and set in order */
smp_mb();
}
static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
/*
* If this function does not return, it implies one of the
* other cpu's is not responsive.
*/
static void ping_other_cpus(struct msm_watchdog_data *wdog_dd)
{
unsigned long online_mask, ping_mask = 0;
unsigned int final_alive_mask;
int cpu, this_cpu;
int cpu;
/*
* Ping all CPUs other than the current one asynchronously so that we
* don't spend a lot of time spinning on the current CPU with IRQs
* disabled (which is what smp_call_function_single() does in
* synchronous mode).
*/
migrate_disable();
this_cpu = raw_smp_processor_id();
atomic_set(&wdog_dd->alive_mask, BIT(this_cpu));
online_mask = *cpumask_bits(cpu_online_mask) & ~BIT(this_cpu);
for_each_cpu(cpu, to_cpumask(&online_mask)) {
if (!cpu_idle_pc_state[cpu] && !cpu_isolated(cpu))
ping_mask |= BIT(cpu);
cpumask_clear(&wdog_dd->alive_mask);
/* Make sure alive mask is cleared and set in order */
smp_mb();
for_each_cpu(cpu, cpu_online_mask) {
if (!cpu_idle_pc_state[cpu] && !cpu_isolated(cpu)) {
wdog_dd->ping_start[cpu] = sched_clock();
smp_call_function_single(cpu, keep_alive_response,
wdog_dd, 1);
}
}
final_alive_mask = ping_mask | BIT(this_cpu);
for_each_cpu(cpu, to_cpumask(&ping_mask)) {
generic_exec_single(cpu, per_cpu_ptr(&csd_data, cpu),
keep_alive_response,
(void *)(BIT(cpu + 32) | final_alive_mask));
}
migrate_enable();
atomic_set(&wdog_dd->pinged_mask, final_alive_mask);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&wdog_dd->alive_mask) == final_alive_mask)
break;
schedule();
}
__set_current_state(TASK_RUNNING);
}
/* Set pet observer to expire 1 second before watchdog bite */
@@ -488,7 +463,7 @@ static __ref int watchdog_kthread(void *arg)
(struct msm_watchdog_data *)arg;
unsigned long delay_time = 0;
struct sched_param param = {.sched_priority = MAX_RT_PRIO-1};
int ret;
int ret, cpu;
sched_setscheduler(current, SCHED_FIFO, &param);
while (!kthread_should_stop()) {
@@ -498,6 +473,9 @@ static __ref int watchdog_kthread(void *arg)
} while (ret != 0);
wdog_dd->thread_start = sched_clock();
for_each_cpu(cpu, cpu_present_mask)
wdog_dd->ping_start[cpu] = wdog_dd->ping_end[cpu] = 0;
if (wdog_dd->do_ipi_ping)
ping_other_cpus(wdog_dd);
@@ -612,18 +590,17 @@ static void print_wdog_data(struct msm_watchdog_data *wdog_dd)
/* Check if pet task is running */
wdog_task = wdog_dd->watchdog_task;
if (wdog_task) {
if (wdog_task->state == TASK_UNINTERRUPTIBLE) {
unsigned long dead_mask;
if (wdog_task->on_cpu) {
dev_info(wdog_dd->dev, "Pet task is running on CPU%d\n",
task_cpu(wdog_task));
dead_mask = atomic_read(&wdog_dd->alive_mask) ^
atomic_read(&wdog_dd->pinged_mask);
for_each_cpu(cpu, to_cpumask(&dead_mask)) {
dev_info(wdog_dd->dev,
"CPU%d did not respond to IPI ping\n",
cpu);
for_each_cpu(cpu, cpu_present_mask) {
if (wdog_dd->ping_start[cpu] != 0 &&
wdog_dd->ping_end[cpu] == 0) {
dev_info(wdog_dd->dev, "CPU%d did not "
"respond to IPI ping\n",
cpu);
break;
}
}
} else if (wdog_task->state == TASK_RUNNING) {
dev_info(wdog_dd->dev, "Pet task is waiting on CPU%d\n",
@@ -1031,6 +1008,7 @@ static int msm_watchdog_probe(struct platform_device *pdev)
wdog_data = wdog_dd;
wdog_dd->dev = &pdev->dev;
platform_set_drvdata(pdev, wdog_dd);
cpumask_clear(&wdog_dd->alive_mask);
wdog_dd->watchdog_task = kthread_create(watchdog_kthread, wdog_dd,
"msm_watchdog");
if (IS_ERR(wdog_dd->watchdog_task)) {

View File

@@ -97,8 +97,6 @@ extern void smp_cpus_done(unsigned int max_cpus);
/*
* Call a function on all other processors
*/
int generic_exec_single(int cpu, struct __call_single_data *csd,
smp_call_func_t func, void *info);
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
smp_call_func_t func, void *info, bool wait);

View File

@@ -130,8 +130,7 @@ static __always_inline void csd_lock(struct __call_single_data *csd)
static __always_inline void csd_unlock(struct __call_single_data *csd)
{
if (!(csd->flags & CSD_FLAG_LOCK))
return;
WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
/*
* ensure we're all done before releasing data:
@@ -148,7 +147,7 @@ extern void send_call_function_single_ipi(int cpu);
* for execution on the given CPU. data must already have
* ->func, ->info, and ->flags set.
*/
int generic_exec_single(int cpu, struct __call_single_data *csd,
static int generic_exec_single(int cpu, struct __call_single_data *csd,
smp_call_func_t func, void *info)
{
if (cpu == smp_processor_id()) {