cpu-boost: Rework scheduling setup

This patch minimizes input-boost latency by creating a dedicated kthread
worker and scheduling it with the realtime policy SCHED_FIFO.
The priority MAX_RT_PRIO - 2 lets the input-boost task preempt userspace
RT tasks when needed, while still being preempted by kernel-internal RT
tasks, which run at MAX_RT_PRIO - 1 or higher.
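
As a minimal sketch of that pattern (illustrative names; the actual wiring
is in the cpu-boost.c hunks below), a dedicated kthread worker is created
with kthread_run() and promoted to SCHED_FIFO via sched_setscheduler():

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/sched/rt.h>
	#include <uapi/linux/sched/types.h>

	static struct kthread_worker example_worker;
	static struct task_struct *example_thread;

	static int example_worker_setup(void)
	{
		/* One step below the kernel-internal RT ceiling of MAX_RT_PRIO - 1 */
		struct sched_param param = { .sched_priority = MAX_RT_PRIO - 2 };

		kthread_init_worker(&example_worker);
		example_thread = kthread_run(kthread_worker_fn, &example_worker,
					     "example_worker");
		if (IS_ERR(example_thread))
			return PTR_ERR(example_thread);

		/* Promote the worker thread to SCHED_FIFO at the chosen priority */
		sched_setscheduler(example_thread, SCHED_FIFO, &param);
		return 0;
	}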

Since the cpu-boost workqueue would now only handle the work that disables
a running input boost, which is not latency-critical, drop the dedicated
workqueue and move that work to the shared system workqueue.
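
Condensed before/after of that change (input_boost_rem and input_boost_ms
are the driver's own symbols, see the hunk below):

	/* Before: dedicated high-priority workqueue */
	queue_delayed_work(cpu_boost_wq, &input_boost_rem,
			   msecs_to_jiffies(input_boost_ms));

	/* After: the shared system workqueue suffices for the
	 * non-latency-critical boost-removal work */
	schedule_delayed_work(&input_boost_rem, msecs_to_jiffies(input_boost_ms));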

Change-Id: Ie4912d910ab01739197c4ba1737a1fbb8f61ecfa
Signed-off-by: Alex Naidis <alex.naidis@linux.com>
Signed-off-by: Park Ju Hyung <qkrwngud825@gmail.com>
Signed-off-by: UtsavisGreat <utsavbalar1231@gmail.com>
Author: Alex Naidis, 2017-03-01 21:56:09 +01:00 (committed by Richard Raya)
Parent: 3327194b36
Commit: 2a48fce1c2
3 files changed, 33 insertions(+), 23 deletions(-)

drivers/cpufreq/cpu-boost.c

@@ -17,11 +17,15 @@
 #include <linux/init.h>
 #include <linux/cpufreq.h>
 #include <linux/cpu.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
 #include <linux/input.h>
 #include <linux/time.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/sched/rt.h>
 
 struct cpu_sync {
 	int cpu;
@@ -30,9 +34,8 @@ struct cpu_sync {
 };
 
 static DEFINE_PER_CPU(struct cpu_sync, sync_info);
-static struct workqueue_struct *cpu_boost_wq;
-static struct work_struct input_boost_work;
+static struct kthread_work input_boost_work;
 
 static bool input_boost_enabled;
@@ -46,6 +49,10 @@ static bool sched_boost_active;
 static struct delayed_work input_boost_rem;
 static u64 last_input_time;
 
+static struct kthread_worker cpu_boost_worker;
+static struct task_struct *cpu_boost_worker_thread;
+
 #define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)
 
 static int set_input_boost_freq(const char *buf, const struct kernel_param *kp)
@@ -187,7 +194,7 @@ static void do_input_boost_rem(struct work_struct *work)
 	}
 }
 
-static void do_input_boost(struct work_struct *work)
+static void do_input_boost(struct kthread_work *work)
 {
 	unsigned int i, ret;
 	struct cpu_sync *i_sync_info;
@@ -217,8 +224,7 @@ static void do_input_boost(struct work_struct *work)
 		sched_boost_active = true;
 	}
 
-	queue_delayed_work(cpu_boost_wq, &input_boost_rem,
-				msecs_to_jiffies(input_boost_ms));
+	schedule_delayed_work(&input_boost_rem, msecs_to_jiffies(input_boost_ms));
 }
 
 static void cpuboost_input_event(struct input_handle *handle,
@@ -233,10 +239,10 @@ static void cpuboost_input_event(struct input_handle *handle,
 	if (now - last_input_time < MIN_INPUT_INTERVAL)
 		return;
 
-	if (work_pending(&input_boost_work))
+	if (queuing_blocked(&cpu_boost_worker, &input_boost_work))
 		return;
 
-	queue_work(cpu_boost_wq, &input_boost_work);
+	kthread_queue_work(&cpu_boost_worker, &input_boost_work);
 	last_input_time = ktime_to_us(ktime_get());
 }
 
@@ -315,12 +321,16 @@ static int cpu_boost_init(void)
 {
 	int cpu, ret;
 	struct cpu_sync *s;
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 2 };
 
-	cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
-	if (!cpu_boost_wq)
+	kthread_init_worker(&cpu_boost_worker);
+	cpu_boost_worker_thread = kthread_run(kthread_worker_fn,
+			&cpu_boost_worker, "cpu_boost_worker_thread");
+	if (IS_ERR(cpu_boost_worker_thread))
 		return -EFAULT;
 
-	INIT_WORK(&input_boost_work, do_input_boost);
+	sched_setscheduler(cpu_boost_worker_thread, SCHED_FIFO, &param);
+	kthread_init_work(&input_boost_work, do_input_boost);
 	INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem);
 
 	for_each_possible_cpu(cpu) {
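
Worth noting: kthread_queue_work() takes worker->lock and performs the same
queuing_blocked() check internally, returning false when the work is already
pending or being cancelled, so the unlocked queuing_blocked() call above is
only a best-effort fast path. A minimal sketch (hypothetical helper names)
relying on the return value alone:

	#include <linux/kthread.h>
	#include <linux/ktime.h>

	static u64 example_last_input_time;

	static void example_input_event(struct kthread_worker *worker,
					struct kthread_work *work)
	{
		/* kthread_queue_work() re-checks queuing_blocked() under
		 * worker->lock and returns false when the work is already
		 * pending or being cancelled, so only a successful queue
		 * updates the rate-limit timestamp. */
		if (kthread_queue_work(worker, work))
			example_last_input_time = ktime_to_us(ktime_get());
	}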

include/linux/kthread.h

@@ -192,6 +192,19 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
 			      TIMER_IRQSAFE);				\
 	} while (0)
 
+/*
+ * Returns true when the work could not be queued at the moment.
+ * It happens when it is already pending in a worker list
+ * or when it is being cancelled.
+ */
+static inline bool queuing_blocked(struct kthread_worker *worker,
+				   struct kthread_work *work)
+{
+	lockdep_assert_held(&worker->lock);
+
+	return !list_empty(&work->node) || work->canceling;
+}
+
 int kthread_worker_fn(void *worker_ptr);
 
 __printf(2, 3)
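
Because of the lockdep assertion, direct callers of queuing_blocked() are
expected to hold worker->lock. A hedged sketch of a locked query
(hypothetical helper; spinlock_t matches the kthread_worker of this era):

	#include <linux/kthread.h>
	#include <linux/spinlock.h>

	/* Hypothetical helper: call queuing_blocked() with worker->lock
	 * held, as its lockdep_assert_held() requires. */
	static bool example_work_busy(struct kthread_worker *worker,
				      struct kthread_work *work)
	{
		unsigned long flags;
		bool blocked;

		spin_lock_irqsave(&worker->lock, flags);
		blocked = queuing_blocked(worker, work);
		spin_unlock_irqrestore(&worker->lock, flags);

		return blocked;
	}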

kernel/kthread.c

@@ -775,19 +775,6 @@ kthread_create_worker_on_cpu(int cpu, unsigned int flags,
 }
 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
 
-/*
- * Returns true when the work could not be queued at the moment.
- * It happens when it is already pending in a worker list
- * or when it is being cancelled.
- */
-static inline bool queuing_blocked(struct kthread_worker *worker,
-				   struct kthread_work *work)
-{
-	lockdep_assert_held(&worker->lock);
-
-	return !list_empty(&work->node) || work->canceling;
-}
-
 static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
 					     struct kthread_work *work)
 {