Revert "msm-4.14: Drop perf-critical API"

This reverts commit 1b396d869a6da9fa864d4de8235f2d0afc7164c1.

Change-Id: I13b4629e9aefcd23da2e58ef534c1057f81059cd
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
Author: Richard Raya <rdxzv.dev@gmail.com>
Date:   2025-01-31 19:31:47 -03:00
Parent: 0979e5af82
Commit: e72721b923
27 changed files with 359 additions and 27 deletions

@@ -813,6 +813,18 @@ config BIG_CPU_MASK
heterogeneous system. Use 0 if you are unsure, which just results in
this storing the bitmask of all available CPUs.
config BIG_CPU_DRM_MASK
int "Bitmask of available big CPUs to affine DRM"
default 64
help
This bitmask specifies which of the CPUs are for affining DRM irqs.
config BIG_CPU_KGSL_MASK
int "Bitmask of available big CPUs to affine KGSL"
default 128
help
This bitmask specifies which of the CPUs are for affining KGSL irqs.
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
select GENERIC_IRQ_MIGRATION

@@ -582,6 +582,8 @@ CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_LITTLE_CPU_MASK=63
CONFIG_BIG_CPU_MASK=192
CONFIG_BIG_CPU_DRM_MASK=64
CONFIG_BIG_CPU_KGSL_MASK=128
CONFIG_HOTPLUG_CPU=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
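
Note: these Kconfig values are plain CPU bitmasks. With CONFIG_NR_CPUS=8 on this target, 63 (0b00111111) covers the LITTLE CPUs 0-5, 192 (0b11000000) covers the big CPUs 6-7, and the two new masks 64 and 128 pick out CPU 6 for DRM and CPU 7 for KGSL respectively. The kernel/cpu.c hunk further down converts them with to_cpumask(); a minimal fragment showing how such a value decodes (illustrative only, not part of the patch):

	/* Decode CONFIG_BIG_CPU_KGSL_MASK=128 -> CPU 7 only. */
	static unsigned long kgsl_bits = 128;	/* 0b10000000 */
	int cpu;

	for_each_cpu(cpu, to_cpumask(&kgsl_bits))
		pr_info("KGSL IRQs affined to CPU%d\n", cpu);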

@@ -490,9 +490,13 @@ static int simple_lmk_init_set(const char *val, const struct kernel_param *kp)
struct task_struct *thread;
if (!atomic_cmpxchg(&init_done, 0, 1)) {
thread = kthread_run(simple_lmk_reaper_thread, NULL, "simple_lmkd_reaper");
thread = kthread_run_perf_critical(cpu_perf_mask,
simple_lmk_reaper_thread,
NULL, "simple_lmkd_reaper");
BUG_ON(IS_ERR(thread));
thread = kthread_run(simple_lmk_reclaim_thread, NULL, "simple_lmkd");
thread = kthread_run_perf_critical(cpu_perf_mask,
simple_lmk_reclaim_thread,
NULL, "simple_lmkd");
BUG_ON(IS_ERR(thread));
BUG_ON(vmpressure_notifier_register(&vmpressure_notif));
}

@@ -360,7 +360,7 @@ static int __init cpu_input_boost_init(void)
goto unregister_handler;
}
thread = kthread_run(cpu_boost_thread, b, "cpu_boostd");
thread = kthread_run_perf_critical(cpu_perf_mask, cpu_boost_thread, b, "cpu_boostd");
if (IS_ERR(thread)) {
ret = PTR_ERR(thread);
pr_err("Failed to start CPU boost thread, err: %d\n", ret);

@@ -319,8 +319,9 @@ static int __init devfreq_boost_init(void)
for (i = 0; i < DEVFREQ_MAX; i++) {
struct boost_dev *b = &d->devices[i];
thread[i] = kthread_run(devfreq_boost_thread, b,
"devfreq_boostd/%d", i);
thread[i] = kthread_run_perf_critical(cpu_perf_mask,
devfreq_boost_thread, b,
"devfreq_boostd/%d", i);
if (IS_ERR(thread[i])) {
ret = PTR_ERR(thread[i]);
pr_err("Failed to create kthread, err: %d\n", ret);

@@ -2412,7 +2412,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, void *data,
*/
struct pm_qos_request req = {
.type = PM_QOS_REQ_AFFINE_CORES,
.cpus_affine = BIT(raw_smp_processor_id())
.cpus_affine = BIT(raw_smp_processor_id()) |
*cpumask_bits(cpu_perf_mask)
};
int ret;
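
Note on the .cpus_affine change here (and in the identical _msm_drm_commit_work_cb hunk below): cpumask_bits() returns the underlying unsigned long array of a cpumask, so dereferencing it yields its first word. With NR_CPUS=8 that word is the entire mask, so the PM QoS request now covers the requesting CPU plus the whole big cluster instead of the requesting CPU alone (in this tree cpus_affine appears to be a plain bitmask rather than a struct cpumask).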

@@ -101,7 +101,7 @@
int drm_irq_install(struct drm_device *dev, int irq)
{
int ret;
unsigned long sh_flags = 0;
unsigned long sh_flags = IRQF_PERF_DRM_AFFINE;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;

@@ -619,7 +619,8 @@ static void _msm_drm_commit_work_cb(struct kthread_work *work)
commit_work);
struct pm_qos_request req = {
.type = PM_QOS_REQ_AFFINE_CORES,
.cpus_affine = BIT(raw_smp_processor_id())
.cpus_affine = BIT(raw_smp_processor_id()) |
*cpumask_bits(cpu_perf_mask)
};
/*

@@ -817,7 +817,8 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
kthread_init_worker(&priv->disp_thread[i].worker);
priv->disp_thread[i].dev = ddev;
priv->disp_thread[i].thread =
kthread_run(kthread_worker_fn,
kthread_run_perf_critical(cpu_perf_drm_mask,
kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
kthread_init_work(&priv->thread_priority_work,
@@ -835,7 +836,8 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
kthread_init_worker(&priv->event_thread[i].worker);
priv->event_thread[i].dev = ddev;
priv->event_thread[i].thread =
kthread_run(kthread_worker_fn,
kthread_run_perf_critical(cpu_perf_drm_mask,
kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
/**
@@ -881,7 +883,8 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
* other important events.
*/
kthread_init_worker(&priv->pp_event_worker);
priv->pp_event_thread = kthread_run(kthread_worker_fn, &priv->pp_event_worker, "pp_event");
priv->pp_event_thread = kthread_run_perf_critical(cpu_perf_drm_mask,
kthread_worker_fn, &priv->pp_event_worker, "pp_event");
kthread_init_work(&priv->thread_priority_work, msm_drm_display_thread_priority_worker);
kthread_queue_work(&priv->pp_event_worker, &priv->thread_priority_work);
kthread_flush_work(&priv->thread_priority_work);
@@ -907,6 +910,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
goto fail;
}
}
irq_set_perf_affinity(platform_get_irq(pdev, 0), IRQF_PERF_DRM_AFFINE);
ret = drm_dev_register(ddev, 0);
if (ret)

@@ -5071,8 +5071,8 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
}
status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num,
kgsl_irq_handler, IRQF_TRIGGER_HIGH,
device->name, device);
kgsl_irq_handler, IRQF_TRIGGER_HIGH |
IRQF_PERF_KGSL_AFFINE, device->name, device);
if (status) {
KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
device->pwrctrl.interrupt_num, status);
@@ -5227,10 +5227,13 @@ static void kgsl_core_exit(void)
}
static long kgsl_run_one_worker(struct kthread_worker *worker,
struct task_struct **thread, const char *name)
struct task_struct **thread, const char *name, bool perf_crit)
{
kthread_init_worker(worker);
*thread = kthread_run(kthread_worker_fn, worker, name);
if (perf_crit)
*thread = kthread_run_perf_critical(cpu_perf_kgsl_mask, kthread_worker_fn, worker, name);
else
*thread = kthread_run(kthread_worker_fn, worker, name);
if (IS_ERR(*thread)) {
pr_err("unable to start %s\n", name);
return PTR_ERR(thread);
@@ -5325,10 +5328,10 @@ static int __init kgsl_core_init(void)
if (IS_ERR_VALUE(kgsl_run_one_worker(&kgsl_driver.worker,
&kgsl_driver.worker_thread,
"kgsl_worker_thread")) ||
"kgsl_worker_thread", true)) ||
IS_ERR_VALUE(kgsl_run_one_worker(&kgsl_driver.low_prio_worker,
&kgsl_driver.low_prio_worker_thread,
"kgsl_low_prio_worker_thread")))
"kgsl_low_prio_worker_thread", false)))
goto err;
sched_setscheduler(kgsl_driver.worker_thread, SCHED_FIFO, &param);

@@ -670,7 +670,7 @@ static int fpc1020_probe(struct platform_device *pdev)
atomic_set(&fpc1020->wakeup_enabled, 0);
irqf = IRQF_TRIGGER_RISING | IRQF_ONESHOT;
irqf = IRQF_TRIGGER_RISING | IRQF_ONESHOT | IRQF_PERF_AFFINE;
if (of_property_read_bool(dev->of_node, "fpc,enable-wakeup")) {
irqf |= IRQF_NO_SUSPEND;
device_init_wakeup(dev, 1);

@@ -365,7 +365,7 @@ static int irq_setup(struct gf_dev *gf_dev)
gf_dev->irq = gf_irq_num(gf_dev);
status = request_threaded_irq(gf_dev->irq, NULL, gf_irq,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
IRQF_TRIGGER_RISING | IRQF_ONESHOT | IRQF_PERF_AFFINE,
"gf", gf_dev);
if (status) {

@@ -1794,7 +1794,8 @@ static int32_t nvt_ts_probe(struct spi_device *client)
NVT_LOG("int_trigger_type=%d\n", ts->int_trigger_type);
ts->irq_enabled = true;
ret = request_threaded_irq(client->irq, NULL, nvt_ts_work_func,
ts->int_trigger_type | IRQF_ONESHOT, NVT_SPI_NAME, ts);
ts->int_trigger_type | IRQF_ONESHOT |
IRQF_PERF_AFFINE, NVT_SPI_NAME, ts);
if (ret != 0) {
NVT_ERR("request irq failed. ret=%d\n", ret);
goto err_int_request_failed;
@@ -1803,6 +1804,7 @@ static int32_t nvt_ts_probe(struct spi_device *client)
NVT_LOG("request irq %d succeed\n", client->irq);
}
}
irq_set_perf_affinity(client->irq, IRQF_PERF_AFFINE);
#if WAKEUP_GESTURE
device_init_wakeup(&ts->input_dev->dev, 1);

@@ -99,6 +99,11 @@ void dead_special_task(void)
static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
#define SFLINGER_BIN_PREFIX "/system/bin/surfaceflinger"
#define FINGERPRINT_BIN_PREFIX "/vendor/bin/hw/android.hardware.biometrics.fingerprint"
#define HWCOMPOSER_BIN_PREFIX "/vendor/bin/hw/android.hardware.graphics.composer"
#define GRALLOC_BIN_PREFIX "/vendor/bin/hw/vendor.qti.hardware.display.allocator"
#define ZYGOTE32_BIN "/system/bin/app_process32"
#define ZYGOTE64_BIN "/system/bin/app_process64"
static struct signal_struct *zygote32_sig;
@@ -1920,7 +1925,31 @@ static int do_execveat_common(int fd, struct filename *filename,
goto out;
if (is_global_init(current->parent)) {
if (unlikely(!strcmp(filename->name, PERFD_BIN))) {
if (unlikely(!strncmp(filename->name,
SFLINGER_BIN_PREFIX,
strlen(SFLINGER_BIN_PREFIX)))) {
current->flags |= PF_PERF_CRITICAL;
set_cpus_allowed_ptr(current, cpu_perf_mask);
}
else if (unlikely(!strncmp(filename->name,
FINGERPRINT_BIN_PREFIX,
strlen(FINGERPRINT_BIN_PREFIX)))) {
current->flags |= PF_PERF_CRITICAL;
set_cpus_allowed_ptr(current, cpu_perf_mask);
}
else if (unlikely(!strncmp(filename->name,
HWCOMPOSER_BIN_PREFIX,
strlen(HWCOMPOSER_BIN_PREFIX)))) {
current->flags |= PF_PERF_CRITICAL;
set_cpus_allowed_ptr(current, cpu_perf_mask);
}
else if (unlikely(!strncmp(filename->name,
GRALLOC_BIN_PREFIX,
strlen(GRALLOC_BIN_PREFIX)))) {
current->flags |= PF_PERF_CRITICAL;
set_cpus_allowed_ptr(current, cpu_perf_mask);
}
else if (unlikely(!strcmp(filename->name, PERFD_BIN))) {
WRITE_ONCE(perfd_tsk, current);
}
else if (unlikely(!strcmp(filename->name, ZYGOTE32_BIN))) {
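
The four restored prefix checks are identical apart from the binary name: SurfaceFlinger, the fingerprint HAL, the composer HAL and the allocator HAL, when exec'd by init, all get PF_PERF_CRITICAL and are pinned to cpu_perf_mask. Were this new code rather than a straight revert, the same logic could be table-driven; a hedged sketch (helper and table names are made up, not part of the commit):

	static const char *const perf_critical_bins[] = {
		SFLINGER_BIN_PREFIX,
		FINGERPRINT_BIN_PREFIX,
		HWCOMPOSER_BIN_PREFIX,
		GRALLOC_BIN_PREFIX,
	};

	static bool is_perf_critical_bin(const char *name)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(perf_critical_bins); i++) {
			if (!strncmp(name, perf_critical_bins[i],
				     strlen(perf_critical_bins[i])))
				return true;
		}
		return false;
	}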

@@ -59,6 +59,8 @@ extern unsigned int nr_cpu_ids;
* cpu_isolated_mask- has bit 'cpu' set iff cpu isolated
* cpu_lp_mask - has bit 'cpu' set iff cpu is part of little cluster
* cpu_perf_mask - has bit 'cpu' set iff cpu is part of big cluster
* cpu_perf_drm_mask- has bit 'cpu' set iff cpu is a part of big cluster and affined for drm
* cpu_perf_kgsl_mask- has bit 'cpu' set iff cpu is a part of big cluster and affined for kgsl
*
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
*
@@ -103,6 +105,8 @@ extern struct cpumask __cpu_isolated_mask;
#define cpu_isolated_mask ((const struct cpumask *)&__cpu_isolated_mask)
extern const struct cpumask *const cpu_lp_mask;
extern const struct cpumask *const cpu_perf_mask;
extern const struct cpumask *const cpu_perf_drm_mask;
extern const struct cpumask *const cpu_perf_kgsl_mask;
#if NR_CPUS > 1
#define num_online_cpus() cpumask_weight(cpu_online_mask)

@@ -63,6 +63,8 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
* IRQF_PERF_AFFINE - Interrupt is critical to the overall performance of the
* system and should be processed on a big CPU.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -76,6 +78,9 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
#define IRQF_PERF_AFFINE 0x00080000
#define IRQF_PERF_DRM_AFFINE 0x00100000
#define IRQF_PERF_KGSL_AFFINE 0x00200000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
@@ -218,10 +223,13 @@ extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);
extern void irq_set_perf_affinity(unsigned int irq, unsigned int perf_flag);
/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void unaffine_perf_irqs(void);
extern void reaffine_perf_irqs(bool from_hotplug);
/**
* struct irq_affinity_notify - context for notification of IRQ affinity changes
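
Driver-side usage mirrors the KGSL and fingerprint hunks above: the perf flag is ORed into the request flags, and the restored kernel/irq code (below) then routes both the hard IRQ and any threaded handler onto the matching big-cluster mask. A minimal hedged sketch, with made-up handler and device names:

	/* Illustrative only: mark an IRQ as KGSL-performance-critical. */
	ret = devm_request_irq(&pdev->dev, irq, my_irq_handler,
			       IRQF_TRIGGER_HIGH | IRQF_PERF_KGSL_AFFINE,
			       "my-gpu", priv);
	if (ret)
		dev_err(&pdev->dev, "request_irq failed: %d\n", ret);

	/* Or, after the fact, as the msm_drm_init hunk does: */
	irq_set_perf_affinity(irq, IRQF_PERF_DRM_AFFINE);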

@@ -214,6 +214,7 @@ struct irq_data {
* IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
* IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
* irq_chip::irq_set_affinity() when deactivated.
* IRQD_PERF_CRITICAL - IRQ is performance-critical
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -235,6 +236,7 @@ enum {
IRQD_MANAGED_SHUTDOWN = (1 << 23),
IRQD_SINGLE_TARGET = (1 << 24),
IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
IRQD_PERF_CRITICAL = (1 << 26),
IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
};

@@ -52,6 +52,27 @@ bool kthread_is_per_cpu(struct task_struct *k);
__k; \
})
/**
* kthread_run_perf_critical - create and wake a performance-critical thread.
*
* Same as kthread_create(), but takes a perf cpumask to affine to.
*/
#define kthread_run_perf_critical(perfmask, threadfn, data, namefmt, ...) \
({ \
struct task_struct *__k \
= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
if (!IS_ERR(__k)) { \
__k->flags |= PF_PERF_CRITICAL; \
BUILD_BUG_ON((perfmask != cpu_lp_mask) && \
(perfmask != cpu_perf_mask) && \
(perfmask != cpu_perf_drm_mask) && \
(perfmask != cpu_perf_kgsl_mask)); \
kthread_bind_mask(__k, perfmask); \
wake_up_process(__k); \
} \
__k; \
})
void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
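
The BUILD_BUG_ON restricts callers to one of the four exported mask globals; that matters because the __set_cpus_allowed_ptr() hunk near the end rejects any other mask for PF_PERF_CRITICAL threads. Usage then looks like the simple_lmk and boost hunks above; a hedged sketch with a made-up thread function:

	static int my_thread_fn(void *data)
	{
		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop()) {
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	static int __init my_init(void)
	{
		struct task_struct *t;

		/* Create, mark PF_PERF_CRITICAL, bind to the big cluster, wake. */
		t = kthread_run_perf_critical(cpu_perf_mask, my_thread_fn,
					      NULL, "my_workerd");
		return IS_ERR(t) ? PTR_ERR(t) : 0;
	}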

@@ -1675,6 +1675,7 @@ extern struct pid *cad_pid;
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
#define PF_PERF_CRITICAL 0x02000000 /* Thread is performance-critical */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_WAKE_UP_IDLE 0x01000000 /* TTWU on an idle CPU */

@@ -1099,8 +1099,18 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
struct cpumask newmask;
int err;
preempt_disable();
cpumask_andnot(&newmask, cpu_online_mask, cpumask_of(cpu));
preempt_enable();
/* One big and LITTLE CPU must remain online */
if (!cpumask_intersects(&newmask, cpu_lp_mask) ||
!cpumask_intersects(&newmask, cpu_perf_mask))
return -EINVAL;
/*
* When cpusets are enabled, the rebuilding of the scheduling
* domains is deferred to a workqueue context. Make sure
@@ -1332,6 +1342,7 @@ int freeze_secondary_cpus(int primary)
int cpu, error = 0;
cpu_maps_update_begin();
unaffine_perf_irqs();
if (!cpu_online(primary))
primary = cpumask_first(cpu_online_mask);
/*
@@ -1421,6 +1432,7 @@ void enable_nonboot_cpus(void)
arch_enable_nonboot_cpus_end();
cpumask_clear(frozen_cpus);
reaffine_perf_irqs(false);
out:
cpu_maps_update_done();
}
@@ -2450,6 +2462,22 @@ const struct cpumask *const cpu_perf_mask = cpu_possible_mask;
#endif
EXPORT_SYMBOL(cpu_perf_mask);
#if CONFIG_BIG_CPU_DRM_MASK
static const unsigned long perf_cpu_drm_bits = CONFIG_BIG_CPU_DRM_MASK;
const struct cpumask *const cpu_perf_drm_mask = to_cpumask(&perf_cpu_drm_bits);
#else
const struct cpumask *const cpu_perf_drm_mask = cpu_possible_mask;
#endif
EXPORT_SYMBOL(cpu_perf_drm_mask);
#if CONFIG_BIG_CPU_KGSL_MASK
static const unsigned long perf_cpu_kgsl_bits = CONFIG_BIG_CPU_KGSL_MASK;
const struct cpumask *const cpu_perf_kgsl_mask = to_cpumask(&perf_cpu_kgsl_bits);
#else
const struct cpumask *const cpu_perf_kgsl_mask = cpu_possible_mask;
#endif
EXPORT_SYMBOL(cpu_perf_kgsl_mask);
void init_cpu_present(const struct cpumask *src)
{
cpumask_copy(&__cpu_present_mask, src);

@@ -261,7 +261,10 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
switch (__irq_startup_managed(desc, aff, force)) {
case IRQ_STARTUP_NORMAL:
ret = __irq_startup(desc);
irq_setup_affinity(desc);
if (irqd_has_set(&desc->irq_data, IRQD_PERF_CRITICAL))
setup_perf_irq_locked(desc, desc->action->flags);
else
irq_setup_affinity(desc);
break;
case IRQ_STARTUP_MANAGED:
irq_do_set_affinity(d, aff, false);

@@ -70,6 +70,9 @@ static bool migrate_one_irq(struct irq_desc *desc)
return false;
}
if (irqd_has_set(d, IRQD_PERF_CRITICAL))
return false;
/*
* No move required, if:
* - Interrupt is per cpu
@@ -207,6 +210,9 @@ void irq_migrate_all_off_this_cpu(void)
irq, smp_processor_id());
}
}
if (!cpumask_test_cpu(smp_processor_id(), cpu_lp_mask))
reaffine_perf_irqs(true);
}
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
@@ -214,6 +220,9 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
struct irq_data *data = irq_desc_get_irq_data(desc);
const struct cpumask *affinity = irq_data_get_affinity_mask(data);
if (irqd_has_set(data, IRQD_PERF_CRITICAL))
return;
if (!irqd_affinity_is_managed(data) || !desc->action ||
!irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
return;
@@ -250,5 +259,8 @@ int irq_affinity_online_cpu(unsigned int cpu)
}
irq_unlock_sparse();
if (!cpumask_test_cpu(cpu, cpu_lp_mask))
reaffine_perf_irqs(true);
return 0;
}

@@ -140,6 +140,7 @@ extern int irq_do_set_affinity(struct irq_data *data,
#ifdef CONFIG_SMP
extern int irq_setup_affinity(struct irq_desc *desc);
extern void setup_perf_irq_locked(struct irq_desc *desc, unsigned int perf_flag);
#else
static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
#endif

@@ -20,9 +20,23 @@
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>
#include <linux/cpu.h>
#include "internals.h"
struct irq_desc_list {
struct list_head list;
struct irq_desc *desc;
unsigned int perf_flag;
};
static LIST_HEAD(perf_crit_irqs);
static DEFINE_RAW_SPINLOCK(perf_irqs_lock);
static int perf_cpu_index = -1;
static int perf_cpu_drm_index = -1;
static int perf_cpu_kgsl_index = -1;
static bool perf_crit_suspended;
#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);
@@ -173,7 +187,8 @@ bool irq_can_set_affinity_usr(unsigned int irq)
struct irq_desc *desc = irq_to_desc(irq);
return __irq_can_set_affinity(desc) &&
!irqd_affinity_is_managed(&desc->irq_data);
!irqd_affinity_is_managed(&desc->irq_data) &&
!irqd_has_set(&desc->irq_data, IRQD_PERF_CRITICAL);
}
/**
@@ -1198,6 +1213,156 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
return 0;
}
static void add_desc_to_perf_list(struct irq_desc *desc, unsigned int perf_flag)
{
struct irq_desc_list *item;
item = kmalloc(sizeof(*item), GFP_ATOMIC | __GFP_NOFAIL);
item->desc = desc;
item->perf_flag = perf_flag;
raw_spin_lock(&perf_irqs_lock);
list_add(&item->list, &perf_crit_irqs);
raw_spin_unlock(&perf_irqs_lock);
}
static void affine_one_perf_thread(struct irqaction *action)
{
const struct cpumask *mask;
if (!action->thread)
return;
if (action->flags & IRQF_PERF_AFFINE)
mask = cpu_perf_mask;
if (action->flags & IRQF_PERF_DRM_AFFINE)
mask = cpu_perf_drm_mask;
if (action->flags & IRQF_PERF_KGSL_AFFINE)
mask = cpu_perf_kgsl_mask;
action->thread->flags |= PF_PERF_CRITICAL;
set_cpus_allowed_ptr(action->thread, mask);
}
static void unaffine_one_perf_thread(struct irqaction *action)
{
if (!action->thread)
return;
action->thread->flags &= ~PF_PERF_CRITICAL;
set_cpus_allowed_ptr(action->thread, cpu_all_mask);
}
static void affine_one_perf_irq(struct irq_desc *desc, unsigned int perf_flag)
{
const struct cpumask *mask;
int *mask_index;
int cpu;
if (perf_flag & IRQF_PERF_AFFINE) {
mask = cpu_perf_mask;
mask_index = &perf_cpu_index;
}
if (perf_flag & IRQF_PERF_DRM_AFFINE) {
mask = cpu_perf_drm_mask;
mask_index = &perf_cpu_drm_index;
}
if (perf_flag & IRQF_PERF_KGSL_AFFINE) {
mask = cpu_perf_kgsl_mask;
mask_index = &perf_cpu_kgsl_index;
}
if (!cpumask_intersects(mask, cpu_online_mask)) {
WARN(1, "requested perf CPU is offline for %s\n", desc->name);
irq_set_affinity_locked(&desc->irq_data, cpu_online_mask, true);
return;
}
/* Balance the performance-critical IRQs across the given CPUs */
while (1) {
cpu = cpumask_next_and(*mask_index, mask, cpu_online_mask);
if (cpu < nr_cpu_ids)
break;
*mask_index = -1;
}
irq_set_affinity_locked(&desc->irq_data, cpumask_of(cpu), true);
*mask_index = cpu;
}
void setup_perf_irq_locked(struct irq_desc *desc, unsigned int perf_flag)
{
add_desc_to_perf_list(desc, perf_flag);
raw_spin_lock(&perf_irqs_lock);
affine_one_perf_irq(desc, perf_flag);
raw_spin_unlock(&perf_irqs_lock);
}
void irq_set_perf_affinity(unsigned int irq, unsigned int perf_flag)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
if (!desc)
return;
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action) {
desc->action->flags |= perf_flag;
irqd_set(&desc->irq_data, IRQD_PERF_CRITICAL);
setup_perf_irq_locked(desc, perf_flag);
} else {
WARN(1, "perf affine: action not set for IRQ%d\n", irq);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
void unaffine_perf_irqs(void)
{
struct irq_desc_list *data;
unsigned long flags;
raw_spin_lock_irqsave(&perf_irqs_lock, flags);
perf_crit_suspended = true;
list_for_each_entry(data, &perf_crit_irqs, list) {
struct irq_desc *desc = data->desc;
raw_spin_lock(&desc->lock);
irq_set_affinity_locked(&desc->irq_data, cpu_all_mask, true);
unaffine_one_perf_thread(desc->action);
raw_spin_unlock(&desc->lock);
}
raw_spin_unlock_irqrestore(&perf_irqs_lock, flags);
}
void reaffine_perf_irqs(bool from_hotplug)
{
struct irq_desc_list *data;
unsigned long flags;
raw_spin_lock_irqsave(&perf_irqs_lock, flags);
/* Don't allow hotplug to reaffine IRQs when resuming from suspend */
if (!from_hotplug || !perf_crit_suspended) {
perf_crit_suspended = false;
perf_cpu_index = -1;
perf_cpu_drm_index = -1;
perf_cpu_kgsl_index = -1;
list_for_each_entry(data, &perf_crit_irqs, list) {
struct irq_desc *desc = data->desc;
raw_spin_lock(&desc->lock);
affine_one_perf_irq(desc, data->perf_flag);
affine_one_perf_thread(desc->action);
raw_spin_unlock(&desc->lock);
}
}
raw_spin_unlock_irqrestore(&perf_irqs_lock, flags);
}
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
@@ -1457,6 +1622,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
if (new->flags & (IRQF_PERF_AFFINE |
IRQF_PERF_DRM_AFFINE | IRQF_PERF_KGSL_AFFINE)) {
affine_one_perf_thread(new);
irqd_set(&desc->irq_data, IRQD_PERF_CRITICAL);
*old_ptr = new;
}
if (irq_settings_can_autoenable(desc)) {
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
} else {
@@ -1481,7 +1653,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irq, omsk, nmsk);
}
*old_ptr = new;
if (!irqd_has_set(&desc->irq_data, IRQD_PERF_CRITICAL))
*old_ptr = new;
irq_pm_install_action(desc, new);
@@ -1623,6 +1796,20 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
action_ptr = &action->next;
}
if (irqd_has_set(&desc->irq_data, IRQD_PERF_CRITICAL)) {
struct irq_desc_list *data;
raw_spin_lock(&perf_irqs_lock);
list_for_each_entry(data, &perf_crit_irqs, list) {
if (data->desc == desc) {
list_del(&data->list);
kfree(data);
break;
}
}
raw_spin_unlock(&perf_irqs_lock);
}
/* Found it - now remove it from the list of entries: */
*action_ptr = action->next;
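
The heart of the restored balancing logic is affine_one_perf_irq(): each perf class keeps a last-used-CPU index and walks the intersection of its mask with cpu_online_mask, wrapping when it runs past the last set bit, so several perf-critical IRQs of the same class spread across the cluster rather than stacking on one core. Distilled into a standalone helper (a sketch, not code from the patch; callers must first check that the mask intersects cpu_online_mask, as the patch does):

	/* Round-robin the next online CPU in 'mask', tracked via *index. */
	static int pick_next_perf_cpu(const struct cpumask *mask, int *index)
	{
		int cpu;

		while (1) {
			cpu = cpumask_next_and(*index, mask, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				break;
			*index = -1;	/* wrapped: scan again from CPU 0 */
		}
		*index = cpu;
		return cpu;
	}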

@@ -2297,8 +2297,8 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
}
/* Spawn the kthread for this CPU. */
t = kthread_run(rcu_nocb_cb_kthread, rdp,
"rcuo%c/%d", rcu_state.abbr, cpu);
t = kthread_run_perf_critical(cpu_lp_mask, rcu_nocb_cb_kthread, rdp,
"rcuo%c/%d", rcu_state.abbr, cpu);
if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
return;
WRITE_ONCE(rdp->nocb_cb_kthread, t);

@@ -1874,6 +1874,12 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
int ret = 0;
cpumask_t allowed_mask;
/* Don't allow perf-critical threads to have non-perf affinities */
if ((p->flags & PF_PERF_CRITICAL) && new_mask != cpu_lp_mask &&
new_mask != cpu_perf_mask && new_mask != cpu_perf_drm_mask &&
new_mask != cpu_perf_kgsl_mask)
return -EINVAL;
rq = task_rq_lock(p, &rf);
update_rq_clock(rq);
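
Note that this is a pointer comparison against the four exported mask globals, not a cpumask_equal() test: PF_PERF_CRITICAL threads can only be moved by code that passes those exact pointers, so userspace sched_setaffinity() calls (which hand in a copied mask) are effectively refused with -EINVAL, and kthread_run_perf_critical()'s BUILD_BUG_ON keeps kernel callers on the same set of masks.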

@@ -5748,7 +5748,7 @@ int __init workqueue_init_early(void)
WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
cpumask_copy(wq_unbound_cpumask, cpu_lp_mask);
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);