sched: style cleanups
style cleanup of various changes that were done recently.

no code changed:

      text    data     bss     dec     hex filename
     23680    2542      28   26250    668a sched.o.before
     23680    2542      28   26250    668a sched.o.after

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ce6bd420f4
commit 41a2d6cfa3
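The hunks below apply three formatting conventions and nothing else: multi-line comments open with a bare /* and close with a bare */ on their own lines, a function definition whose parameter list would not fit on one line moves the return type and storage class onto a separate line so the name and parameters stay together, and an if () whose body wraps onto more than one physical line gets braces. A minimal standalone sketch of the same three rules, using made-up names and values rather than anything from sched.c:

#include <stdio.h>

/*
 * Multi-line comments put the opening and closing markers on their own
 * lines; every line in between starts with " * ". The return type below
 * sits on its own line, mirroring the wrapped definitions in the diff.
 */
static int
report_cpu_move(int pid, const char *comm, int dead_cpu)
{
	/*
	 * A body that wraps onto more than one physical line gets braces,
	 * even though it is a single statement.
	 */
	if (pid > 0) {
		printf("process %d (%s) no longer affine to cpu%d\n",
		       pid, comm, dead_cpu);
	}

	return 0;
}

int main(void)
{
	/* Illustrative values only; nothing here comes from sched.c. */
	return report_cpu_move(4321, "example", 2);
}

None of these layout changes alter the generated object code, which is what the identical size output in the commit message above demonstrates.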
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -211,7 +211,6 @@ static inline struct task_group *task_group(struct task_struct *p)
 #else
 	tg = &init_task_group;
 #endif
-
 	return tg;
 }
 
@@ -249,14 +248,15 @@ struct cfs_rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
 
-	/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+	/*
+	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
 	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
 	 * (like users, containers etc.)
 	 *
 	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
 	 * list is used during load balance.
 	 */
-	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
+	struct list_head leaf_cfs_rq_list;
 	struct task_group *tg;	/* group that "owns" this runqueue */
 #endif
 };
@@ -4390,8 +4390,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
-				       struct sched_param __user *param)
+asmlinkage long
+sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
 	/* negative values for policy are not valid */
 	if (policy < 0)
@@ -5245,11 +5245,12 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 			 * kernel threads (both mm NULL), since they never
 			 * leave kernel.
 			 */
-			if (p->mm && printk_ratelimit())
+			if (p->mm && printk_ratelimit()) {
 				printk(KERN_INFO "process %d (%s) no "
 						"longer affine to cpu%d\n",
 						task_pid_nr(p), p->comm, dead_cpu);
+			}
 		}
 	} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
 }
 
@@ -5612,9 +5613,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
 
-		/* No need to migrate the tasks: it was best-effort if
+		/*
+		 * No need to migrate the tasks: it was best-effort if
 		 * they didn't take sched_hotcpu_mutex. Just wake up
-		 * the requestors. */
+		 * the requestors.
+		 */
 		spin_lock_irq(&rq->lock);
 		while (!list_empty(&rq->migration_queue)) {
 			struct migration_req *req;
@@ -5999,8 +6002,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
 
-static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
-			    struct sched_group **sg)
+static int
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_cpus, cpu);
@@ -6017,8 +6020,8 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
 #endif
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	int group;
 	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
@@ -6029,8 +6032,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 	return group;
 }
 #elif defined(CONFIG_SCHED_MC)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_core, cpu);
@@ -6041,8 +6044,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
 
-static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
@@ -7193,16 +7196,17 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 	return &tg->css;
 }
 
-static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
-			       struct cgroup *cgrp)
+static void
+cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
 
 	sched_destroy_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
-				 struct cgroup *cgrp, struct task_struct *tsk)
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		      struct task_struct *tsk)
 {
 	/* We don't support RT-tasks being in separate groups */
 	if (tsk->sched_class != &fair_sched_class)
@@ -7308,8 +7312,8 @@ static struct cgroup_subsys_state *cpuacct_create(
 }
 
 /* destroy an existing cpu accounting group */
-static void cpuacct_destroy(struct cgroup_subsys *ss,
-			    struct cgroup *cont)
+static void
+cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
 {
 	struct cpuacct *ca = cgroup_ca(cont);
 