perf_counter: Simplify and fix task migration counting
The task migrations counter was causing rare and hard to decipher memory
corruptions under load. After a day of debugging and bisection we found
that the problem was introduced with:

  3f731ca: perf_counter: Fix cpu migration counter

Turning them off fixes the crashes. Incidentally, the whole
perf_counter_task_migration() logic can be done simpler as well, by
injecting a proper sw-counter event.

This cleanup also fixed the crashes. The precise failure mode is not
completely clear yet, but we are clearly not unhappy about having a
fix ;-)

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit e5289d4a18
parent f5970550d5
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -682,8 +682,6 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma)
 extern void perf_counter_comm(struct task_struct *tsk);
 extern void perf_counter_fork(struct task_struct *tsk);
 
-extern void perf_counter_task_migration(struct task_struct *task, int cpu);
-
 extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 
 extern int sysctl_perf_counter_paranoid;
@@ -724,8 +722,6 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
 static inline void perf_counter_comm(struct task_struct *tsk) { }
 static inline void perf_counter_fork(struct task_struct *tsk) { }
 static inline void perf_counter_init(void) { }
-static inline void perf_counter_task_migration(struct task_struct *task,
-						int cpu) { }
 #endif
 
 #endif /* __KERNEL__ */
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -124,7 +124,7 @@ void perf_enable(void)
 
 static void get_ctx(struct perf_counter_context *ctx)
 {
-	atomic_inc(&ctx->refcount);
+	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
 }
 
 static void free_ctx(struct rcu_head *head)
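
Why the one-line get_ctx() change matters: counter contexts are reached
via lock-free lookups, so by the time get_ctx() runs, the last reference
may already have been dropped and the context be on its way to being
freed. A plain atomic_inc() would "resurrect" such a dying object, which
is consistent with the rare corruption described above;
atomic_inc_not_zero() only takes a reference while the count is still
non-zero, and the WARN_ON makes any caller that loses that race visible.
A minimal userspace sketch of the pattern (illustrative only, not the
kernel implementation; all names here are made up):

	/*
	 * get_ref() only succeeds while the object is still live: it
	 * bumps the count with a compare-and-swap loop that refuses to
	 * move the count from 0 to 1.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>

	struct obj {
		atomic_int refcount;	/* 0 means: tear-down in progress */
	};

	static bool get_ref(struct obj *o)
	{
		int old = atomic_load(&o->refcount);

		while (old != 0) {
			/* A failed CAS reloads 'old'; stop if it hits 0. */
			if (atomic_compare_exchange_weak(&o->refcount,
							 &old, old + 1))
				return true;
		}
		return false;		/* lost the race with the final put */
	}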
@@ -3467,27 +3467,6 @@ static const struct pmu perf_ops_task_clock = {
 	.read		= task_clock_perf_counter_read,
 };
 
-/*
- * Software counter: cpu migrations
- */
-void perf_counter_task_migration(struct task_struct *task, int cpu)
-{
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-	struct perf_counter_context *ctx;
-
-	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
-				 PERF_COUNT_SW_CPU_MIGRATIONS,
-				 1, 1, NULL, 0);
-
-	ctx = perf_pin_task_context(task);
-	if (ctx) {
-		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
-					 PERF_COUNT_SW_CPU_MIGRATIONS,
-					 1, 1, NULL, 0);
-		perf_unpin_context(ctx);
-	}
-}
-
 #ifdef CONFIG_EVENT_PROFILE
 void perf_tpcounter_event(int event_id)
 {
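
What the removed function did by hand, the generic software-counter path
already does: perf_swcounter_event() delivers the event to both the
per-cpu context and whatever task context is currently installed, so
there is no need to pin the task's context from the migration path at
all (this description of the generic path is an assumption based on the
perf_swcounter_event() implementation of this era). The replacement is a
single call, shown in the kernel/sched.c hunk below; a sketch with the
arguments annotated, assuming the signature of this era,
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs,
u64 addr):

	perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
			     1,	   /* nr: one migration occurred */
			     1,	   /* nmi: use the deferred, NMI-safe wakeup path */
			     NULL, /* regs: no interrupted register state to sample */
			     0);   /* addr: no data address for this event */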
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1978,7 +1978,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-		perf_counter_task_migration(p, new_cpu);
+		perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+				     1, 1, NULL, 0);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
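
For reference, this is the counter that tooling reports as
"cpu-migrations". A hypothetical userspace reader is sketched below
using the modern perf_event_open() ABI; the syscall and header names
postdate this tree (its contemporary interface was the perf_counter
ABI), so treat this as an illustration of what the counter feeds, not
code for this kernel:

	#define _GNU_SOURCE
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <sched.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_CPU_MIGRATIONS;

		/* Count migrations of the calling task, on any CPU. */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		/* Hop between CPUs 0 and 1 to force migrations
		 * (assumes a machine with at least two CPUs). */
		for (int cpu = 0; cpu < 2; cpu++) {
			cpu_set_t set;

			CPU_ZERO(&set);
			CPU_SET(cpu, &set);
			sched_setaffinity(0, sizeof(set), &set);
		}

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("cpu-migrations: %llu\n",
			       (unsigned long long)count);
		close(fd);
		return 0;
	}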