aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMorten Rasmussen <Morten.Rasmussen@arm.com>2012-09-14 14:38:16 +0100
committerViresh Kumar <viresh.kumar@linaro.org>2012-10-03 16:28:56 +0530
commite72226aee9d8a70a3ce17f7cc5b57b26876382e6 (patch)
tree98af579c3684cf3b632e7a4dd82ec8e4c19053ab
parent6ece6e6290579718e9c545cd9e7d1e765e054c5f (diff)
downloadvexpress-lsk-e72226aee9d8a70a3ce17f7cc5b57b26876382e6.tar.gz
sched: Add HMP task migration ftrace event
Adds an ftrace event for tracing task migrations using HMP optimized scheduling.

Signed-off-by: Morten Rasmussen <Morten.Rasmussen@arm.com>
-rw-r--r--include/trace/events/sched.h28
-rw-r--r--kernel/sched/fair.c15
2 files changed, 39 insertions, 4 deletions
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 847eb76fc80..501aa32eb2f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -555,6 +555,34 @@ TRACE_EVENT(sched_task_usage_ratio,
__entry->comm, __entry->pid,
__entry->ratio)
);
+
+/*
+ * Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations.
+ */
+TRACE_EVENT(sched_hmp_migrate,
+
+ TP_PROTO(struct task_struct *tsk, int dest, int force),
+
+ TP_ARGS(tsk, dest, force),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(int, dest)
+ __field(int, force)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->dest = dest;
+ __entry->force = force;
+ ),
+
+ TP_printk("comm=%s pid=%d dest=%d force=%d",
+ __entry->comm, __entry->pid,
+ __entry->dest, __entry->force)
+);
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8aa8b641c37..abc90a6de73 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3349,10 +3349,16 @@ unlock:
rcu_read_unlock();
#ifdef CONFIG_SCHED_HMP
- if (hmp_up_migration(prev_cpu, &p->se))
- return hmp_select_faster_cpu(p, prev_cpu);
- if (hmp_down_migration(prev_cpu, &p->se))
- return hmp_select_slower_cpu(p, prev_cpu);
+ if (hmp_up_migration(prev_cpu, &p->se)) {
+ new_cpu = hmp_select_faster_cpu(p, prev_cpu);
+ trace_sched_hmp_migrate(p, new_cpu, 0);
+ return new_cpu;
+ }
+ if (hmp_down_migration(prev_cpu, &p->se)) {
+ new_cpu = hmp_select_slower_cpu(p, prev_cpu);
+ trace_sched_hmp_migrate(p, new_cpu, 0);
+ return new_cpu;
+ }
/* Make sure that the task stays in its previous hmp domain */
if (!cpumask_test_cpu(new_cpu, &hmp_cpu_domain(prev_cpu)->cpus))
return prev_cpu;
@@ -5732,6 +5738,7 @@ static void hmp_force_up_migration(int this_cpu)
target->push_cpu = hmp_select_faster_cpu(p, cpu);
target->migrate_task = p;
force = 1;
+ trace_sched_hmp_migrate(p, target->push_cpu, 1);
}
}
raw_spin_unlock_irqrestore(&target->lock, flags);