about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMorten Rasmussen <Morten.Rasmussen@arm.com>2012-09-14 14:38:08 +0100
committerViresh Kumar <viresh.kumar@linaro.org>2013-01-04 12:25:44 +0530
commitc9f7f57b2820cf1eba1aec29dbcad32a3f24dc96 (patch)
tree5ab02ab203bd0f1ea54e91c8ab35d627e02b80a1
parent2f8bb216f532b020d360393beb43a6f6e13960a4 (diff)
downloadvexpress-lsk-c9f7f57b2820cf1eba1aec29dbcad32a3f24dc96.tar.gz
sched: entity load-tracking load_avg_ratio
This patch adds load_avg_ratio to each task. The load_avg_ratio is a variant of load_avg_contrib which is not scaled by the task priority. It is calculated like this: runnable_avg_sum * NICE_0_LOAD / (runnable_avg_period + 1). Signed-off-by: Morten Rasmussen <Morten.Rasmussen@arm.com>
-rw-r--r--include/linux/sched.h1
-rw-r--r--kernel/sched/fair.c3
2 files changed, 4 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f2d8806ab4e..abf222e5dce 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1132,6 +1132,7 @@ struct sched_avg {
u64 last_runnable_update;
s64 decay_count;
unsigned long load_avg_contrib;
+ unsigned long load_avg_ratio;
u32 usage_avg_sum;
};
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 16a9c603a8f..5234f7f3be9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1427,6 +1427,9 @@ static inline void __update_task_entity_contrib(struct sched_entity *se)
contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
contrib /= (se->avg.runnable_avg_period + 1);
se->avg.load_avg_contrib = scale_load(contrib);
+ contrib = se->avg.runnable_avg_sum * scale_load_down(NICE_0_LOAD);
+ contrib /= (se->avg.runnable_avg_period + 1);
+ se->avg.load_avg_ratio = scale_load(contrib);
}
/* Compute the current contribution to load_avg by se, return any delta */