aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChris Redpath <Chris.Redpath@arm.com>2013-11-22 13:19:18 +0000
committerMark Brown <broonie@linaro.org>2013-11-22 13:56:00 +0000
commit7cf6a7300bb9a88f543061270419427395ab4d2f (patch)
tree353c3c02af29582a2455160f9e9f65366ff45d22
parente7004f3e7fd90e611362b78ac64e357ee80c177c (diff)
downloadvexpress-lsk-7cf6a7300bb9a88f543061270419427395ab4d2f.tar.gz
sched: hmp: Fix build breakage when not using CONFIG_SCHED_HMP (branch: v3.10/topic/big.LITTLE)
hmp_variable_scale_convert was used without guards in __update_entity_runnable_avg. Guard it.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Mark Brown <broonie@linaro.org>
-rw-r--r--kernel/sched/fair.c5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c7d808ee0a3..8a4a02740f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1210,6 +1210,7 @@ static u32 __compute_runnable_contrib(u64 n)
return contrib + runnable_avg_yN_sum[n];
}
+#ifdef CONFIG_SCHED_HMP
#define HMP_VARIABLE_SCALE_SHIFT 16ULL
struct hmp_global_attr {
struct attribute attr;
@@ -1291,6 +1292,7 @@ struct cpufreq_extents {
static struct cpufreq_extents freq_scale[CONFIG_NR_CPUS];
#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
+#endif /* CONFIG_SCHED_HMP */
/* We can represent the historical contribution to runnable average as the
* coefficients of a geometric series. To do this we sub-divide our runnable
@@ -1336,8 +1338,9 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
#endif /* CONFIG_HMP_FREQUENCY_INVARIANT_SCALE */
delta = now - sa->last_runnable_update;
-
+#ifdef CONFIG_SCHED_HMP
delta = hmp_variable_scale_convert(delta);
+#endif
/*
* This should only happen when time goes backwards, which it
* unfortunately does during sched clock init when we swap over to TSC.