Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/arch_timer.c    | 29
 arch/arm/kernel/entry-common.S  | 12
 arch/arm/kernel/head.S          |  2
 arch/arm/kernel/hw_breakpoint.c |  6
 arch/arm/kernel/sched_clock.c   | 15
 arch/arm/kernel/setup.c         | 24
 arch/arm/kernel/smp.c           |  3
 arch/arm/kernel/smp_tlb.c       | 66
 arch/arm/kernel/time.c          |  7
 9 files changed, 136 insertions(+), 28 deletions(-)
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index d957a51435d..59dcdced6e3 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -22,9 +22,11 @@ static unsigned long arch_timer_read_counter_long(void)
return arch_timer_read_counter();
}
-static u32 arch_timer_read_counter_u32(void)
+static u32 sched_clock_mult __read_mostly;
+
+static unsigned long long notrace arch_timer_sched_clock(void)
{
- return arch_timer_read_counter();
+ return arch_timer_read_counter() * sched_clock_mult;
}
static struct delay_timer arch_delay_timer;
@@ -37,25 +39,20 @@ static void __init arch_timer_delay_timer_register(void)
register_current_timer_delay(&arch_delay_timer);
}
-int __init arch_timer_of_register(void)
+int __init arch_timer_arch_init(void)
{
- int ret;
+ u32 arch_timer_rate = arch_timer_get_rate();
- ret = arch_timer_init();
- if (ret)
- return ret;
+ if (arch_timer_rate == 0)
+ return -ENXIO;
arch_timer_delay_timer_register();
- return 0;
-}
-
-int __init arch_timer_sched_clock_init(void)
-{
- if (arch_timer_get_rate() == 0)
- return -ENXIO;
+ /* Cache the sched_clock multiplier to save a divide in the hot path. */
+ sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
+ sched_clock_func = arch_timer_sched_clock;
+ pr_info("sched_clock: ARM arch timer >56 bits at %ukHz, resolution %uns\n",
+ arch_timer_rate / 1000, sched_clock_mult);
- setup_sched_clock(arch_timer_read_counter_u32,
- 32, arch_timer_get_rate());
return 0;
}
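The hunk above trades a 64-bit divide on every sched_clock() call for one multiply, by precomputing NSEC_PER_SEC / rate once at init. A minimal userspace sketch of the same conversion, assuming a hypothetical 24 MHz counter (the frequency is illustrative, not taken from this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000UL

    /* Hypothetical counter frequency, for illustration only. */
    static const uint32_t rate_hz = 24000000;    /* 24 MHz */

    int main(void)
    {
            /* Computed once at init, as the patch does for sched_clock_mult. */
            uint32_t mult = NSEC_PER_SEC / rate_hz;    /* 41 ns per tick */
            uint64_t cycles = 48000000;                /* two seconds of ticks */

            /* Hot path: one multiply, no divide. */
            uint64_t ns = (uint64_t)cycles * mult;
            printf("%llu cycles -> %llu ns (resolution %u ns)\n",
                   (unsigned long long)cycles, (unsigned long long)ns, mult);
            return 0;
    }

Note the truncation this accepts: at 24 MHz the multiplier rounds from 41.67 down to 41, so 48,000,000 cycles map to 1.968 s rather than 2 s, about 1.6% short. The patch trades that accuracy for avoiding a divide in the hot path.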
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3248cde504e..fefd7f97143 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old)
*/
.macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site. Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+ UNWIND(.pad #4)
stmdb sp!, {r0-r3, lr}
+ UNWIND(.save {r0-r3, lr})
.endm
.macro mcount_get_lr reg
@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old)
.endm
ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
mov ip, lr
ldmia sp!, {lr}
@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc)
#else
__mcount
#endif
+UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
+UNWIND(.fnstart)
__ftrace_caller
+UNWIND(.fnend)
ENDPROC(ftrace_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
__ftrace_graph_caller
+UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#endif
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index e0eb9a1cae7..8bac553fe21 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -267,7 +267,7 @@ __create_page_tables:
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]
-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
sub r4, r4, #4 @ Fixup page table pointer
@ for 64-bit descriptors
#endif
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 96093b75ab9..5dc1aa6f0f7 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
}
if (err) {
- pr_warning("CPU %d debug is powered down!\n", cpu);
+ pr_warn_once("CPU %d debug is powered down!\n", cpu);
cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
return;
}
@@ -987,7 +987,7 @@ clear_vcr:
isb();
if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
- pr_warning("CPU %d failed to disable vector catch\n", cpu);
+ pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
return;
}
@@ -1007,7 +1007,7 @@ clear_vcr:
}
if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
- pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+ pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
return;
}
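The switch from pr_warning() to pr_warn_once() means each of these call sites logs at most once, rather than once per CPU that hits the condition. A simplified sketch of the kernel's printk_once() pattern (not the exact macro; the ## variadic form is a GCC/Clang extension):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Each macro expansion gets its own static flag, so a given
     * call site warns at most once, no matter how often it runs.
     */
    #define warn_once(fmt, ...)                              \
            do {                                             \
                    static bool __warned;                    \
                    if (!__warned) {                         \
                            __warned = true;                 \
                            fprintf(stderr, fmt, ##__VA_ARGS__); \
                    }                                        \
            } while (0)

    int main(void)
    {
            for (int cpu = 0; cpu < 4; cpu++)
                    warn_once("CPU %d debug is powered down!\n", cpu); /* prints once */
            return 0;
    }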
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index bd6f56b9ec2..880584852fc 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -20,6 +20,7 @@ struct clock_data {
u64 epoch_ns;
u32 epoch_cyc;
u32 epoch_cyc_copy;
+ unsigned long rate;
u32 mult;
u32 shift;
bool suspended;
@@ -113,11 +114,14 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
u64 res, wrap;
char r_unit;
+ if (cd.rate > rate)
+ return;
+
BUG_ON(bits > 32);
WARN_ON(!irqs_disabled());
- WARN_ON(read_sched_clock != jiffy_sched_clock_read);
read_sched_clock = read;
sched_clock_mask = (1 << bits) - 1;
+ cd.rate = rate;
/* calculate the mult/shift to convert counter ticks to ns. */
clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
@@ -161,12 +165,19 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
pr_debug("Registered %pF as sched_clock source\n", read);
}
-unsigned long long notrace sched_clock(void)
+static unsigned long long notrace sched_clock_32(void)
{
u32 cyc = read_sched_clock();
return cyc_to_sched_clock(cyc, sched_clock_mask);
}
+unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
+
+unsigned long long notrace sched_clock(void)
+{
+ return sched_clock_func();
+}
+
void __init sched_clock_postinit(void)
{
/*
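The indirection added in this file is what lets arch_timer_arch_init() above install its own clock: sched_clock() now calls through sched_clock_func, which defaults to the existing 32-bit path. A stand-alone sketch of the same late-binding pattern (all names here are illustrative, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* Default implementation, analogous to sched_clock_32(). */
    static uint64_t clock_default(void)
    {
            return 0;    /* e.g. derived from a 32-bit counter */
    }

    /* Pointer bound at init time, analogous to sched_clock_func. */
    static uint64_t (*clock_func)(void) = clock_default;

    static uint64_t my_sched_clock(void)
    {
            return clock_func();    /* one indirect call per read */
    }

    /* A wider hardware timer can take over during early init. */
    static uint64_t clock_arch_timer(void)
    {
            return 1234567890ULL;    /* stand-in for counter * mult */
    }

    int main(void)
    {
            printf("before: %llu\n", (unsigned long long)my_sched_clock());
            clock_func = clock_arch_timer;    /* what arch_timer_arch_init() does */
            printf("after:  %llu\n", (unsigned long long)my_sched_clock());
            return 0;
    }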
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3f6cbb2e3ed..d343a6c3a6d 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -353,6 +353,23 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
+static void __init cpuid_init_hwcaps(void)
+{
+ unsigned int divide_instrs;
+
+ if (cpu_architecture() < CPU_ARCH_ARMv7)
+ return;
+
+ divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
+
+ switch (divide_instrs) {
+ case 2:
+ elf_hwcap |= HWCAP_IDIVA;
+ case 1:
+ elf_hwcap |= HWCAP_IDIVT;
+ }
+}
+
static void __init feat_v6_fixup(void)
{
int id = read_cpuid_id();
@@ -483,8 +500,11 @@ static void __init setup_processor(void)
snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
list->elf_name, ENDIANNESS);
elf_hwcap = list->elf_hwcap;
+
+ cpuid_init_hwcaps();
+
#ifndef CONFIG_ARM_THUMB
- elf_hwcap &= ~HWCAP_THUMB;
+ elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
feat_v6_fixup();
@@ -524,7 +544,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
size -= start & ~PAGE_MASK;
bank->start = PAGE_ALIGN(start);
-#ifndef CONFIG_LPAE
+#ifndef CONFIG_ARM_LPAE
if (bank->start + size < bank->start) {
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
"32-bit physical address space\n", (long long)start);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 79078edbb9b..1f2ccccaf00 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb,
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;
- if (arm_delay_ops.const_clock)
- return NOTIFY_OK;
-
if (!per_cpu(l_p_j_ref, cpu)) {
per_cpu(l_p_j_ref, cpu) =
per_cpu(cpu_data, cpu).loops_per_jiffy;
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index bd030053139..e82e1d24877 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -12,6 +12,7 @@
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
/**********************************************************************/
@@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored)
local_flush_bp_all();
}
+#ifdef CONFIG_ARM_ERRATA_798181
+static int erratum_a15_798181(void)
+{
+ unsigned int midr = read_cpuid_id();
+
+ /* Cortex-A15 r0p0..r3p2 affected */
+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+ return 0;
+ return 1;
+}
+#else
+static int erratum_a15_798181(void)
+{
+ return 0;
+}
+#endif
+
+static void ipi_flush_tlb_a15_erratum(void *arg)
+{
+ dmb();
+}
+
+static void broadcast_tlb_a15_erratum(void)
+{
+ if (!erratum_a15_798181())
+ return;
+
+ dummy_flush_tlb_a15_erratum();
+ smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
+ NULL, 1);
+}
+
+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
+{
+ int cpu;
+ cpumask_t mask = { CPU_BITS_NONE };
+
+ if (!erratum_a15_798181())
+ return;
+
+ dummy_flush_tlb_a15_erratum();
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+ /*
+ * We only need to send an IPI if the other CPUs are running
+ * the same ASID as the one being invalidated. There is no
+ * need for locking around the active_asids check since the
+ * switch_mm() function has at least one dmb() (as required by
+ * this workaround) in case a context switch happens on
+ * another CPU after the condition below.
+ */
+ if (atomic64_read(&mm->context.id) ==
+ atomic64_read(&per_cpu(active_asids, cpu)))
+ cpumask_set_cpu(cpu, &mask);
+ }
+ smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+}
+
void flush_tlb_all(void)
{
if (tlb_ops_need_broadcast())
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
else
local_flush_tlb_all();
+ broadcast_tlb_a15_erratum();
}
void flush_tlb_mm(struct mm_struct *mm)
@@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm)
on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
else
local_flush_tlb_mm(mm);
+ broadcast_tlb_mm_a15_erratum(mm);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
&ta, 1);
} else
local_flush_tlb_page(vma, uaddr);
+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
void flush_tlb_kernel_page(unsigned long kaddr)
@@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
} else
local_flush_tlb_kernel_page(kaddr);
+ broadcast_tlb_a15_erratum();
}
void flush_tlb_range(struct vm_area_struct *vma,
@@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
&ta, 1);
} else
local_flush_tlb_range(vma, start, end);
+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
} else
local_flush_tlb_kernel_range(start, end);
+ broadcast_tlb_a15_erratum();
}
void flush_bp_all(void)
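The erratum_a15_798181() test above relies on the MIDR layout: implementer [31:24], variant [23:20], part number [15:4], revision [3:0]. The mask 0xff0ffff0 drops variant and revision, so 0x410fc0f0 matches any Cortex-A15 (implementer 0x41 = ARM, part 0xc0f), and the upper bound 0x413fc0f2 caps the match at r3p2. A sketch of the same decode, with a fabricated MIDR value:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the fields that the 0xff0ffff0 mask keeps or drops. */
    int main(void)
    {
            uint32_t midr = 0x412fc0f1;    /* fabricated: Cortex-A15 r2p1 */

            unsigned int implementer = midr >> 24;            /* 0x41 = ARM */
            unsigned int variant     = (midr >> 20) & 0xf;    /* major rev: r2 */
            unsigned int partnum     = (midr >> 4) & 0xfff;   /* 0xc0f = Cortex-A15 */
            unsigned int revision    = midr & 0xf;            /* minor rev: p1 */

            /* Same test as erratum_a15_798181(): any A15 up to r3p2. */
            int affected = (midr & 0xff0ffff0) == 0x410fc0f0 &&
                           midr <= 0x413fc0f2;

            printf("ARM? %d part %#x r%up%u affected=%d\n",
                   implementer == 0x41, partnum, variant, revision, affected);
            return 0;
    }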
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 955d92d265e..abff4e9aaee 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -22,6 +22,7 @@
#include <linux/errno.h>
#include <linux/profile.h>
#include <linux/timer.h>
+#include <linux/clocksource.h>
#include <linux/irq.h>
#include <asm/thread_info.h>
@@ -115,6 +116,10 @@ int __init register_persistent_clock(clock_access_fn read_boot,
void __init time_init(void)
{
- machine_desc->init_time();
+ if (machine_desc->init_time)
+ machine_desc->init_time();
+ else
+ clocksource_of_init();
+
sched_clock_postinit();
}