author     Jon Medhurst <tixy@linaro.org>  2013-05-03 14:24:38 +0100
committer  Jon Medhurst <tixy@linaro.org>  2013-05-03 14:24:38 +0100
commit     c65283a962e8e391283db37939f8483249b0d276 (patch)
tree       3934cce1e78da41ec9e55b70c7114b9ef212be5c
parent     84b4b2e12997f29a0c1890fcb700111a905ee6ad (diff)
parent     23c5ac41ee7db68b1d64e75e641f026e8f1afe97 (diff)
Merge branch 'tracking-armlt-dcscb' into integration-linux-vexpress
 Documentation/devicetree/bindings/arm/rtsm-dcscb.txt |  19 ++
 arch/arm/include/asm/cp15.h                          |  14 ++
 arch/arm/mach-vexpress/Kconfig                       |   9 ++
 arch/arm/mach-vexpress/Makefile                      |   2 ++
 arch/arm/mach-vexpress/dcscb.c                       | 256 ++++++
 arch/arm/mach-vexpress/dcscb_setup.S                 |  80 +++
 drivers/cpuidle/cpuidle-calxeda.c                    |  14 --
 7 files changed, 380 insertions(+), 14 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/rtsm-dcscb.txt b/Documentation/devicetree/bindings/arm/rtsm-dcscb.txt
new file mode 100644
index 00000000000..3b8fbf3c00c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/rtsm-dcscb.txt
@@ -0,0 +1,19 @@
+ARM Dual Cluster System Configuration Block
+-------------------------------------------
+
+The Dual Cluster System Configuration Block (DCSCB) provides basic
+functionality for controlling clocks, resets and configuration pins in
+the Dual Cluster System implemented by the Real-Time System Model (RTSM).
+
+Required properties:
+
+- compatible : should be "arm,rtsm,dcscb"
+
+- reg : physical base address and the size of the registers window
+
+Example:
+
+ dcscb@60000000 {
+ compatible = "arm,rtsm,dcscb";
+ reg = <0x60000000 0x1000>;
+ };
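
As a usage sketch for this binding (it mirrors what dcscb_init() in
arch/arm/mach-vexpress/dcscb.c below does; error handling elided):

    struct device_node *node;
    void __iomem *dcscb_base;

    node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
    if (node)
            dcscb_base = of_iomap(node, 0); /* maps the 4KB register window */
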
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 5ef4d8015a6..ce4d01c03e6 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -59,6 +59,20 @@ static inline void set_cr(unsigned int val)
  isb();
 }
 
+static inline unsigned int get_auxcr(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val));
+ return val;
+}
+
+static inline void set_auxcr(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR"
+ : : "r" (val));
+ isb();
+}
+
 #ifndef CONFIG_SMP
 extern void adjust_cr(unsigned long mask, unsigned long set);
 #endif
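
The get_auxcr()/set_auxcr() accessors added above are how C code reaches
the ACTLR. A minimal usage sketch (bit 6 is the "SMP" bit on
Cortex-A15/A7, which is how dcscb.c below uses it):

    /* Drop this CPU out of local coherency, as dcscb_power_down() does: */
    set_auxcr(get_auxcr() & ~(1 << 6));

    /* Re-enabling is normally left to cpu_resume(), which restores ACTLR. */
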
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 52d315b792c..49e97ef3933 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -52,4 +52,13 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
 config ARCH_VEXPRESS_CA9X4
  bool "Versatile Express Cortex-A9x4 tile"
 
+config ARCH_VEXPRESS_DCSCB
+ bool "Dual Cluster System Control Block (DCSCB) support"
+ depends on MCPM
+ select ARM_CCI
+ help
+ Support for the Dual Cluster System Configuration Block (DCSCB).
+ This is needed to provide CPU and cluster power management
+ on RTSM.
+
 endmenu
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 80b64971fbd..f5b78d49246 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -6,5 +6,7 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
 obj-y := v2m.o reset.o
 obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
+obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
+CFLAGS_REMOVE_dcscb.o = -pg
 obj-$(CONFIG_SMP) += platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
new file mode 100644
index 00000000000..0dc3caca227
--- /dev/null
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -0,0 +1,256 @@
+/*
+ * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
+ *
+ * Created by: Nicolas Pitre, May 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/of_address.h>
+#include <linux/vexpress.h>
+#include <linux/arm-cci.h>
+
+#include <asm/mcpm.h>
+#include <asm/proc-fns.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/cp15.h>
+#include <asm/psci.h>
+
+
+#define RST_HOLD0 0x0
+#define RST_HOLD1 0x4
+#define SYS_SWRESET 0x8
+#define RST_STAT0 0xc
+#define RST_STAT1 0x10
+#define EAG_CFG_R 0x20
+#define EAG_CFG_W 0x24
+#define KFC_CFG_R 0x28
+#define KFC_CFG_W 0x2c
+#define DCS_CFG_R 0x30
+
+/*
+ * We can't use regular spinlocks. In the switcher case, it is possible
+ * for an outbound CPU to call power_down() after its inbound counterpart
+ * is already live using the same logical CPU number, which trips lockdep
+ * debugging.
+ */
+static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+static void __iomem *dcscb_base;
+static int dcscb_use_count[4][2];
+static int dcscb_mcpm_cpu_mask[2];
+
+static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int rst_hold, cpumask = (1 << cpu);
+ unsigned int mcpm_mask = dcscb_mcpm_cpu_mask[cluster];
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ if (cpu >= 4 || cluster >= 2)
+ return -EINVAL;
+
+ /*
+ * Since this is called with IRQs enabled, and no arch_spin_lock_irq
+ * variant exists, we need to disable IRQs manually here.
+ */
+ local_irq_disable();
+ arch_spin_lock(&dcscb_lock);
+
+ dcscb_use_count[cpu][cluster]++;
+ if (dcscb_use_count[cpu][cluster] == 1) {
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ if (rst_hold & (1 << 8)) {
+ /* remove cluster reset and add the individual CPU reset bits */
+ rst_hold &= ~(1 << 8);
+ rst_hold |= mcpm_mask;
+ }
+ rst_hold &= ~(cpumask | (cpumask << 4));
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+ } else if (dcscb_use_count[cpu][cluster] != 2) {
+ /*
+ * The only possible values are:
+ * 0 = CPU down
+ * 1 = CPU (still) up
+ * 2 = CPU requested to be up again before it had a chance
+ * to actually go down.
+ * Any other value is a bug.
+ */
+ BUG();
+ }
+
+ arch_spin_unlock(&dcscb_lock);
+ local_irq_enable();
+
+ return 0;
+}
+
+static void dcscb_power_down(void)
+{
+ unsigned int mpidr, cpu, cluster, rst_hold, cpumask, mcpm_mask;
+ bool last_man = false, skip_wfi = false;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpumask = (1 << cpu);
+ mcpm_mask = dcscb_mcpm_cpu_mask[cluster];
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cpu >= 4 || cluster >= 2);
+
+ __mcpm_cpu_going_down(cpu, cluster);
+
+ arch_spin_lock(&dcscb_lock);
+ BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+ dcscb_use_count[cpu][cluster]--;
+ if (dcscb_use_count[cpu][cluster] == 0) {
+ rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
+ rst_hold |= cpumask;
+ if (((rst_hold | (rst_hold >> 4)) & mcpm_mask) == mcpm_mask) {
+ rst_hold |= (1 << 8);
+ last_man = true;
+ }
+ writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
+ } else if (dcscb_use_count[cpu][cluster] == 1) {
+ /*
+ * A power_up request went ahead of us.
+ * Even if we do not want to shut this CPU down,
+ * the caller expects a certain state as if the WFI
+ * was aborted. So let's continue with cache cleaning.
+ */
+ skip_wfi = true;
+ } else
+ BUG();
+
+ if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+ arch_spin_unlock(&dcscb_lock);
+
+ /*
+ * Flush all cache levels for this cluster.
+ *
+ * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
+ * a preliminary flush here for those CPUs. At least, that's
+ * the theory -- without the extra flush, Linux explodes on
+ * RTSM (maybe not needed anymore, to be investigated).
+ */
+ flush_cache_all();
+ set_cr(get_cr() & ~CR_C);
+ flush_cache_all();
+
+ /*
+ * This is a harmless no-op. On platforms with a real
+ * outer cache this might either be needed or not,
+ * depending on where the outer cache sits.
+ */
+ outer_flush_all();
+
+ /* Disable local coherency by clearing the ACTLR "SMP" bit: */
+ set_auxcr(get_auxcr() & ~(1 << 6));
+
+ /*
+ * Disable cluster-level coherency by masking
+ * incoming snoops and DVM messages:
+ */
+ disable_cci(cluster);
+
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+ } else {
+ arch_spin_unlock(&dcscb_lock);
+
+ /*
+ * Flush the local CPU cache.
+ *
+ * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
+ * a preliminary flush here for those CPUs. At least, that's
+ * the theory -- without the extra flush, Linux explodes on
+ * RTSM (maybe not needed anymore, to be investigated).
+ */
+ flush_cache_louis();
+ set_cr(get_cr() & ~CR_C);
+ flush_cache_louis();
+
+ /* Disable local coherency by clearing the ACTLR "SMP" bit: */
+ set_auxcr(get_auxcr() & ~(1 << 6));
+ }
+
+ __mcpm_cpu_down(cpu, cluster);
+
+ /* Now we are prepared for power-down, do it: */
+ if (!skip_wfi) {
+ dsb();
+ wfi();
+ }
+
+ /* Not dead at this point? Let our caller cope. */
+}
+
+static const struct mcpm_platform_ops dcscb_power_ops = {
+ .power_up = dcscb_power_up,
+ .power_down = dcscb_power_down,
+};
+
+static void __init dcscb_usage_count_init(void)
+{
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cpu >= 4 || cluster >= 2);
+ dcscb_use_count[cpu][cluster] = 1;
+}
+
+extern void dcscb_power_up_setup(unsigned int affinity_level);
+
+static int __init dcscb_init(void)
+{
+ struct device_node *node;
+ unsigned int cfg;
+ int ret;
+
+ ret = psci_probe();
+ if (!ret) {
+ pr_debug("psci found. Aborting native init\n");
+ return -ENODEV;
+ }
+
+ node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
+ if (!node)
+ return -ENODEV;
+ dcscb_base = of_iomap(node, 0);
+ if (!dcscb_base)
+ return -EADDRNOTAVAIL;
+ cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
+ dcscb_mcpm_cpu_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
+ dcscb_mcpm_cpu_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
+ dcscb_usage_count_init();
+
+ ret = mcpm_platform_register(&dcscb_power_ops);
+ if (!ret)
+ ret = mcpm_sync_init(dcscb_power_up_setup);
+ if (ret) {
+ iounmap(dcscb_base);
+ return ret;
+ }
+
+ /*
+ * Future entries into the kernel can now go
+ * through the cluster entry vectors.
+ */
+ vexpress_flags_set(virt_to_phys(mcpm_entry_point));
+
+ return 0;
+}
+
+early_initcall(dcscb_init);
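
The dcscb_mcpm_cpu_mask[] setup in dcscb_init() above is terse. As a
worked sketch of the same arithmetic, assuming (as the shifts suggest)
that DCS_CFG_R[19:16] and [23:20] hold the per-cluster CPU counts; the
4+4 configuration is an example value, not something the code requires:

    /* For a DCS_CFG_R reading of cfg = 0x00440000 (4 CPUs per cluster): */
    unsigned int cfg   = 0x00440000;
    unsigned int mask0 = (1 << ((cfg >> 16) & 0xf)) - 1; /* (1 << 4) - 1 = 0x0f */
    unsigned int mask1 = (1 << ((cfg >> 20) & 0xf)) - 1; /* (1 << 4) - 1 = 0x0f */
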
diff --git a/arch/arm/mach-vexpress/dcscb_setup.S b/arch/arm/mach-vexpress/dcscb_setup.S
new file mode 100644
index 00000000000..93bd13f458a
--- /dev/null
+++ b/arch/arm/mach-vexpress/dcscb_setup.S
@@ -0,0 +1,80 @@
+/*
+ * arch/arm/mach-vexpress/dcscb_setup.S
+ *
+ * Created by: Dave Martin, 2012-06-22
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/mcpm.h>
+
+
+#define SLAVE_SNOOPCTL_OFFSET 0
+#define SNOOPCTL_SNOOP_ENABLE (1 << 0)
+#define SNOOPCTL_DVM_ENABLE (1 << 1)
+
+#define CCI_STATUS_OFFSET 0xc
+#define STATUS_CHANGE_PENDING (1 << 0)
+
+#define CCI_SLAVE_OFFSET(n) (0x1000 + 0x1000 * (n))
+
+#define RTSM_CCI_PHYS_BASE 0x2c090000
+#define RTSM_CCI_SLAVE_A15 3
+#define RTSM_CCI_SLAVE_A7 4
+
+#define RTSM_CCI_A15_OFFSET CCI_SLAVE_OFFSET(RTSM_CCI_SLAVE_A15)
+#define RTSM_CCI_A7_OFFSET CCI_SLAVE_OFFSET(RTSM_CCI_SLAVE_A7)
+
+
+ENTRY(dcscb_power_up_setup)
+
+ cmp r0, #0 @ check affinity level
+ beq 2f
+
+/*
+ * Enable cluster-level coherency, in preparation for turning on the MMU.
+ * The ACTLR SMP bit does not need to be set here, because cpu_resume()
+ * already restores that.
+ */
+
+ mrc p15, 0, r0, c0, c0, 5 @ MPIDR
+ ubfx r0, r0, #8, #4 @ cluster
+
+ @ A15/A7 may not require explicit L2 invalidation on reset, depending
+ @ on hardware integration decisions.
+ @ For now, this code assumes that L2 is either already invalidated, or
+ @ invalidation is not required.
+
+ ldr r3, =RTSM_CCI_PHYS_BASE + RTSM_CCI_A15_OFFSET
+ cmp r0, #0 @ A15 cluster?
+ addne r3, r3, #RTSM_CCI_A7_OFFSET - RTSM_CCI_A15_OFFSET
+
+ @ r3 now points to the correct CCI slave register block
+
+ ldr r0, [r3, #SLAVE_SNOOPCTL_OFFSET]
+ orr r0, r0, #SNOOPCTL_SNOOP_ENABLE | SNOOPCTL_DVM_ENABLE
+ str r0, [r3, #SLAVE_SNOOPCTL_OFFSET] @ enable CCI snoops
+
+ @ Wait for snoop control change to complete:
+
+ ldr r3, =RTSM_CCI_PHYS_BASE
+
+1: ldr r0, [r3, #CCI_STATUS_OFFSET]
+ tst r0, #STATUS_CHANGE_PENDING
+ bne 1b
+
+ dsb @ Synchronise side-effects of enabling CCI
+
+ bx lr
+
+2: @ Implementation-specific local CPU setup operations should go here,
+ @ if any. In this case, there is nothing to do.
+
+ bx lr
+
+ENDPROC(dcscb_power_up_setup)
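
dcscb_power_up_setup runs before the MMU is enabled, which is why the CCI
programming must be done in assembly. As a C rendering of the same
sequence (a sketch only, reusing the register constants defined above;
the helper name is illustrative):

    static void cci_enable_cluster_coherency(void __iomem *cci_base, int slave)
    {
            void __iomem *ctl = cci_base + CCI_SLAVE_OFFSET(slave) +
                                SLAVE_SNOOPCTL_OFFSET;

            /* Enable snoop requests and DVM message requests for this slave */
            writel_relaxed(readl_relaxed(ctl) |
                           SNOOPCTL_SNOOP_ENABLE | SNOOPCTL_DVM_ENABLE, ctl);

            /* Wait until the CCI no longer reports the change as pending */
            while (readl_relaxed(cci_base + CCI_STATUS_OFFSET) &
                   STATUS_CHANGE_PENDING)
                    ;

            dsb();  /* synchronise side-effects of enabling CCI */
    }
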
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index e1aab38c5a8..ece83d6e049 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -37,20 +37,6 @@ extern void *scu_base_addr;
 
 static struct cpuidle_device __percpu *calxeda_idle_cpuidle_devices;
 
-static inline unsigned int get_auxcr(void)
-{
- unsigned int val;
- asm("mrc p15, 0, %0, c1, c0, 1 @ get AUXCR" : "=r" (val) : : "cc");
- return val;
-}
-
-static inline void set_auxcr(unsigned int val)
-{
- asm volatile("mcr p15, 0, %0, c1, c0, 1 @ set AUXCR"
- : : "r" (val) : "cc");
- isb();
-}
-
 static noinline void calxeda_idle_restore(void)
 {
  set_cr(get_cr() | CR_C);