 arch/arm64/kernel/smp.c      | 142
 drivers/acpi/plat/arm-core.c | 169
 2 files changed, 130 insertions(+), 181 deletions(-)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2673120f46aa..5adbcb5ae26f 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -35,6 +35,7 @@
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
+#include <linux/acpi.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
@@ -286,11 +287,6 @@ void __init smp_prepare_boot_cpu(void)
static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-/*
- * Enumerate the possible CPU set from the device tree and build the
- * cpu logical map array containing MPIDR values related to logical
- * cpus. Assumes that cpu_logical_map(0) has already been initialized.
- */
static int __init of_smp_init_cpus(void)
{
struct device_node *dn = NULL;
@@ -400,23 +396,141 @@ next:
return 0;
}
-/*
- * In ACPI mode, the cpu possible map was enumerated before SMP
- * initialization when MADT table was parsed, so we can get the
- * possible map here to initialize CPUs.
- */
-static void __init acpi_smp_init_cpus(void)
+#ifdef CONFIG_ACPI
+
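+/*
+ * Check a single MADT subtable entry: it must be a GIC CPU interface
+ * entry that fits within the table, is marked enabled and carries only
+ * affinity bits in its CPU UID (used here as the MPIDR). Returns 0 if
+ * the entry is usable, a negative error code otherwise.
+ */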
+static int __init
+acpi_smp_check_madt_entry(struct acpi_subtable_header *header,
+ const unsigned long end)
{
- int cpu;
+ struct acpi_madt_generic_interrupt *processor;
+
+ if (header->type != ACPI_MADT_TYPE_GENERIC_INTERRUPT)
+ return -EINVAL;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+
+ if (BAD_MADT_ENTRY(processor, end) ||
+ !(processor->flags & ACPI_MADT_ENABLED))
+ return -EINVAL;
+
+ /*
+ * Non-affinity bits must be set to 0 in the CPU UID
+ */
+ if (processor->uid & ~MPIDR_HWID_BITMASK)
+ return -ENOENT;
+
+ return 0;
+}
+
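+/*
+ * Walk the remaining MADT subtables, advancing by each entry's length
+ * until there is no room left for another subtable header before 'end'.
+ */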
+#define FOR_EACH_MADT_ENTRY(entry, end) \
+ for (; (((unsigned long)entry) + sizeof(struct acpi_subtable_header)) < end; \
+ entry = (struct acpi_subtable_header *) \
+ ((unsigned long)entry + entry->length))
+
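+/*
+ * Parse the GIC CPU interface entries in the MADT. This is invoked once,
+ * for the first GIC entry, which must describe the boot CPU; the
+ * remaining entries are then walked here to fill in cpu_logical_map(),
+ * cpu_physical_id() and the possible CPU mask for the secondaries.
+ */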
+static int __init
+acpi_smp_parse_madt(struct acpi_subtable_header *header, const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *processor;
+ unsigned int i, cpu = 0;
+ unsigned int total = 1; /* usable cores found, including the boot CPU */
+ int status;
+
+ /* First entry represents boot CPU and needs to be valid */
+ status = acpi_smp_check_madt_entry(header, end);
+ if (status) {
+ pr_err("Failed to parse boot CPU MADT entry\n");
+ return status;
+ }
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+ if (cpu_logical_map(0) != (u64)processor->uid) {
+ pr_err("Wrong boot CPU placement in MADT\n");
+ return -ENOENT;
+ }
+
+ /*
+ * Update the logical CPU to GIC id mapping for the boot CPU.
+ * cpu_logical_map(0) has already been initialized and the boot CPU
+ * does not need an enable-method, so move on to the remaining
+ * entries.
+ */
+ cpu_physical_id(cpu++) = processor->gic_id;
+
+ /* Now handle the rest of the CPUs */
+ header = (struct acpi_subtable_header *)
+ ((unsigned long)header + header->length);
+ FOR_EACH_MADT_ENTRY(header, end) {
+ u64 hwid;
+
+ if (acpi_smp_check_madt_entry(header, end))
+ continue;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+ hwid = processor->uid;
+
+ /*
+ * Duplicate MPIDRs are a recipe for disaster. Scan
+ * all initialized entries and check for
+ * duplicates. If any is found just ignore the CPU.
+ * cpu_logical_map was initialized to INVALID_HWID to
+ * avoid matching valid MPIDR values.
+ */
+ for (i = 0; (i < cpu) && (i < NR_CPUS); i++)
+ if (cpu_logical_map(i) == hwid)
+ break;
+
+ if ((i < cpu) && (i < NR_CPUS)) {
+ pr_err("Duplicate CPU UID: 0x%x in MADT\n",
+ (int)processor->uid);
+ continue;
+ }
+
+ /* Count each usable core so that clipping can be reported below */
+ total++;
+
+ if (cpu >= NR_CPUS)
+ continue;
- for_each_possible_cpu(cpu) {
if (cpu_acpi_read_ops(cpu) != 0)
continue;
- cpu_ops[cpu]->cpu_init(NULL, cpu);
+ if (cpu_ops[cpu]->cpu_init(NULL, cpu))
+ continue;
+
+ pr_debug("CPU logical map [%d] = 0x%lx\n", cpu, (long int)hwid);
+ set_cpu_possible(cpu, true);
+ cpu_logical_map(cpu) = hwid;
+ cpu_physical_id(cpu++) = processor->gic_id;
}
+
+ if (total > NR_CPUS)
+ pr_warn("number of cores (%u) greater than configured maximum of %d - clipping\n",
+ total, NR_CPUS);
+
+ return 0;
+}
+
+static void __init acpi_smp_init_cpus(void)
+{
+ int count;
+
+ /*
+ * The GIC CPU interface corresponding to the boot processor must be
+ * the first entry in the list of interrupt controller descriptors.
+ * If it is, the parser keeps going through the remaining entries.
+ * While here, also fill in the cpu_physical_id map for the ACPI
+ * processor driver.
+ */
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ acpi_smp_parse_madt, 1);
+ if (count == 1)
+ pr_warn("No secondary CPUs found in MADT, assuming UP\n");
+ else if (count <= 0)
+ pr_err("Failed to parse MADT during SMP init, not enabling secondaries\n");
}
+#else
+static inline void acpi_smp_init_cpus(void) {}
+#endif
+/*
+ * Enumerate the possible CPU set from either the device tree or the ACPI
+ * MADT and build the cpu logical map array containing MPIDR values for
+ * logical CPUs. Assumes that cpu_logical_map(0) has already been
+ * initialized.
+ */
void __init smp_init_cpus(void)
{
if (!of_smp_init_cpus())
diff --git a/drivers/acpi/plat/arm-core.c b/drivers/acpi/plat/arm-core.c
index f0bd7bb4562d..53c822183402 100644
--- a/drivers/acpi/plat/arm-core.c
+++ b/drivers/acpi/plat/arm-core.c
@@ -48,17 +48,11 @@ EXPORT_SYMBOL(acpi_disabled);
int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);
-/* available_cpus here means enabled cpu in MADT */
-static int available_cpus;
-
/* Map logic cpu id to physical GIC id (physical CPU id). */
int arm_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
-static int boot_cpu_apic_id = -1;
-
-/* Parked Address in ACPI GIC structure */
-static u64 parked_address[NR_CPUS];
-#define PREFIX "ACPI: "
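+/* Prefix all pr_*() messages from this file with "ACPI: ". */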
+#undef pr_fmt
+#define pr_fmt(fmt) "ACPI: " fmt
/* FIXME: this function should be moved to topology.c when it is ready */
void arch_fix_phys_package_id(int num, u32 slot)
@@ -108,140 +102,6 @@ void __init __acpi_unmap_table(char *map, unsigned long size)
return;
}
-static int __init acpi_parse_madt(struct acpi_table_header *table)
-{
- struct acpi_table_madt *madt = NULL;
-
- madt = (struct acpi_table_madt *)table;
- if (!madt) {
- pr_warn(PREFIX "Unable to map MADT\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-/**
- * acpi_register_gic_cpu_interface - register a gic cpu interface and
- * generates a logic cpu number
- * @id: gic cpu interface id to register
- * @enabled: this cpu is enabled or not
- *
- * Returns the logic cpu number which maps to the gic cpu interface
- */
-static int acpi_register_gic_cpu_interface(int id, u8 enabled)
-{
- int cpu;
-
- if (id >= MAX_GIC_CPU_INTERFACE) {
- pr_info(PREFIX "skipped apicid that is too big\n");
- return -EINVAL;
- }
-
- total_cpus++;
- if (!enabled)
- return -EINVAL;
-
- if (available_cpus >= NR_CPUS) {
- pr_warn(PREFIX "NR_CPUS limit of %d reached,"
- " Processor %d/0x%x ignored.\n", NR_CPUS, total_cpus, id);
- return -EINVAL;
- }
-
- available_cpus++;
-
- /* allocate a logic cpu id for the new comer */
- if (boot_cpu_apic_id == id) {
- /*
- * boot_cpu_init() already hold bit 0 in cpu_present_mask
- * for BSP, no need to allocte again.
- */
- cpu = 0;
- } else {
- cpu = cpumask_next_zero(-1, cpu_present_mask);
- }
-
- /* map the logic cpu id to APIC id */
- arm_cpu_to_apicid[cpu] = id;
-
- set_cpu_present(cpu, true);
- set_cpu_possible(cpu, true);
-
- return cpu;
-}
-
-static int __init
-acpi_parse_gic(struct acpi_subtable_header *header, const unsigned long end)
-{
- struct acpi_madt_generic_interrupt *processor = NULL;
- int cpu;
-
- processor = (struct acpi_madt_generic_interrupt *)header;
-
- if (BAD_MADT_ENTRY(processor, end))
- return -EINVAL;
-
- acpi_table_print_madt_entry(header);
-
- /*
- * We need to register disabled CPU as well to permit
- * counting disabled CPUs. This allows us to size
- * cpus_possible_map more accurately, to permit
- * to not preallocating memory for all NR_CPUS
- * when we use CPU hotplug.
- */
- cpu = acpi_register_gic_cpu_interface(processor->gic_id,
- processor->flags & ACPI_MADT_ENABLED);
-
- /*
- * We need the parked address for SMP initialization with
- * spin-table enable method
- */
- if (cpu >= 0 && processor->parked_address)
- parked_address[cpu] = processor->parked_address;
-
- return 0;
-}
-
-/*
- * Parse GIC cpu interface related entries in MADT
- * returns 0 on success, < 0 on error
- */
-static int __init acpi_parse_madt_gic_entries(void)
-{
- int count;
-
- /*
- * do a partial walk of MADT to determine how many CPUs
- * we have including disabled CPUs
- */
- count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
- acpi_parse_gic, MAX_GIC_CPU_INTERFACE);
-
- if (!count) {
- pr_err(PREFIX "No GIC entries present\n");
- return -ENODEV;
- } else if (count < 0) {
- pr_err(PREFIX "Error parsing GIC entry\n");
- return count;
- }
-
-#ifdef CONFIG_SMP
- if (available_cpus == 0) {
- pr_info(PREFIX "Found 0 CPUs; assuming 1\n");
- arm_cpu_to_apicid[available_cpus] =
- read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
- available_cpus = 1; /* We've got at least one of these */
- }
-#endif
-
- /* Make boot-up look pretty */
- pr_info("%d CPUs available, %d CPUs total\n", available_cpus,
- total_cpus);
-
- return 0;
-}
-
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
*irq = gsi_to_irq(gsi);
@@ -333,26 +193,6 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
return 0;
}
-static void __init acpi_process_madt(void)
-{
- int error;
-
- /* Get the boot CPU interface ID before MADT parsing */
- boot_cpu_apic_id = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
-
- if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
-
- /*
- * Parse MADT GIC cpu interface entries
- */
- error = acpi_parse_madt_gic_entries();
- if (!error)
- pr_info("Using ACPI for processor (GIC) configuration information\n");
- }
-
- return;
-}
-
/*
* acpi_boot_table_init() and acpi_boot_init()
* called from setup_arch(), always.
@@ -390,11 +230,6 @@ int __init acpi_boot_init(void)
acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
- /*
- * Process the Multiple APIC Description Table (MADT), if present
- */
- acpi_process_madt();
-
return 0;
}