aboutsummaryrefslogtreecommitdiff
path: root/bl31
diff options
context:
space:
mode:
Diffstat (limited to 'bl31')
-rw-r--r--bl31/aarch64/crash_reporting.S291
-rw-r--r--bl31/aarch64/runtime_exceptions.S102
-rw-r--r--bl31/bl31.mk1
-rw-r--r--bl31/bl31_main.c2
-rw-r--r--bl31/context_mgmt.c40
-rw-r--r--bl31/runtime_svc.c8
6 files changed, 346 insertions, 98 deletions
diff --git a/bl31/aarch64/crash_reporting.S b/bl31/aarch64/crash_reporting.S
new file mode 100644
index 0000000..cb9110b
--- /dev/null
+++ b/bl31/aarch64/crash_reporting.S
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+#include <plat_macros.S>
+
+ .globl get_crash_stack
+ .globl dump_state_and_die
+ .globl dump_intr_state_and_die
+
+ /* ------------------------------------------------------
+ * The below section deals with dumping the system state
+ * when an unhandled exception is taken in EL3.
+ * The layout and the names of the registers which will
+	 * be dumped during an unhandled exception is given below.
+ * ------------------------------------------------------
+ */
+.section .rodata.dump_reg_name, "aS"
+caller_saved_regs: .asciz "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",\
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16",\
+ "x17", "x18", ""
+
+callee_saved_regs: .asciz "x19", "x20", "x21", "x22", "x23", "x24",\
+ "x25", "x26", "x27", "x28", "x29", "x30", ""
+
+el3_sys_regs: .asciz "scr_el3", "sctlr_el3", "cptr_el3", "tcr_el3",\
+ "daif", "mair_el3", "spsr_el3", "elr_el3", "ttbr0_el3", "esr_el3",\
+ "sp_el3", "far_el3", ""
+
+non_el3_sys_0_regs: .asciz "spsr_el1", "elr_el1", "spsr_abt", "spsr_und",\
+ "spsr_irq", "spsr_fiq", "sctlr_el1", "actlr_el1", "cpacr_el1",\
+ "csselr_el1", "sp_el1", "esr_el1", "ttbr0_el1", "ttbr1_el1",\
+ "mair_el1", "amair_el1", "tcr_el1", "tpidr_el1", ""
+
+non_el3_sys_1_regs: .asciz "tpidr_el0", "tpidrro_el0", "dacr32_el2",\
+ "ifsr32_el2", "par_el1", "far_el1", "afsr0_el1", "afsr1_el1",\
+ "contextidr_el1", "vbar_el1", "cntp_ctl_el0", "cntp_cval_el0",\
+ "cntv_ctl_el0", "cntv_cval_el0", "cntkctl_el1", "fpexc32_el2",\
+ "sp_el0", ""
+
+ /* -----------------------------------------------------
+ * Currently we are stack limited. Hence make sure that
+	 * we don't try to dump more than 20 registers using the
+ * stack.
+ * -----------------------------------------------------
+ */
+
+#define REG_SIZE 0x8
+
+/* The caller saved registers are X0 to X18 */
+#define CALLER_SAVED_REG_SIZE (20 * REG_SIZE)
+/* The callee saved registers are X19 to X30 */
+#define CALLEE_SAVED_REG_SIZE (12 * REG_SIZE)
+/* The EL3 sys regs*/
+#define EL3_SYS_REG_SIZE (12 * REG_SIZE)
+/* The non EL3 sys regs set-0 */
+#define NON_EL3_SYS_0_REG_SIZE (18 * REG_SIZE)
+/* The non EL3 sys regs set-1 */
+#define NON_EL3_SYS_1_REG_SIZE (18 * REG_SIZE)
+
+ .macro print_caller_saved_regs
+ sub sp, sp, #CALLER_SAVED_REG_SIZE
+ stp x0, x1, [sp]
+ stp x2, x3, [sp, #(REG_SIZE * 2)]
+ stp x4, x5, [sp, #(REG_SIZE * 4)]
+ stp x6, x7, [sp, #(REG_SIZE * 6)]
+ stp x8, x9, [sp, #(REG_SIZE * 8)]
+ stp x10, x11, [sp, #(REG_SIZE * 10)]
+ stp x12, x13, [sp, #(REG_SIZE * 12)]
+ stp x14, x15, [sp, #(REG_SIZE * 14)]
+ stp x16, x17, [sp, #(REG_SIZE * 16)]
+ stp x18, xzr, [sp, #(REG_SIZE * 18)]
+ adr x0, caller_saved_regs
+ mov x1, sp
+ bl print_string_value
+ add sp, sp, #CALLER_SAVED_REG_SIZE
+ .endm
+
+ .macro print_callee_saved_regs
+ sub sp, sp, CALLEE_SAVED_REG_SIZE
+ stp x19, x20, [sp]
+ stp x21, x22, [sp, #(REG_SIZE * 2)]
+ stp x23, x24, [sp, #(REG_SIZE * 4)]
+ stp x25, x26, [sp, #(REG_SIZE * 6)]
+ stp x27, x28, [sp, #(REG_SIZE * 8)]
+ stp x29, x30, [sp, #(REG_SIZE * 10)]
+ adr x0, callee_saved_regs
+ mov x1, sp
+ bl print_string_value
+ add sp, sp, #CALLEE_SAVED_REG_SIZE
+ .endm
+
+ .macro print_el3_sys_regs
+ sub sp, sp, #EL3_SYS_REG_SIZE
+ mrs x9, scr_el3
+ mrs x10, sctlr_el3
+ mrs x11, cptr_el3
+ mrs x12, tcr_el3
+ mrs x13, daif
+ mrs x14, mair_el3
+	mrs	x15, spsr_el3	/* save the elr and spsr regs separately */
+ mrs x16, elr_el3
+ mrs x17, ttbr0_el3
+ mrs x8, esr_el3
+ mrs x7, far_el3
+
+ stp x9, x10, [sp]
+ stp x11, x12, [sp, #(REG_SIZE * 2)]
+ stp x13, x14, [sp, #(REG_SIZE * 4)]
+ stp x15, x16, [sp, #(REG_SIZE * 6)]
+ stp x17, x8, [sp, #(REG_SIZE * 8)]
+ stp x0, x7, [sp, #(REG_SIZE * 10)] /* sp_el3 is in x0 */
+
+ adr x0, el3_sys_regs
+ mov x1, sp
+ bl print_string_value
+ add sp, sp, #EL3_SYS_REG_SIZE
+ .endm
+
+ .macro print_non_el3_sys_0_regs
+ sub sp, sp, #NON_EL3_SYS_0_REG_SIZE
+ mrs x9, spsr_el1
+ mrs x10, elr_el1
+ mrs x11, spsr_abt
+ mrs x12, spsr_und
+ mrs x13, spsr_irq
+ mrs x14, spsr_fiq
+ mrs x15, sctlr_el1
+ mrs x16, actlr_el1
+ mrs x17, cpacr_el1
+ mrs x8, csselr_el1
+
+ stp x9, x10, [sp]
+ stp x11, x12, [sp, #(REG_SIZE * 2)]
+ stp x13, x14, [sp, #(REG_SIZE * 4)]
+ stp x15, x16, [sp, #(REG_SIZE * 6)]
+ stp x17, x8, [sp, #(REG_SIZE * 8)]
+
+ mrs x10, sp_el1
+ mrs x11, esr_el1
+ mrs x12, ttbr0_el1
+ mrs x13, ttbr1_el1
+ mrs x14, mair_el1
+ mrs x15, amair_el1
+ mrs x16, tcr_el1
+ mrs x17, tpidr_el1
+
+ stp x10, x11, [sp, #(REG_SIZE * 10)]
+ stp x12, x13, [sp, #(REG_SIZE * 12)]
+ stp x14, x15, [sp, #(REG_SIZE * 14)]
+ stp x16, x17, [sp, #(REG_SIZE * 16)]
+
+ adr x0, non_el3_sys_0_regs
+ mov x1, sp
+ bl print_string_value
+ add sp, sp, #NON_EL3_SYS_0_REG_SIZE
+ .endm
+
+ .macro print_non_el3_sys_1_regs
+ sub sp, sp, #NON_EL3_SYS_1_REG_SIZE
+
+ mrs x9, tpidr_el0
+ mrs x10, tpidrro_el0
+ mrs x11, dacr32_el2
+ mrs x12, ifsr32_el2
+ mrs x13, par_el1
+ mrs x14, far_el1
+ mrs x15, afsr0_el1
+ mrs x16, afsr1_el1
+ mrs x17, contextidr_el1
+ mrs x8, vbar_el1
+
+ stp x9, x10, [sp]
+ stp x11, x12, [sp, #(REG_SIZE * 2)]
+ stp x13, x14, [sp, #(REG_SIZE * 4)]
+ stp x15, x16, [sp, #(REG_SIZE * 6)]
+ stp x17, x8, [sp, #(REG_SIZE * 8)]
+
+ mrs x10, cntp_ctl_el0
+ mrs x11, cntp_cval_el0
+ mrs x12, cntv_ctl_el0
+ mrs x13, cntv_cval_el0
+ mrs x14, cntkctl_el1
+ mrs x15, fpexc32_el2
+ mrs x8, sp_el0
+
+ stp x10, x11, [sp, #(REG_SIZE *10)]
+ stp x12, x13, [sp, #(REG_SIZE * 12)]
+ stp x14, x15, [sp, #(REG_SIZE * 14)]
+ stp x8, xzr, [sp, #(REG_SIZE * 16)]
+
+ adr x0, non_el3_sys_1_regs
+ mov x1, sp
+ bl print_string_value
+ add sp, sp, #NON_EL3_SYS_1_REG_SIZE
+ .endm
+
+ .macro init_crash_stack
+ msr cntfrq_el0, x0 /* we can corrupt this reg to free up x0 */
+ mrs x0, tpidr_el3
+
+ /* Check if tpidr is initialized */
+ cbz x0, infinite_loop
+
+ ldr x0, [x0, #PTR_CACHE_CRASH_STACK_OFFSET]
+ /* store the x30 and sp to stack */
+ str x30, [x0, #-(REG_SIZE)]!
+ mov x30, sp
+ str x30, [x0, #-(REG_SIZE)]!
+ mov sp, x0
+ mrs x0, cntfrq_el0
+ .endm
+
+ /* ---------------------------------------------------
+	 * The below function initializes the crash dump stack,
+ * and prints the system state. This function
+ * will not return.
+ * ---------------------------------------------------
+ */
+func dump_state_and_die
+ init_crash_stack
+ print_caller_saved_regs
+ b print_state
+
+func dump_intr_state_and_die
+ init_crash_stack
+ print_caller_saved_regs
+ plat_print_gic_regs /* fall through to print_state */
+
+print_state:
+ /* copy the original x30 from stack */
+ ldr x30, [sp, #REG_SIZE]
+ print_callee_saved_regs
+ /* copy the original SP_EL3 from stack to x0 and rewind stack */
+ ldr x0, [sp], #(REG_SIZE * 2)
+ print_el3_sys_regs
+ print_non_el3_sys_0_regs
+ print_non_el3_sys_1_regs
+ b infinite_loop
+
+func infinite_loop
+ b infinite_loop
+
+
+#define PCPU_CRASH_STACK_SIZE 0x140
+
+ /* -----------------------------------------------------
+ * void get_crash_stack (uint64_t mpidr) : This
+ * function is used to allocate a small stack for
+ * reporting unhandled exceptions
+ * -----------------------------------------------------
+ */
+func get_crash_stack
+ mov x10, x30 // lr
+ get_mp_stack pcpu_crash_stack, PCPU_CRASH_STACK_SIZE
+ ret x10
+
+ /* -----------------------------------------------------
+ * Per-cpu crash stacks in normal memory.
+ * -----------------------------------------------------
+ */
+declare_stack pcpu_crash_stack, tzfw_normal_stacks, \
+ PCPU_CRASH_STACK_SIZE, PLATFORM_CORE_COUNT
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 9c98ad6..b6dcccb 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -37,7 +37,6 @@
.globl runtime_exceptions
.globl el3_exit
- .globl get_exception_stack
.macro save_x18_to_x29_sp_el0
stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
@@ -63,8 +62,7 @@ sync_exception_sp_el0:
* We don't expect any synchronous exceptions from EL3
* -----------------------------------------------------
*/
- wfi
- b sync_exception_sp_el0
+ bl dump_state_and_die
check_vector_size sync_exception_sp_el0
.align 7
@@ -74,20 +72,17 @@ sync_exception_sp_el0:
* -----------------------------------------------------
*/
irq_sp_el0:
- handle_async_exception IRQ_SP_EL0
- b irq_sp_el0
+ bl dump_intr_state_and_die
check_vector_size irq_sp_el0
.align 7
fiq_sp_el0:
- handle_async_exception FIQ_SP_EL0
- b fiq_sp_el0
+ bl dump_intr_state_and_die
check_vector_size fiq_sp_el0
.align 7
serror_sp_el0:
- handle_async_exception SERROR_SP_EL0
- b serror_sp_el0
+ bl dump_state_and_die
check_vector_size serror_sp_el0
/* -----------------------------------------------------
@@ -100,36 +95,25 @@ sync_exception_sp_elx:
* This exception will trigger if anything went wrong
* during a previous exception entry or exit or while
* handling an earlier unexpected synchronous exception.
- * In any case we cannot rely on SP_EL3. Switching to a
- * known safe area of memory will corrupt at least a
- * single register. It is best to enter wfi in loop as
- * that will preserve the system state for analysis
- * through a debugger later.
+ * There is a high probability that SP_EL3 is corrupted.
* -----------------------------------------------------
*/
- wfi
- b sync_exception_sp_elx
+ bl dump_state_and_die
check_vector_size sync_exception_sp_elx
- /* -----------------------------------------------------
- * As mentioned in the previous comment, all bets are
- * off if SP_EL3 cannot be relied upon. Report their
- * occurrence.
- * -----------------------------------------------------
- */
.align 7
irq_sp_elx:
- b irq_sp_elx
+ bl dump_intr_state_and_die
check_vector_size irq_sp_elx
.align 7
fiq_sp_elx:
- b fiq_sp_elx
+ bl dump_intr_state_and_die
check_vector_size fiq_sp_elx
.align 7
serror_sp_elx:
- b serror_sp_elx
+ bl dump_state_and_die
check_vector_size serror_sp_elx
/* -----------------------------------------------------
@@ -156,20 +140,17 @@ sync_exception_aarch64:
* -----------------------------------------------------
*/
irq_aarch64:
- handle_async_exception IRQ_AARCH64
- b irq_aarch64
+ bl dump_intr_state_and_die
check_vector_size irq_aarch64
.align 7
fiq_aarch64:
- handle_async_exception FIQ_AARCH64
- b fiq_aarch64
+ bl dump_intr_state_and_die
check_vector_size fiq_aarch64
.align 7
serror_aarch64:
- handle_async_exception SERROR_AARCH64
- b serror_aarch64
+ bl dump_state_and_die
check_vector_size serror_aarch64
/* -----------------------------------------------------
@@ -196,20 +177,17 @@ sync_exception_aarch32:
* -----------------------------------------------------
*/
irq_aarch32:
- handle_async_exception IRQ_AARCH32
- b irq_aarch32
+ bl dump_intr_state_and_die
check_vector_size irq_aarch32
.align 7
fiq_aarch32:
- handle_async_exception FIQ_AARCH32
- b fiq_aarch32
+ bl dump_intr_state_and_die
check_vector_size fiq_aarch32
.align 7
serror_aarch32:
- handle_async_exception SERROR_AARCH32
- b serror_aarch32
+ bl dump_state_and_die
check_vector_size serror_aarch32
.align 7
@@ -367,9 +345,7 @@ el3_exit: ; .type el3_exit, %function
msr elr_el3, x17
/* Restore saved general purpose registers and return */
- bl restore_gp_registers
- ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
- eret
+ b restore_gp_registers_eret
smc_unknown:
/*
@@ -379,7 +355,8 @@ smc_unknown:
* content). Either way, we aren't leaking any secure information
* through them
*/
- bl restore_gp_registers_callee
+ mov w0, #SMC_UNK
+ b restore_gp_registers_callee_eret
smc_prohibited:
ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
@@ -387,7 +364,8 @@ smc_prohibited:
eret
rt_svc_fw_critical_error:
- b rt_svc_fw_critical_error
+ msr spsel, #1 /* Switch to SP_ELx */
+ bl dump_state_and_die
/* -----------------------------------------------------
* The following functions are used to saved and restore
@@ -413,52 +391,24 @@ func save_gp_registers
save_x18_to_x29_sp_el0
ret
-func restore_gp_registers
+func restore_gp_registers_eret
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-restore_gp_registers_callee:
- ldr x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
-
+restore_gp_registers_callee_eret:
ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
- msr sp_el0, x17
- ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
- ret
-
- /* -----------------------------------------------------
- * 256 bytes of exception stack for each cpu
- * -----------------------------------------------------
- */
-#if DEBUG
-#define PCPU_EXCEPTION_STACK_SIZE 0x300
-#else
-#define PCPU_EXCEPTION_STACK_SIZE 0x100
-#endif
- /* -----------------------------------------------------
- * void get_exception_stack (uint64_t mpidr) : This
- * function is used to allocate a small stack for
- * reporting unhandled exceptions
- * -----------------------------------------------------
- */
-func get_exception_stack
- mov x10, x30 // lr
- get_mp_stack pcpu_exception_stack, PCPU_EXCEPTION_STACK_SIZE
- ret x10
-
- /* -----------------------------------------------------
- * Per-cpu exception stacks in normal memory.
- * -----------------------------------------------------
- */
-declare_stack pcpu_exception_stack, tzfw_normal_stacks, \
- PCPU_EXCEPTION_STACK_SIZE, PLATFORM_CORE_COUNT
+ ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ msr sp_el0, x17
+ ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ eret
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index ef40f67..c0dc2fd 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -35,6 +35,7 @@ BL31_SOURCES += bl31/bl31_main.c \
bl31/aarch64/bl31_entrypoint.S \
bl31/aarch64/context.S \
bl31/aarch64/runtime_exceptions.S \
+ bl31/aarch64/crash_reporting.S \
common/aarch64/early_exceptions.S \
lib/locks/bakery/bakery_lock.c \
lib/locks/exclusive/spinlock.S \
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index 755320d..5a09829 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -37,7 +37,6 @@
#include <runtime_svc.h>
#include <stdio.h>
-
/*******************************************************************************
* This function pointer is used to initialise the BL32 image. It's initialized
* by SPD calling bl31_register_bl32_init after setting up all things necessary
@@ -99,6 +98,7 @@ void bl31_main(void)
*/
assert(cm_get_context(mpidr, NON_SECURE));
cm_set_next_eret_context(NON_SECURE);
+ cm_init_pcpu_ptr_cache();
write_vbar_el3((uint64_t) runtime_exceptions);
isb();
next_image_type = NON_SECURE;
diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c
index 8d1396e..eae608c 100644
--- a/bl31/context_mgmt.c
+++ b/bl31/context_mgmt.c
@@ -31,6 +31,7 @@
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
+#include <bl31.h>
#include <context.h>
#include <context_mgmt.h>
#include <platform.h>
@@ -47,6 +48,9 @@ typedef struct {
static context_info_t cm_context_info[PLATFORM_CORE_COUNT];
+/* The per_cpu_ptr_cache_t space allocation */
+static per_cpu_ptr_cache_t per_cpu_ptr_cache_space[PLATFORM_CORE_COUNT];
+
/*******************************************************************************
* Context management library initialisation routine. This library is used by
* runtime services to share pointers to 'cpu_context' structures for the secure
@@ -211,21 +215,31 @@ void cm_set_next_eret_context(uint32_t security_state)
: : "r" (ctx));
}
-/*******************************************************************************
- * This function is used to program exception stack in the 'cpu_context'
- * structure. This is the initial stack used for taking and handling exceptions
- * at EL3. This stack is expected to be initialized once by each security state
- ******************************************************************************/
-void cm_init_exception_stack(uint64_t mpidr, uint32_t security_state)
+/************************************************************************
+ * The following function is used to populate the per cpu pointer cache.
+ * The pointer will be stored in the tpidr_el3 register.
+ *************************************************************************/
+void cm_init_pcpu_ptr_cache()
{
- cpu_context_t *ctx;
- el3_state_t *state;
+ unsigned long mpidr = read_mpidr();
+ uint32_t linear_id = platform_get_core_pos(mpidr);
+ per_cpu_ptr_cache_t *pcpu_ptr_cache;
- ctx = cm_get_context(mpidr, security_state);
- assert(ctx);
+ pcpu_ptr_cache = &per_cpu_ptr_cache_space[linear_id];
+ assert(pcpu_ptr_cache);
+ pcpu_ptr_cache->crash_stack = get_crash_stack(mpidr);
+
+ cm_set_pcpu_ptr_cache(pcpu_ptr_cache);
+}
- /* Set exception stack in the context */
- state = get_el3state_ctx(ctx);
- write_ctx_reg(state, CTX_EXCEPTION_SP, get_exception_stack(mpidr));
+void cm_set_pcpu_ptr_cache(const void *pcpu_ptr)
+{
+ write_tpidr_el3((unsigned long)pcpu_ptr);
}
+
+void *cm_get_pcpu_ptr_cache(void)
+{
+ return (void *)read_tpidr_el3();
+}
+
diff --git a/bl31/runtime_svc.c b/bl31/runtime_svc.c
index 9a68e50..b2ba685 100644
--- a/bl31/runtime_svc.c
+++ b/bl31/runtime_svc.c
@@ -135,11 +135,3 @@ void runtime_svc_init()
error:
panic();
}
-
-void fault_handler(void *handle)
-{
- gp_regs_t *gpregs_ctx = get_gpregs_ctx(handle);
- ERROR("Unhandled synchronous fault. Register dump @ 0x%x \n",
- gpregs_ctx);
- panic();
-}