author    Andrew Thoelke <andrew.thoelke@arm.com>  2014-06-04 21:10:52 +0100
committer Andrew Thoelke <andrew.thoelke@arm.com>  2014-06-23 14:55:44 +0100
commit    167a935733a6e3e412b8ed6a60034d0d84895f2e (patch)
tree      c443557e8fe8ff50628567aa6ebfc11f6fc9f6dd /bl31
parent    5298f2cb98b9bdc18eb2f25cd28180ba7fd000d8 (diff)
Initialise CPU contexts from entry_point_info
Consolidate all BL3-1 CPU context initialization for cold boot, PSCI and
SPDs into two functions:

*   The first uses entry_point_info to initialize the relevant
    cpu_context for first entry into a lower exception level on a CPU
*   The second populates the EL1 and EL2 system registers as needed
    from the cpu_context to ensure correct entry into the lower EL

This patch alters the way that BL3-1 determines which exception level
is used when first entering EL1 or EL2 during cold boot - this is now
fully determined by the SPSR value in the entry_point_info for BL3-3,
as set up by the platform code in BL2 (or otherwise provided to BL3-1).

In the situation that EL1 (or svc mode) is selected for a processor
that supports EL2, the context management code will now configure all
essential EL2 register state to ensure correct execution of EL1. This
allows the platform code to run non-secure EL1 payloads directly
without requiring a small EL2 stub or OS loader.

Change-Id: If9fbb2417e82d2226e47568203d5a369f39d3b0f
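As an illustrative sketch of the new contract (not part of the patch): BL2's
platform code now selects the first entered exception level purely through
the SPSR it stores in the BL3-3 entry_point_info. Assuming the ARMv8-A SPSR
field layout, and with hypothetical macro names and an assumed
SET_SECURITY_STATE helper, a platform might request non-secure EL1 entry
like this:

    /* Illustrative SPSR field encodings (ARMv8-A layout); the macro
     * names are hypothetical, not those of the firmware headers. */
    #define SPSR_M_AARCH64   (0x0u << 4)   /* M[4] = 0: AArch64 state */
    #define SPSR_M_EL1H      (0x5u << 0)   /* M[3:0] = 0101: EL1h     */
    #define SPSR_DAIF_ALL    (0xfu << 6)   /* mask D, A, I and F      */

    static void plat_fill_bl33_ep(entry_point_info_t *ep, uintptr_t pc)
    {
        ep->pc = pc;
        /* EL1 requested even though EL2 may exist: cm_prepare_el3_exit()
         * will disable EL2 safely (HCR_EL2.RW, CPTR_EL2, CNTHCTL_EL2,
         * VPIDR/VMPIDR) before the first ERET. */
        ep->spsr = SPSR_M_AARCH64 | SPSR_M_EL1H | SPSR_DAIF_ALL;
        SET_SECURITY_STATE(ep->h.attr, NON_SECURE);
    }

Requesting EL2 instead is just a different M[3:0] encoding (0x9, EL2h);
cm_init_context() then sets SCR_EL3.HCE automatically.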
Diffstat (limited to 'bl31')
-rw-r--r--  bl31/aarch64/bl31_arch_setup.c   44
-rw-r--r--  bl31/aarch64/context.S            6
-rw-r--r--  bl31/bl31_main.c                 41
-rw-r--r--  bl31/context_mgmt.c             217
4 files changed, 191 insertions, 117 deletions
diff --git a/bl31/aarch64/bl31_arch_setup.c b/bl31/aarch64/bl31_arch_setup.c
index ad73de0..e0382b3 100644
--- a/bl31/aarch64/bl31_arch_setup.c
+++ b/bl31/aarch64/bl31_arch_setup.c
@@ -51,11 +51,11 @@ void bl31_arch_setup(void)
write_sctlr_el3(tmp_reg);
/*
- * Enable HVCs, route FIQs to EL3, set the next EL to be AArch64, route
- * external abort and SError interrupts to EL3
+ * Route external abort and SError interrupts to EL3. Other SCR bits
+ * will be configured before exiting to a lower exception level.
*/
- tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | SCR_EA_BIT |
- SCR_FIQ_BIT;
+ tmp_reg = SCR_RES1_BITS | SCR_EA_BIT;
write_scr(tmp_reg);
/*
@@ -68,39 +68,3 @@ void bl31_arch_setup(void)
counter_freq = plat_get_syscnt_freq();
write_cntfrq_el0(counter_freq);
}
-
-/*******************************************************************************
- * Detect what the security state of the next EL is and setup the minimum
- * required architectural state: program SCTRL to reflect the RES1 bits, and to
- * have MMU and caches disabled
- ******************************************************************************/
-void bl31_next_el_arch_setup(uint32_t security_state)
-{
- unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1();
- unsigned long next_sctlr;
- unsigned long el_status;
- unsigned long scr = read_scr();
-
- /* Use the same endianness than the current BL */
- next_sctlr = (read_sctlr_el3() & SCTLR_EE_BIT);
-
- /* Find out which EL we are going to */
- el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & ID_AA64PFR0_ELX_MASK;
-
- if (security_state == NON_SECURE) {
- /* Check if EL2 is supported */
- if (el_status && (scr & SCR_HCE_BIT)) {
- /* Set SCTLR EL2 */
- next_sctlr |= SCTLR_EL2_RES1;
- write_sctlr_el2(next_sctlr);
- return;
- }
- }
-
- /*
- * SCTLR_EL1 needs the same programming irrespective of the
- * security state of EL1.
- */
- next_sctlr |= SCTLR_EL1_RES1;
- write_sctlr_el1(next_sctlr);
-}
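The effect of this hunk is that cold boot now programs only the invariant
SCR_EL3 bits; everything world-specific lives in the per-context el3_state.
A minimal sketch of the resulting split, using bit positions from the
ARMv8-A definition of SCR_EL3 (the function below is illustrative, not
firmware code):

    #include <stdint.h>

    #define SCR_NS_BIT    (1u << 0)
    #define SCR_EA_BIT    (1u << 3)
    #define SCR_RES1_BITS (3u << 4)
    #define SCR_HCE_BIT   (1u << 8)
    #define SCR_RW_BIT    (1u << 10)

    /* Boot-time value: only RES1 and external-abort routing. */
    static uint32_t scr_el3_boot(void)
    {
        return SCR_RES1_BITS | SCR_EA_BIT;
    }

    /* Per-world value layered on top by cm_init_context(). */
    static uint32_t scr_el3_for(int non_secure, int aarch64, int to_el2)
    {
        uint32_t scr = scr_el3_boot();

        if (non_secure)
            scr |= SCR_NS_BIT;
        if (aarch64)
            scr |= SCR_RW_BIT;
        if (to_el2)
            scr |= SCR_HCE_BIT;  /* HVC enabled only for EL2 entry */
        return scr;
    }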
diff --git a/bl31/aarch64/context.S b/bl31/aarch64/context.S
index d0bca64..6667419 100644
--- a/bl31/aarch64/context.S
+++ b/bl31/aarch64/context.S
@@ -43,9 +43,8 @@
.global el3_sysregs_context_save
func el3_sysregs_context_save
- mrs x9, scr_el3
mrs x10, sctlr_el3
- stp x9, x10, [x0, #CTX_SCR_EL3]
+ str x10, [x0, #CTX_SCTLR_EL3]
mrs x11, cptr_el3
stp x11, xzr, [x0, #CTX_CPTR_EL3]
@@ -98,8 +97,7 @@ func el3_sysregs_context_restore
/* Make sure all the above changes are observed */
isb
- ldp x9, x10, [x0, #CTX_SCR_EL3]
- msr scr_el3, x9
+ ldr x10, [x0, #CTX_SCTLR_EL3]
msr sctlr_el3, x10
isb
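With SCR_EL3 dropped from the asm save/restore pair, the register is no
longer global state: each world's value sits in its el3_state and is written
to the hardware on exit. A sketch of how a runtime service would now adjust
it, assuming the context accessors used later in this patch
(get_el3state_ctx, read_ctx_reg, write_ctx_reg):

    /* Illustrative only: route FIQs to EL3 for one world's context. */
    static void route_fiq_to_el3(cpu_context_t *ctx)
    {
        el3_state_t *state = get_el3state_ctx(ctx);
        uint64_t scr = read_ctx_reg(state, CTX_SCR_EL3);

        write_ctx_reg(state, CTX_SCR_EL3, scr | SCR_FIQ_BIT);
        /* The new value takes effect at the next ERET into that world. */
    }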
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index 6f88e65..8cc7e0d 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -140,53 +140,18 @@ uint32_t bl31_get_next_image_type(void)
void bl31_prepare_next_image_entry()
{
entry_point_info_t *next_image_info;
- uint32_t scr, image_type;
- cpu_context_t *ctx;
- gp_regs_t *gp_regs;
+ uint32_t image_type;
/* Determine which image to execute next */
image_type = bl31_get_next_image_type();
- /*
- * Setup minimal architectural state of the next highest EL to
- * allow execution in it immediately upon entering it.
- */
- bl31_next_el_arch_setup(image_type);
-
/* Program EL3 registers to enable entry into the next EL */
next_image_info = bl31_plat_get_next_image_ep_info(image_type);
assert(next_image_info);
assert(image_type == GET_SECURITY_STATE(next_image_info->h.attr));
- scr = read_scr();
- scr &= ~SCR_NS_BIT;
- if (image_type == NON_SECURE)
- scr |= SCR_NS_BIT;
-
- scr &= ~SCR_RW_BIT;
- if ((next_image_info->spsr & (1 << MODE_RW_SHIFT)) ==
- (MODE_RW_64 << MODE_RW_SHIFT))
- scr |= SCR_RW_BIT;
-
- /*
- * Tell the context mgmt. library to ensure that SP_EL3 points to
- * the right context to exit from EL3 correctly.
- */
- cm_set_el3_eret_context(image_type,
- next_image_info->pc,
- next_image_info->spsr,
- scr);
-
- /*
- * Save the args generated in BL2 for the image in the right context
- * used on its entry
- */
- ctx = cm_get_context(image_type);
- gp_regs = get_gpregs_ctx(ctx);
- memcpy(gp_regs, (void *)&next_image_info->args, sizeof(aapcs64_params_t));
-
- /* Finally set the next context */
- cm_set_next_eret_context(image_type);
+ cm_init_context(read_mpidr_el1(), next_image_info);
+ cm_prepare_el3_exit(image_type);
}
/*******************************************************************************
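For comparison, the same two-call pattern is what an SPD's boot path reduces
to under this patch; a hedged sketch (the wrapper function is hypothetical,
only the two cm_ calls come from the commit):

    /* Hypothetical SPD helper showing the consolidated flow:
     * cm_init_context() fills the cpu_context from the entry point,
     * cm_prepare_el3_exit() programs EL1/EL2 sysregs and SP_EL3. */
    static void spd_enter_payload(const entry_point_info_t *secure_ep)
    {
        cm_init_context(read_mpidr_el1(), secure_ep);
        cm_prepare_el3_exit(SECURE);
        /* the caller then drops into the payload via el3_exit()/ERET */
    }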
diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c
index 67a6e03..81c7c56 100644
--- a/bl31/context_mgmt.c
+++ b/bl31/context_mgmt.c
@@ -40,6 +40,7 @@
#include <platform.h>
#include <platform_def.h>
#include <runtime_svc.h>
+#include <string.h>
/*******************************************************************************
@@ -87,6 +88,177 @@ void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_st
}
/*******************************************************************************
+ * This function is used to program the context that's used for exception
+ * return. It initializes SP_EL3 to point to the 'cpu_context' set for
+ * the required security state.
+ ******************************************************************************/
+static inline void cm_set_next_context(void *context)
+{
+#if DEBUG
+ uint64_t sp_mode;
+
+ /*
+ * Check that this function is called with SP_EL0 as the stack
+ * pointer
+ */
+ __asm__ volatile("mrs %0, SPSel\n"
+ : "=r" (sp_mode));
+
+ assert(sp_mode == MODE_SP_EL0);
+#endif
+
+ __asm__ volatile("msr spsel, #1\n"
+ "mov sp, %0\n"
+ "msr spsel, #0\n"
+ : : "r" (context));
+}
+
+/*******************************************************************************
+ * The following function initializes a cpu_context for the current CPU for
+ * first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ *
+ * The security state to initialize is determined by the SECURE attribute
+ * of the entry_point_info. The context is initialized in place for the
+ * given CPU; it becomes the next context to return to only once installed
+ * via cm_prepare_el3_exit() or cm_set_next_eret_context().
+ *
+ * The EE and ST attributes are used to configure the endianness and secure
+ * timer availability for the new execution context.
+ *
+ * To prepare the register state for entry call cm_prepare_el3_exit() and
+ * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
+ * cm_el1_sysregs_context_restore().
+ ******************************************************************************/
+void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep)
+{
+ uint32_t security_state;
+ cpu_context_t *ctx;
+ uint32_t scr_el3;
+ el3_state_t *state;
+ gp_regs_t *gp_regs;
+ unsigned long sctlr_elx;
+
+ security_state = GET_SECURITY_STATE(ep->h.attr);
+ ctx = cm_get_context_by_mpidr(mpidr, security_state);
+ assert(ctx);
+
+ /* Clear any residual register values from the context */
+ memset(ctx, 0, sizeof(*ctx));
+
+ /*
+ * Base the context SCR on the current value, adjust for entry point
+ * specific requirements and set the interrupt routing bits from the IMF
+ * (interrupt management framework)
+ * TODO: provide the base/global SCR bits using another mechanism?
+ */
+ scr_el3 = read_scr();
+ scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
+ SCR_ST_BIT | SCR_HCE_BIT);
+
+ if (security_state != SECURE)
+ scr_el3 |= SCR_NS_BIT;
+
+ if (GET_RW(ep->spsr) == MODE_RW_64)
+ scr_el3 |= SCR_RW_BIT;
+
+ if (EP_GET_ST(ep->h.attr))
+ scr_el3 |= SCR_ST_BIT;
+
+ scr_el3 |= get_scr_el3_from_routing_model(security_state);
+
+ /*
+ * Set up SCTLR_ELx for the target exception level:
+ * EE bit is taken from the entrypoint attributes
+ * M, C and I bits must be zero (as required by PSCI specification)
+ *
+ * The target exception level is based on the spsr mode requested.
+ * If execution is requested to EL2 or hyp mode, HVC is enabled
+ * via SCR_EL3.HCE.
+ *
+ * Always compute the SCTLR_EL1 value and save it in the cpu_context
+ * - the EL2 registers are set up by cm_prepare_el3_exit() as they
+ * are not part of the stored cpu_context
+ *
+ * TODO: In debug builds the spsr should be validated and checked
+ * against the CPU support, security state, endianness and pc
+ */
+ sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+ sctlr_elx |= SCTLR_EL1_RES1;
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
+
+ if ((GET_RW(ep->spsr) == MODE_RW_64
+ && GET_EL(ep->spsr) == MODE_EL2)
+ || (GET_RW(ep->spsr) != MODE_RW_64
+ && GET_M32(ep->spsr) == MODE32_hyp)) {
+ scr_el3 |= SCR_HCE_BIT;
+ }
+
+ /* Populate EL3 state so that we have the right context before doing ERET */
+ state = get_el3state_ctx(ctx);
+ write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+ write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
+ write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);
+
+ /*
+ * Store the X0-X7 values from the entrypoint into the context
+ * Use memcpy as we are in control of the layout of the structures
+ */
+ gp_regs = get_gpregs_ctx(ctx);
+ memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
+}
+
+/*******************************************************************************
+ * Prepare the CPU system registers for first entry into secure or normal world
+ *
+ * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized.
+ * If execution is requested to non-secure EL1 or svc mode, and the CPU
+ * supports EL2, then EL2 is disabled by configuring all necessary EL2
+ * registers. For all entries, the EL1 registers are initialized from the
+ * cpu_context.
+ ******************************************************************************/
+void cm_prepare_el3_exit(uint32_t security_state)
+{
+ uint32_t sctlr_elx, scr_el3, cptr_el2;
+ cpu_context_t *ctx = cm_get_context(security_state);
+
+ assert(ctx);
+
+ if (security_state == NON_SECURE) {
+ scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
+ if (scr_el3 & SCR_HCE_BIT) {
+ /* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
+ sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
+ CTX_SCTLR_EL1);
+ sctlr_elx &= ~SCTLR_EE_BIT;
+ sctlr_elx |= SCTLR_EL2_RES1;
+ write_sctlr_el2(sctlr_elx);
+ } else if (read_id_aa64pfr0_el1() &
+ (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) {
+ /* EL2 present but unused, need to disable safely */
+
+ /* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */
+ write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0);
+
+ /* SCTLR_EL2 : can be ignored when bypassing */
+
+ /* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */
+ cptr_el2 = read_cptr_el2();
+ cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT);
+ write_cptr_el2(cptr_el2);
+
+ /* Enable EL1 access to timer */
+ write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT);
+
+ /* Set VPIDR, VMPIDR to match MIDR, MPIDR */
+ write_vpidr_el2(read_midr_el1());
+ write_vmpidr_el2(read_mpidr_el1());
+ }
+ }
+
+ el1_sysregs_context_restore(get_sysregs_ctx(ctx));
+
+ cm_set_next_context(ctx);
+}
+
+/*******************************************************************************
* The next four functions are used by runtime services to save and restore EL3
* and EL1 contexts on the 'cpu_context' structure for the specified security
* state.
@@ -132,13 +304,10 @@ void cm_el1_sysregs_context_restore(uint32_t security_state)
}
/*******************************************************************************
- * This function populates 'cpu_context' pertaining to the given security state
- * with the entrypoint, SPSR and SCR values so that an ERET from this security
- * state correctly restores corresponding values to drop the CPU to the next
- * exception level
+ * This function populates the ELR_EL3 member of 'cpu_context' pertaining to
+ * the given security state with the given entrypoint
******************************************************************************/
-void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
- uint32_t spsr, uint32_t scr)
+void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint)
{
cpu_context_t *ctx;
el3_state_t *state;
@@ -146,23 +315,17 @@ void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
ctx = cm_get_context(security_state);
assert(ctx);
- /* Program the interrupt routing model for this security state */
- scr &= ~SCR_FIQ_BIT;
- scr &= ~SCR_IRQ_BIT;
- scr |= get_scr_el3_from_routing_model(security_state);
-
- /* Populate EL3 state so that we've the right context before doing ERET */
+ /* Populate EL3 state so that ERET jumps to the correct entry */
state = get_el3state_ctx(ctx);
- write_ctx_reg(state, CTX_SPSR_EL3, spsr);
write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
- write_ctx_reg(state, CTX_SCR_EL3, scr);
}
/*******************************************************************************
- * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
- * given security state with the given entrypoint
+ * This function populates the ELR_EL3 and SPSR_EL3 members of 'cpu_context'
+ * pertaining to the given security state
******************************************************************************/
-void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint)
+void cm_set_elr_spsr_el3(uint32_t security_state,
+ uint64_t entrypoint, uint32_t spsr)
{
cpu_context_t *ctx;
el3_state_t *state;
@@ -173,6 +336,7 @@ void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint)
/* Populate EL3 state so that ERET jumps to the correct entry */
state = get_el3state_ctx(ctx);
write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
+ write_ctx_reg(state, CTX_SPSR_EL3, spsr);
}
/*******************************************************************************
@@ -233,26 +397,9 @@ uint32_t cm_get_scr_el3(uint32_t security_state)
void cm_set_next_eret_context(uint32_t security_state)
{
cpu_context_t *ctx;
-#if DEBUG
- uint64_t sp_mode;
-#endif
ctx = cm_get_context(security_state);
assert(ctx);
-#if DEBUG
- /*
- * Check that this function is called with SP_EL0 as the stack
- * pointer
- */
- __asm__ volatile("mrs %0, SPSel\n"
- : "=r" (sp_mode));
-
- assert(sp_mode == MODE_SP_EL0);
-#endif
-
- __asm__ volatile("msr spsel, #1\n"
- "mov sp, %0\n"
- "msr spsel, #0\n"
- : : "r" (ctx));
+ cm_set_next_context(ctx);
}
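
The invariants established by the EL2-bypass branch of cm_prepare_el3_exit()
can be summarised as a check; a test-style sketch using the bit macros
referenced in the patch (the struct and function are illustrative, not
firmware code):

    #include <assert.h>
    #include <stdint.h>

    struct el2_state {
        uint64_t hcr, cptr, cnthctl, vpidr, vmpidr;
    };

    static void check_el2_bypass(const struct el2_state *s, uint64_t scr_el3,
                                 uint64_t midr, uint64_t mpidr)
    {
        /* HCR_EL2.RW mirrors SCR_EL3.RW so EL1 runs in the right regime */
        assert(!!(s->hcr & HCR_RW_BIT) == !!(scr_el3 & SCR_RW_BIT));
        /* no EL2 traps: TCPAC, TTA and TFP must be clear */
        assert((s->cptr & (TCPAC_BIT | TTA_BIT | TFP_BIT)) == 0);
        /* EL1 has physical counter and timer access */
        assert((s->cnthctl & (EL1PCEN_BIT | EL1PCTEN_BIT))
                == (EL1PCEN_BIT | EL1PCTEN_BIT));
        /* virtual ID registers are transparent to EL1 */
        assert(s->vpidr == midr && s->vmpidr == mpidr);
    }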