path: root/lib/aarch64/misc_helpers.S
author		Dan Handley <dan.handley@arm.com>	2014-04-09 12:48:25 +0100
committer	Dan Handley <dan.handley@arm.com>	2014-05-06 12:35:02 +0100
commit		4ecca33988b90de43ec4f4a929094a38a23fda31 (patch)
tree		ad1fa01314b562bfbdac77a39451ef7967c0ff03 /lib/aarch64/misc_helpers.S
parent		b495bdef190acf166c713e138b61c5bb25402fc0 (diff)
download	arm-trusted-firmware-4ecca33988b90de43ec4f4a929094a38a23fda31.tar.gz
Move include and source files to logical locations
Move almost all system include files to a logical sub-directory under
./include. The only remaining system include directories not under
./include are specific to the platform. Move the corresponding source
files to match the include directory structure. Also remove pm.h as it
is no longer used.

Change-Id: Ie5ea6368ec5fad459f3e8a802ad129135527f0b3
Diffstat (limited to 'lib/aarch64/misc_helpers.S')
-rw-r--r--	lib/aarch64/misc_helpers.S	| 341
1 file changed, 341 insertions(+), 0 deletions(-)
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
new file mode 100644
index 0000000..e3b4ab5
--- /dev/null
+++ b/lib/aarch64/misc_helpers.S
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <runtime_svc.h>
+#include <asm_macros.S>
+
+ .globl enable_irq
+ .globl disable_irq
+
+ .globl enable_fiq
+ .globl disable_fiq
+
+ .globl enable_serror
+ .globl disable_serror
+
+ .globl enable_debug_exceptions
+ .globl disable_debug_exceptions
+
+ .globl read_daif
+ .globl write_daif
+
+ .globl read_spsr
+ .globl read_spsr_el1
+ .globl read_spsr_el2
+ .globl read_spsr_el3
+
+ .globl write_spsr
+ .globl write_spsr_el1
+ .globl write_spsr_el2
+ .globl write_spsr_el3
+
+ .globl read_elr
+ .globl read_elr_el1
+ .globl read_elr_el2
+ .globl read_elr_el3
+
+ .globl write_elr
+ .globl write_elr_el1
+ .globl write_elr_el2
+ .globl write_elr_el3
+
+ .globl get_afflvl_shift
+ .globl mpidr_mask_lower_afflvls
+ .globl dsb
+ .globl isb
+ .globl sev
+ .globl wfe
+ .globl wfi
+ .globl eret
+ .globl smc
+
+ .globl zeromem16
+ .globl memcpy16
+
+
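+	/* -----------------------------------------------------
+	 * Compute (in x0) the left shift that isolates the MPIDR
+	 * affinity field for the level passed in x0. Level 3
+	 * lives in bits [39:32], hence the conditional increment
+	 * before scaling by MPIDR_AFFLVL_SHIFT.
+	 * -----------------------------------------------------
+	 */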
+func get_afflvl_shift
+ cmp x0, #3
+ cinc x0, x0, eq
+ mov x1, #MPIDR_AFFLVL_SHIFT
+ lsl x0, x0, x1
+ ret
+
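+	/* -----------------------------------------------------
+	 * Zero out (in x0) all MPIDR affinity fields below the
+	 * level passed in x1 by shifting them out and back in.
+	 * -----------------------------------------------------
+	 */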
+func mpidr_mask_lower_afflvls
+ cmp x1, #3
+ cinc x1, x1, eq
+ mov x2, #MPIDR_AFFLVL_SHIFT
+ lsl x2, x1, x2
+ lsr x0, x0, x2
+ lsl x0, x0, x2
+ ret
+
+ /* -----------------------------------------------------
+ * Asynchronous exception manipulation accessors
+ * -----------------------------------------------------
+ */
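+	/*
+	 * Illustrative usage (a sketch, not part of this commit):
+	 * mask IRQs around a critical section while preserving the
+	 * caller's exception mask, assuming x19 and the link
+	 * register have been saved by the caller.
+	 *
+	 *	bl	read_daif	// x0 = current DAIF bits
+	 *	mov	x19, x0		// stash the mask
+	 *	bl	disable_irq	// sets PSTATE.I via daifset
+	 *	// ... critical section ...
+	 *	mov	x0, x19
+	 *	bl	write_daif	// restore the saved mask
+	 */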
+func enable_irq
+ msr daifclr, #DAIF_IRQ_BIT
+ ret
+
+
+func enable_fiq
+ msr daifclr, #DAIF_FIQ_BIT
+ ret
+
+
+func enable_serror
+ msr daifclr, #DAIF_ABT_BIT
+ ret
+
+
+func enable_debug_exceptions
+ msr daifclr, #DAIF_DBG_BIT
+ ret
+
+
+func disable_irq
+ msr daifset, #DAIF_IRQ_BIT
+ ret
+
+
+func disable_fiq
+ msr daifset, #DAIF_FIQ_BIT
+ ret
+
+
+func disable_serror
+ msr daifset, #DAIF_ABT_BIT
+ ret
+
+
+func disable_debug_exceptions
+ msr daifset, #DAIF_DBG_BIT
+ ret
+
+
+func read_daif
+ mrs x0, daif
+ ret
+
+
+func write_daif
+ msr daif, x0
+ ret
+
+
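+	/* -----------------------------------------------------
+	 * read_spsr/write_spsr and read_elr/write_elr below
+	 * dispatch on CurrentEL to the accessor for the executing
+	 * exception level, falling through to the EL1 variant
+	 * when no case matches.
+	 * -----------------------------------------------------
+	 */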
+func read_spsr
+ mrs x0, CurrentEl
+ cmp x0, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq read_spsr_el1
+ cmp x0, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq read_spsr_el2
+ cmp x0, #(MODE_EL3 << MODE_EL_SHIFT)
+ b.eq read_spsr_el3
+
+
+func read_spsr_el1
+ mrs x0, spsr_el1
+ ret
+
+
+func read_spsr_el2
+ mrs x0, spsr_el2
+ ret
+
+
+func read_spsr_el3
+ mrs x0, spsr_el3
+ ret
+
+
+func write_spsr
+ mrs x1, CurrentEl
+ cmp x1, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq write_spsr_el1
+ cmp x1, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq write_spsr_el2
+ cmp x1, #(MODE_EL3 << MODE_EL_SHIFT)
+ b.eq write_spsr_el3
+
+
+func write_spsr_el1
+ msr spsr_el1, x0
+ isb
+ ret
+
+
+func write_spsr_el2
+ msr spsr_el2, x0
+ isb
+ ret
+
+
+func write_spsr_el3
+ msr spsr_el3, x0
+ isb
+ ret
+
+
+func read_elr
+ mrs x0, CurrentEl
+ cmp x0, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq read_elr_el1
+ cmp x0, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq read_elr_el2
+ cmp x0, #(MODE_EL3 << MODE_EL_SHIFT)
+ b.eq read_elr_el3
+
+
+func read_elr_el1
+ mrs x0, elr_el1
+ ret
+
+
+func read_elr_el2
+ mrs x0, elr_el2
+ ret
+
+
+func read_elr_el3
+ mrs x0, elr_el3
+ ret
+
+
+func write_elr
+ mrs x1, CurrentEl
+ cmp x1, #(MODE_EL1 << MODE_EL_SHIFT)
+ b.eq write_elr_el1
+ cmp x1, #(MODE_EL2 << MODE_EL_SHIFT)
+ b.eq write_elr_el2
+ cmp x1, #(MODE_EL3 << MODE_EL_SHIFT)
+ b.eq write_elr_el3
+
+
+func write_elr_el1
+ msr elr_el1, x0
+ isb
+ ret
+
+
+func write_elr_el2
+ msr elr_el2, x0
+ isb
+ ret
+
+
+func write_elr_el3
+ msr elr_el3, x0
+ isb
+ ret
+
+
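+	/* -----------------------------------------------------
+	 * Thin wrappers that expose barrier, hint and exception
+	 * return instructions to C callers. Note that eret and
+	 * smc do not end in ret, so control is not expected to
+	 * come back to the caller by a normal return.
+	 * -----------------------------------------------------
+	 */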
+func dsb
+ dsb sy
+ ret
+
+
+func isb
+ isb
+ ret
+
+
+func sev
+ sev
+ ret
+
+
+func wfe
+ wfe
+ ret
+
+
+func wfi
+ wfi
+ ret
+
+
+func eret
+ eret
+
+
+func smc
+ smc #0
+
+/* -----------------------------------------------------------------------
+ * void zeromem16(void *mem, unsigned int length);
+ *
+ * Initialise a memory region to 0.
+ * The memory address must be 16-byte aligned.
+ * -----------------------------------------------------------------------
+ */
+func zeromem16
+ add x2, x0, x1
+/* zero 16 bytes at a time */
+z_loop16:
+ sub x3, x2, x0
+ cmp x3, #16
+ b.lt z_loop1
+ stp xzr, xzr, [x0], #16
+ b z_loop16
+/* zero the remaining bytes one at a time */
+z_loop1:
+ cmp x0, x2
+ b.eq z_end
+ strb wzr, [x0], #1
+ b z_loop1
+z_end: ret
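+
+/*
+ * Illustrative call (a sketch, not part of this commit); my_buffer is
+ * a hypothetical 16-byte aligned, 64-byte region:
+ *
+ *	adr	x0, my_buffer
+ *	mov	x1, #64
+ *	bl	zeromem16
+ */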
+
+
+/* --------------------------------------------------------------------------
+ * void memcpy16(void *dest, const void *src, unsigned int length)
+ *
+ * Copy length bytes from memory area src to memory area dest.
+ * The memory areas should not overlap.
+ * Destination and source addresses must be 16-byte aligned.
+ * --------------------------------------------------------------------------
+ */
+func memcpy16
+/* copy 16 bytes at a time */
+m_loop16:
+ cmp x2, #16
+ b.lt m_loop1
+ ldp x3, x4, [x1], #16
+ stp x3, x4, [x0], #16
+ sub x2, x2, #16
+ b m_loop16
+/* copy the remaining bytes one at a time */
+m_loop1:
+ cbz x2, m_end
+ ldrb w3, [x1], #1
+ strb w3, [x0], #1
+ subs x2, x2, #1
+ b.ne m_loop1
+m_end: ret
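+
+
+/*
+ * Illustrative call (a sketch, not part of this commit); dst_buf and
+ * src_buf are hypothetical 16-byte aligned, non-overlapping regions:
+ *
+ *	adr	x0, dst_buf
+ *	adr	x1, src_buf
+ *	mov	x2, #32
+ *	bl	memcpy16
+ */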