Diffstat (limited to 'big-little/common/pagetable_setup.c')
-rwxr-xr-x  big-little/common/pagetable_setup.c  585
1 file changed, 292 insertions, 293 deletions
diff --git a/big-little/common/pagetable_setup.c b/big-little/common/pagetable_setup.c
index fa4e6fd..0390153 100755
--- a/big-little/common/pagetable_setup.c
+++ b/big-little/common/pagetable_setup.c
@@ -18,7 +18,7 @@
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
- */
+ */
/* ----------------------------------------------------------------------------
* i n c l u d e s
@@ -30,10 +30,10 @@
#include "virt_helpers.h"
typedef struct {
- unsigned va;
- unsigned pa;
- unsigned long long attrs;
- unsigned long long *pt_addr;
+ unsigned va;
+ unsigned pa;
+ unsigned long long attrs;
+ unsigned long long *pt_addr;
} four_kb_pt_desc;
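Each four_kb_pt_desc bundles everything needed to describe one 4KB
mapping: the target VA/PA pair, the descriptor attributes, and the L3
table the entry lives in. A minimal sketch of populating one, using
assumed addresses and the attribute macros this file already defines:

    /* Hypothetical flat-mapped 4KB device page */
    four_kb_pt_desc desc;
    desc.va      = 0x7FE00000;    /* assumed virtual address         */
    desc.pa      = 0x7FE00000;    /* flat mapping: same physical     */
    desc.attrs   = ACCESS_FLAG | SHAREABILITY(0x3) | AP(KERN_RW);
    desc.pt_addr = hyp_l3_so_pt;  /* 512-entry, 4KB-aligned L3 table */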
/* ----------------------------------------------------------------------------
@@ -42,7 +42,7 @@ typedef struct {
#define LEVEL1 0x1
#define LEVEL2 0x2
-#define HYP_PA_START 0x00000000 /* Flat mapping */
+#define HYP_PA_START 0x00000000 /* Flat mapping */
#define HYP_PA_END 0xFFFFFFFF
#define HYP_VA_START HYP_PA_START
#define HYP_VA_END HYP_PA_END
@@ -147,58 +147,58 @@ unsigned long long stage2_l3_so_pt[512] __attribute__ ((aligned(4096)));
* cpu interface for OS use.
*/
static void CreateL3PageTable(four_kb_pt_desc * l3_mapping, unsigned level,
- unsigned long long *base_pt_addr)
+ unsigned long long *base_pt_addr)
{
- unsigned one_gb_index = l3_mapping->pa >> 30;
- unsigned two_mb_index = l3_mapping->pa >> 21;
- unsigned four_kb_index = 0;
- unsigned pa_4k_index = 0;
- unsigned long long l1_desc = 0;
- unsigned long long *l2_desc = 0;
- unsigned long long old_attrs = 0;
- unsigned long long *l1_pt_addr = 0;
- unsigned long long *l2_pt_addr = 0;
- unsigned long long *l3_pt_addr = l3_mapping->pt_addr;
-
- /*
- * Indices calculated above are relative to the GB or MB they
- * belong to rather than an offset of 0x0. e.g. for the 2mb index
- * index = (address >> 21) - (<number of 2MBs in 1GB> x <this GB index>)
- */
-
- /* Calculate the level 2 page table descriptor */
- if (level == 1) {
- l1_pt_addr = base_pt_addr;
- l1_desc = l1_pt_addr[one_gb_index];
- l2_pt_addr =
- (unsigned long long
- *)((unsigned)((&l1_desc)[0] & 0xfffff000UL));
- l2_desc = &l2_pt_addr[two_mb_index - (512 * one_gb_index)];
- } else {
- l2_pt_addr = &base_pt_addr[one_gb_index << 9];
- l2_desc = &base_pt_addr[two_mb_index - (512 * one_gb_index)];
- }
-
- /* Preserve the old attributes */
- old_attrs = *l2_desc & 0xfff0000000000fffULL;
- /* Replace block mapping with table mapping */
- *l2_desc = (unsigned long long)l3_pt_addr | TABLE_MAPPING;
-
- /* Create a flat mapping for all 4k descriptors to begin with */
- for (four_kb_index = 0; four_kb_index < 512; four_kb_index++) {
- l3_pt_addr[four_kb_index] =
- (((two_mb_index << 9) +
- four_kb_index) << 12) | old_attrs | VALID_MAPPING;
- }
- pa_4k_index = ((l3_mapping->pa << 11) >> 11) >> 12;
-
- /*
- * Replace the existing descriptor with new mapping and attributes
- */
- l3_pt_addr[pa_4k_index] =
- l3_mapping->va | l3_mapping->attrs | VALID_MAPPING;
-
- return;
+ unsigned one_gb_index = l3_mapping->pa >> 30;
+ unsigned two_mb_index = l3_mapping->pa >> 21;
+ unsigned four_kb_index = 0;
+ unsigned pa_4k_index = 0;
+ unsigned long long l1_desc = 0;
+ unsigned long long *l2_desc = 0;
+ unsigned long long old_attrs = 0;
+ unsigned long long *l1_pt_addr = 0;
+ unsigned long long *l2_pt_addr = 0;
+ unsigned long long *l3_pt_addr = l3_mapping->pt_addr;
+
+ /*
+ * Indices calculated above are relative to the GB or MB they
+ * belong to rather than an offset of 0x0. e.g. for the 2mb index
+ * index = (address >> 21) - (<number of 2MBs in 1GB> x <this GB index>)
+ */
+
+ /* Calculate the level 2 page table descriptor */
+ if (level == 1) {
+ l1_pt_addr = base_pt_addr;
+ l1_desc = l1_pt_addr[one_gb_index];
+ l2_pt_addr =
+ (unsigned long long
+ *)((unsigned)((&l1_desc)[0] & 0xfffff000UL));
+ l2_desc = &l2_pt_addr[two_mb_index - (512 * one_gb_index)];
+ } else {
+ l2_pt_addr = &base_pt_addr[one_gb_index << 9];
+ l2_desc = &base_pt_addr[two_mb_index - (512 * one_gb_index)];
+ }
+
+ /* Preserve the old attributes */
+ old_attrs = *l2_desc & 0xfff0000000000fffULL;
+ /* Replace block mapping with table mapping */
+ *l2_desc = (unsigned long long)l3_pt_addr | TABLE_MAPPING;
+
+ /* Create a flat mapping for all 4k descriptors to begin with */
+ for (four_kb_index = 0; four_kb_index < 512; four_kb_index++) {
+ l3_pt_addr[four_kb_index] =
+ (((two_mb_index << 9) +
+ four_kb_index) << 12) | old_attrs | VALID_MAPPING;
+ }
+ pa_4k_index = ((l3_mapping->pa << 11) >> 11) >> 12;
+
+ /*
+ * Replace the existing descriptor with new mapping and attributes
+ */
+ l3_pt_addr[pa_4k_index] =
+ l3_mapping->va | l3_mapping->attrs | VALID_MAPPING;
+
+ return;
}
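The relative-index arithmetic above is easiest to follow with concrete
numbers. A worked sketch, assuming the page at pa = 0x2C002000:

    unsigned pa = 0x2C002000;              /* hypothetical 4KB page    */
    unsigned one_gb_index = pa >> 30;      /* = 0   (first GB)         */
    unsigned two_mb_index = pa >> 21;      /* = 352 (global 2MB index) */
    unsigned l2_slot = two_mb_index - (512 * one_gb_index); /* = 352   */
    unsigned pa_4k_index = ((pa << 11) >> 11) >> 12;        /* = 2     */
    /*
     * ((pa << 11) >> 11) keeps only the low 21 bits, so pa_4k_index
     * is (pa >> 12) & 0x1ff: the page's slot within its 2MB block.
     */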
/*
@@ -206,262 +206,261 @@ static void CreateL3PageTable(four_kb_pt_desc * l3_mapping, unsigned level,
*/
static void Add4KMapping(four_kb_pt_desc * l3_mapping)
{
- unsigned pa_4k_index = ((l3_mapping->pa << 11) >> 11) >> 12;
- unsigned long long *l3_pt_addr = l3_mapping->pt_addr;
+ unsigned pa_4k_index = ((l3_mapping->pa << 11) >> 11) >> 12;
+ unsigned long long *l3_pt_addr = l3_mapping->pt_addr;
- /*
- * Replace the existing descriptor with new mapping and attributes
- */
- l3_pt_addr[pa_4k_index] =
- l3_mapping->va | l3_mapping->attrs | VALID_MAPPING;
+ /*
+ * Replace the existing descriptor with new mapping and attributes
+ */
+ l3_pt_addr[pa_4k_index] =
+ l3_mapping->va | l3_mapping->attrs | VALID_MAPPING;
- return;
+ return;
}
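Add4KMapping rewrites a single L3 entry and nothing else, so it is only
valid once CreateL3PageTable has replaced the covering 2MB block with a
table. A sketch of mapping a further, hypothetical third GIC page
through the same stage-2 L3 table:

    four_kb_pt_desc desc;
    desc.va      = VGIC_VM_PHY_BASE + 0x2000;  /* assumed third page */
    desc.pa      = GIC_IC_PHY_BASE + 0x2000;
    desc.attrs   = ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) |
                   MEM_ATTR(0x1);
    desc.pt_addr = stage2_l3_cpuif_pt;         /* installed earlier  */
    Add4KMapping(&desc);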
void CreateHypModePageTables(void)
{
- unsigned num_l1_descs = 0, num_l2_descs = 0;
- unsigned l1_index, l2_index;
- unsigned long long l2_attrs = 0;
- four_kb_pt_desc l3_desc;
-
- /* Create the pagetables */
- num_l1_descs = ((HYP_PA_END - HYP_PA_START) >> 30) + 1;
- num_l2_descs = ((HYP_PA_END - HYP_PA_START) >> 21) + 1;
-
- /* Only the first 4GB are valid translations */
- for (l1_index = 0; l1_index < num_l1_descs; l1_index++) {
- hyp_l1_pagetable[l1_index] =
- (unsigned long long)&hyp_l2_pagetable[l1_index][0] |
- TABLE_MAPPING;
- for (l2_index = 0; l2_index < num_l2_descs / num_l1_descs;
- l2_index++) {
-
- if ((l2_index + (l1_index << 9)) < 32) {
- /* 0-64M(Secure ROM/NOR Flash):Block mapping with RO access, Inner shareable, Inner/Outer WBWA */
- l2_attrs =
- BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
- NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
- AP(KERN_RO);
- ((unsigned *) &l2_attrs)[1] |= XN;
- }
- else if ((l2_index + (l1_index << 9)) < 64)
- /* 64-128M(Secure RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
- l2_attrs =
- BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
- NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
- AP(KERN_RW);
- else if ((l2_index + (l1_index << 9)) < 1024) {
- /* 128-2048M (Peripherals) : Block mapping of Device memory */
- l2_attrs =
- BLOCK_MAPPING | HMAIR0_DEVICE_MEM_ATTR_IDX |
- NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
- AP(KERN_RW);
- ((unsigned *) &l2_attrs)[1] |= XN;
- }
- else
- /* 2-4GB (RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
- l2_attrs =
- BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
- NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
- AP(KERN_RW);
-
- hyp_l2_pagetable[l1_index][l2_index] =
- ((l2_index + (l1_index << 9)) << 21) | l2_attrs;
- }
- }
-
- /*
- * Create a mapping for a device page to be used
- * for Locks, Events & anything that is shared when both
- * the clusters are executing at the same time.
- */
- l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
- l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
- l3_desc.attrs =
- ACCESS_FLAG | HMAIR0_DEVICE_MEM_ATTR_IDX | SHAREABILITY(0x3) |
- AP(KERN_RW);
- l3_desc.pt_addr = hyp_l3_so_pt;
- CreateL3PageTable(&l3_desc, LEVEL1, (unsigned long long *)hyp_l1_pagetable);
-
- return;
+ unsigned num_l1_descs = 0, num_l2_descs = 0;
+ unsigned l1_index, l2_index;
+ unsigned long long l2_attrs = 0;
+ four_kb_pt_desc l3_desc;
+
+ /* Create the pagetables */
+ num_l1_descs = ((HYP_PA_END - HYP_PA_START) >> 30) + 1;
+ num_l2_descs = ((HYP_PA_END - HYP_PA_START) >> 21) + 1;
+
+ /* Only the first 4GB are valid translations */
+ for (l1_index = 0; l1_index < num_l1_descs; l1_index++) {
+ hyp_l1_pagetable[l1_index] =
+ (unsigned long long)&hyp_l2_pagetable[l1_index][0] |
+ TABLE_MAPPING;
+ for (l2_index = 0; l2_index < num_l2_descs / num_l1_descs;
+ l2_index++) {
+
+ if ((l2_index + (l1_index << 9)) < 32) {
+ /* 0-64M(Secure ROM/NOR Flash):Block mapping with RO access, Inner shareable, Inner/Outer WBWA */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RO);
+ ((unsigned *)&l2_attrs)[1] |= XN;
+ } else if ((l2_index + (l1_index << 9)) < 64)
+ /* 64-128M(Secure RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RW);
+ else if ((l2_index + (l1_index << 9)) < 1024) {
+ /* 128-2048M (Peripherals) : Block mapping of Device memory */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_DEVICE_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RW);
+ ((unsigned *)&l2_attrs)[1] |= XN;
+ } else
+ /* 2-4GB (RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ l2_attrs =
+ BLOCK_MAPPING | HMAIR0_NORMAL_MEM_ATTR_IDX |
+ NS_BIT | SHAREABILITY(0x3) | ACCESS_FLAG |
+ AP(KERN_RW);
+
+ hyp_l2_pagetable[l1_index][l2_index] =
+ ((l2_index + (l1_index << 9)) << 21) | l2_attrs;
+ }
+ }
+
+ /*
+ * Create a mapping for a device page to be used
+ * for Locks, Events & anything that is shared when both
+ * the clusters are executing at the same time.
+ */
+ l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.attrs =
+ ACCESS_FLAG | HMAIR0_DEVICE_MEM_ATTR_IDX | SHAREABILITY(0x3) |
+ AP(KERN_RW);
+ l3_desc.pt_addr = hyp_l3_so_pt;
+ CreateL3PageTable(&l3_desc, LEVEL1,
+ (unsigned long long *)hyp_l1_pagetable);
+
+ return;
}
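Each hyp L2 entry written above maps one 2MB block, and its output
address is just the global 2MB index shifted into bits [31:21]. A
worked example with assumed indices:

    unsigned l1_index = 1, l2_index = 3;               /* hypothetical */
    unsigned global_2mb = l2_index + (l1_index << 9);  /* = 515        */
    unsigned base = global_2mb << 21;                  /* = 0x40600000 */
    /*
     * hyp_l2_pagetable[1][3] therefore flat-maps 0x40600000-0x407FFFFF,
     * which falls in the 128M-2048M device region (global index < 1024).
     */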
void EnableHypModePageTables(void)
{
- /* Update the HTTBR */
- write_httbr((unsigned long long)hyp_l1_pagetable);
-
- /*
- * Setup the HMAIR0 register.
- * [7:0] = Device memory
- * [15:8] = Normal memory, Inner and outer cacheable, WBWA
- */
- write_hmair0(IDX2(HMAIR_SO_MEM) |
- IDX1(HMAIR_INNER_WB_RWA_MEM | HMAIR_OUTER_WB_RWA_MEM) |
- IDX0(HMAIR_DEVICE_MEM));
-
- /*
- * Set the HTCR.
- * Pagetables are Normal memory, Inner/Outer shareable, Inner/Outer WBWA
- */
- write_htcr(EAE(ENABLE) | SH0(CR_INNER_SH) | ORGN0(CR_C_WBWA) |
- IRGN0(CR_C_WBWA) | T0SZ(CR_ADDR_SPC_4GB));
-
- /* Enable the Hyp MMU */
- write_hsctlr(ICACHE(ENABLE) | DCACHE(ENABLE) | ALIGNMENT(ENABLE) |
- MMU(ENABLE));
-
- return;
+ /* Update the HTTBR */
+ write_httbr((unsigned long long)hyp_l1_pagetable);
+
+ /*
+ * Setup the HMAIR0 register.
+ * [7:0] = Device memory
+ * [15:8] = Normal memory, Inner and outer cacheable, WBWA
+ */
+ write_hmair0(IDX2(HMAIR_SO_MEM) |
+ IDX1(HMAIR_INNER_WB_RWA_MEM | HMAIR_OUTER_WB_RWA_MEM) |
+ IDX0(HMAIR_DEVICE_MEM));
+
+ /*
+ * Set the HTCR.
+ * Pagetables are Normal memory, Inner/Outer shareable, Inner/Outer WBWA
+ */
+ write_htcr(EAE(ENABLE) | SH0(CR_INNER_SH) | ORGN0(CR_C_WBWA) |
+ IRGN0(CR_C_WBWA) | T0SZ(CR_ADDR_SPC_4GB));
+
+ /* Enable the Hyp MMU */
+ write_hsctlr(ICACHE(ENABLE) | DCACHE(ENABLE) | ALIGNMENT(ENABLE) |
+ MMU(ENABLE));
+
+ return;
}
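HMAIR0 follows the MAIR layout: one 8-bit memory-attribute encoding per
attribute index, with AttrIdx n occupying byte n. The IDXn() helpers
used above are therefore assumed to look roughly like this (the real
definitions live elsewhere in this codebase):

    #define IDX0(attr) ((attr) << 0)   /* AttrIdx 0: device memory      */
    #define IDX1(attr) ((attr) << 8)   /* AttrIdx 1: normal WBWA memory */
    #define IDX2(attr) ((attr) << 16)  /* AttrIdx 2: strongly-ordered   */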
void Create2ndStagePageTables(void)
{
- unsigned two_mb_index = 0;
- unsigned one_gb_index = 0;
- unsigned long long level2_desc = 0;
- four_kb_pt_desc l3_desc = { 0 };
-
- /*
- * Create the flat mapped 2nd stage page tables.
- * This should be done only once. The remaining
- * cpus can share the mappings and wait while
- * this is being done.
- */
- for (one_gb_index = 0; one_gb_index < 4; one_gb_index++)
- for (two_mb_index = 0; two_mb_index < 512; two_mb_index++) {
-
- if ((two_mb_index + (one_gb_index << 9)) < 32)
- /* 0-64M (Secure ROM/NOR Flash) : Block mapping with RO access, Inner shareable, Inner/Outer WBWA */
- level2_desc =
- ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RO)
- | MEM_ATTR(0xf) | BLOCK_MAPPING;
- else if ((two_mb_index + (one_gb_index << 9)) < 64)
- /* 64-128M (Secure RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
- level2_desc =
- ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
- | MEM_ATTR(0xf) | BLOCK_MAPPING;
- else if ((two_mb_index + (one_gb_index << 9)) < 1024)
- /* 128-2048M (Peripherals) : Block mapping of Device memory */
- level2_desc =
- ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
- | MEM_ATTR(0x1) | BLOCK_MAPPING;
- else
- /* 2-4GB (RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
- level2_desc =
- ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
- | MEM_ATTR(0xf) | BLOCK_MAPPING;
-
- stage2_l2_pagetable[one_gb_index][two_mb_index] =
- (two_mb_index +
- (512 * one_gb_index) << 21) | level2_desc;
-
- }
-
- /* First 4KB Mapping PCPUIF to the VCPUIF for the payload software */
- l3_desc.va = VGIC_VM_PHY_BASE;
- l3_desc.pa = GIC_IC_PHY_BASE;
- l3_desc.attrs =
- ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
- l3_desc.pt_addr = stage2_l3_cpuif_pt;
- CreateL3PageTable(&l3_desc, LEVEL2,
- (unsigned long long *)stage2_l2_pagetable);
-
- /* Second 4KB Mapping PCPUIF to the VCPUIF for the payload software */
- l3_desc.va = VGIC_VM_PHY_BASE + 0x1000;
- l3_desc.pa = GIC_IC_PHY_BASE + 0x1000;
- l3_desc.attrs =
- ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
- l3_desc.pt_addr = stage2_l3_cpuif_pt;
- Add4KMapping(&l3_desc);
-
- /*
- * Create a mapping for a device page to be used
- * for Locks, Events & anything that is shared when both
- * the clusters are executing at the same time.
- */
- l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
- l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
- l3_desc.attrs =
- ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
- l3_desc.pt_addr = stage2_l3_so_pt;
- CreateL3PageTable(&l3_desc, LEVEL2,
- (unsigned long long *)stage2_l2_pagetable);
-
- return;
+ unsigned two_mb_index = 0;
+ unsigned one_gb_index = 0;
+ unsigned long long level2_desc = 0;
+ four_kb_pt_desc l3_desc = { 0 };
+
+ /*
+ * Create the flat mapped 2nd stage page tables.
+ * This should be done only once. The remaining
+ * cpus can share the mappings and wait while
+ * this is being done.
+ */
+ for (one_gb_index = 0; one_gb_index < 4; one_gb_index++)
+ for (two_mb_index = 0; two_mb_index < 512; two_mb_index++) {
+
+ if ((two_mb_index + (one_gb_index << 9)) < 32)
+ /* 0-64M (Secure ROM/NOR Flash) : Block mapping with RO access, Inner shareable, Inner/Outer WBWA */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RO)
+ | MEM_ATTR(0xf) | BLOCK_MAPPING;
+ else if ((two_mb_index + (one_gb_index << 9)) < 64)
+ /* 64-128M (Secure RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
+ | MEM_ATTR(0xf) | BLOCK_MAPPING;
+ else if ((two_mb_index + (one_gb_index << 9)) < 1024)
+ /* 128-2048M (Peripherals) : Block mapping of Device memory */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
+ | MEM_ATTR(0x1) | BLOCK_MAPPING;
+ else
+ /* 2-4GB (RAM) : Block mapping with RW access, Inner shareable, Inner/Outer WBWA */
+ level2_desc =
+ ACCESS_FLAG | SHAREABILITY(0x3) | AP(HAP_RW)
+ | MEM_ATTR(0xf) | BLOCK_MAPPING;
+
+ stage2_l2_pagetable[one_gb_index][two_mb_index] =
+ (two_mb_index +
+ (512 * one_gb_index) << 21) | level2_desc;
+
+ }
+
+ /* First 4KB Mapping PCPUIF to the VCPUIF for the payload software */
+ l3_desc.va = VGIC_VM_PHY_BASE;
+ l3_desc.pa = GIC_IC_PHY_BASE;
+ l3_desc.attrs =
+ ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
+ l3_desc.pt_addr = stage2_l3_cpuif_pt;
+ CreateL3PageTable(&l3_desc, LEVEL2,
+ (unsigned long long *)stage2_l2_pagetable);
+
+ /* Second 4KB Mapping PCPUIF to the VCPUIF for the payload software */
+ l3_desc.va = VGIC_VM_PHY_BASE + 0x1000;
+ l3_desc.pa = GIC_IC_PHY_BASE + 0x1000;
+ l3_desc.attrs =
+ ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
+ l3_desc.pt_addr = stage2_l3_cpuif_pt;
+ Add4KMapping(&l3_desc);
+
+ /*
+ * Create a mapping for a device page to be used
+ * for Locks, Events & anything that is shared when both
+ * the clusters are executing at the same time.
+ */
+ l3_desc.va = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.pa = (unsigned)&BL_DV_PAGE$$Base;
+ l3_desc.attrs =
+ ACCESS_FLAG | SHAREABILITY(0x3) | ACCESS_PERM(0x3) | MEM_ATTR(0x1);
+ l3_desc.pt_addr = stage2_l3_so_pt;
+ CreateL3PageTable(&l3_desc, LEVEL2,
+ (unsigned long long *)stage2_l2_pagetable);
+
+ return;
}
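One subtlety in the 2MB block base computed above: '+' binds tighter
than '<<', so (two_mb_index + (512 * one_gb_index) << 21) parses as
((two_mb_index + 512 * one_gb_index) << 21), which is the intended
global block base. A worked check with assumed indices:

    /* 2MB block 1 of GB 2: global index 1025 */
    unsigned ipa_base = (1 + (512 * 2)) << 21;   /* = 0x80200000 */
    /* i.e. the entry maps IPA 0x80200000-0x803FFFFF */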
void Enable2ndStagePageTables(void)
{
- /*
- * Set the VTCR to:
- * Normal memory outer shareable, Device memory shareable
- * Outer and Inner WBWA
- * Start at level 2
- * Size of addressed region is 4GB (16k worth of page tables)
- */
- write_vtcr(SH0(CR_INNER_SH) | ORGN0(CR_C_WBWA) | IRGN0(CR_C_WBWA));
-
- /* Address is already aligned to 16k or 2*14 */
- write_vttbr((unsigned long long)stage2_l2_pagetable);
-
- write_hcr(read_hcr() | HCR_VM);
-
- /*
- * TODO: We do not need a synchronization barrier here as we
- * are not yet executing out of NS PL0 & PL1 and there will be
- * a barrier at some point before that.
- */
- return;
+ /*
+ * Set the VTCR to:
+ * Normal memory outer shareable, Device memory shareable
+ * Outer and Inner WBWA
+ * Start at level 2
+ * Size of addressed region is 4GB (16k worth of page tables)
+ */
+ write_vtcr(SH0(CR_INNER_SH) | ORGN0(CR_C_WBWA) | IRGN0(CR_C_WBWA));
+
+	/* Address is already aligned to 16k or 2^14 */
+ write_vttbr((unsigned long long)stage2_l2_pagetable);
+
+ write_hcr(read_hcr() | HCR_VM);
+
+ /*
+ * TODO: We do not need a synchronization barrier here as we
+ * are not yet executing out of NS PL0 & PL1 and there will be
+ * a barrier at some point before that.
+ */
+ return;
}
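The 16KB (2^14) alignment the VTTBR comment refers to comes from table
concatenation: a 4GB IPA space walked from level 2 needs four 512-entry
L2 tables laid out back to back, i.e. 4 x 4KB = 16KB that VTTBR must
point at with 2^14 alignment. A sketch of a declaration that satisfies
this, assuming the real one earlier in this file is similar:

    /* Four concatenated 4KB level-2 tables: 4 * 512 * 8 bytes = 16KB */
    unsigned long long stage2_l2_pagetable[4][512]
        __attribute__ ((aligned(16384)));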
void SetupVirtExtPageTables(void)
{
- unsigned cpu_id = read_cpuid();
- unsigned first_cpu = find_first_cpu();
- unsigned cluster_id = read_clusterid();
- unsigned abs_cpuid = 0;
-
- if (!switcher)
- abs_cpuid = abs_cpuid(cpu_id, cluster_id);
-
- /*
- * First cpu creates the pagetables after
- * a cold reset. Reused by all cpus across
- * warm resets.
- */
- if (switcher ) {
-
- /*
- * While switching its possible that the host cluster
- * is brought out of reset first. Hence, the first
- * cpu of whichever cluster reaches here does the
- * pagetable setup
- */
- if (cpu_id == first_cpu) {
- CreateHypModePageTables();
- Create2ndStagePageTables();
- set_events(VIRT_PGT_DONE);
- }
-
- wait_for_event(VIRT_PGT_DONE, cpu_id);
- reset_event(VIRT_PGT_DONE, cpu_id);
-
- } else {
-
- /*
- * Any cluster can do the initialisation as long as
- * only one of them does it.
- */
- if (cpu_id == first_cpu && cluster_id == host_cluster) {
- CreateHypModePageTables();
- Create2ndStagePageTables();
- set_events(VIRT_PGT_DONE);
- }
-
- wait_for_event(VIRT_PGT_DONE, abs_cpuid);
- reset_event(VIRT_PGT_DONE, abs_cpuid);
- }
-
- return;
+ unsigned cpu_id = read_cpuid();
+ unsigned first_cpu = find_first_cpu();
+ unsigned cluster_id = read_clusterid();
+ unsigned abs_cpuid = 0;
+
+ if (!switcher)
+ abs_cpuid = abs_cpuid(cpu_id, cluster_id);
+
+ /*
+ * First cpu creates the pagetables after
+ * a cold reset. Reused by all cpus across
+ * warm resets.
+ */
+ if (switcher) {
+
+ /*
+	 * While switching it's possible that the host cluster
+ * is brought out of reset first. Hence, the first
+ * cpu of whichever cluster reaches here does the
+ * pagetable setup
+ */
+ if (cpu_id == first_cpu) {
+ CreateHypModePageTables();
+ Create2ndStagePageTables();
+ set_events(VIRT_PGT_DONE);
+ }
+
+ wait_for_event(VIRT_PGT_DONE, cpu_id);
+ reset_event(VIRT_PGT_DONE, cpu_id);
+
+ } else {
+
+ /*
+ * Any cluster can do the initialisation as long as
+ * only one of them does it.
+ */
+ if (cpu_id == first_cpu && cluster_id == host_cluster) {
+ CreateHypModePageTables();
+ Create2ndStagePageTables();
+ set_events(VIRT_PGT_DONE);
+ }
+
+ wait_for_event(VIRT_PGT_DONE, abs_cpuid);
+ reset_event(VIRT_PGT_DONE, abs_cpuid);
+ }
+
+ return;
}
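In the non-switcher case both clusters run concurrently, so the event
bookkeeping is per absolute CPU rather than per CPU-within-cluster. The
abs_cpuid() macro used above is presumably along these lines, where
CPUS_PER_CLUSTER is a stand-in name for whatever constant the codebase
really uses:

    /* Assumed shape: a unique id across both clusters */
    #define abs_cpuid(cpu_id, cluster_id) \
            ((cpu_id) + (cluster_id) * CPUS_PER_CLUSTER)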