aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/xlat_tables_v2/aarch32/enable_mmu.S66
-rw-r--r--lib/xlat_tables_v2/aarch32/xlat_tables_arch.c47
-rw-r--r--lib/xlat_tables_v2/aarch64/enable_mmu.S91
-rw-r--r--lib/xlat_tables_v2/aarch64/xlat_tables_arch.c96
-rw-r--r--lib/xlat_tables_v2/xlat_tables.mk3
-rw-r--r--lib/xlat_tables_v2/xlat_tables_internal.c11
-rw-r--r--lib/xlat_tables_v2/xlat_tables_private.h2
7 files changed, 204 insertions, 112 deletions
diff --git a/lib/xlat_tables_v2/aarch32/enable_mmu.S b/lib/xlat_tables_v2/aarch32/enable_mmu.S
new file mode 100644
index 000000000..97cdde751
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/enable_mmu.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+ .global enable_mmu_direct
+
+func enable_mmu_direct
+ /* Assert that MMU is turned off */
+#if ENABLE_ASSERTIONS
+ ldcopr r1, SCTLR
+ tst r1, #SCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* Invalidate TLB entries */
+ TLB_INVALIDATE(r0, TLBIALL)
+
+ mov r3, r0
+ ldr r0, =mmu_cfg_params
+
+ /* MAIR0 */
+ ldr r1, [r0, #(MMU_CFG_MAIR0 << 2)]
+ stcopr r1, MAIR0
+
+ /* TTBCR */
+ ldr r2, [r0, #(MMU_CFG_TCR << 2)]
+ stcopr r2, TTBCR
+
+ /* TTBR0 */
+ ldr r1, [r0, #(MMU_CFG_TTBR0_LO << 2)]
+ ldr r2, [r0, #(MMU_CFG_TTBR0_HI << 2)]
+ stcopr16 r1, r2, TTBR0_64
+
+ /* TTBR1 is unused right now; set it to 0. */
+ mov r1, #0
+ mov r2, #0
+ stcopr16 r1, r2, TTBR1_64
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+	/* Enable MMU by honoring flags */
+ ldcopr r1, SCTLR
+ ldr r2, =(SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT)
+ orr r1, r1, r2
+
+ /* Clear C bit if requested */
+ tst r3, #DISABLE_DCACHE
+ bicne r1, r1, #SCTLR_C_BIT
+
+ stcopr r1, SCTLR
+ isb
+
+ bx lr
+endfunc enable_mmu_direct
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index f66f802f3..94dcf578a 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,6 +18,8 @@
#error ARMv7 target does not support LPAE MMU descriptors
#endif
+uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
/*
* Returns 1 if the provided granule size is supported, 0 otherwise.
*/
@@ -109,22 +111,16 @@ int xlat_arch_current_el(void)
* Function for enabling the MMU in Secure PL1, assuming that the page tables
* have already been created.
******************************************************************************/
-void enable_mmu_arch(unsigned int flags,
- uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags,
+ const uint64_t *base_table,
unsigned long long max_pa,
uintptr_t max_va)
{
- u_register_t mair0, ttbcr, sctlr;
+ u_register_t mair0, ttbcr;
uint64_t ttbr0;
assert(IS_IN_SECURE());
- sctlr = read_sctlr();
- assert((sctlr & SCTLR_M_BIT) == 0);
-
- /* Invalidate TLBs at the current exception level */
- tlbiall();
-
/* Set attributes in the right indices of the MAIR */
mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
@@ -185,30 +181,9 @@ void enable_mmu_arch(unsigned int flags,
ttbr0 |= TTBR_CNP_BIT;
#endif
- /* Now program the relevant system registers */
- write_mair0(mair0);
- write_ttbcr(ttbcr);
- write64_ttbr0(ttbr0);
- write64_ttbr1(0);
-
- /*
- * Ensure all translation table writes have drained
- * into memory, the TLB invalidation is complete,
- * and translation register writes are committed
- * before enabling the MMU
- */
- dsbish();
- isb();
-
- sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
-
- if (flags & DISABLE_DCACHE)
- sctlr &= ~SCTLR_C_BIT;
- else
- sctlr |= SCTLR_C_BIT;
-
- write_sctlr(sctlr);
-
- /* Ensure the MMU enable takes effect immediately */
- isb();
+ /* Now populate MMU configuration */
+ mmu_cfg_params[MMU_CFG_MAIR0] = mair0;
+ mmu_cfg_params[MMU_CFG_TCR] = ttbcr;
+ mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr0;
+ mmu_cfg_params[MMU_CFG_TTBR0_HI] = ttbr0 >> 32;
}
diff --git a/lib/xlat_tables_v2/aarch64/enable_mmu.S b/lib/xlat_tables_v2/aarch64/enable_mmu.S
new file mode 100644
index 000000000..a72c7fae5
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/enable_mmu.S
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+ .global enable_mmu_direct_el1
+ .global enable_mmu_direct_el3
+
+ /* Macros to read and write to system register for a given EL. */
+ .macro _msr reg_name, el, gp_reg
+ msr \reg_name\()_el\()\el, \gp_reg
+ .endm
+
+ .macro _mrs gp_reg, reg_name, el
+ mrs \gp_reg, \reg_name\()_el\()\el
+ .endm
+
+ .macro define_mmu_enable_func el
+ func enable_mmu_direct_\()el\el
+#if ENABLE_ASSERTIONS
+ _mrs x1, sctlr, \el
+ tst x1, #SCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* Invalidate TLB entries */
+ .if \el == 1
+ TLB_INVALIDATE(vmalle1)
+ .else
+ .if \el == 3
+ TLB_INVALIDATE(alle3)
+ .else
+ .error "EL must be 1 or 3"
+ .endif
+ .endif
+
+ mov x7, x0
+ ldr x0, =mmu_cfg_params
+
+ /* MAIR */
+ ldr w1, [x0, #(MMU_CFG_MAIR0 << 2)]
+ _msr mair, \el, x1
+
+ /* TCR */
+ ldr w2, [x0, #(MMU_CFG_TCR << 2)]
+ _msr tcr, \el, x2
+
+ /* TTBR */
+ ldr w3, [x0, #(MMU_CFG_TTBR0_LO << 2)]
+ ldr w4, [x0, #(MMU_CFG_TTBR0_HI << 2)]
+ orr x3, x3, x4, lsl #32
+ _msr ttbr0, \el, x3
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+ /* Set and clear required fields of SCTLR */
+ _mrs x4, sctlr, \el
+ mov_imm x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT
+ orr x4, x4, x5
+
+ /* Additionally, amend SCTLR fields based on flags */
+ bic x5, x4, #SCTLR_C_BIT
+ tst x7, #DISABLE_DCACHE
+ csel x4, x5, x4, ne
+
+ _msr sctlr, \el, x4
+ isb
+
+ ret
+ endfunc enable_mmu_direct_\()el\el
+ .endm
+
+ /*
+ * Define MMU-enabling functions for EL1 and EL3:
+ *
+ * enable_mmu_direct_el1
+ * enable_mmu_direct_el3
+ */
+ define_mmu_enable_func 1
+ define_mmu_enable_func 3
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index c501e7074..71b9c8fae 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -16,6 +16,8 @@
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
+uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
/*
* Returns 1 if the provided granule size is supported, 0 otherwise.
*/
@@ -183,70 +185,13 @@ int xlat_arch_current_el(void)
return el;
}
-/*******************************************************************************
- * Macro generating the code for the function enabling the MMU in the given
- * exception level, assuming that the pagetables have already been created.
- *
- * _el: Exception level at which the function will run
- * _tlbi_fct: Function to invalidate the TLBs at the current
- * exception level
- ******************************************************************************/
-#define DEFINE_ENABLE_MMU_EL(_el, _tlbi_fct) \
- static void enable_mmu_internal_el##_el(int flags, \
- uint64_t mair, \
- uint64_t tcr, \
- uint64_t ttbr) \
- { \
- uint32_t sctlr = read_sctlr_el##_el(); \
- assert((sctlr & SCTLR_M_BIT) == 0); \
- \
- /* Invalidate TLBs at the current exception level */ \
- _tlbi_fct(); \
- \
- write_mair_el##_el(mair); \
- write_tcr_el##_el(tcr); \
- \
- /* Set TTBR bits as well */ \
- if (ARM_ARCH_AT_LEAST(8, 2)) { \
- /* Enable CnP bit so as to share page tables */ \
- /* with all PEs. This is mandatory for */ \
- /* ARMv8.2 implementations. */ \
- ttbr |= TTBR_CNP_BIT; \
- } \
- write_ttbr0_el##_el(ttbr); \
- \
- /* Ensure all translation table writes have drained */ \
- /* into memory, the TLB invalidation is complete, */ \
- /* and translation register writes are committed */ \
- /* before enabling the MMU */ \
- dsbish(); \
- isb(); \
- \
- sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; \
- if (flags & DISABLE_DCACHE) \
- sctlr &= ~SCTLR_C_BIT; \
- else \
- sctlr |= SCTLR_C_BIT; \
- \
- write_sctlr_el##_el(sctlr); \
- \
- /* Ensure the MMU enable takes effect immediately */ \
- isb(); \
- }
-
-/* Define EL1 and EL3 variants of the function enabling the MMU */
-#if IMAGE_EL == 1
-DEFINE_ENABLE_MMU_EL(1, tlbivmalle1)
-#elif IMAGE_EL == 3
-DEFINE_ENABLE_MMU_EL(3, tlbialle3)
-#endif
-
-void enable_mmu_arch(unsigned int flags,
- uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags,
+ const uint64_t *base_table,
unsigned long long max_pa,
uintptr_t max_va)
{
uint64_t mair, ttbr, tcr;
+ uintptr_t virtual_addr_space_size;
/* Set attributes in the right indices of the MAIR. */
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
@@ -256,27 +201,25 @@ void enable_mmu_arch(unsigned int flags,
ttbr = (uint64_t) base_table;
/*
- * Set TCR bits as well.
- */
-
- /*
* Limit the input address ranges and memory region sizes translated
* using TTBR0 to the given virtual address space size.
*/
- assert(max_va < UINTPTR_MAX);
- uintptr_t virtual_addr_space_size = max_va + 1;
+ assert(max_va < ((uint64_t) UINTPTR_MAX));
+
+ virtual_addr_space_size = max_va + 1;
assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+
/*
* __builtin_ctzll(0) is undefined but here we are guaranteed that
* virtual_addr_space_size is in the range [1,UINTPTR_MAX].
*/
- tcr = 64 - __builtin_ctzll(virtual_addr_space_size);
+ tcr = (uint64_t) 64 - __builtin_ctzll(virtual_addr_space_size);
/*
* Set the cacheability and shareability attributes for memory
* associated with translation table walks.
*/
- if (flags & XLAT_TABLE_NC) {
+ if ((flags & XLAT_TABLE_NC) != 0) {
/* Inner & outer non-cacheable non-shareable. */
tcr |= TCR_SH_NON_SHAREABLE |
TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
@@ -299,10 +242,23 @@ void enable_mmu_arch(unsigned int flags,
* translated using TTBR1_EL1.
*/
tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
- enable_mmu_internal_el1(flags, mair, tcr, ttbr);
#elif IMAGE_EL == 3
assert(IS_IN_EL(3));
tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
- enable_mmu_internal_el3(flags, mair, tcr, ttbr);
#endif
+
+ mmu_cfg_params[MMU_CFG_MAIR0] = (uint32_t) mair;
+ mmu_cfg_params[MMU_CFG_TCR] = (uint32_t) tcr;
+
+ /* Set TTBR bits as well */
+ if (ARM_ARCH_AT_LEAST(8, 2)) {
+ /*
+ * Enable CnP bit so as to share page tables with all PEs. This
+ * is mandatory for ARMv8.2 implementations.
+ */
+ ttbr |= TTBR_CNP_BIT;
+ }
+
+ mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr;
+ mmu_cfg_params[MMU_CFG_TTBR0_HI] = (uint32_t) (ttbr >> 32);
}
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
index 06dd844a2..1e70f37f7 100644
--- a/lib/xlat_tables_v2/xlat_tables.mk
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -1,10 +1,11 @@
#
-# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
+ ${ARCH}/enable_mmu.S \
${ARCH}/xlat_tables_arch.c \
xlat_tables_internal.c)
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 5beb51e90..7f1d3958a 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -802,7 +802,7 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
* that there is free space.
*/
assert(mm_last->size == 0U);
-
+
/* Make room for new region by moving other regions up by one place */
mm_destination = mm_cursor + 1;
memmove(mm_destination, mm_cursor,
@@ -1313,22 +1313,25 @@ void init_xlat_tables(void)
void enable_mmu_secure(unsigned int flags)
{
- enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address);
+ enable_mmu_direct(flags);
}
#else
void enable_mmu_el1(unsigned int flags)
{
- enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address);
+ enable_mmu_direct_el1(flags);
}
void enable_mmu_el3(unsigned int flags)
{
- enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address);
+ enable_mmu_direct_el3(flags);
}
#endif /* AARCH32 */
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 157dd0396..777189fb0 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -81,7 +81,7 @@ int xlat_arch_current_el(void);
unsigned long long xlat_arch_get_max_supported_pa(void);
/* Enable MMU and configure it to use the specified translation tables. */
-void enable_mmu_arch(unsigned int flags, uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags, const uint64_t *base_table,
unsigned long long max_pa, uintptr_t max_va);
/*