Diffstat (limited to 'lib')
 lib/aarch64/cache_helpers.S              |  6
 lib/xlat_tables_v2/ro_xlat_tables.mk     | 37
 lib/xlat_tables_v2/xlat_tables.mk        |  6
 lib/xlat_tables_v2/xlat_tables_context.c | 78
 4 files changed, 122 insertions(+), 5 deletions(-)
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index 9ef8ca79b..de9c8e4f0 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -30,7 +30,7 @@ loop_\op:
dc \op, x0
add x0, x0, x2
cmp x0, x1
- b.lo loop_\op
+ b.lo loop_\op
dsb sy
exit_loop_\op:
ret
@@ -140,7 +140,7 @@ loop3_\_op:
level_done:
add x10, x10, #2 // increment cache number
cmp x3, x10
- b.hi loop1
+ b.hi loop1
msr csselr_el1, xzr // select cache level 0 in csselr
dsb sy // barrier to complete final cache operation
isb
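
The two hunks above are whitespace-only fixes, but the loop they touch is the cache maintenance primitive that the AArch32 comment later in xlat_tables_context.c depends on. A rough C rendering of the first loop, illustrative only and not TF-A code ("dc civac" stands in for the templated "dc \op", and the function name is hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    /* Mirrors loop_\op above: issue one data-cache maintenance operation per
     * cache line across a virtual address range, then order completion with a
     * full-system data synchronization barrier. */
    static inline void dc_op_range_civac(uintptr_t cur, uintptr_t end,
                                         size_t line_size)
    {
            do {
                    __asm__ volatile("dc civac, %0" : : "r"(cur) : "memory");
                    cur += line_size;
            } while (cur < end);
            __asm__ volatile("dsb sy" : : : "memory");
    }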
diff --git a/lib/xlat_tables_v2/ro_xlat_tables.mk b/lib/xlat_tables_v2/ro_xlat_tables.mk
new file mode 100644
index 000000000..7991e1afd
--- /dev/null
+++ b/lib/xlat_tables_v2/ro_xlat_tables.mk
@@ -0,0 +1,37 @@
+#
+# Copyright (c) 2020, ARM Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${USE_DEBUGFS}, 1)
+ $(error "Debugfs requires functionality from the dynamic translation \
+ library and is incompatible with ALLOW_RO_XLAT_TABLES.")
+endif
+
+ifeq (${ARCH},aarch32)
+ ifeq (${RESET_TO_SP_MIN},1)
+ $(error "RESET_TO_SP_MIN requires functionality from the dynamic \
+ translation library and is incompatible with \
+ ALLOW_RO_XLAT_TABLES.")
+ endif
+else # if AArch64
+ ifeq (${PLAT},tegra)
+ $(error "Tegra requires functionality from the dynamic translation \
+ library and is incompatible with ALLOW_RO_XLAT_TABLES.")
+ endif
+ ifeq (${RESET_TO_BL31},1)
+ $(error "RESET_TO_BL31 requires functionality from the dynamic \
+ translation library and is incompatible with \
+ ALLOW_RO_XLAT_TABLES.")
+ endif
+ ifeq (${SPD},trusty)
+ $(error "Trusty requires functionality from the dynamic translation \
+ library and is incompatible with ALLOW_RO_XLAT_TABLES.")
+ endif
+ ifeq (${SPM_MM},1)
+ $(error "SPM_MM requires functionality to change memory region \
+ attributes, which is not possible once the translation tables \
+ have been made read-only.")
+ endif
+endif
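
Nothing in this new file runs unless the feature is requested: xlat_tables.mk (below) includes it only when ALLOW_RO_XLAT_TABLES=1, so on a platform that uses none of the conflicting options every guard falls through and the build proceeds, e.g. (illustrative invocation; the platform name is an assumption):

    make PLAT=fvp ALLOW_RO_XLAT_TABLES=1 all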
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
index c946315bf..bcc3e68d8 100644
--- a/lib/xlat_tables_v2/xlat_tables.mk
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -13,3 +13,7 @@ XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
XLAT_TABLES_LIB_V2 := 1
$(eval $(call add_define,XLAT_TABLES_LIB_V2))
+
+ifeq (${ALLOW_RO_XLAT_TABLES}, 1)
+ include lib/xlat_tables_v2/ro_xlat_tables.mk
+endif
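
The C code below keys off a PLAT_RO_XLAT_TABLES define rather than the make variable, so an opted-in platform is expected to bridge the two. A hedged sketch of such a hook (the exact location and image list are platform-specific; everything here other than ALLOW_RO_XLAT_TABLES and PLAT_RO_XLAT_TABLES is illustrative):

    # Sketch: surface the make-level option to the C preprocessor for the
    # images that will call the new API.
    ifeq (${ALLOW_RO_XLAT_TABLES}, 1)
    BL31_CPPFLAGS += -DPLAT_RO_XLAT_TABLES=1
    BL32_CPPFLAGS += -DPLAT_RO_XLAT_TABLES=1
    endif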
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
index f4b64b33f..adca57875 100644
--- a/lib/xlat_tables_v2/xlat_tables_context.c
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -1,9 +1,10 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <arch_helpers.h>
#include <assert.h>
#include <platform_def.h>
@@ -24,8 +25,14 @@ uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
* Allocate and initialise the default translation context for the BL image
* currently executing.
*/
+#if PLAT_RO_XLAT_TABLES
+REGISTER_XLAT_CONTEXT_RO_BASE_TABLE(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
+ PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
+ EL_REGIME_INVALID, "xlat_table");
+#else
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+#endif
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
unsigned int attr)
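
Compared to REGISTER_XLAT_CONTEXT, the RO variant takes a translation regime and a section name ("xlat_table"), which suggests its job: tag the statically allocated tables with a dedicated linker section so that xlat_make_tables_readonly() (added below) can later remap one well-defined region. A conceptual sketch of the kind of allocation implied, not the real macro expansion (attribute spellings are illustrative; the XLAT_TABLE_* macros come from the library headers and MAX_XLAT_TABLES from platform_def.h):

    #include <stdint.h>
    #include <platform_def.h>
    #include <lib/xlat_tables/xlat_tables_defs.h>

    /* Conceptual only: group the context's translation tables in a named
     * section that the linker script can place and the new API can remap. */
    static uint64_t tf_xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
            __attribute__((aligned(XLAT_TABLE_SIZE), section("xlat_table")));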
@@ -119,6 +126,75 @@ int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
}
+#if PLAT_RO_XLAT_TABLES
+/*
+ * Change the memory attributes of the descriptors which resolve the address
+ * range that belongs to the translation tables themselves, which are by default
+ * mapped as part of read-write data in the BL image's memory.
+ *
+ * Since the translation tables map themselves via these level 3 (page)
+ * descriptors, any change applied to them with the MMU on would introduce a
+ * chicken and egg problem because of the break-before-make sequence.
+ * Eventually, it would reach the descriptor that resolves the very table it
+ * belongs to, and invalidating that descriptor (the break step) would make the
+ * subsequent write to it (the make step) generate an MMU fault. Therefore, the
+ * MMU is disabled before making the change.
+ *
+ * No assumption is made about which data this function needs; therefore, all
+ * the caches are flushed to ensure coherency. A future optimization would be
+ * to flush only the required data to main memory.
+ */
+int xlat_make_tables_readonly(void)
+{
+ assert(tf_xlat_ctx.initialized == true);
+#ifdef __aarch64__
+ if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
+ disable_mmu_el1();
+ } else if (tf_xlat_ctx.xlat_regime == EL3_REGIME) {
+ disable_mmu_el3();
+ } else {
+ assert(tf_xlat_ctx.xlat_regime == EL2_REGIME);
+ return -1;
+ }
+
+ /* Flush all caches. */
+ dcsw_op_all(DCCISW);
+#else /* !__aarch64__ */
+ assert(tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME);
+ /*
+ * On AArch32, we flush the caches before disabling the MMU. The reason
+ * for this is that the dcsw_op_all AArch32 function pushes some
+ * registers onto the stack under the assumption that it is writing to
+ * cache, which is not true with the MMU off. This would result in the
+ * stack becoming corrupted and a wrong/junk value for the LR being
+ * restored at the end of the routine.
+ */
+ dcsw_op_all(DC_OP_CISW);
+ disable_mmu_secure();
+#endif
+
+ int rc = xlat_change_mem_attributes_ctx(&tf_xlat_ctx,
+ (uintptr_t)tf_xlat_ctx.tables,
+ tf_xlat_ctx.tables_num * XLAT_TABLE_SIZE,
+ MT_RO_DATA | MT_SECURE);
+
+#ifdef __aarch64__
+ if (tf_xlat_ctx.xlat_regime == EL1_EL0_REGIME) {
+ enable_mmu_el1(0U);
+ } else {
+ assert(tf_xlat_ctx.xlat_regime == EL3_REGIME);
+ enable_mmu_el3(0U);
+ }
+#else /* !__aarch64__ */
+ enable_mmu_svc_mon(0U);
+#endif
+
+ if (rc == 0) {
+ tf_xlat_ctx.readonly_tables = true;
+ }
+
+ return rc;
+}
+#endif /* PLAT_RO_XLAT_TABLES */
+
/*
* If dynamic allocation of new regions is disabled then by the time we call the
* function enabling the MMU, we'll have registered all the memory regions to
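
The header-side declaration of xlat_make_tables_readonly() lives outside lib/ and so is not shown in this view, which is limited to 'lib'. A minimal sketch of how a BL image might use the new API once its memory map is final (the hook name and error handling are illustrative and not part of this patch; only xlat_make_tables_readonly() itself is):

    #include <assert.h>
    #include <lib/xlat_tables/xlat_tables_v2.h>

    void bl_runtime_setup(void)         /* hypothetical call site */
    {
    #if PLAT_RO_XLAT_TABLES
            /* All regions are registered and the MMU is enabled; after this
             * call, attribute changes and dynamic mappings are impossible. */
            int rc = xlat_make_tables_readonly();

            assert(rc == 0);
            (void)rc;   /* silence unused-variable warning in release builds */
    #endif
    }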