From fdb1964c34968921379d3592e7ac6e9a685dbab1 Mon Sep 17 00:00:00 2001
From: Sandrine Bailleux
Date: Thu, 28 Sep 2017 21:58:12 +0100
Subject: xlat: Introduce MAP_REGION2() macro

The current implementation of the memory mapping API favours mapping
memory regions using the biggest possible block size in order to
reduce the number of translation tables needed.

In some cases, this behaviour might not be desirable. When translation
tables are edited at run-time, such coarse-grain mappings might need
to be split into finer-grain tables. This operation has a performance
cost.

The MAP_REGION2() macro allows the caller to specify the granularity
of the translation tables used for the initial mapping of a memory
region. This might increase performance for memory regions that are
likely to be edited in the future, at the expense of a potentially
increased memory footprint.

The Translation Tables Library Design Guide has been updated to
explain the use case for this macro. A few intermediate titles have
also been added to make the guide easier to digest.

Change-Id: I04de9302e0ee3d326b8877043a9f638766b81b7b
Co-authored-by: Sandrine Bailleux
Co-authored-by: Antonio Nino Diaz
Signed-off-by: Antonio Nino Diaz
---
 lib/xlat_tables_v2/xlat_tables_internal.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

(limited to 'lib')

diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index da658b114..feca964b1 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -417,7 +417,8 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		 * descriptors. If not, create a table instead.
		 */
		if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
-		    (level < MIN_LVL_BLOCK_DESC))
+		    (level < MIN_LVL_BLOCK_DESC) ||
+		    (mm->granularity < XLAT_BLOCK_SIZE(level)))
			return ACTION_CREATE_NEW_TABLE;
		else
			return ACTION_WRITE_BLOCK_ENTRY;
@@ -590,9 +591,10 @@ void print_mmap(mmap_region_t *const mmap)
	mmap_region_t *mm = mmap;

	while (mm->size) {
-		tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
+		tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x",
		       (void *)mm->base_va, mm->base_pa,
		       mm->size, mm->attr);
+		tf_printf(" granularity:0x%zx\n", mm->granularity);
		++mm;
	};
	tf_printf("\n");
@@ -613,7 +615,7 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
	unsigned long long base_pa = mm->base_pa;
	uintptr_t base_va = mm->base_va;
	size_t size = mm->size;
-	mmap_attr_t attr = mm->attr;
+	size_t granularity = mm->granularity;

	unsigned long long end_pa = base_pa + size - 1;
	uintptr_t end_va = base_va + size - 1;
@@ -622,6 +624,12 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
	    !IS_PAGE_ALIGNED(size))
		return -EINVAL;

+	if ((granularity != XLAT_BLOCK_SIZE(1)) &&
+	    (granularity != XLAT_BLOCK_SIZE(2)) &&
+	    (granularity != XLAT_BLOCK_SIZE(3))) {
+		return -EINVAL;
+	}
+
	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;
@@ -663,11 +671,9 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
	if (fully_overlapped_va) {

 #if PLAT_XLAT_TABLES_DYNAMIC
-		if ((attr & MT_DYNAMIC) ||
+		if ((mm->attr & MT_DYNAMIC) ||
		    (mm_cursor->attr & MT_DYNAMIC))
			return -EPERM;
-#else
-		(void)attr;
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */

		if ((mm_cursor->base_va - mm_cursor->base_pa) !=
		    (base_va - base_pa))
-- cgit v1.2.3
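
The following sketch shows how a platform port might use the new macro once
this patch is applied. The addresses, sizes and attributes are illustrative
placeholders, not values taken from this series; only the parameter order
(PA, VA, size, attributes, granularity) follows the MAP_REGION2() definition
introduced here. Note that mmap_add_region_check() above only accepts a
granularity equal to one of the XLAT_BLOCK_SIZE(1/2/3) values, and PAGE_SIZE
corresponds to the finest of them.

#include <xlat_tables_v2.h>

/* Hypothetical platform memory map (placeholder addresses and sizes). */
static const mmap_region_t plat_mmap[] = {
	/* Default behaviour: the library picks the biggest block size. */
	MAP_REGION_FLAT(0x1c000000ULL, 0x00100000U,
			MT_DEVICE | MT_RW | MT_SECURE),
	/*
	 * This region is expected to be remapped at run time, so ask for
	 * 4 KiB pages up front and avoid splitting a block later.
	 */
	MAP_REGION2(0x80000000ULL, 0x80000000ULL, 0x00200000U,
		    MT_MEMORY | MT_RW | MT_SECURE, PAGE_SIZE),
	{0}
};
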
From f301da44fa233bbde2e457b64aa7903623c53586 Mon Sep 17 00:00:00 2001
From: Sandrine Bailleux
Date: Tue, 25 Apr 2017 14:09:47 +0100
Subject: xlat: Always compile TLB invalidation functions

TLB invalidation functions used to be conditionally compiled in: they
were enabled only when using the dynamic mapping feature, because only
then would we need to modify page tables on the fly.

Actually, there are other use cases where invalidating TLBs is
required, for example when changing memory attributes in existing
translation descriptors. These other use cases do not necessarily
depend on the dynamic mapping feature.

This patch removes this dependency and always compiles the TLB
invalidation functions in. If they are not called, they will be
removed from the binary at link time anyway, so there is no impact on
the memory footprint.

Change-Id: I1c33764ae900eb00073ee23b7d0d53d4efa4dd21
Signed-off-by: Sandrine Bailleux
---
 lib/xlat_tables_v2/aarch64/xlat_tables_arch.c | 5 -----
 lib/xlat_tables_v2/xlat_tables_private.h      | 4 ++--
 2 files changed, 2 insertions(+), 7 deletions(-)

(limited to 'lib')

diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 760db928b..81e035beb 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -10,7 +10,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -82,8 +81,6 @@ int is_mmu_enabled(void)
 #endif
 }

-#if PLAT_XLAT_TABLES_DYNAMIC
-
 void xlat_arch_tlbi_va(uintptr_t va)
 {
	/*
@@ -124,8 +121,6 @@ void xlat_arch_tlbi_va_sync(void)
	isb();
 }

-#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-
 int xlat_arch_current_el(void)
 {
	int el = GET_EL(read_CurrentEl());
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index d352583c9..fbd9578a7 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -34,6 +34,8 @@ typedef enum {
	MT_DYNAMIC = 1 << MT_DYN_SHIFT
 } mmap_priv_attr_t;

+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
 /*
  * Function used to invalidate all levels of the translation walk for a given
  * virtual address. It must be called for every translation table entry that is
@@ -47,8 +49,6 @@ void xlat_arch_tlbi_va(uintptr_t va);
  */
 void xlat_arch_tlbi_va_sync(void);

-#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-
 /* Print VA, PA, size and attributes of all regions in the mmap array. */
 void print_mmap(mmap_region_t *const mmap);
-- cgit v1.2.3
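
The calling convention described in the xlat_tables_private.h comments (one
invalidation per modified entry, one synchronization for the whole batch)
would look roughly like this. The helper below is hypothetical and only
illustrates the pattern; xlat_arch_tlbi_va() and xlat_arch_tlbi_va_sync()
are the library functions made unconditionally available by this patch.

/* Sketch: make a run of already-mapped pages read-only in place. */
static void make_pages_read_only(uint64_t *table, uintptr_t base_va,
				 unsigned int first, unsigned int count)
{
	for (unsigned int i = first; i < first + count; i++) {
		/* Set AP[2] in the page descriptor to make it read-only. */
		table[i] |= LOWER_ATTRS(AP_RO);
		/* Invalidate stale TLB entries for this page. */
		xlat_arch_tlbi_va(base_va + i * PAGE_SIZE);
	}

	/* Wait once for all the invalidations to complete. */
	xlat_arch_tlbi_va_sync();
}
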
From b4ae615bd734104cfed5d2534b4c14278415057e Mon Sep 17 00:00:00 2001
From: Douglas Raillard
Date: Mon, 25 Sep 2017 15:23:22 +0100
Subject: xlat: Introduce function xlat_arch_tlbi_va_regime()

Introduce a variant of the TLB invalidation helper function that
allows the targeted translation regime to be specified, rather than
always defaulting to the current one.

This new function is useful in the context of EL3 software managing
translation tables for the S-EL1&0 translation regime, as it might
then need to invalidate S-EL1&0 TLB entries rather than EL3 ones.

Define a new enumeration to be able to represent translation regimes
in the xlat tables library.

Change-Id: Ibe4438dbea2d7a6e7470bfb68ff805d8bf6b07e5
Co-authored-by: Sandrine Bailleux
Co-authored-by: Douglas Raillard
Co-authored-by: Antonio Nino Diaz
Signed-off-by: Antonio Nino Diaz
---
 lib/xlat_tables_v2/aarch32/xlat_tables_arch.c | 11 +++++++++
 lib/xlat_tables_v2/aarch64/xlat_tables_arch.c | 33 +++++++++++++++++++++------
 lib/xlat_tables_v2/xlat_tables_private.h      | 16 ++++++++++---
 3 files changed, 50 insertions(+), 10 deletions(-)

(limited to 'lib')

diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index e66b92751..30ad91e1a 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -40,6 +40,17 @@ void xlat_arch_tlbi_va(uintptr_t va)
	tlbimvaais(TLBI_ADDR(va));
 }

+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime __unused)
+{
+	/*
+	 * Ensure the translation table write has drained into memory before
+	 * invalidating the TLB entry.
+	 */
+	dsbishst();
+
+	tlbimvaais(TLBI_ADDR(va));
+}
+
 void xlat_arch_tlbi_va_sync(void)
 {
	/* Invalidate all entries from branch predictors. */
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 81e035beb..06bd49785 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -82,6 +82,17 @@ int is_mmu_enabled(void)
 }

 void xlat_arch_tlbi_va(uintptr_t va)
+{
+#if IMAGE_EL == 1
+	assert(IS_IN_EL(1));
+	xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);
+#elif IMAGE_EL == 3
+	assert(IS_IN_EL(3));
+	xlat_arch_tlbi_va_regime(va, EL3_REGIME);
+#endif
+}
+
+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime)
 {
	/*
	 * Ensure the translation table write has drained into memory before
@@ -89,13 +100,21 @@ void xlat_arch_tlbi_va(uintptr_t va)
	 */
	dsbishst();

-#if IMAGE_EL == 1
-	assert(IS_IN_EL(1));
-	tlbivaae1is(TLBI_ADDR(va));
-#elif IMAGE_EL == 3
-	assert(IS_IN_EL(3));
-	tlbivae3is(TLBI_ADDR(va));
-#endif
+	/*
+	 * This function only supports invalidation of TLB entries for the
+	 * EL3 and EL1&0 translation regimes.
+	 *
+	 * Also, it is architecturally UNDEFINED to invalidate TLBs of a
+	 * higher exception level (see section D4.9.2 of the ARM ARM rev B.a).
+	 */
+	if (xlat_regime == EL1_EL0_REGIME) {
+		assert(xlat_arch_current_el() >= 1);
+		tlbivaae1is(TLBI_ADDR(va));
+	} else {
+		assert(xlat_regime == EL3_REGIME);
+		assert(xlat_arch_current_el() >= 3);
+		tlbivae3is(TLBI_ADDR(va));
+	}
 }

 void xlat_arch_tlbi_va_sync(void)
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index fbd9578a7..2730ab6c2 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -37,11 +37,21 @@ typedef enum {
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */

 /*
- * Function used to invalidate all levels of the translation walk for a given
- * virtual address. It must be called for every translation table entry that is
- * modified.
+ * Invalidate all TLB entries that match the given virtual address. This
+ * operation applies to all PEs in the same Inner Shareable domain as the PE
+ * that executes this function. This function must be called for every
+ * translation table entry that is modified.
+ *
+ * xlat_arch_tlbi_va() applies the invalidation to the exception level of the
+ * current translation regime, whereas xlat_arch_tlbi_va_regime() applies it
+ * to the given translation regime.
+ *
+ * Note, however, that it is architecturally UNDEFINED to invalidate TLB
+ * entries pertaining to a higher exception level, e.g. invalidating EL3
+ * entries from S-EL1.
  */
 void xlat_arch_tlbi_va(uintptr_t va);
+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime);

 /*
  * This function has to be called at the end of any code that uses the function
-- cgit v1.2.3
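
A sketch of the use case called out in the commit message: EL3 firmware
tearing down a page mapping that it manages on behalf of S-EL1&0. The table
variable and index are invented for illustration; the regime-aware
invalidation call is the one introduced by this patch.

/* Hypothetical EL3 helper unmapping one S-EL1&0 page. */
static void sel1_unmap_page(uint64_t *sel1_table, unsigned int idx,
			    uintptr_t va)
{
	/* Remove the descriptor from the EL3-owned S-EL1&0 tables. */
	sel1_table[idx] = INVALID_DESC;

	/* Invalidate the EL1&0 regime's TLB entries, not EL3's own. */
	xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);
	xlat_arch_tlbi_va_sync();
}
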
From 609c91917f95e5c2c0dcccbfbea6ff32539bf738 Mon Sep 17 00:00:00 2001
From: Antonio Nino Diaz
Date: Wed, 4 Oct 2017 16:52:15 +0100
Subject: xlat: Add support for EL0 and EL1 mappings

This patch introduces the ability of the xlat tables library to manage
EL0 and EL1 mappings from a higher exception level.

The attributes MT_USER and MT_PRIVILEGED have been added to allow the
user to specify the target EL in the EL1&0 translation regime.

The REGISTER_XLAT_CONTEXT2 macro is introduced to allow creating an
xlat_ctx_t that targets a given translation regime (EL1&0 or EL3).

A new member is added to xlat_ctx_t to represent the translation
regime that the context manages. The execute_never mask member is
removed, as it can be computed from existing information.

Change-Id: I95e14abc3371d7a6d6a358cc54c688aa9975c110
Co-authored-by: Douglas Raillard
Co-authored-by: Sandrine Bailleux
Co-authored-by: Antonio Nino Diaz
Signed-off-by: Antonio Nino Diaz
---
 lib/xlat_tables_v2/aarch32/xlat_tables_arch.c      |   7 +-
 .../aarch32/xlat_tables_arch_private.h             |  22 ++++
 lib/xlat_tables_v2/aarch64/xlat_tables_arch.c      |  34 ++----
 .../aarch64/xlat_tables_arch_private.h             |  28 +++++
 lib/xlat_tables_v2/xlat_tables.mk                  |   2 +
 lib/xlat_tables_v2/xlat_tables_internal.c          | 135 ++++++++++++++++-----
 lib/xlat_tables_v2/xlat_tables_private.h           |  14 +--
 7 files changed, 171 insertions(+), 71 deletions(-)
 create mode 100644 lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
 create mode 100644 lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h

(limited to 'lib')

diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index 30ad91e1a..cbc868504 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -22,7 +22,7 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
 }
 #endif /* ENABLE_ASSERTIONS*/

-int is_mmu_enabled(void)
+int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
 {
	return (read_sctlr() & SCTLR_M_BIT) != 0;
 }
@@ -88,11 +88,6 @@ int xlat_arch_current_el(void)
	return 3;
 }

-uint64_t xlat_arch_get_xn_desc(int el __unused)
-{
-	return UPPER_ATTRS(XN);
-}
-
 /*******************************************************************************
  * Function for enabling the MMU in Secure PL1, assuming that the page tables
  * have already been created.
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
new file mode 100644
index 000000000..509395d85
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
+#define __XLAT_TABLES_ARCH_PRIVATE_H__
+
+#include
+#include
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch at the
+ * given translation regime.
+ */
+static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime __unused)
+{
+	return UPPER_ATTRS(XN);
+}
+
+#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 06bd49785..eda38d341 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -16,12 +16,6 @@
 #include
 #include "../xlat_tables_private.h"

-#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
-# define IMAGE_EL	3
-#else
-# define IMAGE_EL	1
-#endif
-
 static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
 {
@@ -70,17 +64,19 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
 }
 #endif /* ENABLE_ASSERTIONS*/

-int is_mmu_enabled(void)
+int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
 {
-#if IMAGE_EL == 1
-	assert(IS_IN_EL(1));
-	return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
-#elif IMAGE_EL == 3
-	assert(IS_IN_EL(3));
-	return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
-#endif
+	if (ctx->xlat_regime == EL1_EL0_REGIME) {
+		assert(xlat_arch_current_el() >= 1);
+		return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
+	} else {
+		assert(ctx->xlat_regime == EL3_REGIME);
+		assert(xlat_arch_current_el() >= 3);
+		return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
+	}
 }
+
 void xlat_arch_tlbi_va(uintptr_t va)
 {
 #if IMAGE_EL == 1
@@ -149,16 +145,6 @@ int xlat_arch_current_el(void)
	return el;
 }

-uint64_t xlat_arch_get_xn_desc(int el)
-{
-	if (el == 3) {
-		return UPPER_ATTRS(XN);
-	} else {
-		assert(el == 1);
-		return UPPER_ATTRS(PXN);
-	}
-}
-
 /*******************************************************************************
  * Macro generating the code for the function enabling the MMU in the given
  * exception level, assuming that the pagetables have already been created.
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
new file mode 100644
index 000000000..d201590a4
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
+#define __XLAT_TABLES_ARCH_PRIVATE_H__
+
+#include
+#include
+#include
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch at all
+ * ELs that are part of the given translation regime.
+ */
+static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime)
+{
+	if (regime == EL1_EL0_REGIME) {
+		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+	} else {
+		assert(regime == EL3_REGIME);
+		return UPPER_ATTRS(XN);
+	}
+}
+
+#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
index b94ce5d07..06dd844a2 100644
--- a/lib/xlat_tables_v2/xlat_tables.mk
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -7,3 +7,5 @@
 XLAT_TABLES_LIB_SRCS	:=	$(addprefix lib/xlat_tables_v2/,	\
				${ARCH}/xlat_tables_arch.c		\
				xlat_tables_internal.c)
+
+INCLUDES		+=	-Ilib/xlat_tables_v2/${ARCH}
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index feca964b1..9faeb7eff 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -14,7 +14,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
@@ -112,9 +112,11 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)

 #endif /* PLAT_XLAT_TABLES_DYNAMIC */

-/* Returns a block/page table descriptor for the given level and attributes. */
-static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
-			  int level, uint64_t execute_never_mask)
+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, mmap_attr_t attr,
+		   unsigned long long addr_pa, int level)
 {
	uint64_t desc;
	int mem_type;
@@ -133,9 +135,28 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, mmap_attr_t attr,
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
+	desc |= LOWER_ATTRS(ACCESS_FLAG);
+
	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
-	desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+	/*
+	 * Do not allow unprivileged access when the mapping is for a
+	 * privileged EL. For translation regimes that do not have mappings
+	 * for access from lower exception levels, set AP[2] to
+	 * AP_NO_ACCESS_UNPRIVILEGED.
+	 */
+	if (ctx->xlat_regime == EL1_EL0_REGIME) {
+		if (attr & MT_USER) {
+			/* EL0 mapping requested, so we give User access */
+			desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
+		} else {
+			/* EL1 mapping requested, no User access granted */
+			desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
+		}
+	} else {
+		assert(ctx->xlat_regime == EL3_REGIME);
+		desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
+	}

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory region attributes.
@@ -156,7 +177,7 @@ static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
-		desc |= execute_never_mask;
+		desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
	} else { /* Normal memory */
		/*
@@ -171,10 +192,13 @@ static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
-		 * attribute to figure out the value of the XN bit.
+		 * attribute to figure out the value of the XN bit. The actual
+		 * XN bit(s) to set in the descriptor depends on the context's
+		 * translation regime and the policy applied in
+		 * xlat_arch_regime_get_xn_desc().
		 */
		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
-			desc |= execute_never_mask;
+			desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
		}

		if (mem_type == MT_MEMORY) {
@@ -314,7 +338,7 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] = INVALID_DESC;
-			xlat_arch_tlbi_va(table_idx_va);
+			xlat_arch_tlbi_va_regime(table_idx_va, ctx->xlat_regime);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {
@@ -330,7 +354,8 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
-				xlat_arch_tlbi_va(table_idx_va);
+				xlat_arch_tlbi_va_regime(table_idx_va,
+						ctx->xlat_regime);
			}

		} else {
@@ -536,8 +561,7 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,

		if (action == ACTION_WRITE_BLOCK_ENTRY) {
			table_base[table_idx] =
-				xlat_desc(mm->attr, table_idx_pa, level,
-					  ctx->execute_never_mask);
+				xlat_desc(ctx, mm->attr, table_idx_pa, level);

		} else if (action == ACTION_CREATE_NEW_TABLE) {
@@ -882,9 +906,8 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
				.size = end_va - mm->base_va,
				.attr = 0
			};
-			xlat_tables_unmap_region(ctx,
-				&unmap_mm, 0, ctx->base_table,
-				ctx->base_table_entries, ctx->base_level);
+			xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
+						 ctx->base_table_entries, ctx->base_level);
			return -ENOMEM;
		}
@@ -999,9 +1022,10 @@ int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE

 /* Print the attributes of the specified block descriptor. */
-static void xlat_desc_print(uint64_t desc, uint64_t execute_never_mask)
+static void xlat_desc_print(xlat_ctx_t *ctx, uint64_t desc)
 {
	int mem_type_index = ATTR_INDEX_GET(desc);
+	xlat_regime_t xlat_regime = ctx->xlat_regime;

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		tf_printf("MEM");
@@ -1012,9 +1036,49 @@ static void xlat_desc_print(xlat_ctx_t *ctx, uint64_t desc)
		tf_printf("DEV");
	}

-	tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
+	const char *priv_str = "(PRIV)";
+	const char *user_str = "(USER)";
+
+	/*
+	 * Showing Privileged vs Unprivileged only makes sense for EL1&0
+	 * mappings.
+	 */
+	const char *ro_str = "-RO";
+	const char *rw_str = "-RW";
+	const char *no_access_str = "-NOACCESS";
+
+	if (xlat_regime == EL3_REGIME) {
+		/* For EL3, the AP[2] bit is all that matters */
+		tf_printf((desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str);
+	} else {
+		const char *ap_str = (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str;
+		tf_printf(ap_str);
+		tf_printf(priv_str);
+		/*
+		 * EL0 can only have the same permissions as EL1 or no
+		 * permissions at all.
+		 */
+		tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
+			  ? ap_str : no_access_str);
+		tf_printf(user_str);
+	}
+
+	const char *xn_str = "-XN";
+	const char *exec_str = "-EXEC";
+
+	if (xlat_regime == EL3_REGIME) {
+		/* For EL3, the XN bit is all that matters */
+		tf_printf(LOWER_ATTRS(XN) & desc ? xn_str : exec_str);
+	} else {
+		/* For EL0 and EL1, we need to know who has which rights */
+		tf_printf(LOWER_ATTRS(PXN) & desc ? xn_str : exec_str);
+		tf_printf(priv_str);
+
+		tf_printf(LOWER_ATTRS(UXN) & desc ? xn_str : exec_str);
+		tf_printf(user_str);
+	}
+
	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
-	tf_printf(execute_never_mask & desc ? "-XN" : "-EXEC");
"-XN" : "-EXEC"); } static const char * const level_spacers[] = { @@ -1031,9 +1095,10 @@ static const char *invalid_descriptors_ommited = * Recursive function that reads the translation tables passed as an argument * and prints their status. */ -static void xlat_tables_print_internal(const uintptr_t table_base_va, +static void xlat_tables_print_internal(xlat_ctx_t *ctx, + const uintptr_t table_base_va, uint64_t *const table_base, const int table_entries, - const unsigned int level, const uint64_t execute_never_mask) + const unsigned int level) { assert(level <= XLAT_TABLE_LEVEL_MAX); @@ -1092,17 +1157,16 @@ static void xlat_tables_print_internal(const uintptr_t table_base_va, uintptr_t addr_inner = desc & TABLE_ADDR_MASK; - xlat_tables_print_internal(table_idx_va, + xlat_tables_print_internal(ctx, table_idx_va, (uint64_t *)addr_inner, - XLAT_TABLE_ENTRIES, level+1, - execute_never_mask); + XLAT_TABLE_ENTRIES, level + 1); } else { tf_printf("%sVA:%p PA:0x%llx size:0x%zx ", level_spacers[level], (void *)table_idx_va, (unsigned long long)(desc & TABLE_ADDR_MASK), level_size); - xlat_desc_print(desc, execute_never_mask); + xlat_desc_print(ctx, desc); tf_printf("\n"); } } @@ -1122,7 +1186,15 @@ static void xlat_tables_print_internal(const uintptr_t table_base_va, void xlat_tables_print(xlat_ctx_t *ctx) { #if LOG_LEVEL >= LOG_LEVEL_VERBOSE + const char *xlat_regime_str; + if (ctx->xlat_regime == EL1_EL0_REGIME) { + xlat_regime_str = "1&0"; + } else { + assert(ctx->xlat_regime == EL3_REGIME); + xlat_regime_str = "3"; + } VERBOSE("Translation tables state:\n"); + VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str); VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address); VERBOSE(" Max allowed VA: %p\n", (void *) ctx->va_max_address); VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa); @@ -1146,22 +1218,21 @@ void xlat_tables_print(xlat_ctx_t *ctx) used_page_tables, ctx->tables_num, ctx->tables_num - used_page_tables); - xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries, - ctx->base_level, ctx->execute_never_mask); + xlat_tables_print_internal(ctx, 0, ctx->base_table, + ctx->base_table_entries, ctx->base_level); #endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */ } void init_xlat_tables_ctx(xlat_ctx_t *ctx) { - mmap_region_t *mm = ctx->mmap; - - assert(!is_mmu_enabled()); + assert(ctx != NULL); assert(!ctx->initialized); + assert(ctx->xlat_regime == EL3_REGIME || ctx->xlat_regime == EL1_EL0_REGIME); + assert(!is_mmu_enabled_ctx(ctx)); - print_mmap(mm); + mmap_region_t *mm = ctx->mmap; - ctx->execute_never_mask = - xlat_arch_get_xn_desc(xlat_arch_current_el()); + print_mmap(mm); /* All tables must be zeroed before mapping any region. */ diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h index 2730ab6c2..79efbebba 100644 --- a/lib/xlat_tables_v2/xlat_tables_private.h +++ b/lib/xlat_tables_v2/xlat_tables_private.h @@ -75,13 +75,6 @@ void xlat_tables_print(xlat_ctx_t *ctx); /* Returns the current Exception Level. The returned EL must be 1 or higher. */ int xlat_arch_current_el(void); -/* - * Returns the bit mask that has to be ORed to the rest of a translation table - * descriptor so that execution of code is prohibited at the given Exception - * Level. - */ -uint64_t xlat_arch_get_xn_desc(int el); - /* * Return the maximum physical address supported by the hardware. * This value depends on the execution state (AArch32/AArch64). 
@@ -92,7 +85,10 @@ unsigned long long xlat_arch_get_max_supported_pa(void); void enable_mmu_arch(unsigned int flags, uint64_t *base_table, unsigned long long pa, uintptr_t max_va); -/* Return 1 if the MMU of this Exception Level is enabled, 0 otherwise. */ -int is_mmu_enabled(void); +/* + * Return 1 if the MMU of the translation regime managed by the given xlat_ctx_t + * is enabled, 0 otherwise. + */ +int is_mmu_enabled_ctx(const xlat_ctx_t *ctx); #endif /* __XLAT_TABLES_PRIVATE_H__ */ -- cgit v1.2.3