Diffstat (limited to 'lib')
-rw-r--r--   lib/xlat_tables_v2/aarch32/xlat_tables_arch.c   11
-rw-r--r--   lib/xlat_tables_v2/aarch64/xlat_tables_arch.c   33
-rw-r--r--   lib/xlat_tables_v2/xlat_tables_private.h        16
3 files changed, 50 insertions(+), 10 deletions(-)
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index e66b92751..30ad91e1a 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -40,6 +40,17 @@ void xlat_arch_tlbi_va(uintptr_t va)
 	tlbimvaais(TLBI_ADDR(va));
 }
 
+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime __unused)
+{
+	/*
+	 * Ensure the translation table write has drained into memory before
+	 * invalidating the TLB entry.
+	 */
+	dsbishst();
+
+	tlbimvaais(TLBI_ADDR(va));
+}
+
 void xlat_arch_tlbi_va_sync(void)
 {
 	/* Invalidate all entries from branch predictors. */
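A usage sketch, not part of the patch: on AArch32 the regime argument is ignored (hence __unused) because the library only manages one translation regime there, so a caller simply pairs the new helper with the existing sync primitive after rewriting a descriptor. The function remap_one_entry() and the include lines below are hypothetical; xlat_arch_tlbi_va_regime(), xlat_arch_tlbi_va_sync() and EL1_EL0_REGIME are symbols introduced or used by this change.

/*
 * Hypothetical caller: update one translation table descriptor, then
 * invalidate the stale TLB entry and wait for completion. The include
 * paths are illustrative; the helpers are declared in the library's
 * private header.
 */
#include <stdint.h>

#include "xlat_tables_private.h"

static void remap_one_entry(uint64_t *entry, uint64_t new_desc, uintptr_t va)
{
	/* Write the new descriptor to the translation table. */
	*entry = new_desc;

	/* Drains the table write (DSB ISHST) and issues the TLBI by VA. */
	xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);

	/* DSB and ISB so the invalidation completes before returning. */
	xlat_arch_tlbi_va_sync();
}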
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 81e035beb..06bd49785 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -83,19 +83,38 @@ int is_mmu_enabled(void)
 
 void xlat_arch_tlbi_va(uintptr_t va)
 {
+#if IMAGE_EL == 1
+	assert(IS_IN_EL(1));
+	xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);
+#elif IMAGE_EL == 3
+	assert(IS_IN_EL(3));
+	xlat_arch_tlbi_va_regime(va, EL3_REGIME);
+#endif
+}
+
+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime)
+{
 	/*
 	 * Ensure the translation table write has drained into memory before
 	 * invalidating the TLB entry.
 	 */
 	dsbishst();
 
-#if IMAGE_EL == 1
-	assert(IS_IN_EL(1));
-	tlbivaae1is(TLBI_ADDR(va));
-#elif IMAGE_EL == 3
-	assert(IS_IN_EL(3));
-	tlbivae3is(TLBI_ADDR(va));
-#endif
+	/*
+	 * This function only supports invalidation of TLB entries for the EL3
+	 * and EL1&0 translation regimes.
+	 *
+	 * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher
+	 * exception level (see section D4.9.2 of the ARM ARM rev B.a).
+	 */
+	if (xlat_regime == EL1_EL0_REGIME) {
+		assert(xlat_arch_current_el() >= 1);
+		tlbivaae1is(TLBI_ADDR(va));
+	} else {
+		assert(xlat_regime == EL3_REGIME);
+		assert(xlat_arch_current_el() >= 3);
+		tlbivae3is(TLBI_ADDR(va));
+	}
 }
 
 void xlat_arch_tlbi_va_sync(void)
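The asserts above use xlat_arch_current_el() >= rather than == because EL3 firmware may legitimately maintain and invalidate EL1&0 mappings on behalf of a lower exception level, while the reverse direction is the UNDEFINED case called out in the comment. Below is a hedged sketch of a runtime guard a caller might put in front of the helper; tlbi_va_checked() is hypothetical, while ERROR() and panic() are the usual TF-A debug primitives.

#include <stdint.h>

#include <debug.h>

#include "xlat_tables_private.h"

/* Hypothetical wrapper: reject UNDEFINED requests even in release builds. */
static void tlbi_va_checked(uintptr_t va, xlat_regime_t regime)
{
	if ((regime == EL3_REGIME) && (xlat_arch_current_el() < 3)) {
		ERROR("EL3 TLB invalidation requested from a lower EL\n");
		panic();
	}

	xlat_arch_tlbi_va_regime(va, regime);
}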
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index fbd9578a7..2730ab6c2 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -37,11 +37,21 @@ typedef enum {
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
 
 /*
- * Function used to invalidate all levels of the translation walk for a given
- * virtual address. It must be called for every translation table entry that is
- * modified.
+ * Invalidate all TLB entries that match the given virtual address. This
+ * operation applies to all PEs in the same Inner Shareable domain as the PE
+ * that executes this function. This function must be called for every
+ * translation table entry that is modified.
+ *
+ * xlat_arch_tlbi_va() applies the invalidation to the translation regime of
+ * the current exception level, whereas xlat_arch_tlbi_va_regime() applies it
+ * to the translation regime passed as an argument.
+ *
+ * Note, however, that it is architecturally UNDEFINED to invalidate TLB
+ * entries pertaining to a higher exception level, e.g. invalidating EL3
+ * entries from S-EL1.
  */
 void xlat_arch_tlbi_va(uintptr_t va);
+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime);
 
 /*
  * This function has to be called at the end of any code that uses the function
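Following the contract documented above, a sketch of the expected call pattern: one invalidation per modified page mapping, then a single xlat_arch_tlbi_va_sync() once the batch is complete, as the comment after the declarations requires. tlbi_va_range() is hypothetical; PAGE_SIZE is assumed to come from the xlat tables definitions header.

#include <stddef.h>
#include <stdint.h>

#include <xlat_tables_defs.h>

#include "xlat_tables_private.h"

/* Hypothetical batch pattern: invalidate a run of pages, sync once. */
static void tlbi_va_range(uintptr_t base, size_t pages, xlat_regime_t regime)
{
	for (size_t i = 0U; i < pages; i++) {
		xlat_arch_tlbi_va_regime(base + (i * PAGE_SIZE), regime);
	}

	/* One barrier/ISB after all invalidations have been issued. */
	xlat_arch_tlbi_va_sync();
}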