Diffstat (limited to 'lib')
-rw-r--r--  lib/el3_runtime/aarch32/context_mgmt.c | 15
-rw-r--r--  lib/el3_runtime/aarch64/context.S      | 45
-rw-r--r--  lib/el3_runtime/aarch64/context_mgmt.c | 43
-rw-r--r--  lib/extensions/spe/spe.c               | 85
4 files changed, 127 insertions(+), 61 deletions(-)
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 3e7a5b733..a8672d6ca 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -125,6 +125,17 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
}
/*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ * When EL2 is implemented but unused, `el2_unused` is non-zero; otherwise
+ * it is zero.
+ ******************************************************************************/
+static void enable_extensions_nonsecure(int el2_unused)
+{
+#if IMAGE_BL32
+#endif
+}
+
+/*******************************************************************************
* The following function initializes the cpu_context for a CPU specified by
* its `cpu_idx` for first use, and sets the initial entrypoint state as
* specified by the entry_point_info structure.
@@ -161,6 +172,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
{
uint32_t hsctlr, scr;
cpu_context_t *ctx = cm_get_context(security_state);
+ int el2_unused = 0;
assert(ctx);
@@ -185,6 +197,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
isb();
} else if (read_id_pfr1() &
(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
+ el2_unused = 1;
+
/*
* Set the NS bit to access NS copies of certain banked
* registers
@@ -283,5 +297,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
write_scr(read_scr() & ~SCR_NS_BIT);
isb();
}
+ enable_extensions_nonsecure(el2_unused);
}
}
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index db16a9f0e..620ec16ff 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -9,7 +9,6 @@
#include <context.h>
.global el1_sysregs_context_save
- .global el1_sysregs_context_save_post_ops
.global el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
.global fpregs_context_save
@@ -90,9 +89,6 @@ func el1_sysregs_context_save
mrs x15, dacr32_el2
mrs x16, ifsr32_el2
stp x15, x16, [x0, #CTX_DACR32_EL2]
-
- mrs x17, fpexc32_el2
- str x17, [x0, #CTX_FP_FPEXC32_EL2]
#endif
/* Save NS timer registers if the build has instructed so */
@@ -115,36 +111,6 @@ endfunc el1_sysregs_context_save
/* -----------------------------------------------------
* The following function strictly follows the AArch64
* PCS to use x9-x17 (temporary caller-saved registers)
- * to do post operations after saving the EL1 system
- * register context.
- * -----------------------------------------------------
- */
-func el1_sysregs_context_save_post_ops
-#if ENABLE_SPE_FOR_LOWER_ELS
- /* Detect if SPE is implemented */
- mrs x9, id_aa64dfr0_el1
- ubfx x9, x9, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
- cmp x9, #0x1
- b.ne 1f
-
- /*
- * Before switching from normal world to secure world
- * the profiling buffers need to be drained out to memory. This is
- * required to avoid an invalid memory access when TTBR is switched
- * for entry to SEL1.
- */
- .arch armv8.2-a+profile
- psb csync
- dsb nsh
- .arch armv8-a
-1:
-#endif
- ret
-endfunc el1_sysregs_context_save_post_ops
-
-/* -----------------------------------------------------
- * The following function strictly follows the AArch64
- * PCS to use x9-x17 (temporary caller-saved registers)
* to restore EL1 system register context. It assumes
* that 'x0' is pointing to a 'el1_sys_regs' structure
* from where the register context will be restored
@@ -212,9 +178,6 @@ func el1_sysregs_context_restore
ldp x15, x16, [x0, #CTX_DACR32_EL2]
msr dacr32_el2, x15
msr ifsr32_el2, x16
-
- ldr x17, [x0, #CTX_FP_FPEXC32_EL2]
- msr fpexc32_el2, x17
#endif
/* Restore NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
@@ -275,6 +238,10 @@ func fpregs_context_save
mrs x10, fpcr
str x10, [x0, #CTX_FP_FPCR]
+#if CTX_INCLUDE_AARCH32_REGS
+ mrs x11, fpexc32_el2
+ str x11, [x0, #CTX_FP_FPEXC32_EL2]
+#endif
ret
endfunc fpregs_context_save
@@ -318,6 +285,10 @@ func fpregs_context_restore
ldr x10, [x0, #CTX_FP_FPCR]
msr fpcr, x10
+#if CTX_INCLUDE_AARCH32_REGS
+ ldr x11, [x0, #CTX_FP_FPEXC32_EL2]
+ msr fpexc32_el2, x11
+#endif
/*
* No explicit ISB required here as ERET to
* switch to secure EL1 or non-secure world
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index c8232df96..8f1523f0d 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -15,6 +15,7 @@
#include <platform_def.h>
#include <pubsub_events.h>
#include <smcc_helpers.h>
+#include <spe.h>
#include <string.h>
#include <utils.h>
@@ -209,6 +210,20 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
}
/*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ * When EL2 is implemented but unused, `el2_unused` is non-zero; otherwise
+ * it is zero.
+ ******************************************************************************/
+static void enable_extensions_nonsecure(int el2_unused)
+{
+#if IMAGE_BL31
+#if ENABLE_SPE_FOR_LOWER_ELS
+ spe_enable(el2_unused);
+#endif
+#endif
+}
+
+/*******************************************************************************
* The following function initializes the cpu_context for a CPU specified by
* its `cpu_idx` for first use, and sets the initial entrypoint state as
* specified by the entry_point_info structure.
@@ -245,6 +260,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
{
uint32_t sctlr_elx, scr_el3, mdcr_el2;
cpu_context_t *ctx = cm_get_context(security_state);
+ int el2_unused = 0;
assert(ctx);
@@ -258,6 +274,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
sctlr_elx |= SCTLR_EL2_RES1;
write_sctlr_el2(sctlr_elx);
} else if (EL_IMPLEMENTED(2)) {
+ el2_unused = 1;
+
/*
* EL2 present but unused, need to disable safely.
* SCTLR_EL2 can be ignored in this case.
@@ -340,13 +358,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
* relying on hw. Some fields are architecturally
* UNKNOWN on reset.
*
- * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
- * profiling controls to EL2.
- *
- * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in non-secure
- * state. Accesses to profiling buffer controls at
- * non-secure EL1 are not trapped to EL2.
- *
* MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
* EL1 System register accesses to the Debug ROM
* registers are not trapped to EL2.
@@ -383,22 +394,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
| MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT
| MDCR_EL2_TPMCR_BIT));
-#if ENABLE_SPE_FOR_LOWER_ELS
- uint64_t id_aa64dfr0_el1;
-
- /* Detect if SPE is implemented */
- id_aa64dfr0_el1 = read_id_aa64dfr0_el1() >>
- ID_AA64DFR0_PMS_SHIFT;
- if ((id_aa64dfr0_el1 & ID_AA64DFR0_PMS_MASK) == 1) {
- /*
- * Make sure traps to EL2 are not generated if
- * EL2 is implemented but not used.
- */
- mdcr_el2 &= ~MDCR_EL2_TPMS;
- mdcr_el2 |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
- }
-#endif
-
write_mdcr_el2(mdcr_el2);
/*
@@ -420,6 +415,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
~(CNTHP_CTL_ENABLE_BIT));
}
+ enable_extensions_nonsecure(el2_unused);
}
cm_el1_sysregs_context_restore(security_state);
@@ -439,7 +435,6 @@ void cm_el1_sysregs_context_save(uint32_t security_state)
assert(ctx);
el1_sysregs_context_save(get_sysregs_ctx(ctx));
- el1_sysregs_context_save_post_ops();
#if IMAGE_BL31
if (security_state == SECURE)
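
Note: the diffstat above is limited to lib/, so the new <spe.h> header pulled in by this file does not appear in the diff. Below is a minimal sketch of what it is expected to declare, based on the functions defined in lib/extensions/spe/spe.c later in this patch; the header path and guard name are assumptions, not taken from the patch itself.

/* Sketch of include/lib/extensions/spe.h -- not part of this diff. */
#ifndef __SPE_H__
#define __SPE_H__

void spe_enable(int el2_unused);
void spe_disable(void);

#endif /* __SPE_H__ */
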
diff --git a/lib/extensions/spe/spe.c b/lib/extensions/spe/spe.c
new file mode 100644
index 000000000..3b297f21a
--- /dev/null
+++ b/lib/extensions/spe/spe.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <pubsub.h>
+
+/*
+ * The assembler does not yet understand the psb csync mnemonic
+ * so use the equivalent hint instruction.
+ */
+#define psb_csync() asm volatile("hint #17")
+
+void spe_enable(int el2_unused)
+{
+ uint64_t features;
+
+ features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+ if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+ uint64_t v;
+
+ if (el2_unused) {
+ /*
+ * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
+ * profiling controls to EL2.
+ *
+ * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in Non-secure
+ * state. Accesses to profiling buffer controls at
+ * Non-secure EL1 are not trapped to EL2.
+ */
+ v = read_mdcr_el2();
+ v &= ~MDCR_EL2_TPMS;
+ v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
+ write_mdcr_el2(v);
+ }
+
+ /*
+ * MDCR_EL3.NSPB (ARM v8.2): SPE enabled in Non-secure state
+ * and disabled in Secure state. Accesses to SPE registers at
+ * S-EL1 generate trap exceptions to EL3.
+ */
+ v = read_mdcr_el3();
+ v |= MDCR_NSPB(MDCR_NSPB_EL1);
+ write_mdcr_el3(v);
+ }
+}
+
+void spe_disable(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+ if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+ uint64_t v;
+
+ /* Drain buffered data */
+ psb_csync();
+ dsbnsh();
+
+ /* Disable profiling buffer */
+ v = read_pmblimitr_el1();
+ v &= ~(1ULL << 0);
+ write_pmblimitr_el1(v);
+ isb();
+ }
+}
+
+static void *spe_drain_buffers_hook(const void *arg)
+{
+ uint64_t features;
+
+ features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+ if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+ /* Drain buffered data */
+ psb_csync();
+ dsbnsh();
+ }
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(cm_entering_secure_world, spe_drain_buffers_hook);
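
The SUBSCRIBE_TO_EVENT() registration above is what replaces the removed el1_sysregs_context_save_post_ops(): the profiling-buffer drain now runs whenever the BL31 context-management code publishes the cm_entering_secure_world event on the way into the Secure world, and PUBLISH_EVENT() then invokes every handler registered for that event, including spe_drain_buffers_hook(). The publisher side sits outside this diff; the snippet below is an illustrative sketch only, and the surrounding function is hypothetical.

/*
 * Illustrative publisher side -- not part of this diff. PUBLISH_EVENT()
 * calls each hook registered with SUBSCRIBE_TO_EVENT() for the named
 * event, which here includes spe_drain_buffers_hook() above.
 */
#include <pubsub_events.h>

static void example_enter_secure_world(void)
{
	/* ... EL1 context handling for the Secure world ... */

	/* Drains the SPE buffers via spe_drain_buffers_hook(). */
	PUBLISH_EVENT(cm_entering_secure_world);
}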