Diffstat (limited to 'lib/el3_runtime')
-rw-r--r--   lib/el3_runtime/aarch32/context_mgmt.c |   4
-rw-r--r--   lib/el3_runtime/aarch64/context.S      | 481
-rw-r--r--   lib/el3_runtime/aarch64/context_mgmt.c | 135
-rw-r--r--   lib/el3_runtime/aarch64/cpu_data.S     |   5
4 files changed, 590 insertions, 35 deletions
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 73d1e354d..2443001b8 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -17,8 +17,6 @@
 #include <lib/el3_runtime/context_mgmt.h>
 #include <lib/extensions/amu.h>
 #include <lib/utils.h>
-#include <plat/common/platform.h>
-#include <smccc_helpers.h>
 
 /*******************************************************************************
  * Context management library initialisation routine. This library is used by
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 9bd25bac9..75e214d9c 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -8,6 +8,12 @@
 #include <asm_macros.S>
 #include <assert_macros.S>
 #include <context.h>
+#include <el3_common_macros.S>
+
+#if CTX_INCLUDE_EL2_REGS
+	.global	el2_sysregs_context_save
+	.global	el2_sysregs_context_restore
+#endif
 
 	.global	el1_sysregs_context_save
 	.global	el1_sysregs_context_restore
@@ -17,8 +23,416 @@
 #endif
 	.global	save_gp_pmcr_pauth_regs
 	.global	restore_gp_pmcr_pauth_regs
+	.global	save_and_update_ptw_el1_sys_regs
 	.global	el3_exit
 
+#if CTX_INCLUDE_EL2_REGS
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to save EL2 system register context. It assumes that
+ * 'x0' is pointing to an 'el2_sys_regs' structure where
+ * the register context will be saved.
+ *
+ * The following registers are not saved:
+ * AMEVCNTVOFF0<n>_EL2
+ * AMEVCNTVOFF1<n>_EL2
+ * ICH_AP0R<n>_EL2
+ * ICH_AP1R<n>_EL2
+ * ICH_LR<n>_EL2
+ * -----------------------------------------------------
+ */
+func el2_sysregs_context_save
+	mrs	x9, actlr_el2
+	mrs	x10, afsr0_el2
+	stp	x9, x10, [x0, #CTX_ACTLR_EL2]
+
+	mrs	x11, afsr1_el2
+	mrs	x12, amair_el2
+	stp	x11, x12, [x0, #CTX_AFSR1_EL2]
+
+	mrs	x13, cnthctl_el2
+	mrs	x14, cnthp_ctl_el2
+	stp	x13, x14, [x0, #CTX_CNTHCTL_EL2]
+
+	mrs	x15, cnthp_cval_el2
+	mrs	x16, cnthp_tval_el2
+	stp	x15, x16, [x0, #CTX_CNTHP_CVAL_EL2]
+
+	mrs	x17, cntvoff_el2
+	mrs	x9, cptr_el2
+	stp	x17, x9, [x0, #CTX_CNTVOFF_EL2]
+
+	mrs	x11, elr_el2
+#if CTX_INCLUDE_AARCH32_REGS
+	mrs	x10, dbgvcr32_el2
+	stp	x10, x11, [x0, #CTX_DBGVCR32_EL2]
+#else
+	str	x11, [x0, #CTX_ELR_EL2]
+#endif
+
+	mrs	x14, esr_el2
+	mrs	x15, far_el2
+	stp	x14, x15, [x0, #CTX_ESR_EL2]
+
+	mrs	x16, hacr_el2
+	mrs	x17, hcr_el2
+	stp	x16, x17, [x0, #CTX_HACR_EL2]
+
+	mrs	x9, hpfar_el2
+	mrs	x10, hstr_el2
+	stp	x9, x10, [x0, #CTX_HPFAR_EL2]
+
+	mrs	x11, ICC_SRE_EL2
+	mrs	x12, ICH_HCR_EL2
+	stp	x11, x12, [x0, #CTX_ICC_SRE_EL2]
+
+	mrs	x13, ICH_VMCR_EL2
+	mrs	x14, mair_el2
+	stp	x13, x14, [x0, #CTX_ICH_VMCR_EL2]
+
+	mrs	x15, mdcr_el2
+#if ENABLE_SPE_FOR_LOWER_ELS
+	mrs	x16, PMSCR_EL2
+	stp	x15, x16, [x0, #CTX_MDCR_EL2]
+#else
+	str	x15, [x0, #CTX_MDCR_EL2]
+#endif
+
+	mrs	x17, sctlr_el2
+	mrs	x9, spsr_el2
+	stp	x17, x9, [x0, #CTX_SCTLR_EL2]
+
+	mrs	x10, sp_el2
+	mrs	x11, tcr_el2
+	stp	x10, x11, [x0, #CTX_SP_EL2]
+
+	mrs	x12, tpidr_el2
+	mrs	x13, ttbr0_el2
+	stp	x12, x13, [x0, #CTX_TPIDR_EL2]
+
+	mrs	x14, vbar_el2
+	mrs	x15, vmpidr_el2
+	stp	x14, x15, [x0, #CTX_VBAR_EL2]
+
+	mrs	x16, vpidr_el2
+	mrs	x17, vtcr_el2
+	stp	x16, x17, [x0, #CTX_VPIDR_EL2]
+
+	mrs	x9, vttbr_el2
+	str	x9, [x0, #CTX_VTTBR_EL2]
+
+#if CTX_INCLUDE_MTE_REGS
+	mrs	x10, TFSR_EL2
+	str	x10, [x0, #CTX_TFSR_EL2]
+#endif
+
+#if ENABLE_MPAM_FOR_LOWER_ELS
+	mrs	x9, MPAM2_EL2
+	mrs	x10, MPAMHCR_EL2
+	stp	x9, x10, [x0, #CTX_MPAM2_EL2]
+
+	mrs	x11, MPAMVPM0_EL2
+	mrs	x12, MPAMVPM1_EL2
+	stp	x11, x12, [x0, #CTX_MPAMVPM0_EL2]
+
+	mrs	x13, MPAMVPM2_EL2
+	mrs	x14, MPAMVPM3_EL2
+	stp	x13, x14, [x0, #CTX_MPAMVPM2_EL2]
+
+	mrs	x15, MPAMVPM4_EL2
+	mrs	x16, MPAMVPM5_EL2
+	stp	x15, x16, [x0, #CTX_MPAMVPM4_EL2]
+
+	mrs	x17, MPAMVPM6_EL2
+	mrs	x9, MPAMVPM7_EL2
+	stp	x17, x9, [x0, #CTX_MPAMVPM6_EL2]
+
+	mrs	x10, MPAMVPMV_EL2
+	str	x10, [x0, #CTX_MPAMVPMV_EL2]
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 6)
+	mrs	x11, HAFGRTR_EL2
+	mrs	x12, HDFGRTR_EL2
+	stp	x11, x12, [x0, #CTX_HAFGRTR_EL2]
+
+	mrs	x13, HDFGWTR_EL2
+	mrs	x14, HFGITR_EL2
+	stp	x13, x14, [x0, #CTX_HDFGWTR_EL2]
+
+	mrs	x15, HFGRTR_EL2
+	mrs	x16, HFGWTR_EL2
+	stp	x15, x16, [x0, #CTX_HFGRTR_EL2]
+
+	mrs	x17, CNTPOFF_EL2
+	str	x17, [x0, #CTX_CNTPOFF_EL2]
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 4)
+	mrs	x9, cnthps_ctl_el2
+	mrs	x10, cnthps_cval_el2
+	stp	x9, x10, [x0, #CTX_CNTHPS_CTL_EL2]
+
+	mrs	x11, cnthps_tval_el2
+	mrs	x12, cnthvs_ctl_el2
+	stp	x11, x12, [x0, #CTX_CNTHPS_TVAL_EL2]
+
+	mrs	x13, cnthvs_cval_el2
+	mrs	x14, cnthvs_tval_el2
+	stp	x13, x14, [x0, #CTX_CNTHVS_CVAL_EL2]
+
+	mrs	x15, cnthv_ctl_el2
+	mrs	x16, cnthv_cval_el2
+	stp	x15, x16, [x0, #CTX_CNTHV_CTL_EL2]
+
+	mrs	x17, cnthv_tval_el2
+	mrs	x9, contextidr_el2
+	stp	x17, x9, [x0, #CTX_CNTHV_TVAL_EL2]
+
+#if CTX_INCLUDE_AARCH32_REGS
+	mrs	x10, sder32_el2
+	str	x10, [x0, #CTX_SDER32_EL2]
+#endif
+
+	mrs	x11, ttbr1_el2
+	str	x11, [x0, #CTX_TTBR1_EL2]
+
+	mrs	x12, vdisr_el2
+	str	x12, [x0, #CTX_VDISR_EL2]
+
+#if CTX_INCLUDE_NEVE_REGS
+	mrs	x13, vncr_el2
+	str	x13, [x0, #CTX_VNCR_EL2]
+#endif
+
+	mrs	x14, vsesr_el2
+	str	x14, [x0, #CTX_VSESR_EL2]
+
+	mrs	x15, vstcr_el2
+	str	x15, [x0, #CTX_VSTCR_EL2]
+
+	mrs	x16, vsttbr_el2
+	str	x16, [x0, #CTX_VSTTBR_EL2]
+
+	mrs	x17, TRFCR_EL2
+	str	x17, [x0, #CTX_TRFCR_EL2]
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 5)
+	mrs	x9, scxtnum_el2
+	str	x9, [x0, #CTX_SCXTNUM_EL2]
+#endif
+
+	ret
+endfunc el2_sysregs_context_save
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to restore EL2 system register context. It assumes
+ * that 'x0' is pointing to an 'el2_sys_regs' structure
+ * from where the register context will be restored.
+ *
+ * The following registers are not restored:
+ * AMEVCNTVOFF0<n>_EL2
+ * AMEVCNTVOFF1<n>_EL2
+ * ICH_AP0R<n>_EL2
+ * ICH_AP1R<n>_EL2
+ * ICH_LR<n>_EL2
+ * -----------------------------------------------------
+ */
+func el2_sysregs_context_restore
+	ldp	x9, x10, [x0, #CTX_ACTLR_EL2]
+	msr	actlr_el2, x9
+	msr	afsr0_el2, x10
+
+	ldp	x11, x12, [x0, #CTX_AFSR1_EL2]
+	msr	afsr1_el2, x11
+	msr	amair_el2, x12
+
+	ldp	x13, x14, [x0, #CTX_CNTHCTL_EL2]
+	msr	cnthctl_el2, x13
+	msr	cnthp_ctl_el2, x14
+
+	ldp	x15, x16, [x0, #CTX_CNTHP_CVAL_EL2]
+	msr	cnthp_cval_el2, x15
+	msr	cnthp_tval_el2, x16
+
+	ldp	x17, x9, [x0, #CTX_CNTVOFF_EL2]
+	msr	cntvoff_el2, x17
+	msr	cptr_el2, x9
+
+#if CTX_INCLUDE_AARCH32_REGS
+	ldp	x10, x11, [x0, #CTX_DBGVCR32_EL2]
+	msr	dbgvcr32_el2, x10
+#else
+	ldr	x11, [x0, #CTX_ELR_EL2]
+#endif
+	msr	elr_el2, x11
+
+	ldp	x14, x15, [x0, #CTX_ESR_EL2]
+	msr	esr_el2, x14
+	msr	far_el2, x15
+
+	ldp	x16, x17, [x0, #CTX_HACR_EL2]
+	msr	hacr_el2, x16
+	msr	hcr_el2, x17
+
+	ldp	x9, x10, [x0, #CTX_HPFAR_EL2]
+	msr	hpfar_el2, x9
+	msr	hstr_el2, x10
+
+	ldp	x11, x12, [x0, #CTX_ICC_SRE_EL2]
+	msr	ICC_SRE_EL2, x11
+	msr	ICH_HCR_EL2, x12
+
+	ldp	x13, x14, [x0, #CTX_ICH_VMCR_EL2]
+	msr	ICH_VMCR_EL2, x13
+	msr	mair_el2, x14
+
+#if ENABLE_SPE_FOR_LOWER_ELS
+	ldp	x15, x16, [x0, #CTX_MDCR_EL2]
+	msr	PMSCR_EL2, x16
+#else
+	ldr	x15, [x0, #CTX_MDCR_EL2]
+#endif
+	msr	mdcr_el2, x15
+
+	ldp	x17, x9, [x0, #CTX_SCTLR_EL2]
+	msr	sctlr_el2, x17
+	msr	spsr_el2, x9
+
+	ldp	x10, x11, [x0, #CTX_SP_EL2]
+	msr	sp_el2, x10
+	msr	tcr_el2, x11
+
+	ldp	x12, x13, [x0, #CTX_TPIDR_EL2]
+	msr	tpidr_el2, x12
+	msr	ttbr0_el2, x13
+
+	ldp	x13, x14, [x0, #CTX_VBAR_EL2]
+	msr	vbar_el2, x13
+	msr	vmpidr_el2, x14
+
+	ldp	x15, x16, [x0, #CTX_VPIDR_EL2]
+	msr	vpidr_el2, x15
+	msr	vtcr_el2, x16
+
+	ldr	x17, [x0, #CTX_VTTBR_EL2]
+	msr	vttbr_el2, x17
+
+#if CTX_INCLUDE_MTE_REGS
+	ldr	x9, [x0, #CTX_TFSR_EL2]
+	msr	TFSR_EL2, x9
+#endif
+
+#if ENABLE_MPAM_FOR_LOWER_ELS
+	ldp	x10, x11, [x0, #CTX_MPAM2_EL2]
+	msr	MPAM2_EL2, x10
+	msr	MPAMHCR_EL2, x11
+
+	ldp	x12, x13, [x0, #CTX_MPAMVPM0_EL2]
+	msr	MPAMVPM0_EL2, x12
+	msr	MPAMVPM1_EL2, x13
+
+	ldp	x14, x15, [x0, #CTX_MPAMVPM2_EL2]
+	msr	MPAMVPM2_EL2, x14
+	msr	MPAMVPM3_EL2, x15
+
+	ldp	x16, x17, [x0, #CTX_MPAMVPM4_EL2]
+	msr	MPAMVPM4_EL2, x16
+	msr	MPAMVPM5_EL2, x17
+
+	ldp	x9, x10, [x0, #CTX_MPAMVPM6_EL2]
+	msr	MPAMVPM6_EL2, x9
+	msr	MPAMVPM7_EL2, x10
+
+	ldr	x11, [x0, #CTX_MPAMVPMV_EL2]
+	msr	MPAMVPMV_EL2, x11
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 6)
+	ldp	x12, x13, [x0, #CTX_HAFGRTR_EL2]
+	msr	HAFGRTR_EL2, x12
+	msr	HDFGRTR_EL2, x13
+
+	ldp	x14, x15, [x0, #CTX_HDFGWTR_EL2]
+	msr	HDFGWTR_EL2, x14
+	msr	HFGITR_EL2, x15
+
+	ldp	x16, x17, [x0, #CTX_HFGRTR_EL2]
+	msr	HFGRTR_EL2, x16
+	msr	HFGWTR_EL2, x17
+
+	ldr	x9, [x0, #CTX_CNTPOFF_EL2]
+	msr	CNTPOFF_EL2, x9
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 4)
+	ldp	x10, x11, [x0, #CTX_CNTHPS_CTL_EL2]
+	msr	cnthps_ctl_el2, x10
+	msr	cnthps_cval_el2, x11
+
+	ldp	x12, x13, [x0, #CTX_CNTHPS_TVAL_EL2]
+	msr	cnthps_tval_el2, x12
+	msr	cnthvs_ctl_el2, x13
+
+	ldp	x14, x15, [x0, #CTX_CNTHVS_CVAL_EL2]
+	msr	cnthvs_cval_el2, x14
+	msr	cnthvs_tval_el2, x15
+
+	ldp	x16, x17, [x0, #CTX_CNTHV_CTL_EL2]
+	msr	cnthv_ctl_el2, x16
+	msr	cnthv_cval_el2, x17
+
+	ldp	x9, x10, [x0, #CTX_CNTHV_TVAL_EL2]
+	msr	cnthv_tval_el2, x9
+	msr	contextidr_el2, x10
+
+#if CTX_INCLUDE_AARCH32_REGS
+	ldr	x11, [x0, #CTX_SDER32_EL2]
+	msr	sder32_el2, x11
+#endif
+
+	ldr	x12, [x0, #CTX_TTBR1_EL2]
+	msr	ttbr1_el2, x12
+
+	ldr	x13, [x0, #CTX_VDISR_EL2]
+	msr	vdisr_el2, x13
+
+#if CTX_INCLUDE_NEVE_REGS
+	ldr	x14, [x0, #CTX_VNCR_EL2]
+	msr	vncr_el2, x14
+#endif
+
+	ldr	x15, [x0, #CTX_VSESR_EL2]
+	msr	vsesr_el2, x15
+
+	ldr	x16, [x0, #CTX_VSTCR_EL2]
+	msr	vstcr_el2, x16
+
+	ldr	x17, [x0, #CTX_VSTTBR_EL2]
+	msr	vsttbr_el2, x17
+
+	ldr	x9, [x0, #CTX_TRFCR_EL2]
+	msr	TRFCR_EL2, x9
+#endif
+
+#if ARM_ARCH_AT_LEAST(8, 5)
+	ldr	x10, [x0, #CTX_SCXTNUM_EL2]
+	msr	scxtnum_el2, x10
+#endif
+
+	ret
+endfunc el2_sysregs_context_restore
+
+#endif /* CTX_INCLUDE_EL2_REGS */
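
[Editor's note] The two routines above are reached through the C wrappers that this patch adds to context_mgmt.c further down (cm_el2_sysregs_context_save/restore). A minimal sketch of how a world switch might drive the new API, assuming the wrappers are declared in context_mgmt.h alongside the existing cm_el1_* ones; switch_to_normal_world() is a hypothetical helper, and the real call sites (dispatcher code) are outside this diff:

#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>

/* Hypothetical helper: leave the secure world and enter the normal
 * world, preserving EL1 and (with CTX_INCLUDE_EL2_REGS) EL2 state. */
static void switch_to_normal_world(void)
{
	/* Save the outgoing secure context. */
	cm_el1_sysregs_context_save(SECURE);
#if CTX_INCLUDE_EL2_REGS
	/* A no-op for SECURE unless SCR_EL3.EEL2 is set (see wrapper). */
	cm_el2_sysregs_context_save(SECURE);
#endif

	/* Restore the incoming non-secure context and set up the ERET. */
#if CTX_INCLUDE_EL2_REGS
	cm_el2_sysregs_context_restore(NON_SECURE);
#endif
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);
}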
 /* ------------------------------------------------------------------
  * The following function strictly follows the AArch64 PCS to use
  * x9-x17 (temporary caller-saved registers) to save EL1 system
@@ -32,9 +446,11 @@ func el1_sysregs_context_save
 	mrs	x10, elr_el1
 	stp	x9, x10, [x0, #CTX_SPSR_EL1]
 
+#if !ERRATA_SPECULATIVE_AT
 	mrs	x15, sctlr_el1
-	mrs	x16, actlr_el1
+	mrs	x16, tcr_el1
 	stp	x15, x16, [x0, #CTX_SCTLR_EL1]
+#endif
 
 	mrs	x17, cpacr_el1
 	mrs	x9, csselr_el1
@@ -52,9 +468,9 @@ func el1_sysregs_context_save
 	mrs	x15, amair_el1
 	stp	x14, x15, [x0, #CTX_MAIR_EL1]
 
-	mrs	x16, tcr_el1
+	mrs	x16, actlr_el1
 	mrs	x17, tpidr_el1
-	stp	x16, x17, [x0, #CTX_TCR_EL1]
+	stp	x16, x17, [x0, #CTX_ACTLR_EL1]
 
 	mrs	x9, tpidr_el0
 	mrs	x10, tpidrro_el0
@@ -129,9 +545,11 @@ func el1_sysregs_context_restore
 	msr	spsr_el1, x9
 	msr	elr_el1, x10
 
+#if !ERRATA_SPECULATIVE_AT
 	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
 	msr	sctlr_el1, x15
-	msr	actlr_el1, x16
+	msr	tcr_el1, x16
+#endif
 
 	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
 	msr	cpacr_el1, x17
@@ -149,8 +567,8 @@ func el1_sysregs_context_restore
 	msr	mair_el1, x14
 	msr	amair_el1, x15
 
-	ldp	x16, x17, [x0, #CTX_TCR_EL1]
-	msr	tcr_el1, x16
+	ldp	x16, x17, [x0, #CTX_ACTLR_EL1]
+	msr	actlr_el1, x16
 	msr	tpidr_el1, x17
 
 	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
@@ -471,6 +889,48 @@ func restore_gp_pmcr_pauth_regs
 	ret
 endfunc restore_gp_pmcr_pauth_regs
 
+/*
+ * In case of ERRATA_SPECULATIVE_AT, save the SCTLR_EL1 and TCR_EL1
+ * registers and update the EL1 registers to disable the stage 1 and
+ * stage 2 page table walk.
+ */
+func save_and_update_ptw_el1_sys_regs
+	/* ----------------------------------------------------------
+	 * Save only the sctlr_el1 and tcr_el1 registers
+	 * ----------------------------------------------------------
+	 */
+	mrs	x29, sctlr_el1
+	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_SCTLR_EL1)]
+	mrs	x29, tcr_el1
+	str	x29, [sp, #(CTX_EL1_SYSREGS_OFFSET + CTX_TCR_EL1)]
+
+	/* ------------------------------------------------------------
+	 * The steps below must be performed in this order to disable
+	 * the page table walk for lower ELs (EL1 and EL0). The first
+	 * step disables the stage 1 walk, and the second step ensures
+	 * that the page table walker uses the TCR_EL1.EPDx bits to
+	 * perform address translation. The ISB ensures that the CPU
+	 * executes these two steps in order.
+	 *
+	 * 1. Update the TCR_EL1.EPDx bits to disable the stage 1 page
+	 *    table walk.
+	 * 2. Enable the MMU bit to avoid identity mapping via stage 2
+	 *    and force the TCR_EL1.EPDx bits to be used by the page
+	 *    table walker.
+	 * ------------------------------------------------------------
+	 */
+	orr	x29, x29, #(TCR_EPD0_BIT)
+	orr	x29, x29, #(TCR_EPD1_BIT)
+	msr	tcr_el1, x29
+	isb
+	mrs	x29, sctlr_el1
+	orr	x29, x29, #SCTLR_M_BIT
+	msr	sctlr_el1, x29
+	isb
+
+	ret
+endfunc save_and_update_ptw_el1_sys_regs
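
[Editor's note] The ordering constraint described in the comment above is easier to follow in C. A rough equivalent, assuming the standard TF-A arch_helpers.h accessors (read_tcr_el1(), write_sctlr_el1(), isb(), ...) are available; the real routine stays in assembly because it runs on the EL3 exit path:

#include <arch_helpers.h>

/* Hypothetical C rendering of the EPDx/MMU sequence above. */
static void disable_lower_el_ptw(void)
{
	/* Step 1: set TCR_EL1.EPD0/EPD1 to disable stage 1 translation
	 * table walks from EL1/EL0. */
	write_tcr_el1(read_tcr_el1() | TCR_EPD0_BIT | TCR_EPD1_BIT);
	isb();		/* commit step 1 before step 2 */

	/* Step 2: set SCTLR_EL1.M so the walker honours the EPDx bits
	 * instead of producing an identity mapping via stage 2. */
	write_sctlr_el1(read_sctlr_el1() | SCTLR_M_BIT);
	isb();
}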
+
 /* ------------------------------------------------------------------
  * This routine assumes that the SP_EL3 is pointing to a valid
  * context structure from where the gp regs and other special
@@ -515,6 +975,8 @@ func el3_exit
 	blr	x17
1:
 #endif
+	restore_ptw_el1_sys_regs
+
 	/* ----------------------------------------------------------
 	 * Restore general purpose (including x30), PMCR_EL0 and
 	 * ARMv8.3-PAuth registers.
@@ -533,6 +995,11 @@ func el3_exit
 	 * ----------------------------------------------------------
 	 */
 	esb
+#else
+	dsb	sy
+#endif
+#ifdef IMAGE_BL31
+	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
 #endif
 	exception_return
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index dc4717abe..72d463b71 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -22,9 +22,8 @@
 #include <lib/extensions/mpam.h>
 #include <lib/extensions/spe.h>
 #include <lib/extensions/sve.h>
+#include <lib/extensions/twed.h>
 #include <lib/utils.h>
-#include <plat/common/platform.h>
-#include <smccc_helpers.h>
 
 
 /*******************************************************************************
@@ -109,6 +108,14 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	if (EP_GET_ST(ep->h.attr) != 0U)
 		scr_el3 |= SCR_ST_BIT;
 
+#if RAS_TRAP_LOWER_EL_ERR_ACCESS
+	/*
+	 * SCR_EL3.TERR: Trap Error record accesses. Accesses to the RAS ERR
+	 * and RAS ERX registers from EL1 and EL2 are trapped to EL3.
+	 */
+	scr_el3 |= SCR_TERR_BIT;
+#endif
+
 #if !HANDLE_EA_EL3_FIRST
 	/*
 	 * SCR_EL3.EA: Do not route External Abort and SError Interrupt External
@@ -137,30 +144,33 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	scr_el3 |= SCR_API_BIT | SCR_APK_BIT;
 #endif /* !CTX_INCLUDE_PAUTH_REGS */
 
+#if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS
+	/* Get the Memory Tagging Extension support level */
+	unsigned int mte = get_armv8_5_mte_support();
+#endif
 	/*
 	 * Enable MTE support. Support is enabled unilaterally for the normal
 	 * world, and only for the secure world when CTX_INCLUDE_MTE_REGS is
 	 * set.
 	 */
 #if CTX_INCLUDE_MTE_REGS
-	assert(get_armv8_5_mte_support() == MTE_IMPLEMENTED_ELX);
+	assert((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY));
 	scr_el3 |= SCR_ATA_BIT;
 #else
-	unsigned int mte = get_armv8_5_mte_support();
-	if (mte == MTE_IMPLEMENTED_EL0) {
-		/*
-		 * Can enable MTE across both worlds as no MTE registers are
-		 * used
-		 */
-		scr_el3 |= SCR_ATA_BIT;
-	} else if (mte == MTE_IMPLEMENTED_ELX && security_state == NON_SECURE) {
-		/*
-		 * Can only enable MTE in Non-Secure world without register
-		 * saving
-		 */
+	/*
+	 * When MTE is only implemented at EL0, it can be enabled
+	 * across both worlds as no MTE registers are used.
+	 */
+	if ((mte == MTE_IMPLEMENTED_EL0) ||
+	/*
+	 * When MTE is implemented at all ELs, it can only be enabled
+	 * in the Non-Secure world without register saving.
+	 */
+	   (((mte == MTE_IMPLEMENTED_ELX) || (mte == MTE_IMPLEMENTED_ASY)) &&
+	    (security_state == NON_SECURE))) {
 		scr_el3 |= SCR_ATA_BIT;
 	}
-#endif
+#endif	/* CTX_INCLUDE_MTE_REGS */
 
 #ifdef IMAGE_BL31
 	/*
@@ -174,11 +184,26 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	 * SCR_EL3.HCE: Enable HVC instructions if next execution state is
 	 * AArch64 and next EL is EL2, or if next execution state is AArch32 and
 	 * next mode is Hyp.
+	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
+	 * same conditions as HVC instructions and when the processor supports
+	 * ARMv8.6-FGT.
+	 * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV)
+	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
+	 * and when the processor supports ECV.
 	 */
 	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
 	    || ((GET_RW(ep->spsr) != MODE_RW_64)
 		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
 		scr_el3 |= SCR_HCE_BIT;
+
+		if (is_armv8_6_fgt_present()) {
+			scr_el3 |= SCR_FGTEN_BIT;
+		}
+
+		if (get_armv8_6_ecv_support()
+		    == ID_AA64MMFR0_EL1_ECV_SELF_SYNCH) {
+			scr_el3 |= SCR_ECVEN_BIT;
+		}
 	}
 
 	/* Enable S-EL2 if the next EL is EL2 and security state is secure */
@@ -231,12 +256,30 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	sctlr_elx |= SCTLR_IESB_BIT;
 #endif
 
+	/* Enable WFE trap delay in SCR_EL3 if supported and configured */
+	if (is_armv8_6_twed_present()) {
+		uint32_t delay = plat_arm_set_twedel_scr_el3();
+
+		if (delay != TWED_DISABLED) {
+			/* Make sure the delay value fits */
+			assert((delay & ~SCR_TWEDEL_MASK) == 0U);
+
+			/* Set the delay in SCR_EL3 */
+			scr_el3 &= ~(SCR_TWEDEL_MASK << SCR_TWEDEL_SHIFT);
+			scr_el3 |= ((delay & SCR_TWEDEL_MASK)
+				    << SCR_TWEDEL_SHIFT);
+
+			/* Enable WFE delay */
+			scr_el3 |= SCR_TWEDEn_BIT;
+		}
+	}
+
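[Editor's note] plat_arm_set_twedel_scr_el3() in the block above is a platform hook: it returns either TWED_DISABLED or a delay value that must fit in SCR_TWEDEL_MASK. A possible platform-side definition; treating TWED_DELAY as a build-time option is an assumption here, as is the 2^(TWEDEL + 8)-cycle minimum-delay encoding noted in the comment:

#include <stdint.h>
#include <lib/extensions/twed.h>

uint32_t plat_arm_set_twedel_scr_el3(void)
{
#ifdef TWED_DELAY	/* assumed build-time option */
	/* SCR_EL3.TWEDEL encodes a minimum delay of 2^(TWEDEL + 8)
	 * cycles before a trapped WFE is taken to EL3. */
	return (uint32_t)TWED_DELAY;
#else
	return TWED_DISABLED;	/* leave the WFE trap delay disabled */
#endif
}
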
 	/*
 	 * Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
 	 * and other EL2 registers are set up by cm_prepare_ns_entry() as they
 	 * are not part of the stored cpu_context.
 	 */
-	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
+	write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
 
 	/*
 	 * Base the context ACTLR_EL1 on the current value, as it is
@@ -246,7 +289,7 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 	 * be zero.
 	 */
 	actlr_elx = read_actlr_el1();
-	write_ctx_reg((get_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
+	write_ctx_reg((get_el1_sysregs_ctx(ctx)), (CTX_ACTLR_EL1), (actlr_elx));
 
 	/*
 	 * Populate EL3 state so that we've the right context
@@ -338,7 +381,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 							 CTX_SCR_EL3);
 		if ((scr_el3 & SCR_HCE_BIT) != 0U) {
 			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
-			sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
+			sctlr_elx = read_ctx_reg(get_el1_sysregs_ctx(ctx),
 						 CTX_SCTLR_EL1);
 			sctlr_elx &= SCTLR_EE_BIT;
 			sctlr_elx |= SCTLR_EL2_RES1;
@@ -532,6 +575,52 @@ void cm_prepare_el3_exit(uint32_t security_state)
 	cm_set_next_eret_context(security_state);
 }
 
+#if CTX_INCLUDE_EL2_REGS
+/*******************************************************************************
+ * Save EL2 sysreg context
+ ******************************************************************************/
+void cm_el2_sysregs_context_save(uint32_t security_state)
+{
+	u_register_t scr_el3 = read_scr();
+
+	/*
+	 * Always save the non-secure EL2 context; only save the
+	 * S-EL2 context if S-EL2 is enabled.
+	 */
+	if ((security_state == NON_SECURE) ||
+	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
+		cpu_context_t *ctx;
+
+		ctx = cm_get_context(security_state);
+		assert(ctx != NULL);
+
+		el2_sysregs_context_save(get_el2_sysregs_ctx(ctx));
+	}
+}
+
+/*******************************************************************************
+ * Restore EL2 sysreg context
+ ******************************************************************************/
+void cm_el2_sysregs_context_restore(uint32_t security_state)
+{
+	u_register_t scr_el3 = read_scr();
+
+	/*
+	 * Always restore the non-secure EL2 context; only restore the
+	 * S-EL2 context if S-EL2 is enabled.
+	 */
+	if ((security_state == NON_SECURE) ||
+	    ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) {
+		cpu_context_t *ctx;
+
+		ctx = cm_get_context(security_state);
+		assert(ctx != NULL);
+
+		el2_sysregs_context_restore(get_el2_sysregs_ctx(ctx));
+	}
+}
+#endif /* CTX_INCLUDE_EL2_REGS */
+
 /*******************************************************************************
  * The next four functions are used by runtime services to save and restore
  * EL1 context on the 'cpu_context' structure for the specified security
@@ -544,7 +633,7 @@ void cm_el1_sysregs_context_save(uint32_t security_state)
 	ctx = cm_get_context(security_state);
 	assert(ctx != NULL);
 
-	el1_sysregs_context_save(get_sysregs_ctx(ctx));
+	el1_sysregs_context_save(get_el1_sysregs_ctx(ctx));
 
 #if IMAGE_BL31
 	if (security_state == SECURE)
@@ -561,7 +650,7 @@ void cm_el1_sysregs_context_restore(uint32_t security_state)
 	ctx = cm_get_context(security_state);
 	assert(ctx != NULL);
 
-	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
+	el1_sysregs_context_restore(get_el1_sysregs_ctx(ctx));
 
 #if IMAGE_BL31
 	if (security_state == SECURE)
@@ -624,7 +713,7 @@ void cm_write_scr_el3_bit(uint32_t security_state,
 	assert(ctx != NULL);
 
 	/* Ensure that the bit position is a valid one */
-	assert(((1U << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
+	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
 
 	/* Ensure that the 'value' is only a bit wide */
 	assert(value <= 1U);
@@ -635,7 +724,7 @@ void cm_write_scr_el3_bit(uint32_t security_state,
 	 */
 	state = get_el3state_ctx(ctx);
 	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
-	scr_el3 &= ~(1U << bit_pos);
+	scr_el3 &= ~(1UL << bit_pos);
 	scr_el3 |= (u_register_t)value << bit_pos;
 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
 }
diff --git a/lib/el3_runtime/aarch64/cpu_data.S b/lib/el3_runtime/aarch64/cpu_data.S
index 2edf22559..2392d6b90 100644
--- a/lib/el3_runtime/aarch64/cpu_data.S
+++ b/lib/el3_runtime/aarch64/cpu_data.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -41,7 +41,8 @@ endfunc init_cpu_data_ptr
 func _cpu_data_by_index
 	mov_imm	x1, CPU_DATA_SIZE
 	mul	x0, x0, x1
-	adr	x1, percpu_data
+	adrp	x1, percpu_data
+	add	x1, x1, :lo12:percpu_data
 	add	x0, x0, x1
 	ret
 endfunc _cpu_data_by_index
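
[Editor's note] On the 1U -> 1UL change in cm_write_scr_el3_bit() above: SCR_EL3 is 64 bits wide, and with a 32-bit 1U operand a shift by a bit position of 32 or more is undefined behaviour in C, so high SCR bits could never be set or cleared correctly. A standalone illustration, assuming an LP64 target such as AArch64 where unsigned long is 64 bits:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned int bit_pos = 38U;	/* any bit position above 31 */

	/* With a 32-bit operand, "1U << bit_pos" would be undefined
	 * here and could not reach bits 32-63 in any case. The UL
	 * suffix widens the operand before the shift happens. */
	uint64_t mask = 1UL << bit_pos;

	assert(mask == ((uint64_t)1 << 38));
	return 0;
}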