Diffstat (limited to 'services')
-rw-r--r--  services/arm_arch_svc/arm_arch_svc_setup.c   |  28
-rw-r--r--  services/spd/tlkd/tlkd_common.c               |   8
-rw-r--r--  services/spd/tlkd/tlkd_main.c                 |  94
-rw-r--r--  services/spd/tlkd/tlkd_pm.c                   |  26
-rw-r--r--  services/spd/trusty/generic-arm64-smcall.c    |  23
-rw-r--r--  services/spd/trusty/generic-arm64-smcall.h    |   1
-rw-r--r--  services/spd/trusty/smcall.h                  |   8
-rw-r--r--  services/spd/trusty/trusty.c                  |  35
-rw-r--r--  services/spd/tspd/tspd_main.c                 |   4
-rw-r--r--  services/std_svc/sdei/sdei_main.c             |  10
-rw-r--r--  services/std_svc/spm_mm/spm_mm_setup.c        |  20
-rw-r--r--  services/std_svc/spm_mm/spm_mm_xlat.c         |   6
-rw-r--r--  services/std_svc/spmd/aarch64/spmd_helpers.S  |  73
-rw-r--r--  services/std_svc/spmd/spmd.mk                 |  22
-rw-r--r--  services/std_svc/spmd/spmd_main.c             | 646
-rw-r--r--  services/std_svc/spmd/spmd_pm.c               | 156
-rw-r--r--  services/std_svc/spmd/spmd_private.h          | 103
-rw-r--r--  services/std_svc/std_svc_setup.c              |  30
-rw-r--r--  services/std_svc/trng/trng_entropy_pool.c     | 151
-rw-r--r--  services/std_svc/trng/trng_entropy_pool.h     |  16
-rw-r--r--  services/std_svc/trng/trng_main.c             | 143
21 files changed, 1543 insertions(+), 60 deletions(-)
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
index 1fc7827b4..37bfc62e2 100644
--- a/services/arm_arch_svc/arm_arch_svc_setup.c
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,18 +12,21 @@
#include <lib/smccc.h>
#include <services/arm_arch_svc.h>
#include <smccc_helpers.h>
+#include <plat/common/platform.h>
static int32_t smccc_version(void)
{
return MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
}
-static int32_t smccc_arch_features(u_register_t arg)
+static int32_t smccc_arch_features(u_register_t arg1)
{
- switch (arg) {
+ switch (arg1) {
case SMCCC_VERSION:
case SMCCC_ARCH_FEATURES:
- return SMC_OK;
+ return SMC_ARCH_CALL_SUCCESS;
+ case SMCCC_ARCH_SOC_ID:
+ return plat_is_smccc_feature_available(arg1);
#if WORKAROUND_CVE_2017_5715
case SMCCC_ARCH_WORKAROUND_1:
if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
@@ -66,7 +69,7 @@ static int32_t smccc_arch_features(u_register_t arg)
return 0;
#else
/* Either the CPUs are unaffected or permanently mitigated */
- return SMCCC_ARCH_NOT_REQUIRED;
+ return SMC_ARCH_CALL_NOT_REQUIRED;
#endif
}
#endif
@@ -78,6 +81,19 @@ static int32_t smccc_arch_features(u_register_t arg)
}
}
+/*
+ * Return the SoC revision or SoC version on success, otherwise return
+ * SMC_ARCH_CALL_INVAL_PARAM.
+ */
+static int32_t smccc_arch_id(u_register_t arg1)
+{
+ if (arg1 == SMCCC_GET_SOC_REVISION) {
+ return plat_get_soc_revision();
+ }
+ if (arg1 == SMCCC_GET_SOC_VERSION) {
+ return plat_get_soc_version();
+ }
+ return SMC_ARCH_CALL_INVAL_PARAM;
+}
+
/*
* Top-level Arm Architectural Service SMC handler.
*/
@@ -95,6 +111,8 @@ static uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid,
SMC_RET1(handle, smccc_version());
case SMCCC_ARCH_FEATURES:
SMC_RET1(handle, smccc_arch_features(x1));
+ case SMCCC_ARCH_SOC_ID:
+ SMC_RET1(handle, smccc_arch_id(x1));
#if WORKAROUND_CVE_2017_5715
case SMCCC_ARCH_WORKAROUND_1:
/*
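
The new SMCCC_ARCH_SOC_ID path is backed by three platform hooks declared in
plat/common/platform.h. A minimal sketch of what a platform port might provide,
with made-up identification values (nothing below comes from a real platform):

int32_t plat_is_smccc_feature_available(u_register_t fid)
{
	switch (fid) {
	case SMCCC_ARCH_SOC_ID:
		return SMC_ARCH_CALL_SUCCESS;
	default:
		return SMC_ARCH_CALL_NOT_SUPPORTED;
	}
}

int32_t plat_get_soc_revision(void)
{
	return 0x1;		/* illustrative revision value */
}

int32_t plat_get_soc_version(void)
{
	return 0x00010001;	/* illustrative JEP-106 code + SoC ID */
}
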
diff --git a/services/spd/tlkd/tlkd_common.c b/services/spd/tlkd/tlkd_common.c
index dbe6c2e34..820bd8a72 100644
--- a/services/spd/tlkd/tlkd_common.c
+++ b/services/spd/tlkd/tlkd_common.c
@@ -38,16 +38,16 @@ uint64_t tlkd_va_translate(uintptr_t va, int type)
int at = type & AT_MASK;
switch (at) {
case 0:
- ats12e1r(va);
+ AT(ats12e1r, va);
break;
case 1:
- ats12e1w(va);
+ AT(ats12e1w, va);
break;
case 2:
- ats12e0r(va);
+ AT(ats12e0r, va);
break;
case 3:
- ats12e0w(va);
+ AT(ats12e0w, va);
break;
default:
assert(0); /* Unreachable */
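
For context, AT() is a wrapper macro from arch_helpers.h around the raw address
translation instructions. Conceptually it issues the translation and
synchronises before the caller reads PAR_EL1; a simplified sketch (not the
actual TF-A definition, which additionally accounts for the
ERRATA_SPECULATIVE_AT workaround) might look like:

/* Simplified sketch only; assumes an isb() helper as in arch_helpers.h. */
#define AT(_at_inst, _va)	{					\
	_at_inst(_va);							\
	isb();	/* complete the translation before PAR_EL1 is read */	\
}
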
diff --git a/services/spd/tlkd/tlkd_main.c b/services/spd/tlkd/tlkd_main.c
index 3cfc52db4..ecac43522 100644
--- a/services/spd/tlkd/tlkd_main.c
+++ b/services/spd/tlkd/tlkd_main.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,6 +15,7 @@
* responsible for initialising and maintaining communication with the SP.
******************************************************************************/
#include <assert.h>
+#include <bl31/interrupt_mgmt.h>
#include <errno.h>
#include <stddef.h>
@@ -49,6 +51,50 @@ DEFINE_SVC_UUID2(tlk_uuid,
static int32_t tlkd_init(void);
/*******************************************************************************
+ * Secure Payload Dispatcher's timer interrupt handler
+ ******************************************************************************/
+static uint64_t tlkd_interrupt_handler(uint32_t id,
+ uint32_t flags,
+ void *handle,
+ void *cookie)
+{
+ cpu_context_t *s_cpu_context;
+ int irq = plat_ic_get_pending_interrupt_id();
+
+ /* acknowledge the interrupt and mark it complete */
+ (void)plat_ic_acknowledge_interrupt();
+ plat_ic_end_of_interrupt(irq);
+
+ /*
+ * Disable the routing of NS interrupts from secure world to
+ * EL3 while interrupted on this core.
+ */
+ disable_intr_rm_local(INTR_TYPE_S_EL1, SECURE);
+
+ /* Check the security state when the exception was generated */
+ assert(get_interrupt_src_ss(flags) == NON_SECURE);
+ assert(handle == cm_get_context(NON_SECURE));
+
+ /* Save non-secure state */
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /* Get a reference to the secure context */
+ s_cpu_context = cm_get_context(SECURE);
+ assert(s_cpu_context);
+
+	/*
+	 * Restore secure state to hand the interrupt over to the SP. The
+	 * non-secure context saved above is restored only when the SP
+	 * signals completion of interrupt handling via TLK_IRQ_DONE.
+	 */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ /* Provide the IRQ number to the SPD */
+ SMC_RET4(s_cpu_context, (uint32_t)TLK_IRQ_FIRED, 0, (uint32_t)irq, 0);
+}
+
+/*******************************************************************************
* Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
* (aarch32/aarch64) if not already known and initialises the context for entry
* into the SP for its initialisation.
@@ -56,6 +102,8 @@ static int32_t tlkd_init(void);
static int32_t tlkd_setup(void)
{
entry_point_info_t *tlk_ep_info;
+ uint32_t flags;
+ int32_t ret;
/*
* Get information about the Secure Payload (BL32) image. Its
@@ -86,6 +134,18 @@ static int32_t tlkd_setup(void)
tlk_ep_info->pc,
&tlk_ctx);
+ /* get a list of all S-EL1 IRQs from the platform */
+
+ /* register interrupt handler */
+ flags = 0;
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
+ tlkd_interrupt_handler,
+ flags);
+ if (ret != 0) {
+ ERROR("failed to register tlkd interrupt handler (%d)\n", ret);
+ }
+
/*
* All TLK SPD initialization done. Now register our init function
* with BL31 for deferred invocation
@@ -212,6 +272,9 @@ static uintptr_t tlkd_smc_handler(uint32_t smc_fid,
case TLK_TA_LAUNCH_OP:
case TLK_TA_SEND_EVENT:
case TLK_RESUME_FID:
+ case TLK_SET_BL_VERSION:
+ case TLK_LOCK_BL_INTERFACE:
+ case TLK_BL_RPMB_SERVICE:
if (!ns)
SMC_RET1(handle, SMC_UNK);
@@ -370,7 +433,6 @@ static uintptr_t tlkd_smc_handler(uint32_t smc_fid,
*/
case TLK_SUSPEND_DONE:
case TLK_RESUME_DONE:
- case TLK_SYSTEM_OFF_DONE:
if (ns)
SMC_RET1(handle, SMC_UNK);
@@ -385,6 +447,34 @@ static uintptr_t tlkd_smc_handler(uint32_t smc_fid,
break;
/*
+ * This function ID is used by SP to indicate that it has completed
+ * handling the secure interrupt.
+ */
+ case TLK_IRQ_DONE:
+
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ assert(handle == cm_get_context(SECURE));
+
+ /* save secure world context */
+ cm_el1_sysregs_context_save(SECURE);
+
+ /* Get a reference to the non-secure context */
+ ns_cpu_context = cm_get_context(NON_SECURE);
+ assert(ns_cpu_context);
+
+	/*
+	 * Restore non-secure state and return control to the normal
+	 * world, whose context was saved when the S-EL1 interrupt
+	 * fired.
+	 */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ SMC_RET0(ns_cpu_context);
+
+ /*
* Return the number of service function IDs implemented to
* provide service to non-secure
*/
diff --git a/services/spd/tlkd/tlkd_pm.c b/services/spd/tlkd/tlkd_pm.c
index 7d1959bce..ed5bf7761 100644
--- a/services/spd/tlkd/tlkd_pm.c
+++ b/services/spd/tlkd/tlkd_pm.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -98,29 +99,6 @@ static void cpu_resume_handler(u_register_t suspend_level)
}
/*******************************************************************************
- * System is about to be reset. Inform the SP to allow any book-keeping
- ******************************************************************************/
-static void system_off_handler(void)
-{
- int cpu = read_mpidr() & MPIDR_CPU_MASK;
- gp_regs_t *gp_regs;
-
- /* TLK runs only on CPU0 */
- if (cpu != 0)
- return;
-
- /* pass system off/reset events to TLK */
- gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
- write_ctx_reg(gp_regs, CTX_GPREG_X0, TLK_SYSTEM_OFF);
-
- /*
- * Enter the SP. We do not care about the return value because we
- * must continue with the shutdown anyway.
- */
- (void)tlkd_synchronous_sp_entry(&tlk_ctx);
-}
-
-/*******************************************************************************
* Structure populated by the Dispatcher to be given a chance to perform any
* bookkeeping before PSCI executes a power mgmt. operation.
******************************************************************************/
@@ -128,6 +106,4 @@ const spd_pm_ops_t tlkd_pm_ops = {
.svc_migrate_info = cpu_migrate_info,
.svc_suspend = cpu_suspend_handler,
.svc_suspend_finish = cpu_resume_handler,
- .svc_system_off = system_off_handler,
- .svc_system_reset = system_off_handler
};
diff --git a/services/spd/trusty/generic-arm64-smcall.c b/services/spd/trusty/generic-arm64-smcall.c
index dfc3e71b7..5c3a62849 100644
--- a/services/spd/trusty/generic-arm64-smcall.c
+++ b/services/spd/trusty/generic-arm64-smcall.c
@@ -12,6 +12,22 @@
#include "generic-arm64-smcall.h"
+#ifndef PLAT_ARM_GICD_BASE
+#ifdef GICD_BASE
+#define PLAT_ARM_GICD_BASE GICD_BASE
+#define PLAT_ARM_GICC_BASE GICC_BASE
+#ifdef GICR_BASE
+#define PLAT_ARM_GICR_BASE GICR_BASE
+#endif
+#else
+#error PLAT_ARM_GICD_BASE or GICD_BASE must be defined
+#endif
+#endif
+
+#ifndef PLAT_ARM_GICR_BASE
+#define PLAT_ARM_GICR_BASE SMC_UNK
+#endif
+
int trusty_disable_serial_debug;
struct dputc_state {
@@ -48,12 +64,15 @@ static void trusty_dputc(char ch, int secure)
static uint64_t trusty_get_reg_base(uint32_t reg)
{
switch (reg) {
- case 0:
+ case SMC_GET_GIC_BASE_GICD:
return PLAT_ARM_GICD_BASE;
- case 1:
+ case SMC_GET_GIC_BASE_GICC:
return PLAT_ARM_GICC_BASE;
+ case SMC_GET_GIC_BASE_GICR:
+ return PLAT_ARM_GICR_BASE;
+
default:
NOTICE("%s(0x%x) unknown reg\n", __func__, reg);
return SMC_UNK;
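
A Trusty-side query of the new selector could look like the sketch below,
assuming an smc() helper that returns x0; SMC_UNK in the result signals that
the platform configured no redistributor base:

uint64_t gicr = smc(SMC_FC64_GET_REG_BASE, SMC_GET_GIC_BASE_GICR, 0, 0);
if (gicr == (uint64_t)SMC_UNK) {
	/* no GICv3 redistributor base available on this platform */
}
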
diff --git a/services/spd/trusty/generic-arm64-smcall.h b/services/spd/trusty/generic-arm64-smcall.h
index 06efc722f..ac0346924 100644
--- a/services/spd/trusty/generic-arm64-smcall.h
+++ b/services/spd/trusty/generic-arm64-smcall.h
@@ -23,5 +23,6 @@
*/
#define SMC_GET_GIC_BASE_GICD 0
#define SMC_GET_GIC_BASE_GICC 1
+#define SMC_GET_GIC_BASE_GICR 2
#define SMC_FC_GET_REG_BASE SMC_FASTCALL_NR(SMC_ENTITY_PLATFORM_MONITOR, 0x1)
#define SMC_FC64_GET_REG_BASE SMC_FASTCALL64_NR(SMC_ENTITY_PLATFORM_MONITOR, 0x1)
diff --git a/services/spd/trusty/smcall.h b/services/spd/trusty/smcall.h
index 9c1c38c74..c66f7db86 100644
--- a/services/spd/trusty/smcall.h
+++ b/services/spd/trusty/smcall.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -71,4 +72,11 @@
#define SMC_YC_VDEV_KICK_VQ SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24U)
#define SMC_YC_SET_ROT_PARAMS SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 65535U)
+/*
+ * Standard Trusted OS Function IDs that fall under Trusted OS call range
+ * according to SMC calling convention
+ */
+#define SMC_FC64_GET_UUID SMC_FASTCALL64_NR(63U, 0xFF01U) /* Implementation UID */
+#define SMC_FC_GET_UUID SMC_FASTCALL_NR(63U, 0xFF01U) /* Implementation UID */
+
#endif /* SMCALL_H */
diff --git a/services/spd/trusty/trusty.c b/services/spd/trusty/trusty.c
index d6c092cb7..e102b8228 100644
--- a/services/spd/trusty/trusty.c
+++ b/services/spd/trusty/trusty.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -16,11 +17,18 @@
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/smccc.h>
#include <plat/common/platform.h>
+#include <tools_share/uuid.h>
#include "sm_err.h"
#include "smcall.h"
+/* Trusty UID: RFC-4122 compliant UUID version 4 */
+DEFINE_SVC_UUID2(trusty_uuid,
+ 0x40ee25f0, 0xa2bc, 0x304c, 0x8c, 0x4c,
+ 0xa1, 0x73, 0xc5, 0x7d, 0x8a, 0xf1);
+
/* macro to check if Hypervisor is enabled in the HCR_EL2 register */
#define HYP_ENABLE_FLAG 0x286001U
@@ -150,9 +158,9 @@ static uint64_t trusty_fiq_handler(uint32_t id,
(void)memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
- ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);
+ ctx->fiq_sp_el1 = read_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1);
- write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
+ write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, (uint32_t)ctx->fiq_handler_cpsr);
SMC_RET0(handle);
@@ -211,7 +219,7 @@ static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t
*/
(void)memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
ctx->fiq_handler_active = 0;
- write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
+ write_ctx_reg(get_el1_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, (uint32_t)ctx->fiq_cpsr);
SMC_RET0(handle);
@@ -256,6 +264,11 @@ static uintptr_t trusty_smc_handler(uint32_t smc_fid,
SMC_RET1(handle, SMC_UNK);
} else {
switch (smc_fid) {
+ case SMC_FC64_GET_UUID:
+ case SMC_FC_GET_UUID:
+ /* provide the UUID for the service to the client */
+ SMC_UUID_RET(handle, trusty_uuid);
+ break;
case SMC_FC64_SET_FIQ_HANDLER:
return trusty_set_fiq_handler(handle, x1, x2, x3);
case SMC_FC64_GET_FIQ_REGS:
@@ -263,6 +276,12 @@ static uintptr_t trusty_smc_handler(uint32_t smc_fid,
case SMC_FC_FIQ_EXIT:
return trusty_fiq_exit(handle, x1, x2, x3);
default:
+ /* Not all OENs greater than SMC_ENTITY_SECURE_MONITOR are supported */
+ if (SMC_ENTITY(smc_fid) > SMC_ENTITY_SECURE_MONITOR) {
+ VERBOSE("%s: unsupported SMC FID (0x%x)\n", __func__, smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+ }
+
if (is_hypervisor_mode())
vmid = SMC_GET_GP(handle, CTX_GPREG_X7);
@@ -390,6 +409,10 @@ static const spd_pm_ops_t trusty_pm = {
void plat_trusty_set_boot_args(aapcs64_params_t *args);
+#if !defined(TSP_SEC_MEM_SIZE) && defined(BL32_MEM_SIZE)
+#define TSP_SEC_MEM_SIZE BL32_MEM_SIZE
+#endif
+
#ifdef TSP_SEC_MEM_SIZE
#pragma weak plat_trusty_set_boot_args
void plat_trusty_set_boot_args(aapcs64_params_t *args)
@@ -409,7 +432,7 @@ static int32_t trusty_setup(void)
/* Get trusty's entry point info */
ep_info = bl31_plat_get_next_image_ep_info(SECURE);
if (ep_info == NULL) {
- INFO("Trusty image missing.\n");
+ VERBOSE("Trusty image missing.\n");
return -1;
}
@@ -462,7 +485,7 @@ static int32_t trusty_setup(void)
trusty_fiq_handler,
flags);
if (ret != 0) {
- ERROR("trusty: failed to register fiq handler, ret = %d\n", ret);
+ VERBOSE("trusty: failed to register fiq handler, ret = %d\n", ret);
}
if (aarch32) {
@@ -498,7 +521,7 @@ DECLARE_RT_SVC(
trusty_fast,
OEN_TOS_START,
- SMC_ENTITY_SECURE_MONITOR,
+ OEN_TOS_END,
SMC_TYPE_FAST,
trusty_setup,
trusty_smc_handler
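
SMC_UUID_RET() packs the 16-byte UUID into x0-x3, four bytes per register. A
hedged sketch of a normal-world client identifying the resident Trusted OS
(smc8() and struct smc_ret are assumed helper names, not a real API):

struct smc_ret r = smc8(SMC_FC_GET_UUID, 0, 0, 0, 0, 0, 0, 0);
/*
 * r.x0..r.x3 now carry the UUID words; compare them against
 * 40ee25f0-a2bc-304c-8c4c-a173c57d8af1 to confirm Trusty is present.
 */
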
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
index f2067244e..b0cbf625d 100644
--- a/services/spd/tspd/tspd_main.c
+++ b/services/spd/tspd/tspd_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -126,7 +126,7 @@ static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
* interrupt handling.
*/
if (get_yield_smc_active_flag(tsp_ctx->state)) {
- tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
+ tsp_ctx->saved_spsr_el3 = (uint32_t)SMC_GET_EL3(&tsp_ctx->cpu_ctx,
CTX_SPSR_EL3);
tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
CTX_ELR_EL3);
diff --git a/services/std_svc/sdei/sdei_main.c b/services/std_svc/sdei/sdei_main.c
index 042417764..dba5e07ff 100644
--- a/services/std_svc/sdei/sdei_main.c
+++ b/services/std_svc/sdei/sdei_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -226,6 +226,7 @@ static void sdei_class_init(sdei_class_t class)
/* SDEI dispatcher initialisation */
void sdei_init(void)
{
+ plat_sdei_setup();
sdei_class_init(SDEI_CRITICAL);
sdei_class_init(SDEI_NORMAL);
@@ -328,8 +329,11 @@ finish:
}
/* Register handler and argument for an SDEI event */
-static int64_t sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
- uint64_t flags, uint64_t mpidr)
+static int64_t sdei_event_register(int ev_num,
+ uint64_t ep,
+ uint64_t arg,
+ uint64_t flags,
+ uint64_t mpidr)
{
int ret;
unsigned int routing;
diff --git a/services/std_svc/spm_mm/spm_mm_setup.c b/services/std_svc/spm_mm/spm_mm_setup.c
index ccb2f9058..32562c311 100644
--- a/services/std_svc/spm_mm/spm_mm_setup.c
+++ b/services/std_svc/spm_mm/spm_mm_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -116,17 +116,17 @@ void spm_sp_setup(sp_context_t *sp_ctx)
xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
EL1_EL0_REGIME);
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_MAIR_EL1,
mmu_cfg_params[MMU_CFG_MAIR]);
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TCR_EL1,
mmu_cfg_params[MMU_CFG_TCR]);
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TTBR0_EL1,
mmu_cfg_params[MMU_CFG_TTBR0]);
/* Setup SCTLR_EL1 */
- u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);
+ u_register_t sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);
sctlr_el1 |=
/*SCTLR_EL1_RES1 |*/
@@ -142,6 +142,8 @@ void spm_sp_setup(sp_context_t *sp_ctx)
SCTLR_DZE_BIT |
/* Enable SP Alignment check for EL0 */
SCTLR_SA0_BIT |
+ /* Don't change PSTATE.PAN on taking an exception to EL1 */
+ SCTLR_SPAN_BIT |
/* Allow cacheable data and instr. accesses to normal memory. */
SCTLR_C_BIT | SCTLR_I_BIT |
/* Enable MMU. */
@@ -160,7 +162,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
SCTLR_UMA_BIT
);
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
/*
* Setup other system registers
@@ -168,10 +170,10 @@ void spm_sp_setup(sp_context_t *sp_ctx)
*/
/* Shim Exception Vector Base Address */
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
SPM_SHIM_EXCEPTIONS_PTR);
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
/*
@@ -181,7 +183,7 @@ void spm_sp_setup(sp_context_t *sp_ctx)
* TTA: Enable access to trace registers.
* ZEN (v8.2): Trap SVE instructions and access to SVE registers.
*/
- write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
/*
diff --git a/services/std_svc/spm_mm/spm_mm_xlat.c b/services/std_svc/spm_mm/spm_mm_xlat.c
index 6c02f0743..eae597cab 100644
--- a/services/std_svc/spm_mm/spm_mm_xlat.c
+++ b/services/std_svc/spm_mm/spm_mm_xlat.c
@@ -21,13 +21,17 @@
#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
#endif
+#ifndef PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME
+#define PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME ".bss"
+#endif
/* Allocate and initialise the translation context for the secure partitions. */
REGISTER_XLAT_CONTEXT2(sp,
PLAT_SP_IMAGE_MMAP_REGIONS,
PLAT_SP_IMAGE_MAX_XLAT_TABLES,
PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
- EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME);
+ EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME,
+ PLAT_SP_IMAGE_BASE_XLAT_SECTION_NAME);
/* Lock used for SP_MEMORY_ATTRIBUTES_GET and SP_MEMORY_ATTRIBUTES_SET */
static spinlock_t mem_attr_smc_lock;
diff --git a/services/std_svc/spmd/aarch64/spmd_helpers.S b/services/std_svc/spmd/aarch64/spmd_helpers.S
new file mode 100644
index 000000000..d7bffca24
--- /dev/null
+++ b/services/std_svc/spmd/aarch64/spmd_helpers.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "../spmd_private.h"
+
+ .global spmd_spm_core_enter
+ .global spmd_spm_core_exit
+
+ /* ---------------------------------------------------------------------
+ * This function is called with SP_EL0 as stack. Here we stash our EL3
+ * callee-saved registers on to the stack as a part of saving the C
+ * runtime and enter the secure payload.
+ * 'x0' contains a pointer to the memory where the address of the C
+ * runtime context is to be saved.
+ * ---------------------------------------------------------------------
+ */
+func spmd_spm_core_enter
+ /* Make space for the registers that we're going to save */
+ mov x3, sp
+ str x3, [x0, #0]
+ sub sp, sp, #SPMD_C_RT_CTX_SIZE
+
+ /* Save callee-saved registers on to the stack */
+ stp x19, x20, [sp, #SPMD_C_RT_CTX_X19]
+ stp x21, x22, [sp, #SPMD_C_RT_CTX_X21]
+ stp x23, x24, [sp, #SPMD_C_RT_CTX_X23]
+ stp x25, x26, [sp, #SPMD_C_RT_CTX_X25]
+ stp x27, x28, [sp, #SPMD_C_RT_CTX_X27]
+ stp x29, x30, [sp, #SPMD_C_RT_CTX_X29]
+
+ /* ---------------------------------------------------------------------
+	 * Everything is set up now. el3_exit() will use the secure context to
+	 * restore the general purpose and EL3 system registers and ERET
+	 * into the secure payload.
+ * ---------------------------------------------------------------------
+ */
+ b el3_exit
+endfunc spmd_spm_core_enter
+
+ /* ---------------------------------------------------------------------
+ * This function is called with 'x0' pointing to a C runtime context.
+ * It restores the saved registers and jumps to that runtime with 'x0'
+ * as the new SP register. This destroys the C runtime context that had
+ * been built on the stack below the saved context by the caller. Later
+ * the second parameter 'x1' is passed as a return value to the caller.
+ * ---------------------------------------------------------------------
+ */
+func spmd_spm_core_exit
+ /* Restore the previous stack */
+ mov sp, x0
+
+	/* Restore callee-saved registers from the stack */
+ ldp x19, x20, [x0, #(SPMD_C_RT_CTX_X19 - SPMD_C_RT_CTX_SIZE)]
+ ldp x21, x22, [x0, #(SPMD_C_RT_CTX_X21 - SPMD_C_RT_CTX_SIZE)]
+ ldp x23, x24, [x0, #(SPMD_C_RT_CTX_X23 - SPMD_C_RT_CTX_SIZE)]
+ ldp x25, x26, [x0, #(SPMD_C_RT_CTX_X25 - SPMD_C_RT_CTX_SIZE)]
+ ldp x27, x28, [x0, #(SPMD_C_RT_CTX_X27 - SPMD_C_RT_CTX_SIZE)]
+ ldp x29, x30, [x0, #(SPMD_C_RT_CTX_X29 - SPMD_C_RT_CTX_SIZE)]
+
+ /* ---------------------------------------------------------------------
+ * This should take us back to the instruction after the call to the
+	 * last spmd_spm_core_enter(). Place the second parameter in x0
+ * so that the caller will see it as a return value from the original
+ * entry call.
+ * ---------------------------------------------------------------------
+ */
+ mov x0, x1
+ ret
+endfunc spmd_spm_core_exit
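
These two routines form a coroutine-style pair: spmd_spm_core_enter() stashes
the EL3 C runtime's callee-saved registers and ERETs into the SPMC, while a
later spmd_spm_core_exit() unwinds so that the original call appears to return
with the supplied value. Reduced to its C-side shape (mirroring
spmd_spm_core_sync_entry() introduced below):

uint64_t c_rt_ctx;
uint64_t rc = spmd_spm_core_enter(&c_rt_ctx);
/*
 * Execution resumes here only once spmd_spm_core_exit(c_rt_ctx, rc)
 * runs on the same CPU.
 */
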
diff --git a/services/std_svc/spmd/spmd.mk b/services/std_svc/spmd/spmd.mk
new file mode 100644
index 000000000..73f7c85dd
--- /dev/null
+++ b/services/std_svc/spmd/spmd.mk
@@ -0,0 +1,22 @@
+#
+# Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch64)
+ $(error "Error: SPMD is only supported on aarch64.")
+endif
+
+SPMD_SOURCES += $(addprefix services/std_svc/spmd/, \
+ ${ARCH}/spmd_helpers.S \
+ spmd_pm.c \
+ spmd_main.c)
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32 := yes
+
+# Enable dynamic memory mapping
+# The SPMD component maps the SPMC DTB within BL31 virtual space.
+PLAT_XLAT_TABLES_DYNAMIC := 1
+$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC))
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
new file mode 100644
index 000000000..a076be255
--- /dev/null
+++ b/services/std_svc/spmd/spmd_main.c
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <arch/aarch64/arch_features.h>
+#include <bl31/bl31.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/smccc.h>
+#include <lib/spinlock.h>
+#include <lib/utils.h>
+#include <plat/common/common_def.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+#include <services/ffa_svc.h>
+#include <services/spmd_svc.h>
+#include <smccc_helpers.h>
+#include "spmd_private.h"
+
+/*******************************************************************************
+ * SPM Core context information.
+ ******************************************************************************/
+static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * SPM Core attribute information read from its manifest.
+ ******************************************************************************/
+static spmc_manifest_attribute_t spmc_attrs;
+
+/*******************************************************************************
+ * SPM Core entry point information. Discovered on the primary core and reused
+ * on secondary cores.
+ ******************************************************************************/
+static entry_point_info_t *spmc_ep_info;
+
+/*******************************************************************************
+ * SPM Core context on CPU based on mpidr.
+ ******************************************************************************/
+spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr)
+{
+ int core_idx = plat_core_pos_by_mpidr(mpidr);
+
+ if (core_idx < 0) {
+ ERROR("Invalid mpidr: %llx, returned ID: %d\n", mpidr, core_idx);
+ panic();
+ }
+
+ return &spm_core_context[core_idx];
+}
+
+/*******************************************************************************
+ * SPM Core context on current CPU get helper.
+ ******************************************************************************/
+spmd_spm_core_context_t *spmd_get_context(void)
+{
+ return spmd_get_context_by_mpidr(read_mpidr());
+}
+
+/*******************************************************************************
+ * SPM Core entry point information get helper.
+ ******************************************************************************/
+entry_point_info_t *spmd_spmc_ep_info_get(void)
+{
+ return spmc_ep_info;
+}
+
+/*******************************************************************************
+ * SPM Core ID getter.
+ ******************************************************************************/
+uint16_t spmd_spmc_id_get(void)
+{
+ return spmc_attrs.spmc_id;
+}
+
+/*******************************************************************************
+ * Static function declaration.
+ ******************************************************************************/
+static int32_t spmd_init(void);
+static int spmd_spmc_init(void *pm_addr);
+static uint64_t spmd_ffa_error_return(void *handle,
+ int error_code);
+static uint64_t spmd_smc_forward(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle);
+
+/*******************************************************************************
+ * This function takes an SPMC context pointer and performs a synchronous
+ * SPMC entry.
+ ******************************************************************************/
+uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
+{
+ uint64_t rc;
+
+ assert(spmc_ctx != NULL);
+
+ cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);
+
+ /* Restore the context assigned above */
+ cm_el1_sysregs_context_restore(SECURE);
+
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_restore(SECURE);
+#endif
+ cm_set_next_eret_context(SECURE);
+
+ /* Enter SPMC */
+ rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);
+
+ /* Save secure state */
+ cm_el1_sysregs_context_save(SECURE);
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(SECURE);
+#endif
+
+ return rc;
+}
+
+/*******************************************************************************
+ * This function returns to the place where spmd_spm_core_sync_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+
+ /* Get current CPU context from SPMC context */
+ assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));
+
+ /*
+ * The SPMD must have initiated the original request through a
+ * synchronous entry into SPMC. Jump back to the original C runtime
+	 * context with the value of rc in x0.
+ */
+ spmd_spm_core_exit(ctx->c_rt_ctx, rc);
+
+ panic();
+}
+
+/*******************************************************************************
+ * Jump to the SPM Core for the first time.
+ ******************************************************************************/
+static int32_t spmd_init(void)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ uint64_t rc;
+ unsigned int linear_id = plat_my_core_pos();
+ unsigned int core_id;
+
+ VERBOSE("SPM Core init start.\n");
+ ctx->state = SPMC_STATE_ON_PENDING;
+
+ /* Set the SPMC context state on other CPUs to OFF */
+ for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
+ if (core_id != linear_id) {
+ spm_core_context[core_id].state = SPMC_STATE_OFF;
+ spm_core_context[core_id].secondary_ep.entry_point = 0UL;
+ }
+ }
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("SPMC initialisation failed 0x%llx\n", rc);
+ return 0;
+ }
+
+ ctx->state = SPMC_STATE_ON;
+
+ VERBOSE("SPM Core init end.\n");
+
+ return 1;
+}
+
+/*******************************************************************************
+ * Loads SPMC manifest and inits SPMC.
+ ******************************************************************************/
+static int spmd_spmc_init(void *pm_addr)
+{
+ spmd_spm_core_context_t *spm_ctx = spmd_get_context();
+ uint32_t ep_attr;
+ int rc;
+
+ /* Load the SPM Core manifest */
+ rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
+ if (rc != 0) {
+ WARN("No or invalid SPM Core manifest image provided by BL2\n");
+ return rc;
+ }
+
+ /*
+ * Ensure that the SPM Core version is compatible with the SPM
+ * Dispatcher version.
+ */
+ if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
+ (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
+ WARN("Unsupported FFA version (%u.%u)\n",
+ spmc_attrs.major_version, spmc_attrs.minor_version);
+ return -EINVAL;
+ }
+
+ VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
+ spmc_attrs.minor_version);
+
+ VERBOSE("SPM Core run time EL%x.\n",
+ SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);
+
+	/* Validate the SPMC ID; ensure the high bit is set */
+ if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
+ SPMC_SECURE_ID_MASK) == 0U) {
+ WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
+ return -EINVAL;
+ }
+
+ /* Validate the SPM Core execution state */
+ if ((spmc_attrs.exec_state != MODE_RW_64) &&
+ (spmc_attrs.exec_state != MODE_RW_32)) {
+ WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
+ spmc_attrs.exec_state);
+ return -EINVAL;
+ }
+
+ VERBOSE("%s%x.\n", "SPM Core execution state 0x",
+ spmc_attrs.exec_state);
+
+#if SPMD_SPM_AT_SEL2
+ /* Ensure manifest has not requested AArch32 state in S-EL2 */
+ if (spmc_attrs.exec_state == MODE_RW_32) {
+ WARN("AArch32 state at S-EL2 is not supported.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Check if S-EL2 is supported on this system if S-EL2
+ * is required for SPM
+ */
+ if (!is_armv8_4_sel2_present()) {
+ WARN("SPM Core run time S-EL2 is not supported.\n");
+ return -EINVAL;
+ }
+#endif /* SPMD_SPM_AT_SEL2 */
+
+ /* Initialise an entrypoint to set up the CPU context */
+ ep_attr = SECURE | EP_ST_ENABLE;
+ if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
+ ep_attr |= EP_EE_BIG;
+ }
+
+ SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
+
+ /*
+ * Populate SPSR for SPM Core based upon validated parameters from the
+ * manifest.
+ */
+ if (spmc_attrs.exec_state == MODE_RW_32) {
+ spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
+ SPSR_E_LITTLE,
+ DAIF_FIQ_BIT |
+ DAIF_IRQ_BIT |
+ DAIF_ABT_BIT);
+ } else {
+
+#if SPMD_SPM_AT_SEL2
+ static const uint32_t runtime_el = MODE_EL2;
+#else
+ static const uint32_t runtime_el = MODE_EL1;
+#endif
+ spmc_ep_info->spsr = SPSR_64(runtime_el,
+ MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS);
+ }
+
+ /* Initialise SPM Core context with this entry point information */
+ cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info);
+
+	/* Mark this SPMC context as off until its first synchronous entry */
+	spm_ctx->state = SPMC_STATE_OFF;
+
+ INFO("SPM Core setup done.\n");
+
+ /* Register power management hooks with PSCI */
+ psci_register_spd_pm_hook(&spmd_pm);
+
+ /* Register init function for deferred init. */
+ bl31_register_bl32_init(&spmd_init);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Initialize context of SPM Core.
+ ******************************************************************************/
+int spmd_setup(void)
+{
+ void *spmc_manifest;
+ int rc;
+
+ spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (spmc_ep_info == NULL) {
+ WARN("No SPM Core image provided by BL2 boot loader.\n");
+ return -EINVAL;
+ }
+
+ /* Under no circumstances will this parameter be 0 */
+ assert(spmc_ep_info->pc != 0ULL);
+
+ /*
+ * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
+ * be used as a manifest for the SPM Core at the next lower EL/mode.
+ */
+ spmc_manifest = (void *)spmc_ep_info->args.arg0;
+ if (spmc_manifest == NULL) {
+ ERROR("Invalid or absent SPM Core manifest.\n");
+ return -EINVAL;
+ }
+
+ /* Load manifest, init SPMC */
+ rc = spmd_spmc_init(spmc_manifest);
+ if (rc != 0) {
+ WARN("Booting device without SPM initialization.\n");
+ }
+
+ return rc;
+}
+
+/*******************************************************************************
+ * Forward SMC to the other security state
+ ******************************************************************************/
+static uint64_t spmd_smc_forward(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle)
+{
+ unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
+ unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
+
+ /* Save incoming security state */
+ cm_el1_sysregs_context_save(secure_state_in);
+#if CTX_INCLUDE_FPREGS
+ fpregs_context_save(get_fpregs_ctx(cm_get_context(secure_state_in)));
+#endif
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_save(secure_state_in);
+#endif
+
+ /* Restore outgoing security state */
+ cm_el1_sysregs_context_restore(secure_state_out);
+#if CTX_INCLUDE_FPREGS
+ fpregs_context_restore(get_fpregs_ctx(cm_get_context(secure_state_out)));
+#endif
+#if SPMD_SPM_AT_SEL2
+ cm_el2_sysregs_context_restore(secure_state_out);
+#endif
+ cm_set_next_eret_context(secure_state_out);
+
+ SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+}
+
+/*******************************************************************************
+ * Return FFA_ERROR with specified error code
+ ******************************************************************************/
+static uint64_t spmd_ffa_error_return(void *handle, int error_code)
+{
+ SMC_RET8(handle, FFA_ERROR,
+ FFA_TARGET_INFO_MBZ, error_code,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+}
+
+/*******************************************************************************
+ * spmd_check_address_in_binary_image
+ ******************************************************************************/
+bool spmd_check_address_in_binary_image(uint64_t address)
+{
+ assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));
+
+ return ((address >= spmc_attrs.load_address) &&
+ (address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
+}
+
+/******************************************************************************
+ * spmd_is_spmc_message
+ *****************************************************************************/
+static bool spmd_is_spmc_message(unsigned int ep)
+{
+ return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
+ && (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
+}
+
+/******************************************************************************
+ * spmd_handle_spmc_message
+ *****************************************************************************/
+static int spmd_handle_spmc_message(unsigned long long msg,
+ unsigned long long parm1, unsigned long long parm2,
+ unsigned long long parm3, unsigned long long parm4)
+{
+ VERBOSE("%s %llx %llx %llx %llx %llx\n", __func__,
+ msg, parm1, parm2, parm3, parm4);
+
+ switch (msg) {
+ case SPMD_DIRECT_MSG_SET_ENTRY_POINT:
+ return spmd_pm_secondary_core_set_ep(parm1, parm2, parm3);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for FFA. Each call is
+ * either forwarded to the other security state or handled by the SPM dispatcher
+ ******************************************************************************/
+uint64_t spmd_smc_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ bool secure_origin;
+ int32_t ret;
+ uint32_t input_version;
+
+ /* Determine which security state this SMC originated from */
+ secure_origin = is_caller_secure(flags);
+
+ INFO("SPM: 0x%x 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
+ smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+
+ switch (smc_fid) {
+ case FFA_ERROR:
+ /*
+ * Check if this is the first invocation of this interface on
+ * this CPU. If so, then indicate that the SPM Core initialised
+ * unsuccessfully.
+ */
+ if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
+ spmd_spm_core_sync_exit(x2);
+ }
+
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, handle);
+ break; /* not reached */
+
+ case FFA_VERSION:
+ input_version = (uint32_t)(0xFFFFFFFF & x1);
+		/*
+		 * If the caller is secure and the SPMC was initialized,
+		 * return the SPMD's FFA_VERSION.
+		 * If the caller is non-secure and the SPMC was initialized,
+		 * return the SPMC's version.
+		 * Also sanity-check "input_version" (bit 31 must be zero).
+		 */
+ if ((input_version & FFA_VERSION_BIT31_MASK) ||
+ (ctx->state == SPMC_STATE_RESET)) {
+ ret = FFA_ERROR_NOT_SUPPORTED;
+ } else if (!secure_origin) {
+ ret = MAKE_FFA_VERSION(spmc_attrs.major_version, spmc_attrs.minor_version);
+ } else {
+ ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR);
+ }
+
+ SMC_RET8(handle, ret, FFA_TARGET_INFO_MBZ, FFA_TARGET_INFO_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ);
+ break; /* not reached */
+
+ case FFA_FEATURES:
+ /*
+ * This is an optional interface. Do the minimal checks and
+ * forward to SPM Core which will handle it if implemented.
+ */
+
+ /*
+ * Check if x1 holds a valid FFA fid. This is an
+ * optimization.
+ */
+ if (!is_ffa_fid(x1)) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Forward SMC from Normal world to the SPM Core */
+ if (!secure_origin) {
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, handle);
+ }
+
+ /*
+ * Return success if call was from secure world i.e. all
+ * FFA functions are supported. This is essentially a
+ * nop.
+ */
+ SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+
+ break; /* not reached */
+
+ case FFA_ID_GET:
+ /*
+ * Returns the ID of the calling FFA component.
+ */
+ if (!secure_origin) {
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ }
+
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+
+ break; /* not reached */
+
+ case FFA_MSG_SEND_DIRECT_REQ_SMC32:
+ if (secure_origin && spmd_is_spmc_message(x1)) {
+ ret = spmd_handle_spmc_message(x3, x4,
+ SMC_GET_GP(handle, CTX_GPREG_X5),
+ SMC_GET_GP(handle, CTX_GPREG_X6),
+ SMC_GET_GP(handle, CTX_GPREG_X7));
+
+ SMC_RET8(handle, FFA_SUCCESS_SMC32,
+ FFA_TARGET_INFO_MBZ, ret,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ);
+ } else {
+ /* Forward direct message to the other world */
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, handle);
+ }
+ break; /* Not reached */
+
+ case FFA_MSG_SEND_DIRECT_RESP_SMC32:
+ if (secure_origin && spmd_is_spmc_message(x1)) {
+ spmd_spm_core_sync_exit(0);
+ } else {
+ /* Forward direct message to the other world */
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, handle);
+ }
+ break; /* Not reached */
+
+ case FFA_RX_RELEASE:
+ case FFA_RXTX_MAP_SMC32:
+ case FFA_RXTX_MAP_SMC64:
+ case FFA_RXTX_UNMAP:
+ case FFA_PARTITION_INFO_GET:
+ /*
+ * Should not be allowed to forward FFA_PARTITION_INFO_GET
+ * from Secure world to Normal world
+ *
+ * Fall through to forward the call to the other world
+ */
+ case FFA_MSG_RUN:
+ /* This interface must be invoked only by the Normal world */
+
+ if (secure_origin) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ /* Fall through to forward the call to the other world */
+ case FFA_MSG_SEND:
+ case FFA_MSG_SEND_DIRECT_REQ_SMC64:
+ case FFA_MSG_SEND_DIRECT_RESP_SMC64:
+ case FFA_MEM_DONATE_SMC32:
+ case FFA_MEM_DONATE_SMC64:
+ case FFA_MEM_LEND_SMC32:
+ case FFA_MEM_LEND_SMC64:
+ case FFA_MEM_SHARE_SMC32:
+ case FFA_MEM_SHARE_SMC64:
+ case FFA_MEM_RETRIEVE_REQ_SMC32:
+ case FFA_MEM_RETRIEVE_REQ_SMC64:
+ case FFA_MEM_RETRIEVE_RESP:
+ case FFA_MEM_RELINQUISH:
+ case FFA_MEM_RECLAIM:
+ case FFA_SUCCESS_SMC32:
+ case FFA_SUCCESS_SMC64:
+ /*
+ * TODO: Assume that no requests originate from EL3 at the
+ * moment. This will change if a SP service is required in
+ * response to secure interrupts targeted to EL3. Until then
+ * simply forward the call to the Normal world.
+ */
+
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, handle);
+ break; /* not reached */
+
+ case FFA_MSG_WAIT:
+ /*
+ * Check if this is the first invocation of this interface on
+ * this CPU from the Secure world. If so, then indicate that the
+ * SPM Core initialised successfully.
+ */
+ if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
+ spmd_spm_core_sync_exit(0);
+ }
+
+ /* Fall through to forward the call to the other world */
+
+ case FFA_MSG_YIELD:
+ /* This interface must be invoked only by the Secure world */
+ if (!secure_origin) {
+ return spmd_ffa_error_return(handle,
+ FFA_ERROR_NOT_SUPPORTED);
+ }
+
+ return spmd_smc_forward(smc_fid, secure_origin,
+ x1, x2, x3, x4, handle);
+ break; /* not reached */
+
+ default:
+ WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
+ return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
+ }
+}
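
For reference, the FFA_VERSION word layout used above is: bit 31 must be zero,
bits [30:16] carry the major version and bits [15:0] the minor version. A
hedged sketch of the normal-world caller's side, assuming an smc() helper that
returns x0:

uint32_t ver = (uint32_t)smc(FFA_VERSION, MAKE_FFA_VERSION(1, 0),
			     0, 0, 0, 0, 0, 0);
if ((ver & FFA_VERSION_BIT31_MASK) != 0U) {
	/* negative response, i.e. FFA_ERROR_NOT_SUPPORTED */
} else {
	uint32_t major = (ver >> 16) & 0x7fffU;	/* bits [30:16] */
	uint32_t minor = ver & 0xffffU;		/* bits [15:0] */
}
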
diff --git a/services/std_svc/spmd/spmd_pm.c b/services/std_svc/spmd/spmd_pm.c
new file mode 100644
index 000000000..5433e5d25
--- /dev/null
+++ b/services/std_svc/spmd/spmd_pm.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include "spmd_private.h"
+
+/*******************************************************************************
+ * spmd_build_spmc_message
+ *
+ * Builds an SPMD to SPMC direct message request.
+ ******************************************************************************/
+static void spmd_build_spmc_message(gp_regs_t *gpregs, unsigned long long message)
+{
+ write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
+ write_ctx_reg(gpregs, CTX_GPREG_X1,
+ (SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
+ spmd_spmc_id_get());
+ write_ctx_reg(gpregs, CTX_GPREG_X2, FFA_PARAM_MBZ);
+ write_ctx_reg(gpregs, CTX_GPREG_X3, message);
+}
+
+/*******************************************************************************
+ * spmd_pm_secondary_core_set_ep
+ ******************************************************************************/
+int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
+ uintptr_t entry_point, unsigned long long context)
+{
+ int id = plat_core_pos_by_mpidr(mpidr);
+
+ if ((id < 0) || ((unsigned int)id >= PLATFORM_CORE_COUNT)) {
+ ERROR("%s inconsistent MPIDR (%llx)\n", __func__, mpidr);
+ return -EINVAL;
+ }
+
+ /*
+ * Check entry_point address is a PA within
+ * load_address <= entry_point < load_address + binary_size
+ */
+ if (!spmd_check_address_in_binary_image(entry_point)) {
+ ERROR("%s entry point is not within image boundaries (%llx)\n",
+ __func__, mpidr);
+ return -EINVAL;
+ }
+
+ spmd_spm_core_context_t *ctx = spmd_get_context_by_mpidr(mpidr);
+ spmd_pm_secondary_ep_t *secondary_ep = &ctx->secondary_ep;
+ if (secondary_ep->locked) {
+ ERROR("%s entry locked (%llx)\n", __func__, mpidr);
+ return -EINVAL;
+ }
+
+ /* Fill new entry to corresponding secondary core id and lock it */
+ secondary_ep->entry_point = entry_point;
+ secondary_ep->context = context;
+ secondary_ep->locked = true;
+
+ VERBOSE("%s %d %llx %lx %llx\n",
+ __func__, id, mpidr, entry_point, context);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This CPU has been turned on. Enter the SPMC to initialise S-EL1 or S-EL2. As
+ * part of its initialisation path, the SPMC will initialise any SPs it
+ * manages. Entry into the SPMC is done after initialising minimal
+ * architectural state that guarantees safe execution.
+ ******************************************************************************/
+static void spmd_cpu_on_finish_handler(u_register_t unused)
+{
+ entry_point_info_t *spmc_ep_info = spmd_spmc_ep_info_get();
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ unsigned int linear_id = plat_my_core_pos();
+ uint64_t rc;
+
+ assert(ctx != NULL);
+ assert(ctx->state != SPMC_STATE_ON);
+ assert(spmc_ep_info != NULL);
+
+ /*
+ * TODO: this might require locking the spmc_ep_info structure,
+ * or provisioning one structure per cpu
+ */
+ if (ctx->secondary_ep.entry_point == 0UL) {
+ goto exit;
+ }
+
+ spmc_ep_info->pc = ctx->secondary_ep.entry_point;
+ cm_setup_context(&ctx->cpu_ctx, spmc_ep_info);
+ write_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx), CTX_GPREG_X0,
+ ctx->secondary_ep.context);
+
+ /* Mark CPU as initiating ON operation */
+ ctx->state = SPMC_STATE_ON_PENDING;
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%llu) on CPU%u\n", __func__, rc,
+ linear_id);
+ ctx->state = SPMC_STATE_OFF;
+ return;
+ }
+
+exit:
+ ctx->state = SPMC_STATE_ON;
+
+ VERBOSE("CPU %u on!\n", linear_id);
+}
+
+/*******************************************************************************
+ * spmd_cpu_off_handler
+ ******************************************************************************/
+static int32_t spmd_cpu_off_handler(u_register_t unused)
+{
+ spmd_spm_core_context_t *ctx = spmd_get_context();
+ unsigned int linear_id = plat_my_core_pos();
+ int64_t rc;
+
+ assert(ctx != NULL);
+ assert(ctx->state != SPMC_STATE_OFF);
+
+ if (ctx->secondary_ep.entry_point == 0UL) {
+ goto exit;
+ }
+
+ /* Build an SPMD to SPMC direct message request. */
+ spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx), PSCI_CPU_OFF);
+
+ rc = spmd_spm_core_sync_entry(ctx);
+ if (rc != 0ULL) {
+ ERROR("%s failed (%llu) on CPU%u\n", __func__, rc, linear_id);
+ }
+
+ /* TODO expect FFA_DIRECT_MSG_RESP returned from SPMC */
+
+exit:
+ ctx->state = SPMC_STATE_OFF;
+
+ VERBOSE("CPU %u off!\n", linear_id);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Structure populated by the SPM Dispatcher to perform any bookkeeping before
+ * PSCI executes a power mgmt. operation.
+ ******************************************************************************/
+const spd_pm_ops_t spmd_pm = {
+ .svc_on_finish = spmd_cpu_on_finish_handler,
+ .svc_off = spmd_cpu_off_handler
+};
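
The SET_ENTRY_POINT message handled above reaches the SPMD as an FF-A direct
request, routed through the FFA_MSG_SEND_DIRECT_REQ_SMC32 case of
spmd_smc_handler(). A hedged sketch of the SPMC-side call, assuming an
8-argument smc() helper and illustrative variable names:

smc(FFA_MSG_SEND_DIRECT_REQ_SMC32,
    ((uint64_t)spmc_id << FFA_DIRECT_MSG_SOURCE_SHIFT) |
	SPMD_DIRECT_MSG_ENDPOINT_ID,	/* x1: source | destination */
    0,					/* x2: MBZ */
    SPMD_DIRECT_MSG_SET_ENTRY_POINT,	/* x3: message */
    target_mpidr,			/* x4: core to configure */
    secondary_ep_pa,			/* x5: entry point (PA in image) */
    context_id,				/* x6: context handed back in x0 */
    0);					/* x7: unused */
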
diff --git a/services/std_svc/spmd/spmd_private.h b/services/std_svc/spmd/spmd_private.h
new file mode 100644
index 000000000..eff0dd9f2
--- /dev/null
+++ b/services/std_svc/spmd/spmd_private.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPMD_PRIVATE_H
+#define SPMD_PRIVATE_H
+
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SPMD_C_RT_CTX_X19 0x0
+#define SPMD_C_RT_CTX_X20 0x8
+#define SPMD_C_RT_CTX_X21 0x10
+#define SPMD_C_RT_CTX_X22 0x18
+#define SPMD_C_RT_CTX_X23 0x20
+#define SPMD_C_RT_CTX_X24 0x28
+#define SPMD_C_RT_CTX_X25 0x30
+#define SPMD_C_RT_CTX_X26 0x38
+#define SPMD_C_RT_CTX_X27 0x40
+#define SPMD_C_RT_CTX_X28 0x48
+#define SPMD_C_RT_CTX_X29 0x50
+#define SPMD_C_RT_CTX_X30 0x58
+
+#define SPMD_C_RT_CTX_SIZE 0x60
+#define SPMD_C_RT_CTX_ENTRIES (SPMD_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLER__
+#include <stdint.h>
+#include <lib/psci/psci_lib.h>
+#include <plat/common/platform.h>
+#include <services/ffa_svc.h>
+
+typedef enum spmc_state {
+ SPMC_STATE_RESET = 0,
+ SPMC_STATE_OFF,
+ SPMC_STATE_ON_PENDING,
+ SPMC_STATE_ON
+} spmc_state_t;
+
+typedef struct spmd_pm_secondary_ep {
+ uintptr_t entry_point;
+ uintptr_t context;
+ bool locked;
+} spmd_pm_secondary_ep_t;
+
+/*
+ * Data structure used by the SPM dispatcher (SPMD) in EL3 to track context of
+ * the SPM core (SPMC) at the next lower EL.
+ */
+typedef struct spmd_spm_core_context {
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+ spmc_state_t state;
+ spmd_pm_secondary_ep_t secondary_ep;
+} spmd_spm_core_context_t;
+
+/*
+ * Reserved ID for the NS physical FF-A endpoint.
+ */
+#define FFA_NS_ENDPOINT_ID U(0)
+
+/* Mask and shift to check valid secure FF-A Endpoint ID. */
+#define SPMC_SECURE_ID_MASK U(1)
+#define SPMC_SECURE_ID_SHIFT U(15)
+
+#define SPMD_DIRECT_MSG_ENDPOINT_ID U(FFA_ENDPOINT_ID_MAX - 1)
+#define SPMD_DIRECT_MSG_SET_ENTRY_POINT U(1)
+
+/* Functions used to enter/exit SPMC synchronously */
+uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *ctx);
+__dead2 void spmd_spm_core_sync_exit(uint64_t rc);
+
+/* Assembly helpers */
+uint64_t spmd_spm_core_enter(uint64_t *c_rt_ctx);
+void __dead2 spmd_spm_core_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+/* SPMD SPD power management handlers */
+extern const spd_pm_ops_t spmd_pm;
+
+/* SPMC entry point information helper */
+entry_point_info_t *spmd_spmc_ep_info_get(void);
+
+/* SPMC ID getter */
+uint16_t spmd_spmc_id_get(void);
+
+/* SPMC context on CPU based on mpidr */
+spmd_spm_core_context_t *spmd_get_context_by_mpidr(uint64_t mpidr);
+
+/* SPMC context on current CPU get helper */
+spmd_spm_core_context_t *spmd_get_context(void);
+
+int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
+ uintptr_t entry_point, unsigned long long context);
+bool spmd_check_address_in_binary_image(uint64_t address);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* SPMD_PRIVATE_H */
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 7787a2fa2..23f13ab82 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -15,7 +15,9 @@
#include <lib/runtime_instr.h>
#include <services/sdei.h>
#include <services/spm_mm_svc.h>
+#include <services/spmd_svc.h>
#include <services/std_svc.h>
+#include <services/trng_svc.h>
#include <smccc_helpers.h>
#include <tools_share/uuid.h>
@@ -51,11 +53,19 @@ static int32_t std_svc_setup(void)
}
#endif
+#if defined(SPD_spmd)
+ if (spmd_setup() != 0) {
+ ret = 1;
+ }
+#endif
+
#if SDEI_SUPPORT
/* SDEI initialisation */
sdei_init();
#endif
+ trng_setup();
+
return ret;
}
@@ -114,6 +124,17 @@ static uintptr_t std_svc_smc_handler(uint32_t smc_fid,
}
#endif
+#if defined(SPD_spmd)
+ /*
+ * Dispatch FFA calls to the FFA SMC handler implemented by the SPM
+ * dispatcher and return its return value
+ */
+ if (is_ffa_fid(smc_fid)) {
+ return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
+ handle, flags);
+ }
+#endif
+
#if SDEI_SUPPORT
if (is_sdei_fid(smc_fid)) {
return sdei_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
@@ -121,6 +142,13 @@ static uintptr_t std_svc_smc_handler(uint32_t smc_fid,
}
#endif
+#if TRNG_SUPPORT
+ if (is_trng_fid(smc_fid)) {
+ return trng_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
+ flags);
+ }
+#endif
+
switch (smc_fid) {
case ARM_STD_SVC_CALL_COUNT:
/*
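
The dispatch above keys on FID-range predicates such as is_ffa_fid(). As a
rough sketch of that check (the exact window constants live in
include/services/ffa_svc.h and are assumed here):

/* Sketch: FF-A function numbers occupy a contiguous window. */
static inline bool is_ffa_fid(uint64_t smc_fid)
{
	uint64_t fnum = smc_fid & FUNCID_NUM_MASK;

	return (fnum >= FFA_FNUM_MIN_VALUE) && (fnum <= FFA_FNUM_MAX_VALUE);
}
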
diff --git a/services/std_svc/trng/trng_entropy_pool.c b/services/std_svc/trng/trng_entropy_pool.c
new file mode 100644
index 000000000..ac13b1d7a
--- /dev/null
+++ b/services/std_svc/trng/trng_entropy_pool.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2021, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <lib/spinlock.h>
+#include <plat/common/plat_trng.h>
+
+/*
+ * # Entropy pool
+ * Note that the TRNG firmware interface can request up to 192 bits of entropy
+ * in a single call, i.e. three 64-bit words per call. The pool holds 4 words
+ * so that when 1-63 leftover bits remain in the pool and a full 192-bit
+ * request arrives, the leftover bits do not have to be thrown away.
+ */
+#define WORDS_IN_POOL (4)
+static uint64_t entropy[WORDS_IN_POOL];
+/* index in bits of the first bit of usable entropy */
+static uint32_t entropy_bit_index;
+/* the number of valid bits in the entropy pool */
+static uint32_t entropy_bit_size;
+
+static spinlock_t trng_pool_lock;
+
+#define BITS_PER_WORD (sizeof(entropy[0]) * 8)
+#define BITS_IN_POOL (WORDS_IN_POOL * BITS_PER_WORD)
+#define ENTROPY_MIN_WORD (entropy_bit_index / BITS_PER_WORD)
+#define ENTROPY_FREE_BIT (entropy_bit_size + entropy_bit_index)
+#define _ENTROPY_FREE_WORD (ENTROPY_FREE_BIT / BITS_PER_WORD)
+#define ENTROPY_FREE_INDEX (_ENTROPY_FREE_WORD % WORDS_IN_POOL)
+/* ENTROPY_WORD_INDEX(0) includes leftover bits in the lower bits */
+#define ENTROPY_WORD_INDEX(i) ((ENTROPY_MIN_WORD + i) % WORDS_IN_POOL)
+
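+/*
+ * Worked example of the index arithmetic above (illustrative numbers, not
+ * taken from the code): with entropy_bit_index = 70 and
+ * entropy_bit_size = 60, ENTROPY_MIN_WORD = 70 / 64 = 1 (usable entropy
+ * starts in word 1), ENTROPY_FREE_BIT = 130, _ENTROPY_FREE_WORD = 2 and
+ * ENTROPY_FREE_INDEX = 2, so the next word fetched from the platform lands
+ * in entropy[2].
+ */
+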
+/*
+ * Fill the entropy pool until it holds at least nbits of entropy.
+ * Returns true once the pool is filled, and false if the entropy source
+ * runs out of entropy before the request can be satisfied.
+ * Assumes the pool lock is held by the caller.
+ */
+static bool trng_fill_entropy(uint32_t nbits)
+{
+ while (nbits > entropy_bit_size) {
+ bool valid = plat_get_entropy(&entropy[ENTROPY_FREE_INDEX]);
+
+ if (valid) {
+ entropy_bit_size += BITS_PER_WORD;
+ assert(entropy_bit_size <= BITS_IN_POOL);
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
+/*
+ * Pack entropy into the out buffer, taking the pool lock and refilling the
+ * pool as needed.
+ * Returns true on success, false on failure.
+ *
+ * Note: out must have enough space for nbits of entropy.
+ */
+bool trng_pack_entropy(uint32_t nbits, uint64_t *out)
+{
+ bool success = true;
+
+ spin_lock(&trng_pool_lock);
+
+ if (!trng_fill_entropy(nbits)) {
+ success = false;
+ goto out;
+ }
+
+ const unsigned int rshift = entropy_bit_index % BITS_PER_WORD;
+ const unsigned int lshift = BITS_PER_WORD - rshift;
+ const int to_fill = ((nbits + BITS_PER_WORD - 1) / BITS_PER_WORD);
+ int word_i;
+
+ for (word_i = 0; word_i < to_fill; word_i++) {
+ /*
+ * Repack the entropy from the pool into the passed in out
+ * buffer. This takes the lower bits from the valid upper bits
+ * of word_i and the upper bits from the lower bits of
+ * (word_i + 1).
+ *
+		 * The diagram below illustrates the operation. Note: `e`
+		 * represents valid entropy, ` ` represents invalid bits (not
+		 * entropy) and `x` represents valid entropy that must not end
+		 * up in the packed word.
+ *
+ * |---------entropy pool----------|
+ * C var |--(word_i + 1)-|----word_i-----|
+ * bit idx |7 6 5 4 3 2 1 0|7 6 5 4 3 2 1 0|
+ * [x,x,e,e,e,e,e,e|e,e, , , , , , ]
+ * | [e,e,e,e,e,e,e,e] |
+ * | |--out[word_i]--| |
+ * lshift|---| |--rshift---|
+ *
+ * ==== Which is implemented as ====
+ *
+ * |---------entropy pool----------|
+ * C var |--(word_i + 1)-|----word_i-----|
+ * bit idx |7 6 5 4 3 2 1 0|7 6 5 4 3 2 1 0|
+ * [x,x,e,e,e,e,e,e|e,e, , , , , , ]
+ * C expr << lshift >> rshift
+ * bit idx 5 4 3 2 1 0 7 6
+ * [e,e,e,e,e,e,0,0|0,0,0,0,0,0,e,e]
+ * ==== bit-wise or ====
+ * 5 4 3 2 1 0 7 6
+ * [e,e,e,e,e,e,e,e]
+ */
+ out[word_i] = 0;
+ out[word_i] |= entropy[ENTROPY_WORD_INDEX(word_i)] >> rshift;
+
+ /*
+		 * A shift by 64 bits is undefined behaviour in C; AArch64
+		 * hardware would treat it as a shift by 0 and pull in the
+		 * whole next word. So when the shift amount equals
+		 * BITS_PER_WORD, the next word of entropy must not be
+		 * included, and the `|=` operation is skipped.
+ */
+ if (lshift != BITS_PER_WORD) {
+ out[word_i] |= entropy[ENTROPY_WORD_INDEX(word_i + 1)]
+ << lshift;
+ }
+ }
+	/* Mask off bits of the last word beyond nbits. Skip this when nbits
+	 * is word-aligned: the shift would be by BITS_PER_WORD, which is
+	 * undefined behaviour. */
+	if ((nbits % BITS_PER_WORD) != 0U) {
+		const uint64_t mask =
+			~0ULL >> (BITS_PER_WORD - (nbits % BITS_PER_WORD));
+
+		out[to_fill - 1] &= mask;
+	}
+
+ entropy_bit_index = (entropy_bit_index + nbits) % BITS_IN_POOL;
+ entropy_bit_size -= nbits;
+
+out:
+ spin_unlock(&trng_pool_lock);
+
+ return success;
+}
+
+void trng_entropy_pool_setup(void)
+{
+ int i;
+
+ for (i = 0; i < WORDS_IN_POOL; i++) {
+ entropy[i] = 0;
+ }
+ entropy_bit_index = 0;
+ entropy_bit_size = 0;
+}
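
From a caller's perspective, trng_pack_entropy() is a blocking read from the
pool. A minimal usage sketch, assuming the caller wants 96 bits (the out
buffer must hold the requested bits rounded up to whole 64-bit words):

    uint64_t buf[2] = {0};	/* ceil(96 / 64) = 2 words */

    if (trng_pack_entropy(96U, buf)) {
    	/* buf[0] holds bits 0-63, buf[1] holds bits 64-95; the final
    	 * masking step clears the top 32 bits of buf[1]. */
    } else {
    	/* The platform entropy source ran dry: report TRNG_E_NO_ENTROPY */
    }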
diff --git a/services/std_svc/trng/trng_entropy_pool.h b/services/std_svc/trng/trng_entropy_pool.h
new file mode 100644
index 000000000..fab2367d2
--- /dev/null
+++ b/services/std_svc/trng/trng_entropy_pool.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TRNG_ENTROPY_POOL_H
+#define TRNG_ENTROPY_POOL_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+bool trng_pack_entropy(uint32_t nbits, uint64_t *out);
+void trng_entropy_pool_setup(void);
+
+#endif /* TRNG_ENTROPY_POOL_H */
diff --git a/services/std_svc/trng/trng_main.c b/services/std_svc/trng/trng_main.c
new file mode 100644
index 000000000..38aa64997
--- /dev/null
+++ b/services/std_svc/trng/trng_main.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <arch_features.h>
+#include <common/debug.h>
+#include <lib/smccc.h>
+#include <services/trng_svc.h>
+#include <smccc_helpers.h>
+
+#include <plat/common/plat_trng.h>
+
+#include "trng_entropy_pool.h"
+
+static const uuid_t uuid_null;
+
+/* Handle the RND call in the SMC 32-bit convention */
+static uintptr_t trng_rnd32(uint32_t nbits, void *handle)
+{
+ uint32_t mask = ~0U;
+ uint64_t ent[2];
+
+ if (nbits == 0U || nbits > 96U) {
+ SMC_RET1(handle, TRNG_E_INVALID_PARAMS);
+ }
+
+ if (!trng_pack_entropy(nbits, &ent[0])) {
+ SMC_RET1(handle, TRNG_E_NO_ENTROPY);
+ }
+
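+	/* Mask off the higher bits if only part of the register is requested */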
+ if ((nbits % 32U) != 0U) {
+ mask >>= 32U - (nbits % 32U);
+ }
+
+ switch ((nbits - 1U) / 32U) {
+ case 0:
+ SMC_RET4(handle, TRNG_E_SUCCESS, 0, 0, ent[0] & mask);
+ break; /* unreachable */
+ case 1:
+ SMC_RET4(handle, TRNG_E_SUCCESS, 0, (ent[0] >> 32) & mask,
+ ent[0] & 0xFFFFFFFF);
+ break; /* unreachable */
+ case 2:
+ SMC_RET4(handle, TRNG_E_SUCCESS, ent[1] & mask,
+ (ent[0] >> 32) & 0xFFFFFFFF, ent[0] & 0xFFFFFFFF);
+ break; /* unreachable */
+ default:
+ SMC_RET1(handle, TRNG_E_INVALID_PARAMS);
+ break; /* unreachable */
+ }
+}
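+
+/*
+ * Illustration (not normative): for a 96-bit SMC32 request the case 2
+ * branch above returns x1 = entropy bits 64-95, x2 = bits 32-63 and
+ * x3 = bits 0-31, with TRNG_E_SUCCESS in x0.
+ */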
+
+/* Handle the RND call in the SMC 64-bit convention */
+static uintptr_t trng_rnd64(uint32_t nbits, void *handle)
+{
+ uint64_t mask = ~0ULL;
+ uint64_t ent[3];
+
+ if (nbits == 0U || nbits > 192U) {
+ SMC_RET1(handle, TRNG_E_INVALID_PARAMS);
+ }
+
+ if (!trng_pack_entropy(nbits, &ent[0])) {
+ SMC_RET1(handle, TRNG_E_NO_ENTROPY);
+ }
+
+	/* Mask off the higher bits if only part of the register is requested */
+ if ((nbits % 64U) != 0U) {
+ mask >>= 64U - (nbits % 64U);
+ }
+
+ switch ((nbits - 1U) / 64U) {
+ case 0:
+ SMC_RET4(handle, TRNG_E_SUCCESS, 0, 0, ent[0] & mask);
+ break; /* unreachable */
+ case 1:
+ SMC_RET4(handle, TRNG_E_SUCCESS, 0, ent[1] & mask, ent[0]);
+ break; /* unreachable */
+ case 2:
+ SMC_RET4(handle, TRNG_E_SUCCESS, ent[2] & mask, ent[1], ent[0]);
+ break; /* unreachable */
+ default:
+ SMC_RET1(handle, TRNG_E_INVALID_PARAMS);
+ break; /* unreachable */
+ }
+}
+
+void trng_setup(void)
+{
+ trng_entropy_pool_setup();
+ plat_entropy_setup();
+}
+
+/* Predicate: true if the function ID belongs to the TRNG service */
+bool is_trng_fid(uint32_t smc_fid)
+{
+ return ((smc_fid == ARM_TRNG_VERSION) ||
+ (smc_fid == ARM_TRNG_FEATURES) ||
+ (smc_fid == ARM_TRNG_GET_UUID) ||
+ (smc_fid == ARM_TRNG_RND32) ||
+ (smc_fid == ARM_TRNG_RND64));
+}
+
+uintptr_t trng_smc_handler(uint32_t smc_fid, u_register_t x1, u_register_t x2,
+ u_register_t x3, u_register_t x4, void *cookie,
+ void *handle, u_register_t flags)
+{
+ if (!memcmp(&plat_trng_uuid, &uuid_null, sizeof(uuid_t))) {
+ SMC_RET1(handle, TRNG_E_NOT_IMPLEMENTED);
+ }
+
+ switch (smc_fid) {
+ case ARM_TRNG_VERSION:
+ SMC_RET1(handle, MAKE_SMCCC_VERSION(
+ TRNG_VERSION_MAJOR, TRNG_VERSION_MINOR
+ ));
+ break; /* unreachable */
+ case ARM_TRNG_FEATURES:
+ if (is_trng_fid((uint32_t)x1)) {
+ SMC_RET1(handle, TRNG_E_SUCCESS);
+ } else {
+ SMC_RET1(handle, TRNG_E_NOT_SUPPORTED);
+ }
+ break; /* unreachable */
+ case ARM_TRNG_GET_UUID:
+ SMC_UUID_RET(handle, plat_trng_uuid);
+ break; /* unreachable */
+ case ARM_TRNG_RND32:
+ return trng_rnd32((uint32_t)x1, handle);
+ case ARM_TRNG_RND64:
+ return trng_rnd64((uint32_t)x1, handle);
+ default:
+		WARN("Unimplemented TRNG Service Call: 0x%x\n", smc_fid);
+ SMC_RET1(handle, TRNG_E_NOT_IMPLEMENTED);
+ break; /* unreachable */
+ }
+}
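
The service is only as strong as the platform hooks it consumes. A minimal
sketch of the two hooks used above, assuming a hypothetical memory-mapped
hardware RNG at HW_RNG_BASE (the real prototypes are declared in
plat/common/plat_trng.h, and the platform must also define plat_trng_uuid):

    #include <stdbool.h>
    #include <stdint.h>

    #include <lib/mmio.h>

    #define HW_RNG_BASE	0x50000000UL	/* hypothetical device address */
    #define HW_RNG_STATUS	(HW_RNG_BASE + 0x00UL)
    #define HW_RNG_DATA	(HW_RNG_BASE + 0x08UL)

    void plat_entropy_setup(void)
    {
    	/* Device-specific: enable clocks, seed or start the generator */
    }

    bool plat_get_entropy(uint64_t *out)
    {
    	/* Hypothetical "data valid" bit; bail out if no entropy is ready */
    	if ((mmio_read_32(HW_RNG_STATUS) & 1U) == 0U) {
    		return false;
    	}
    	*out = mmio_read_64(HW_RNG_DATA);
    	return true;
    }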