Diffstat (limited to 'bl32')
-rw-r--r--  bl32/sp_min/sp_min.ld.S           | 141
-rw-r--r--  bl32/sp_min/sp_min.mk             |  11
-rw-r--r--  bl32/tsp/aarch64/tsp_entrypoint.S |   2
-rw-r--r--  bl32/tsp/aarch64/tsp_request.S    |  13
-rw-r--r--  bl32/tsp/tsp.ld.S                 |  70
-rw-r--r--  bl32/tsp/tsp_main.c               |  38
-rw-r--r--  bl32/tsp/tsp_private.h            |   4
7 files changed, 65 insertions(+), 214 deletions(-)
diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S
index 6997a7fdb..f202c7ada 100644
--- a/bl32/sp_min/sp_min.ld.S
+++ b/bl32/sp_min/sp_min.ld.S
@@ -1,11 +1,10 @@
/*
- * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <platform_def.h>
-
+#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
OUTPUT_FORMAT(elf32-littlearm)
@@ -23,14 +22,14 @@ MEMORY {
SECTIONS
{
. = BL32_BASE;
- ASSERT(. == ALIGN(PAGE_SIZE),
- "BL32_BASE address is not aligned on a page boundary.")
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "BL32_BASE address is not aligned on a page boundary.")
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
*entrypoint.o(.text*)
- *(.text*)
+ *(SORT_BY_ALIGNMENT(.text*))
*(.vectors)
. = ALIGN(PAGE_SIZE);
__TEXT_END__ = .;
@@ -47,30 +46,9 @@ SECTIONS
.rodata . : {
__RODATA_START__ = .;
- *(.rodata*)
-
- /* Ensure 4-byte alignment for descriptors and ensure inclusion */
- . = ALIGN(4);
- __RT_SVC_DESCS_START__ = .;
- KEEP(*(rt_svc_descs))
- __RT_SVC_DESCS_END__ = .;
-
-#if ENABLE_PMF
- /* Ensure 4-byte alignment for descriptors and ensure inclusion */
- . = ALIGN(4);
- __PMF_SVC_DESCS_START__ = .;
- KEEP(*(pmf_svc_descs))
- __PMF_SVC_DESCS_END__ = .;
-#endif /* ENABLE_PMF */
+ *(SORT_BY_ALIGNMENT(.rodata*))
- /*
- * Ensure 4-byte alignment for cpu_ops so that its fields are also
- * aligned. Also ensure cpu_ops inclusion.
- */
- . = ALIGN(4);
- __CPU_OPS_START__ = .;
- KEEP(*(cpu_ops))
- __CPU_OPS_END__ = .;
+ RODATA_COMMON
/* Place pubsub sections for events */
. = ALIGN(8);
@@ -83,23 +61,10 @@ SECTIONS
ro . : {
__RO_START__ = .;
*entrypoint.o(.text*)
- *(.text*)
- *(.rodata*)
+ *(SORT_BY_ALIGNMENT(.text*))
+ *(SORT_BY_ALIGNMENT(.rodata*))
- /* Ensure 4-byte alignment for descriptors and ensure inclusion */
- . = ALIGN(4);
- __RT_SVC_DESCS_START__ = .;
- KEEP(*(rt_svc_descs))
- __RT_SVC_DESCS_END__ = .;
-
- /*
- * Ensure 4-byte alignment for cpu_ops so that its fields are also
- * aligned. Also ensure cpu_ops inclusion.
- */
- . = ALIGN(4);
- __CPU_OPS_START__ = .;
- KEEP(*(cpu_ops))
- __CPU_OPS_END__ = .;
+ RODATA_COMMON
/* Place pubsub sections for events */
. = ALIGN(8);
@@ -111,7 +76,7 @@ SECTIONS
/*
* Memory page(s) mapped to this section will be marked as
* read-only, executable. No RW data from the next section must
- * creep in. Ensure the rest of the current memory block is unused.
+ * creep in. Ensure the rest of the current memory page is unused.
*/
. = ALIGN(PAGE_SIZE);
__RO_END__ = .;
@@ -126,85 +91,15 @@ SECTIONS
*/
__RW_START__ = . ;
- .data . : {
- __DATA_START__ = .;
- *(.data*)
- __DATA_END__ = .;
- } >RAM
+ DATA_SECTION >RAM
#ifdef BL32_PROGBITS_LIMIT
ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.")
#endif
- stacks (NOLOAD) : {
- __STACKS_START__ = .;
- *(tzfw_normal_stacks)
- __STACKS_END__ = .;
- } >RAM
-
- /*
- * The .bss section gets initialised to 0 at runtime.
- * Its base address should be 8-byte aligned for better performance of the
- * zero-initialization code.
- */
- .bss (NOLOAD) : ALIGN(8) {
- __BSS_START__ = .;
- *(.bss*)
- *(COMMON)
-#if !USE_COHERENT_MEM
- /*
- * Bakery locks are stored in normal .bss memory
- *
- * Each lock's data is spread across multiple cache lines, one per CPU,
- * but multiple locks can share the same cache line.
- * The compiler will allocate enough memory for one CPU's bakery locks,
- * the remaining cache lines are allocated by the linker script
- */
- . = ALIGN(CACHE_WRITEBACK_GRANULE);
- __BAKERY_LOCK_START__ = .;
- __PERCPU_BAKERY_LOCK_START__ = .;
- *(bakery_lock)
- . = ALIGN(CACHE_WRITEBACK_GRANULE);
- __PERCPU_BAKERY_LOCK_END__ = .;
- __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__);
- . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
- __BAKERY_LOCK_END__ = .;
-#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
- ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
- "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
-#endif
-#endif
-
-#if ENABLE_PMF
- /*
- * Time-stamps are stored in normal .bss memory
- *
- * The compiler will allocate enough memory for one CPU's time-stamps,
- * the remaining memory for other CPUs is allocated by the
- * linker script
- */
- . = ALIGN(CACHE_WRITEBACK_GRANULE);
- __PMF_TIMESTAMP_START__ = .;
- KEEP(*(pmf_timestamp_array))
- . = ALIGN(CACHE_WRITEBACK_GRANULE);
- __PMF_PERCPU_TIMESTAMP_END__ = .;
- __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
- . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
- __PMF_TIMESTAMP_END__ = .;
-#endif /* ENABLE_PMF */
-
- __BSS_END__ = .;
- } >RAM
-
- /*
- * The xlat_table section is for full, aligned page tables (4K).
- * Removing them from .bss avoids forcing 4K alignment on
- * the .bss section. The tables are initialized to zero by the translation
- * tables library.
- */
- xlat_table (NOLOAD) : {
- *(xlat_table)
- } >RAM
+ STACK_SECTION >RAM
+ BSS_SECTION >RAM
+ XLAT_TABLE_SECTION >RAM
__BSS_SIZE__ = SIZEOF(.bss);
@@ -239,10 +134,12 @@ SECTIONS
#endif
/*
- * Define a linker symbol to mark end of the RW memory area for this
+ * Define a linker symbol to mark the end of the RW memory area for this
* image.
*/
__RW_END__ = .;
- __BL32_END__ = .;
+ __BL32_END__ = .;
+
+ ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.")
}
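
Note: the stacks, .bss and xlat_table output sections removed above are now emitted by the STACK_SECTION, BSS_SECTION and XLAT_TABLE_SECTION macros, presumably provided by common/bl_common.ld.h, which this change starts including. The deleted comment notes that .bss is zero-initialised at runtime between the __BSS_START__ and __BSS_END__ markers; purely as an illustration (TF-A's own startup path performs this zeroing in its entry code, not necessarily as written here), a minimal C sketch of how such linker-defined symbols are typically consumed:

#include <stddef.h>
#include <string.h>

/* Linker-defined markers; only their addresses are meaningful. */
extern char __BSS_START__[];
extern char __BSS_END__[];

/* Zero the .bss region delimited by the linker script. */
void zero_bss(void)
{
        memset(__BSS_START__, 0, (size_t)(__BSS_END__ - __BSS_START__));
}
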
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
index 6233299d7..8b5eddd66 100644
--- a/bl32/sp_min/sp_min.mk
+++ b/bl32/sp_min/sp_min.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -19,6 +19,10 @@ BL32_SOURCES += bl32/sp_min/sp_min_main.c \
services/std_svc/std_svc_setup.c \
${PSCI_LIB_SOURCES}
+ifeq (${DISABLE_MTPMU},1)
+BL32_SOURCES += lib/extensions/mtpmu/aarch32/mtpmu.S
+endif
+
ifeq (${ENABLE_PMF}, 1)
BL32_SOURCES += lib/pmf/pmf_main.c
endif
@@ -33,6 +37,11 @@ BL32_SOURCES += bl32/sp_min/wa_cve_2017_5715_bpiall.S \
bl32/sp_min/wa_cve_2017_5715_icache_inv.S
endif
+ifeq (${TRNG_SUPPORT},1)
+BL32_SOURCES += services/std_svc/trng/trng_main.c \
+ services/std_svc/trng/trng_entropy_pool.c
+endif
+
BL32_LINKERFILE := bl32/sp_min/sp_min.ld.S
# Include the platform-specific SP_MIN Makefile
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index ebc5c2c3d..a007bab30 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -60,7 +60,7 @@ func tsp_entrypoint _align=3
*/
pie_fixup:
ldr x0, =pie_fixup
- and x0, x0, #~(PAGE_SIZE - 1)
+ and x0, x0, #~(PAGE_SIZE_MASK)
mov_imm x1, (BL32_LIMIT - BL32_BASE)
add x1, x1, x0
bl fixup_gdt_reloc
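
Note: the pie_fixup hunk above replaces the open-coded page mask with PAGE_SIZE_MASK. Assuming PAGE_SIZE_MASK is defined as (PAGE_SIZE - 1) — the macro values below are assumptions for illustration, not taken from the TF-A headers — both forms round an address down to its page base, as this small C sketch shows:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE       4096ULL                /* assumed 4 KiB pages */
#define PAGE_SIZE_MASK  (PAGE_SIZE - 1ULL)     /* assumed low-bits mask */

/* Round an address down to the start of its page. */
static uint64_t page_align_down(uint64_t addr)
{
        return addr & ~PAGE_SIZE_MASK;
}

int main(void)
{
        /* Both spellings clear the same low bits, so they are interchangeable. */
        assert(page_align_down(0x80001234ULL) ==
               (0x80001234ULL & ~(PAGE_SIZE - 1ULL)));
        return 0;
}
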
diff --git a/bl32/tsp/aarch64/tsp_request.S b/bl32/tsp/aarch64/tsp_request.S
index 5ad16da66..6e238ea4c 100644
--- a/bl32/tsp/aarch64/tsp_request.S
+++ b/bl32/tsp/aarch64/tsp_request.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,28 +9,19 @@
.globl tsp_get_magic
-
/*
* This function raises an SMC to retrieve arguments from secure
* monitor/dispatcher, saves the returned arguments the array received in x0,
* and then returns to the caller
*/
func tsp_get_magic
- /* Save address to stack */
- stp x0, xzr, [sp, #-16]!
-
/* Load arguments */
ldr w0, _tsp_fid_get_magic
/* Raise SMC */
smc #0
- /* Restore address from stack */
- ldp x4, xzr, [sp], #16
-
- /* Store returned arguments to the array */
- stp x0, x1, [x4, #0]
-
+ /* Return arguments in x1:x0 */
ret
endfunc tsp_get_magic
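
Note: with the stack save and the stores to the caller-supplied array removed, tsp_get_magic now leaves the SMC results in x0 and x1 and simply returns. That matches how a 128-bit integer return value is carried under the AArch64 procedure call standard: bits 63:0 in x0 and bits 127:64 in x1. A sketch of packing and unpacking such a value in C, using the compiler-provided __uint128_t as a stand-in for the uint128_t type named in the TSP sources:

#include <assert.h>
#include <stdint.h>

typedef __uint128_t uint128_t;  /* assumed equivalent of the TSP's uint128_t */

/* Combine two 64-bit words the way x1:x0 is interpreted: x0 low, x1 high. */
static uint128_t pack_args(uint64_t x0, uint64_t x1)
{
        return ((uint128_t)x1 << 64) | x0;
}

int main(void)
{
        uint128_t v = pack_args(0x1111222233334444ULL, 0x5555666677778888ULL);

        /* Unpacking mirrors the casts added to tsp_smc_handler() below. */
        assert((uint64_t)v == 0x1111222233334444ULL);
        assert((uint64_t)(v >> 64) == 0x5555666677778888ULL);
        return 0;
}
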
diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S
index 592e24557..d86ae5587 100644
--- a/bl32/tsp/tsp.ld.S
+++ b/bl32/tsp/tsp.ld.S
@@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <common/bl_common.ld.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
-#include <platform_def.h>
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
@@ -37,15 +37,7 @@ SECTIONS
__RODATA_START__ = .;
*(.rodata*)
- /*
- * Keep the .got section in the RO section as it is patched
- * prior to enabling the MMU and having the .got in RO is better for
- * security. GOT is a table of addresses so ensure 8-byte alignment.
- */
- . = ALIGN(8);
- __GOT_START__ = .;
- *(.got)
- __GOT_END__ = .;
+ RODATA_COMMON
. = ALIGN(PAGE_SIZE);
__RODATA_END__ = .;
@@ -57,15 +49,7 @@ SECTIONS
*(.text*)
*(.rodata*)
- /*
- * Keep the .got section in the RO section as it is patched
- * prior to enabling the MMU and having the .got in RO is better for
- * security. GOT is a table of addresses so ensure 8-byte alignment.
- */
- . = ALIGN(8);
- __GOT_START__ = .;
- *(.got)
- __GOT_END__ = .;
+ RODATA_COMMON
*(.vectors)
@@ -86,54 +70,16 @@ SECTIONS
*/
__RW_START__ = . ;
- .data . : {
- __DATA_START__ = .;
- *(.data*)
- __DATA_END__ = .;
- } >RAM
-
- /*
- * .rela.dyn needs to come after .data for the read-elf utility to parse
- * this section correctly. Ensure 8-byte alignment so that the fields of
- * RELA data structure are aligned.
- */
- . = ALIGN(8);
- __RELA_START__ = .;
- .rela.dyn . : {
- } >RAM
- __RELA_END__ = .;
+ DATA_SECTION >RAM
+ RELA_SECTION >RAM
#ifdef TSP_PROGBITS_LIMIT
ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.")
#endif
- stacks (NOLOAD) : {
- __STACKS_START__ = .;
- *(tzfw_normal_stacks)
- __STACKS_END__ = .;
- } >RAM
-
- /*
- * The .bss section gets initialised to 0 at runtime.
- * Its base address should be 16-byte aligned for better performance of the
- * zero-initialization code.
- */
- .bss : ALIGN(16) {
- __BSS_START__ = .;
- *(SORT_BY_ALIGNMENT(.bss*))
- *(COMMON)
- __BSS_END__ = .;
- } >RAM
-
- /*
- * The xlat_table section is for full, aligned page tables (4K).
- * Removing them from .bss avoids forcing 4K alignment on
- * the .bss section. The tables are initialized to zero by the translation
- * tables library.
- */
- xlat_table (NOLOAD) : {
- *(xlat_table)
- } >RAM
+ STACK_SECTION >RAM
+ BSS_SECTION >RAM
+ XLAT_TABLE_SECTION >RAM
#if USE_COHERENT_MEM
/*
diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c
index e1d961cc6..01c9ec58f 100644
--- a/bl32/tsp/tsp_main.c
+++ b/bl32/tsp/tsp_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -273,11 +273,11 @@ tsp_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl,
spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx resumed. maximum off power level %lld\n",
read_mpidr(), max_off_pwrlvl);
- INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
+ INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n",
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
- tsp_stats[linear_id].cpu_suspend_count);
+ tsp_stats[linear_id].cpu_resume_count);
spin_unlock(&console_lock);
#endif
/* Indicate to the SPD that we have completed this request */
@@ -363,30 +363,38 @@ tsp_args_t *tsp_smc_handler(uint64_t func,
uint64_t arg6,
uint64_t arg7)
{
+ uint128_t service_args;
+ uint64_t service_arg0;
+ uint64_t service_arg1;
uint64_t results[2];
- uint64_t service_args[2];
uint32_t linear_id = plat_my_core_pos();
/* Update this cpu's statistics */
tsp_stats[linear_id].smc_count++;
tsp_stats[linear_id].eret_count++;
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+ spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx received %s smc 0x%llx\n", read_mpidr(),
((func >> 31) & 1) == 1 ? "fast" : "yielding",
func);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets\n", read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count);
+ spin_unlock(&console_lock);
+#endif
/* Render secure services and obtain results here */
results[0] = arg1;
results[1] = arg2;
/*
- * Request a service back from dispatcher/secure monitor. This call
- * return and thereafter resume execution
+ * Request a service back from dispatcher/secure monitor.
+ * This call returns and thereafter resumes execution.
*/
- tsp_get_magic(service_args);
+ service_args = tsp_get_magic();
+ service_arg0 = (uint64_t)service_args;
+ service_arg1 = (uint64_t)(service_args >> 64U);
#if CTX_INCLUDE_MTE_REGS
/*
@@ -399,20 +407,20 @@ tsp_args_t *tsp_smc_handler(uint64_t func,
/* Determine the function to perform based on the function ID */
switch (TSP_BARE_FID(func)) {
case TSP_ADD:
- results[0] += service_args[0];
- results[1] += service_args[1];
+ results[0] += service_arg0;
+ results[1] += service_arg1;
break;
case TSP_SUB:
- results[0] -= service_args[0];
- results[1] -= service_args[1];
+ results[0] -= service_arg0;
+ results[1] -= service_arg1;
break;
case TSP_MUL:
- results[0] *= service_args[0];
- results[1] *= service_args[1];
+ results[0] *= service_arg0;
+ results[1] *= service_arg1;
break;
case TSP_DIV:
- results[0] /= service_args[0] ? service_args[0] : 1;
- results[1] /= service_args[1] ? service_args[1] : 1;
+ results[0] /= service_arg0 ? service_arg0 : 1;
+ results[1] /= service_arg1 ? service_arg1 : 1;
break;
default:
break;
diff --git a/bl32/tsp/tsp_private.h b/bl32/tsp/tsp_private.h
index cbd527f37..38d9732f5 100644
--- a/bl32/tsp/tsp_private.h
+++ b/bl32/tsp/tsp_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -61,7 +61,7 @@ typedef struct tsp_args {
*/
CASSERT(TSP_ARGS_SIZE == sizeof(tsp_args_t), assert_sp_args_size_mismatch);
-void tsp_get_magic(uint64_t args[4]);
+uint128_t tsp_get_magic(void);
tsp_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl,
uint64_t arg1,