From a16bc84560efdbfcd242e6c708992624a76ec7cd Mon Sep 17 00:00:00 2001 From: Manish Pandey Date: Fri, 6 Mar 2020 14:36:25 +0000 Subject: TSP: corrected log information In CPU resume function, CPU suspend count was printed instead of CPU resume count. Signed-off-by: Manish Pandey Change-Id: I0c081dc03a4ccfb2129687f690667c5ceed00a5f --- bl32/tsp/tsp_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'bl32') diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c index e1d961cc6..9da2f9af9 100644 --- a/bl32/tsp/tsp_main.c +++ b/bl32/tsp/tsp_main.c @@ -273,11 +273,11 @@ tsp_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl, spin_lock(&console_lock); INFO("TSP: cpu 0x%lx resumed. maximum off power level %lld\n", read_mpidr(), max_off_pwrlvl); - INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n", + INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n", read_mpidr(), tsp_stats[linear_id].smc_count, tsp_stats[linear_id].eret_count, - tsp_stats[linear_id].cpu_suspend_count); + tsp_stats[linear_id].cpu_resume_count); spin_unlock(&console_lock); #endif /* Indicate to the SPD that we have completed this request */ -- cgit v1.2.3 From 665e71b8ea28162ec7737c1411bca3ea89e5957e Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 9 Mar 2020 17:39:48 +0900 Subject: Factor xlat_table sections in linker scripts out into a header file TF-A has so many linker scripts, at least one linker script for each BL image, and some platforms have their own ones. They duplicate quite similar code (and comments). When we add some changes to linker scripts, we end up with touching so many files. This is not nice in the maintainability perspective. When you look at Linux kernel, the common code is macrofied in include/asm-generic/vmlinux.lds.h, which is included from each arch linker script, arch/*/kernel/vmlinux.lds.S TF-A can follow this approach. Let's factor out the common code into include/common/bl_common.ld.h As a start point, this commit factors out the xlat_table section. Change-Id: Ifa369e9b48e8e12702535d721cc2a16d12397895 Signed-off-by: Masahiro Yamada --- bl32/sp_min/sp_min.ld.S | 13 +++---------- bl32/tsp/tsp.ld.S | 11 ++--------- 2 files changed, 5 insertions(+), 19 deletions(-) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S index 6997a7fdb..3b1ca1b58 100644 --- a/bl32/sp_min/sp_min.ld.S +++ b/bl32/sp_min/sp_min.ld.S @@ -1,11 +1,12 @@ /* - * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ #include +#include #include OUTPUT_FORMAT(elf32-littlearm) @@ -196,15 +197,7 @@ SECTIONS __BSS_END__ = .; } >RAM - /* - * The xlat_table section is for full, aligned page tables (4K). - * Removing them from .bss avoids forcing 4K alignment on - * the .bss section. The tables are initialized to zero by the translation - * tables library. - */ - xlat_table (NOLOAD) : { - *(xlat_table) - } >RAM + XLAT_TABLE_SECTION >RAM __BSS_SIZE__ = SIZEOF(.bss); diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S index 592e24557..da60c63a7 100644 --- a/bl32/tsp/tsp.ld.S +++ b/bl32/tsp/tsp.ld.S @@ -4,6 +4,7 @@ * SPDX-License-Identifier: BSD-3-Clause */ +#include #include #include @@ -125,15 +126,7 @@ SECTIONS __BSS_END__ = .; } >RAM - /* - * The xlat_table section is for full, aligned page tables (4K). - * Removing them from .bss avoids forcing 4K alignment on - * the .bss section. 
The tables are initialized to zero by the translation - * tables library. - */ - xlat_table (NOLOAD) : { - *(xlat_table) - } >RAM + XLAT_TABLE_SECTION >RAM #if USE_COHERENT_MEM /* -- cgit v1.2.3 From 26d1e0c330981505315408c2537b87854d15d720 Mon Sep 17 00:00:00 2001 From: Madhukar Pappireddy Date: Mon, 27 Jan 2020 13:37:51 -0600 Subject: fconf: necessary modifications to support fconf in BL31 & SP_MIN Necessary infrastructure added to integrate fconf framework in BL31 & SP_MIN. Created few populator() functions which parse HW_CONFIG device tree and registered them with fconf framework. Many of the changes are only applicable for fvp platform. This patch: 1. Adds necessary symbols and sections in BL31, SP_MIN linker script 2. Adds necessary memory map entry for translation in BL31, SP_MIN 3. Creates an abstraction layer for hardware configuration based on fconf framework 4. Adds necessary changes to build flow (makefiles) 5. Minimal callback to read hw_config dtb for capturing properties related to GIC(interrupt-controller node) 6. updates the fconf documentation Change-Id: Ib6292071f674ef093962b9e8ba0d322b7bf919af Signed-off-by: Madhukar Pappireddy --- bl32/sp_min/sp_min.ld.S | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S index 6997a7fdb..3231f9aec 100644 --- a/bl32/sp_min/sp_min.ld.S +++ b/bl32/sp_min/sp_min.ld.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -55,6 +55,11 @@ SECTIONS KEEP(*(rt_svc_descs)) __RT_SVC_DESCS_END__ = .; + . = ALIGN(8); + __FCONF_POPULATOR_START__ = .; + KEEP(*(.fconf_populator)) + __FCONF_POPULATOR_END__ = .; + #if ENABLE_PMF /* Ensure 4-byte alignment for descriptors and ensure inclusion */ . = ALIGN(4); @@ -92,6 +97,11 @@ SECTIONS KEEP(*(rt_svc_descs)) __RT_SVC_DESCS_END__ = .; + . = ALIGN(8); + __FCONF_POPULATOR_START__ = .; + KEEP(*(.fconf_populator)) + __FCONF_POPULATOR_END__ = .; + /* * Ensure 4-byte alignment for cpu_ops so that its fields are also * aligned. Also ensure cpu_ops inclusion. -- cgit v1.2.3 From ae7b922d87597ecde226be632633f11a17ccba20 Mon Sep 17 00:00:00 2001 From: Madhukar Pappireddy Date: Fri, 20 Mar 2020 01:46:21 -0500 Subject: Bug fix: Protect TSP prints with lock CPUs use console to print debug/info messages. This critical section must be guarded by locks to avoid overlaps in messages from multiple CPUs. Change-Id: I786bf90072c1ed73c4f53d8c950979d95255e67e Signed-off-by: Madhukar Pappireddy --- bl32/tsp/tsp_main.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'bl32') diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c index 9da2f9af9..e9478380c 100644 --- a/bl32/tsp/tsp_main.c +++ b/bl32/tsp/tsp_main.c @@ -371,12 +371,16 @@ tsp_args_t *tsp_smc_handler(uint64_t func, tsp_stats[linear_id].smc_count++; tsp_stats[linear_id].eret_count++; +#if LOG_LEVEL >= LOG_LEVEL_INFO + spin_lock(&console_lock); INFO("TSP: cpu 0x%lx received %s smc 0x%llx\n", read_mpidr(), ((func >> 31) & 1) == 1 ? 
"fast" : "yielding", func); INFO("TSP: cpu 0x%lx: %d smcs, %d erets\n", read_mpidr(), tsp_stats[linear_id].smc_count, tsp_stats[linear_id].eret_count); + spin_unlock(&console_lock); +#endif /* Render secure services and obtain results here */ results[0] = arg1; -- cgit v1.2.3 From 85ee795ca286a5a07a215902cc8d39a92c845637 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 25 Mar 2020 20:52:44 +0900 Subject: bl32: sp_min: reduce the alignment for fconf_populator sp_min.ld.S is used for aarch32. ALIGN(4) is used for alignment of the other structures. I do not think struct fconf_populator is a special case. Let's use ALIGN(4) here too. Perhaps, this is just a copy-paste mistake of commit 26d1e0c33098 ("fconf: necessary modifications to support fconf in BL31 & SP_MIN"). Change-Id: I29f4c68680842c1b5ef913934b4ccf378e9bfcfb Signed-off-by: Masahiro Yamada --- bl32/sp_min/sp_min.ld.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S index a90a805a0..66f3b113d 100644 --- a/bl32/sp_min/sp_min.ld.S +++ b/bl32/sp_min/sp_min.ld.S @@ -56,7 +56,7 @@ SECTIONS KEEP(*(rt_svc_descs)) __RT_SVC_DESCS_END__ = .; - . = ALIGN(8); + . = ALIGN(4); __FCONF_POPULATOR_START__ = .; KEEP(*(.fconf_populator)) __FCONF_POPULATOR_END__ = .; @@ -98,7 +98,7 @@ SECTIONS KEEP(*(rt_svc_descs)) __RT_SVC_DESCS_END__ = .; - . = ALIGN(8); + . = ALIGN(4); __FCONF_POPULATOR_START__ = .; KEEP(*(.fconf_populator)) __FCONF_POPULATOR_END__ = .; -- cgit v1.2.3 From 9fb288a03ed2ced7706defbbf78f008e921e17e2 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 10:51:39 +0900 Subject: linker_script: move more common code to bl_common.ld.h These are mostly used to collect data from special structure, and repeated in many linker scripts. To differentiate the alignment size between aarch32/aarch64, I added a new macro STRUCT_ALIGN. While I moved the PMF_SVC_DESCS, I dropped #if ENABLE_PMF conditional. As you can see in include/lib/pmf/pmf_helpers.h, PMF_REGISTER_SERVICE* are no-op when ENABLE_PMF=0. So, pmf_svc_descs and pmf_timestamp_array data are not populated. Change-Id: I3f4ab7fa18f76339f1789103407ba76bda7e56d0 Signed-off-by: Masahiro Yamada --- bl32/sp_min/sp_min.ld.S | 99 +++++-------------------------------------------- bl32/tsp/tsp.ld.S | 21 +---------- 2 files changed, 11 insertions(+), 109 deletions(-) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S index 66f3b113d..d83b4e018 100644 --- a/bl32/sp_min/sp_min.ld.S +++ b/bl32/sp_min/sp_min.ld.S @@ -4,8 +4,6 @@ * SPDX-License-Identifier: BSD-3-Clause */ -#include - #include #include @@ -50,33 +48,10 @@ SECTIONS __RODATA_START__ = .; *(.rodata*) - /* Ensure 4-byte alignment for descriptors and ensure inclusion */ - . = ALIGN(4); - __RT_SVC_DESCS_START__ = .; - KEEP(*(rt_svc_descs)) - __RT_SVC_DESCS_END__ = .; - - . = ALIGN(4); - __FCONF_POPULATOR_START__ = .; - KEEP(*(.fconf_populator)) - __FCONF_POPULATOR_END__ = .; - -#if ENABLE_PMF - /* Ensure 4-byte alignment for descriptors and ensure inclusion */ - . = ALIGN(4); - __PMF_SVC_DESCS_START__ = .; - KEEP(*(pmf_svc_descs)) - __PMF_SVC_DESCS_END__ = .; -#endif /* ENABLE_PMF */ - - /* - * Ensure 4-byte alignment for cpu_ops so that its fields are also - * aligned. Also ensure cpu_ops inclusion. - */ - . = ALIGN(4); - __CPU_OPS_START__ = .; - KEEP(*(cpu_ops)) - __CPU_OPS_END__ = .; + RT_SVC_DESCS + FCONF_POPULATOR + PMF_SVC_DESCS + CPU_OPS /* Place pubsub sections for events */ . 
= ALIGN(8); @@ -92,25 +67,9 @@ SECTIONS *(.text*) *(.rodata*) - /* Ensure 4-byte alignment for descriptors and ensure inclusion */ - . = ALIGN(4); - __RT_SVC_DESCS_START__ = .; - KEEP(*(rt_svc_descs)) - __RT_SVC_DESCS_END__ = .; - - . = ALIGN(4); - __FCONF_POPULATOR_START__ = .; - KEEP(*(.fconf_populator)) - __FCONF_POPULATOR_END__ = .; - - /* - * Ensure 4-byte alignment for cpu_ops so that its fields are also - * aligned. Also ensure cpu_ops inclusion. - */ - . = ALIGN(4); - __CPU_OPS_START__ = .; - KEEP(*(cpu_ops)) - __CPU_OPS_END__ = .; + RT_SVC_DESCS + FCONF_POPULATOR + CPU_OPS /* Place pubsub sections for events */ . = ALIGN(8); @@ -162,48 +121,8 @@ SECTIONS __BSS_START__ = .; *(.bss*) *(COMMON) -#if !USE_COHERENT_MEM - /* - * Bakery locks are stored in normal .bss memory - * - * Each lock's data is spread across multiple cache lines, one per CPU, - * but multiple locks can share the same cache line. - * The compiler will allocate enough memory for one CPU's bakery locks, - * the remaining cache lines are allocated by the linker script - */ - . = ALIGN(CACHE_WRITEBACK_GRANULE); - __BAKERY_LOCK_START__ = .; - __PERCPU_BAKERY_LOCK_START__ = .; - *(bakery_lock) - . = ALIGN(CACHE_WRITEBACK_GRANULE); - __PERCPU_BAKERY_LOCK_END__ = .; - __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); - . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1)); - __BAKERY_LOCK_END__ = .; -#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE - ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE, - "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements"); -#endif -#endif - -#if ENABLE_PMF - /* - * Time-stamps are stored in normal .bss memory - * - * The compiler will allocate enough memory for one CPU's time-stamps, - * the remaining memory for other CPUs is allocated by the - * linker script - */ - . = ALIGN(CACHE_WRITEBACK_GRANULE); - __PMF_TIMESTAMP_START__ = .; - KEEP(*(pmf_timestamp_array)) - . = ALIGN(CACHE_WRITEBACK_GRANULE); - __PMF_PERCPU_TIMESTAMP_END__ = .; - __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); - . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1)); - __PMF_TIMESTAMP_END__ = .; -#endif /* ENABLE_PMF */ - + BAKERY_LOCK_NORMAL + PMF_TIMESTAMP __BSS_END__ = .; } >RAM diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S index da60c63a7..b1ec42350 100644 --- a/bl32/tsp/tsp.ld.S +++ b/bl32/tsp/tsp.ld.S @@ -6,7 +6,6 @@ #include #include -#include OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT) OUTPUT_ARCH(PLATFORM_LINKER_ARCH) @@ -38,15 +37,7 @@ SECTIONS __RODATA_START__ = .; *(.rodata*) - /* - * Keep the .got section in the RO section as it is patched - * prior to enabling the MMU and having the .got in RO is better for - * security. GOT is a table of addresses so ensure 8-byte alignment. - */ - . = ALIGN(8); - __GOT_START__ = .; - *(.got) - __GOT_END__ = .; + GOT . = ALIGN(PAGE_SIZE); __RODATA_END__ = .; @@ -58,15 +49,7 @@ SECTIONS *(.text*) *(.rodata*) - /* - * Keep the .got section in the RO section as it is patched - * prior to enabling the MMU and having the .got in RO is better for - * security. GOT is a table of addresses so ensure 8-byte alignment. - */ - . 
= ALIGN(8); - __GOT_START__ = .; - *(.got) - __GOT_END__ = .; + GOT *(.vectors) -- cgit v1.2.3 From 0a0a7a9ac82cb79af91f098cedc69cc67bca3978 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 10:57:12 +0900 Subject: linker_script: replace common read-only data with RODATA_COMMON The common section data are repeated in many linker scripts (often twice in each script to support SEPARATE_CODE_AND_RODATA). When you add a new read-only data section, you end up with touching lots of places. After this commit, you will only need to touch bl_common.ld.h when you add a new section to RODATA_COMMON. Replace a series of RO section with RODATA_COMMON, which contains 6 sections, some of which did not exist before. This is not a big deal because unneeded data should not be compiled in the first place. I believe this should be controlled by BL*_SOURCES in Makefiles, not by linker scripts. When I was working on this commit, the BL1 image size increased due to the fconf_populator. Commit c452ba159c14 ("fconf: exclude fconf_dyn_cfg_getter.c from BL1_SOURCES") fixed this issue. I investigated BL1, BL2, BL2U, BL31 for plat=fvp, and BL2-AT-EL3, BL31, BL31 for plat=uniphier. I did not see any more unexpected code addition. Change-Id: I5d14d60dbe3c821765bce3ae538968ef266f1460 Signed-off-by: Masahiro Yamada --- bl32/sp_min/sp_min.ld.S | 9 ++------- bl32/tsp/tsp.ld.S | 4 ++-- 2 files changed, 4 insertions(+), 9 deletions(-) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S index d83b4e018..f652f17e2 100644 --- a/bl32/sp_min/sp_min.ld.S +++ b/bl32/sp_min/sp_min.ld.S @@ -48,10 +48,7 @@ SECTIONS __RODATA_START__ = .; *(.rodata*) - RT_SVC_DESCS - FCONF_POPULATOR - PMF_SVC_DESCS - CPU_OPS + RODATA_COMMON /* Place pubsub sections for events */ . = ALIGN(8); @@ -67,9 +64,7 @@ SECTIONS *(.text*) *(.rodata*) - RT_SVC_DESCS - FCONF_POPULATOR - CPU_OPS + RODATA_COMMON /* Place pubsub sections for events */ . = ALIGN(8); diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S index b1ec42350..b071e82fd 100644 --- a/bl32/tsp/tsp.ld.S +++ b/bl32/tsp/tsp.ld.S @@ -37,7 +37,7 @@ SECTIONS __RODATA_START__ = .; *(.rodata*) - GOT + RODATA_COMMON . = ALIGN(PAGE_SIZE); __RODATA_END__ = .; @@ -49,7 +49,7 @@ SECTIONS *(.text*) *(.rodata*) - GOT + RODATA_COMMON *(.vectors) -- cgit v1.2.3 From a7739bc7b16bf3e43f370864f8a800cf8943b391 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 13:16:33 +0900 Subject: linker_script: move bss section to bl_common.ld.h Move the bss section to the common header. This adds BAKERY_LOCK_NORMAL and PMF_TIMESTAMP, which previously existed only in BL31. This is not a big deal because unused data should not be compiled in the first place. I believe this should be controlled by BL*_SOURCES in Makefiles, not by linker scripts. I investigated BL1, BL2, BL2U, BL31 for plat=fvp, and BL2-AT-EL3, BL31, BL31 for plat=uniphier. I did not see any more unexpected code addition. The bss section has bigger alignment. I added BSS_ALIGN for this. Currently, SORT_BY_ALIGNMENT() is missing in sp_min.ld.S, and with this change, the BSS symbols in SP_MIN will be sorted by the alignment. This is not a big deal (or, even better in terms of the image size). 
Change-Id: I680ee61f84067a559bac0757f9d03e73119beb33 Signed-off-by: Masahiro Yamada --- bl32/sp_min/sp_min.ld.S | 15 +-------------- bl32/tsp/tsp.ld.S | 13 +------------ 2 files changed, 2 insertions(+), 26 deletions(-) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S index f652f17e2..da005db64 100644 --- a/bl32/sp_min/sp_min.ld.S +++ b/bl32/sp_min/sp_min.ld.S @@ -107,20 +107,7 @@ SECTIONS __STACKS_END__ = .; } >RAM - /* - * The .bss section gets initialised to 0 at runtime. - * Its base address should be 8-byte aligned for better performance of the - * zero-initialization code. - */ - .bss (NOLOAD) : ALIGN(8) { - __BSS_START__ = .; - *(.bss*) - *(COMMON) - BAKERY_LOCK_NORMAL - PMF_TIMESTAMP - __BSS_END__ = .; - } >RAM - + BSS_SECTION >RAM XLAT_TABLE_SECTION >RAM __BSS_SIZE__ = SIZEOF(.bss); diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S index b071e82fd..bf77c9234 100644 --- a/bl32/tsp/tsp.ld.S +++ b/bl32/tsp/tsp.ld.S @@ -97,18 +97,7 @@ SECTIONS __STACKS_END__ = .; } >RAM - /* - * The .bss section gets initialised to 0 at runtime. - * Its base address should be 16-byte aligned for better performance of the - * zero-initialization code. - */ - .bss : ALIGN(16) { - __BSS_START__ = .; - *(SORT_BY_ALIGNMENT(.bss*)) - *(COMMON) - __BSS_END__ = .; - } >RAM - + BSS_SECTION >RAM XLAT_TABLE_SECTION >RAM #if USE_COHERENT_MEM -- cgit v1.2.3 From a926a9f60aa94a034b0a06eed296996363245d30 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 7 Apr 2020 13:04:24 +0900 Subject: linker_script: move stacks section to bl_common.ld.h The stacks section is the same for all BL linker scripts. Move it to the common header file. Change-Id: Ibd253488667ab4f69702d56ff9e9929376704f6c Signed-off-by: Masahiro Yamada --- bl32/sp_min/sp_min.ld.S | 7 +------ bl32/tsp/tsp.ld.S | 7 +------ 2 files changed, 2 insertions(+), 12 deletions(-) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S index da005db64..8e91cec91 100644 --- a/bl32/sp_min/sp_min.ld.S +++ b/bl32/sp_min/sp_min.ld.S @@ -101,12 +101,7 @@ SECTIONS ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.") #endif - stacks (NOLOAD) : { - __STACKS_START__ = .; - *(tzfw_normal_stacks) - __STACKS_END__ = .; - } >RAM - + STACK_SECTION >RAM BSS_SECTION >RAM XLAT_TABLE_SECTION >RAM diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S index bf77c9234..7428c0345 100644 --- a/bl32/tsp/tsp.ld.S +++ b/bl32/tsp/tsp.ld.S @@ -91,12 +91,7 @@ SECTIONS ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.") #endif - stacks (NOLOAD) : { - __STACKS_START__ = .; - *(tzfw_normal_stacks) - __STACKS_END__ = .; - } >RAM - + STACK_SECTION >RAM BSS_SECTION >RAM XLAT_TABLE_SECTION >RAM -- cgit v1.2.3 From caa3e7e0a4aeb657873bbd2c002c0e33a614eb1d Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 22 Apr 2020 10:50:12 +0900 Subject: linker_script: move .data section to bl_common.ld.h Move the data section to the common header. I slightly tweaked some scripts as follows: [1] bl1.ld.S has ALIGN(16). I added DATA_ALIGN macro, which is 1 by default, but overridden by bl1.ld.S. Currently, ALIGN(16) of the .data section is redundant because commit 412865907699 ("Fix boot failures on some builds linked with ld.lld.") padded out the previous section to work around the issue of LLD version <= 10.0. This will be fixed in the future release of LLVM, so I am keeping the proper way to align LMA. [2] bl1.ld.S and bl2_el3.ld.S define __DATA_RAM_{START,END}__ instead of __DATA_{START,END}__. 
I put them out of the .data section. [3] SORT_BY_ALIGNMENT() is missing tsp.ld.S, sp_min.ld.S, and mediatek/mt6795/bl31.ld.S. This commit adds SORT_BY_ALIGNMENT() for all images, so the symbol order in those three will change, but I do not think it is a big deal. Change-Id: I215bb23c319f045cd88e6f4e8ee2518c67f03692 Signed-off-by: Masahiro Yamada --- bl32/sp_min/sp_min.ld.S | 6 +----- bl32/tsp/tsp.ld.S | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S index 8e91cec91..9e0596f1f 100644 --- a/bl32/sp_min/sp_min.ld.S +++ b/bl32/sp_min/sp_min.ld.S @@ -91,11 +91,7 @@ SECTIONS */ __RW_START__ = . ; - .data . : { - __DATA_START__ = .; - *(.data*) - __DATA_END__ = .; - } >RAM + DATA_SECTION >RAM #ifdef BL32_PROGBITS_LIMIT ASSERT(. <= BL32_PROGBITS_LIMIT, "BL32 progbits has exceeded its limit.") diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S index 7428c0345..bdcd2cf70 100644 --- a/bl32/tsp/tsp.ld.S +++ b/bl32/tsp/tsp.ld.S @@ -70,11 +70,7 @@ SECTIONS */ __RW_START__ = . ; - .data . : { - __DATA_START__ = .; - *(.data*) - __DATA_END__ = .; - } >RAM + DATA_SECTION >RAM /* * .rela.dyn needs to come after .data for the read-elf utility to parse -- cgit v1.2.3 From e8ad6168b0153e09f1a54ee887555db7833019df Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 22 Apr 2020 11:27:55 +0900 Subject: linker_script: move .rela.dyn section to bl_common.ld.h The .rela.dyn section is the same for BL2-AT-EL3, BL31, TSP. Move it to the common header file. I slightly changed the definition so that we can do "RELA_SECTION >RAM". It still produced equivalent elf images. Please note I got rid of '.' from the VMA field. Otherwise, if the end of previous .data section is not 8-byte aligned, it fails to link. aarch64-linux-gnu-ld.bfd: warning: changing start of section .rela.dyn by 4 bytes aarch64-linux-gnu-ld.bfd: warning: changing start of section .rela.dyn by 4 bytes aarch64-linux-gnu-ld.bfd: warning: changing start of section .rela.dyn by 4 bytes make: *** [Makefile:1071: build/qemu/release/bl31/bl31.elf] Error 1 Change-Id: Iba7422d99c0374d4d9e97e6fd47bae129dba5cc9 Signed-off-by: Masahiro Yamada --- bl32/tsp/tsp.ld.S | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'bl32') diff --git a/bl32/tsp/tsp.ld.S b/bl32/tsp/tsp.ld.S index bdcd2cf70..d86ae5587 100644 --- a/bl32/tsp/tsp.ld.S +++ b/bl32/tsp/tsp.ld.S @@ -71,17 +71,7 @@ SECTIONS __RW_START__ = . ; DATA_SECTION >RAM - - /* - * .rela.dyn needs to come after .data for the read-elf utility to parse - * this section correctly. Ensure 8-byte alignment so that the fields of - * RELA data structure are aligned. - */ - . = ALIGN(8); - __RELA_START__ = .; - .rela.dyn . : { - } >RAM - __RELA_END__ = .; + RELA_SECTION >RAM #ifdef TSP_PROGBITS_LIMIT ASSERT(. <= TSP_PROGBITS_LIMIT, "TSP progbits has exceeded its limit.") -- cgit v1.2.3 From b1f596b68b040cfcdb19d06252b2998e6354a36a Mon Sep 17 00:00:00 2001 From: Yann Gautier Date: Mon, 5 Oct 2020 09:54:09 +0200 Subject: bl32: use SORT_BY_ALIGNMENT macro in sp_min.ld.S The macro SORT_BY_ALIGNMENT is used for .text* and .rodata*. This allows reducing the space lost to object alignment. This is an alignment with the following patch: ebd6efae67c6a086bc97d807a638bde324d936dc Some comments are also aligned with other linker scripts. 
Change-Id: I2ea59edb445af0ed8c08fd883ffbf56852570d0c Signed-off-by: Yann Gautier --- bl32/sp_min/sp_min.ld.S | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S index 9e0596f1f..5223915e5 100644 --- a/bl32/sp_min/sp_min.ld.S +++ b/bl32/sp_min/sp_min.ld.S @@ -22,14 +22,14 @@ MEMORY { SECTIONS { . = BL32_BASE; - ASSERT(. == ALIGN(PAGE_SIZE), - "BL32_BASE address is not aligned on a page boundary.") + ASSERT(. == ALIGN(PAGE_SIZE), + "BL32_BASE address is not aligned on a page boundary.") #if SEPARATE_CODE_AND_RODATA .text . : { __TEXT_START__ = .; *entrypoint.o(.text*) - *(.text*) + *(SORT_BY_ALIGNMENT(.text*)) *(.vectors) . = ALIGN(PAGE_SIZE); __TEXT_END__ = .; @@ -46,7 +46,7 @@ SECTIONS .rodata . : { __RODATA_START__ = .; - *(.rodata*) + *(SORT_BY_ALIGNMENT(.rodata*)) RODATA_COMMON @@ -61,8 +61,8 @@ SECTIONS ro . : { __RO_START__ = .; *entrypoint.o(.text*) - *(.text*) - *(.rodata*) + *(SORT_BY_ALIGNMENT(.text*)) + *(SORT_BY_ALIGNMENT(.rodata*)) RODATA_COMMON @@ -76,7 +76,7 @@ SECTIONS /* * Memory page(s) mapped to this section will be marked as * read-only, executable. No RW data from the next section must - * creep in. Ensure the rest of the current memory block is unused. + * creep in. Ensure the rest of the current memory page is unused. */ . = ALIGN(PAGE_SIZE); __RO_END__ = .; @@ -134,10 +134,10 @@ SECTIONS #endif /* - * Define a linker symbol to mark end of the RW memory area for this + * Define a linker symbol to mark the end of the RW memory area for this * image. */ __RW_END__ = .; - __BL32_END__ = .; + __BL32_END__ = .; } -- cgit v1.2.3 From fdd97d7c64edb256812e786a7aa224a3010a7fec Mon Sep 17 00:00:00 2001 From: Yann Gautier Date: Mon, 5 Oct 2020 11:39:19 +0200 Subject: bl32: add an assert on BL32_SIZE in sp_min.ld.S This assert is present in all other linker scripts. This checks the size of BL32 doesn't exceed its defined limit. Change-Id: I0005959b5591d3eebd870045adafe437108bc9e1 Signed-off-by: Yann Gautier --- bl32/sp_min/sp_min.ld.S | 2 ++ 1 file changed, 2 insertions(+) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.ld.S b/bl32/sp_min/sp_min.ld.S index 5223915e5..f202c7ada 100644 --- a/bl32/sp_min/sp_min.ld.S +++ b/bl32/sp_min/sp_min.ld.S @@ -140,4 +140,6 @@ SECTIONS __RW_END__ = .; __BL32_END__ = .; + + ASSERT(. <= BL32_LIMIT, "BL32 image has exceeded its limit.") } -- cgit v1.2.3 From d7b5f40823d449cc79e6440174390997cf11a9d9 Mon Sep 17 00:00:00 2001 From: Jimmy Brisson Date: Tue, 4 Aug 2020 16:18:52 -0500 Subject: Increase type widths to satisfy width requirements Usually, C has no problem up-converting types to larger bit sizes. MISRA rule 10.7 requires that you not do this, or be very explicit about this. This resolves the following required rule: bl1/aarch64/bl1_context_mgmt.c:81:[MISRA C-2012 Rule 10.7 (required)] The width of the composite expression "0U | ((mode & 3U) << 2U) | 1U | 0x3c0U" (32 bits) is less that the right hand operand "18446744073709547519ULL" (64 bits). This also resolves MISRA defects such as: bl2/aarch64/bl2arch_setup.c:18:[MISRA C-2012 Rule 12.2 (required)] In the expression "3U << 20", shifting more than 7 bits, the number of bits in the essential type of the left expression, "3U", is not allowed. Further, MISRA requires that all shifts don't overflow. The definition of PAGE_SIZE was (1U << 12), and 1U is 8 bits. This caused about 50 issues. This fixes the violation by changing the definition to 1UL << 12. 
Since this uses 32bits, it should not create any issues for aarch32. This patch also contains a fix for a build failure in the sun50i_a64 platform. Specifically, these misra fixes removed a single and instruction, 92407e73 and x19, x19, #0xffffffff from the cm_setup_context function caused a relocation in psci_cpus_on_start to require a linker-generated stub. This increased the size of the .text section and caused an alignment later on to go over a page boundary and round up to the end of RAM before placing the .data section. This sectionn is of non-zero size and therefore causes a link error. The fix included in this reorders the functions during link time without changing their ording with respect to alignment. Change-Id: I76b4b662c3d262296728a8b9aab7a33b02087f16 Signed-off-by: Jimmy Brisson --- bl32/tsp/aarch64/tsp_entrypoint.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'bl32') diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S index ebc5c2c3d..a007bab30 100644 --- a/bl32/tsp/aarch64/tsp_entrypoint.S +++ b/bl32/tsp/aarch64/tsp_entrypoint.S @@ -60,7 +60,7 @@ func tsp_entrypoint _align=3 */ pie_fixup: ldr x0, =pie_fixup - and x0, x0, #~(PAGE_SIZE - 1) + and x0, x0, #~(PAGE_SIZE_MASK) mov_imm x1, (BL32_LIMIT - BL32_BASE) add x1, x1, x0 bl fixup_gdt_reloc -- cgit v1.2.3 From caff3c87245cab1c95c4f2958144d8f78f42685e Mon Sep 17 00:00:00 2001 From: Alexei Fedorov Date: Fri, 13 Nov 2020 12:36:49 +0000 Subject: TSP: Fix GCC 11.0.0 compilation error. This patch fixes the following compilation error reported by aarch64-none-elf-gcc 11.0.0: bl32/tsp/tsp_main.c: In function 'tsp_smc_handler': bl32/tsp/tsp_main.c:393:9: error: 'tsp_get_magic' accessing 32 bytes in a region of size 16 [-Werror=stringop-overflow=] 393 | tsp_get_magic(service_args); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~ bl32/tsp/tsp_main.c:393:9: note: referencing argument 1 of type 'uint64_t *' {aka 'long long unsigned int *'} In file included from bl32/tsp/tsp_main.c:19: bl32/tsp/tsp_private.h:64:6: note: in a call to function 'tsp_get_magic' 64 | void tsp_get_magic(uint64_t args[4]); | ^~~~~~~~~~~~~ by changing declaration of tsp_get_magic function from void tsp_get_magic(uint64_t args[4]); to uint128_t tsp_get_magic(void); which returns arguments directly in x0 and x1 registers. In bl32\tsp\tsp_main.c the current tsp_smc_handler() implementation calls tsp_get_magic(service_args); , where service_args array is declared as uint64_t service_args[2]; and tsp_get_magic() in bl32\tsp\aarch64\tsp_request.S copies only 2 registers in output buffer: /* Store returned arguments to the array */ stp x0, x1, [x4, #0] Change-Id: Ib34759fc5d7bb803e6c734540d91ea278270b330 Signed-off-by: Alexei Fedorov --- bl32/tsp/aarch64/tsp_request.S | 13 ++----------- bl32/tsp/tsp_main.c | 30 +++++++++++++++++------------- bl32/tsp/tsp_private.h | 4 ++-- 3 files changed, 21 insertions(+), 26 deletions(-) (limited to 'bl32') diff --git a/bl32/tsp/aarch64/tsp_request.S b/bl32/tsp/aarch64/tsp_request.S index 5ad16da66..6e238ea4c 100644 --- a/bl32/tsp/aarch64/tsp_request.S +++ b/bl32/tsp/aarch64/tsp_request.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -9,28 +9,19 @@ .globl tsp_get_magic - /* * This function raises an SMC to retrieve arguments from secure * monitor/dispatcher, saves the returned arguments the array received in x0, * and then returns to the caller */ func tsp_get_magic - /* Save address to stack */ - stp x0, xzr, [sp, #-16]! - /* Load arguments */ ldr w0, _tsp_fid_get_magic /* Raise SMC */ smc #0 - /* Restore address from stack */ - ldp x4, xzr, [sp], #16 - - /* Store returned arguments to the array */ - stp x0, x1, [x4, #0] - + /* Return arguments in x1:x0 */ ret endfunc tsp_get_magic diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c index e9478380c..01c9ec58f 100644 --- a/bl32/tsp/tsp_main.c +++ b/bl32/tsp/tsp_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -363,8 +363,10 @@ tsp_args_t *tsp_smc_handler(uint64_t func, uint64_t arg6, uint64_t arg7) { + uint128_t service_args; + uint64_t service_arg0; + uint64_t service_arg1; uint64_t results[2]; - uint64_t service_args[2]; uint32_t linear_id = plat_my_core_pos(); /* Update this cpu's statistics */ @@ -387,10 +389,12 @@ tsp_args_t *tsp_smc_handler(uint64_t func, results[1] = arg2; /* - * Request a service back from dispatcher/secure monitor. This call - * return and thereafter resume execution + * Request a service back from dispatcher/secure monitor. + * This call returns and thereafter resumes execution. */ - tsp_get_magic(service_args); + service_args = tsp_get_magic(); + service_arg0 = (uint64_t)service_args; + service_arg1 = (uint64_t)(service_args >> 64U); #if CTX_INCLUDE_MTE_REGS /* @@ -403,20 +407,20 @@ tsp_args_t *tsp_smc_handler(uint64_t func, /* Determine the function to perform based on the function ID */ switch (TSP_BARE_FID(func)) { case TSP_ADD: - results[0] += service_args[0]; - results[1] += service_args[1]; + results[0] += service_arg0; + results[1] += service_arg1; break; case TSP_SUB: - results[0] -= service_args[0]; - results[1] -= service_args[1]; + results[0] -= service_arg0; + results[1] -= service_arg1; break; case TSP_MUL: - results[0] *= service_args[0]; - results[1] *= service_args[1]; + results[0] *= service_arg0; + results[1] *= service_arg1; break; case TSP_DIV: - results[0] /= service_args[0] ? service_args[0] : 1; - results[1] /= service_args[1] ? service_args[1] : 1; + results[0] /= service_arg0 ? service_arg0 : 1; + results[1] /= service_arg1 ? service_arg1 : 1; break; default: break; diff --git a/bl32/tsp/tsp_private.h b/bl32/tsp/tsp_private.h index cbd527f37..38d9732f5 100644 --- a/bl32/tsp/tsp_private.h +++ b/bl32/tsp/tsp_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2020, ARM Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -61,7 +61,7 @@ typedef struct tsp_args { */ CASSERT(TSP_ARGS_SIZE == sizeof(tsp_args_t), assert_sp_args_size_mismatch); -void tsp_get_magic(uint64_t args[4]); +uint128_t tsp_get_magic(void); tsp_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl, uint64_t arg1, -- cgit v1.2.3 From 0063dd1708e67e5d36168caaf2a0df383bbe1455 Mon Sep 17 00:00:00 2001 From: Javier Almansa Sobrino Date: Mon, 23 Nov 2020 18:38:15 +0000 Subject: Add support for FEAT_MTPMU for Armv8.6 If FEAT_PMUv3 is implemented and PMEVTYPER(_EL0).MT bit is implemented as well, it is possible to control whether PMU counters take into account events happening on other threads. If FEAT_MTPMU is implemented, EL3 (or EL2) can override the MT bit leaving it to effective state of 0 regardless of any write to it. This patch introduces the DISABLE_MTPMU flag, which allows to diable multithread event count from EL3 (or EL2). The flag is disabled by default so the behavior is consistent with those architectures that do not implement FEAT_MTPMU. Signed-off-by: Javier Almansa Sobrino Change-Id: Iee3a8470ae8ba13316af1bd40c8d4aa86e0cb85e --- bl32/sp_min/sp_min.mk | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk index 6233299d7..afd7ae196 100644 --- a/bl32/sp_min/sp_min.mk +++ b/bl32/sp_min/sp_min.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # @@ -19,6 +19,10 @@ BL32_SOURCES += bl32/sp_min/sp_min_main.c \ services/std_svc/std_svc_setup.c \ ${PSCI_LIB_SOURCES} +ifeq (${DISABLE_MTPMU},1) +BL32_SOURCES += lib/extensions/mtpmu/aarch32/mtpmu.S +endif + ifeq (${ENABLE_PMF}, 1) BL32_SOURCES += lib/pmf/pmf_main.c endif -- cgit v1.2.3 From 0e14948e2ae2c0b491a1cf2e2de00433e4a95dbe Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Mon, 8 Feb 2021 18:07:23 +0000 Subject: bl32: Enable TRNG service build The Trusted Random Number Generator service is using the standard SMC service dispatcher, running in BL31. For that reason we list the files implementing the service in bl31.mk. However when building for a 32-bit TF-A runtime, sp_min.mk is the Makefile snippet used, so we have to add the files into there as well. This fixes 32-bit builds of platforms that provide the TRNG service. Change-Id: I8be61522300d36477a9ee0a9ce159a140390b254 Signed-off-by: Andre Przywara --- bl32/sp_min/sp_min.mk | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'bl32') diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk index afd7ae196..8b5eddd66 100644 --- a/bl32/sp_min/sp_min.mk +++ b/bl32/sp_min/sp_min.mk @@ -37,6 +37,11 @@ BL32_SOURCES += bl32/sp_min/wa_cve_2017_5715_bpiall.S \ bl32/sp_min/wa_cve_2017_5715_icache_inv.S endif +ifeq (${TRNG_SUPPORT},1) +BL32_SOURCES += services/std_svc/trng/trng_main.c \ + services/std_svc/trng/trng_entropy_pool.c +endif + BL32_LINKERFILE := bl32/sp_min/sp_min.ld.S # Include the platform-specific SP_MIN Makefile -- cgit v1.2.3
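
Editor's note on the last two patches: both features are opt-in at build time, so they only change the SP_MIN image when the corresponding options are set. Below is a minimal sketch of an AArch32 build that would exercise both new hunks in sp_min.mk; the platform name and toolchain prefix are placeholders, and TRNG_SUPPORT is only meaningful on a platform that actually implements the TRNG backend.

    # Hypothetical build invocation (placeholders: <platform>, toolchain prefix).
    # DISABLE_MTPMU=1 pulls lib/extensions/mtpmu/aarch32/mtpmu.S into BL32;
    # TRNG_SUPPORT=1 compiles the TRNG SMC dispatcher sources into SP_MIN.
    make CROSS_COMPILE=arm-none-eabi- PLAT=<platform> ARCH=aarch32 \
         AARCH32_SP=sp_min DISABLE_MTPMU=1 TRNG_SUPPORT=1 all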